Diffstat (limited to 'library/std')
-rw-r--r--library/std/Cargo.toml86
-rw-r--r--library/std/benches/hash/map.rs103
-rw-r--r--library/std/benches/hash/mod.rs2
-rw-r--r--library/std/benches/hash/set_ops.rs42
-rw-r--r--library/std/benches/lib.rs5
-rw-r--r--library/std/build.rs52
-rw-r--r--library/std/primitive_docs/box_into_raw.md1
-rw-r--r--library/std/primitive_docs/fs_file.md1
-rw-r--r--library/std/primitive_docs/io_bufread.md1
-rw-r--r--library/std/primitive_docs/io_read.md1
-rw-r--r--library/std/primitive_docs/io_seek.md1
-rw-r--r--library/std/primitive_docs/io_write.md1
-rw-r--r--library/std/primitive_docs/net_tosocketaddrs.md1
-rw-r--r--library/std/primitive_docs/process_exit.md1
-rw-r--r--library/std/primitive_docs/string_string.md1
-rw-r--r--library/std/src/alloc.rs413
-rw-r--r--library/std/src/ascii.rs208
-rw-r--r--library/std/src/backtrace.rs499
-rw-r--r--library/std/src/backtrace/tests.rs95
-rw-r--r--library/std/src/collections/hash/map.rs3276
-rw-r--r--library/std/src/collections/hash/map/tests.rs1113
-rw-r--r--library/std/src/collections/hash/mod.rs4
-rw-r--r--library/std/src/collections/hash/set.rs1844
-rw-r--r--library/std/src/collections/hash/set/tests.rs498
-rw-r--r--library/std/src/collections/mod.rs446
-rw-r--r--library/std/src/env.rs982
-rw-r--r--library/std/src/env/tests.rs102
-rw-r--r--library/std/src/error.rs1746
-rw-r--r--library/std/src/error/tests.rs443
-rw-r--r--library/std/src/f32.rs923
-rw-r--r--library/std/src/f32/tests.rs771
-rw-r--r--library/std/src/f64.rs949
-rw-r--r--library/std/src/f64/tests.rs755
-rw-r--r--library/std/src/ffi/mod.rs174
-rw-r--r--library/std/src/ffi/os_str.rs1447
-rw-r--r--library/std/src/ffi/os_str/tests.rs179
-rw-r--r--library/std/src/fs.rs2428
-rw-r--r--library/std/src/fs/tests.rs1553
-rw-r--r--library/std/src/io/buffered/bufreader.rs496
-rw-r--r--library/std/src/io/buffered/bufreader/buffer.rs105
-rw-r--r--library/std/src/io/buffered/bufwriter.rs674
-rw-r--r--library/std/src/io/buffered/linewriter.rs232
-rw-r--r--library/std/src/io/buffered/linewritershim.rs276
-rw-r--r--library/std/src/io/buffered/mod.rs196
-rw-r--r--library/std/src/io/buffered/tests.rs1039
-rw-r--r--library/std/src/io/copy.rs161
-rw-r--r--library/std/src/io/cursor.rs640
-rw-r--r--library/std/src/io/cursor/tests.rs567
-rw-r--r--library/std/src/io/error.rs960
-rw-r--r--library/std/src/io/error/repr_bitpacked.rs409
-rw-r--r--library/std/src/io/error/repr_unpacked.rs54
-rw-r--r--library/std/src/io/error/tests.rs194
-rw-r--r--library/std/src/io/impls.rs458
-rw-r--r--library/std/src/io/impls/tests.rs57
-rw-r--r--library/std/src/io/mod.rs2827
-rw-r--r--library/std/src/io/prelude.rs14
-rw-r--r--library/std/src/io/readbuf.rs249
-rw-r--r--library/std/src/io/readbuf/tests.rs181
-rw-r--r--library/std/src/io/stdio.rs1042
-rw-r--r--library/std/src/io/stdio/tests.rs166
-rw-r--r--library/std/src/io/tests.rs623
-rw-r--r--library/std/src/io/util.rs270
-rw-r--r--library/std/src/io/util/tests.rs147
-rw-r--r--library/std/src/keyword_docs.rs2362
-rw-r--r--library/std/src/lazy.rs1
-rw-r--r--library/std/src/lib.rs633
-rw-r--r--library/std/src/macros.rs333
-rw-r--r--library/std/src/net/addr.rs988
-rw-r--r--library/std/src/net/addr/tests.rs237
-rw-r--r--library/std/src/net/ip.rs2040
-rw-r--r--library/std/src/net/ip/tests.rs969
-rw-r--r--library/std/src/net/mod.rs90
-rw-r--r--library/std/src/net/parser.rs388
-rw-r--r--library/std/src/net/parser/tests.rs149
-rw-r--r--library/std/src/net/tcp.rs1050
-rw-r--r--library/std/src/net/tcp/tests.rs876
-rw-r--r--library/std/src/net/test.rs60
-rw-r--r--library/std/src/net/udp.rs813
-rw-r--r--library/std/src/net/udp/tests.rs365
-rw-r--r--library/std/src/num.rs53
-rw-r--r--library/std/src/num/benches.rs9
-rw-r--r--library/std/src/num/tests.rs230
-rw-r--r--library/std/src/os/android/fs.rs117
-rw-r--r--library/std/src/os/android/mod.rs6
-rw-r--r--library/std/src/os/android/raw.rs219
-rw-r--r--library/std/src/os/dragonfly/fs.rs132
-rw-r--r--library/std/src/os/dragonfly/mod.rs6
-rw-r--r--library/std/src/os/dragonfly/raw.rs83
-rw-r--r--library/std/src/os/emscripten/fs.rs117
-rw-r--r--library/std/src/os/emscripten/mod.rs6
-rw-r--r--library/std/src/os/emscripten/raw.rs80
-rw-r--r--library/std/src/os/espidf/fs.rs117
-rw-r--r--library/std/src/os/espidf/mod.rs6
-rw-r--r--library/std/src/os/espidf/raw.rs69
-rw-r--r--library/std/src/os/fd/mod.rs16
-rw-r--r--library/std/src/os/fd/net.rs46
-rw-r--r--library/std/src/os/fd/owned.rs388
-rw-r--r--library/std/src/os/fd/raw.rs259
-rw-r--r--library/std/src/os/fd/tests.rs53
-rw-r--r--library/std/src/os/fortanix_sgx/arch.rs80
-rw-r--r--library/std/src/os/fortanix_sgx/ffi.rs41
-rw-r--r--library/std/src/os/fortanix_sgx/io.rs144
-rw-r--r--library/std/src/os/fortanix_sgx/mod.rs53
-rw-r--r--library/std/src/os/freebsd/fs.rs154
-rw-r--r--library/std/src/os/freebsd/mod.rs6
-rw-r--r--library/std/src/os/freebsd/raw.rs86
-rw-r--r--library/std/src/os/fuchsia/fs.rs95
-rw-r--r--library/std/src/os/fuchsia/mod.rs6
-rw-r--r--library/std/src/os/fuchsia/raw.rs293
-rw-r--r--library/std/src/os/haiku/fs.rs127
-rw-r--r--library/std/src/os/haiku/mod.rs6
-rw-r--r--library/std/src/os/haiku/raw.rs79
-rw-r--r--library/std/src/os/hermit/ffi.rs41
-rw-r--r--library/std/src/os/hermit/mod.rs13
-rw-r--r--library/std/src/os/horizon/fs.rs95
-rw-r--r--library/std/src/os/horizon/mod.rs6
-rw-r--r--library/std/src/os/horizon/raw.rs70
-rw-r--r--library/std/src/os/illumos/fs.rs116
-rw-r--r--library/std/src/os/illumos/mod.rs6
-rw-r--r--library/std/src/os/illumos/raw.rs74
-rw-r--r--library/std/src/os/ios/fs.rs142
-rw-r--r--library/std/src/os/ios/mod.rs6
-rw-r--r--library/std/src/os/ios/raw.rs83
-rw-r--r--library/std/src/os/l4re/fs.rs382
-rw-r--r--library/std/src/os/l4re/mod.rs7
-rw-r--r--library/std/src/os/l4re/raw.rs365
-rw-r--r--library/std/src/os/linux/fs.rs397
-rw-r--r--library/std/src/os/linux/mod.rs8
-rw-r--r--library/std/src/os/linux/process.rs165
-rw-r--r--library/std/src/os/linux/raw.rs366
-rw-r--r--library/std/src/os/macos/fs.rs148
-rw-r--r--library/std/src/os/macos/mod.rs6
-rw-r--r--library/std/src/os/macos/raw.rs83
-rw-r--r--library/std/src/os/mod.rs150
-rw-r--r--library/std/src/os/netbsd/fs.rs137
-rw-r--r--library/std/src/os/netbsd/mod.rs6
-rw-r--r--library/std/src/os/netbsd/raw.rs83
-rw-r--r--library/std/src/os/openbsd/fs.rs137
-rw-r--r--library/std/src/os/openbsd/mod.rs6
-rw-r--r--library/std/src/os/openbsd/raw.rs81
-rw-r--r--library/std/src/os/raw/mod.rs31
-rw-r--r--library/std/src/os/raw/tests.rs15
-rw-r--r--library/std/src/os/redox/fs.rs382
-rw-r--r--library/std/src/os/redox/mod.rs6
-rw-r--r--library/std/src/os/redox/raw.rs78
-rw-r--r--library/std/src/os/solaris/fs.rs117
-rw-r--r--library/std/src/os/solaris/mod.rs6
-rw-r--r--library/std/src/os/solaris/raw.rs76
-rw-r--r--library/std/src/os/solid/ffi.rs41
-rw-r--r--library/std/src/os/solid/io.rs113
-rw-r--r--library/std/src/os/solid/mod.rs17
-rw-r--r--library/std/src/os/unix/ffi/mod.rs42
-rw-r--r--library/std/src/os/unix/ffi/os_str.rs70
-rw-r--r--library/std/src/os/unix/fs.rs1022
-rw-r--r--library/std/src/os/unix/io/fd.rs8
-rw-r--r--library/std/src/os/unix/io/fd/tests.rs11
-rw-r--r--library/std/src/os/unix/io/mod.rs86
-rw-r--r--library/std/src/os/unix/io/raw.rs6
-rw-r--r--library/std/src/os/unix/mod.rs126
-rw-r--r--library/std/src/os/unix/net/addr.rs350
-rw-r--r--library/std/src/os/unix/net/ancillary.rs674
-rw-r--r--library/std/src/os/unix/net/datagram.rs987
-rw-r--r--library/std/src/os/unix/net/listener.rs385
-rw-r--r--library/std/src/os/unix/net/mod.rs26
-rw-r--r--library/std/src/os/unix/net/stream.rs711
-rw-r--r--library/std/src/os/unix/net/tests.rs753
-rw-r--r--library/std/src/os/unix/process.rs466
-rw-r--r--library/std/src/os/unix/raw.rs33
-rw-r--r--library/std/src/os/unix/thread.rs41
-rw-r--r--library/std/src/os/unix/ucred.rs136
-rw-r--r--library/std/src/os/unix/ucred/tests.rs39
-rw-r--r--library/std/src/os/vxworks/fs.rs99
-rw-r--r--library/std/src/os/vxworks/mod.rs6
-rw-r--r--library/std/src/os/vxworks/raw.rs10
-rw-r--r--library/std/src/os/wasi/ffi.rs11
-rw-r--r--library/std/src/os/wasi/fs.rs558
-rw-r--r--library/std/src/os/wasi/io/fd.rs9
-rw-r--r--library/std/src/os/wasi/io/fd/tests.rs11
-rw-r--r--library/std/src/os/wasi/io/mod.rs12
-rw-r--r--library/std/src/os/wasi/io/raw.rs20
-rw-r--r--library/std/src/os/wasi/mod.rs57
-rw-r--r--library/std/src/os/wasi/net/mod.rs23
-rw-r--r--library/std/src/os/windows/ffi.rs136
-rw-r--r--library/std/src/os/windows/fs.rs605
-rw-r--r--library/std/src/os/windows/io/handle.rs576
-rw-r--r--library/std/src/os/windows/io/mod.rs65
-rw-r--r--library/std/src/os/windows/io/raw.rs305
-rw-r--r--library/std/src/os/windows/io/socket.rs338
-rw-r--r--library/std/src/os/windows/io/tests.rs21
-rw-r--r--library/std/src/os/windows/mod.rs58
-rw-r--r--library/std/src/os/windows/process.rs259
-rw-r--r--library/std/src/os/windows/raw.rs16
-rw-r--r--library/std/src/os/windows/thread.rs25
-rw-r--r--library/std/src/panic.rs320
-rw-r--r--library/std/src/panic/tests.rs56
-rw-r--r--library/std/src/panicking.rs749
-rw-r--r--library/std/src/path.rs3259
-rw-r--r--library/std/src/path/tests.rs1873
-rw-r--r--library/std/src/prelude/mod.rs148
-rw-r--r--library/std/src/prelude/v1.rs97
-rw-r--r--library/std/src/primitive_docs.rs1508
-rw-r--r--library/std/src/process.rs2210
-rw-r--r--library/std/src/process/tests.rs458
-rw-r--r--library/std/src/rt.rs150
-rw-r--r--library/std/src/sync/barrier.rs174
-rw-r--r--library/std/src/sync/barrier/tests.rs35
-rw-r--r--library/std/src/sync/condvar.rs564
-rw-r--r--library/std/src/sync/condvar/tests.rs190
-rw-r--r--library/std/src/sync/lazy_lock.rs121
-rw-r--r--library/std/src/sync/lazy_lock/tests.rs143
-rw-r--r--library/std/src/sync/mod.rs189
-rw-r--r--library/std/src/sync/mpsc/blocking.rs82
-rw-r--r--library/std/src/sync/mpsc/cache_aligned.rs25
-rw-r--r--library/std/src/sync/mpsc/mod.rs1669
-rw-r--r--library/std/src/sync/mpsc/mpsc_queue.rs117
-rw-r--r--library/std/src/sync/mpsc/mpsc_queue/tests.rs47
-rw-r--r--library/std/src/sync/mpsc/oneshot.rs315
-rw-r--r--library/std/src/sync/mpsc/shared.rs501
-rw-r--r--library/std/src/sync/mpsc/spsc_queue.rs236
-rw-r--r--library/std/src/sync/mpsc/spsc_queue/tests.rs101
-rw-r--r--library/std/src/sync/mpsc/stream.rs457
-rw-r--r--library/std/src/sync/mpsc/sync.rs495
-rw-r--r--library/std/src/sync/mpsc/sync_tests.rs647
-rw-r--r--library/std/src/sync/mpsc/tests.rs706
-rw-r--r--library/std/src/sync/mutex.rs553
-rw-r--r--library/std/src/sync/mutex/tests.rs238
-rw-r--r--library/std/src/sync/once.rs580
-rw-r--r--library/std/src/sync/once/tests.rs116
-rw-r--r--library/std/src/sync/once_lock.rs496
-rw-r--r--library/std/src/sync/once_lock/tests.rs203
-rw-r--r--library/std/src/sync/poison.rs272
-rw-r--r--library/std/src/sync/rwlock.rs615
-rw-r--r--library/std/src/sync/rwlock/tests.rs259
-rw-r--r--library/std/src/sys/common/alloc.rs54
-rw-r--r--library/std/src/sys/common/mod.rs13
-rw-r--r--library/std/src/sys/hermit/alloc.rs31
-rw-r--r--library/std/src/sys/hermit/args.rs94
-rw-r--r--library/std/src/sys/hermit/condvar.rs90
-rw-r--r--library/std/src/sys/hermit/env.rs9
-rw-r--r--library/std/src/sys/hermit/fd.rs87
-rw-r--r--library/std/src/sys/hermit/fs.rs408
-rw-r--r--library/std/src/sys/hermit/memchr.rs1
-rw-r--r--library/std/src/sys/hermit/mod.rs156
-rw-r--r--library/std/src/sys/hermit/mutex.rs216
-rw-r--r--library/std/src/sys/hermit/net.rs492
-rw-r--r--library/std/src/sys/hermit/os.rs178
-rw-r--r--library/std/src/sys/hermit/rwlock.rs144
-rw-r--r--library/std/src/sys/hermit/stdio.rs120
-rw-r--r--library/std/src/sys/hermit/thread.rs112
-rw-r--r--library/std/src/sys/hermit/thread_local_dtor.rs36
-rw-r--r--library/std/src/sys/hermit/time.rs156
-rw-r--r--library/std/src/sys/itron/abi.rs197
-rw-r--r--library/std/src/sys/itron/condvar.rs297
-rw-r--r--library/std/src/sys/itron/error.rs159
-rw-r--r--library/std/src/sys/itron/mutex.rs93
-rw-r--r--library/std/src/sys/itron/spin.rs163
-rw-r--r--library/std/src/sys/itron/task.rs44
-rw-r--r--library/std/src/sys/itron/thread.rs349
-rw-r--r--library/std/src/sys/itron/time.rs114
-rw-r--r--library/std/src/sys/itron/time/tests.rs33
-rw-r--r--library/std/src/sys/itron/wait_flag.rs72
-rw-r--r--library/std/src/sys/mod.rs78
-rw-r--r--library/std/src/sys/sgx/abi/entry.S372
-rw-r--r--library/std/src/sys/sgx/abi/mem.rs93
-rw-r--r--library/std/src/sys/sgx/abi/mod.rs108
-rw-r--r--library/std/src/sys/sgx/abi/panic.rs42
-rw-r--r--library/std/src/sys/sgx/abi/reloc.rs32
-rw-r--r--library/std/src/sys/sgx/abi/thread.rs13
-rw-r--r--library/std/src/sys/sgx/abi/tls/mod.rs132
-rw-r--r--library/std/src/sys/sgx/abi/tls/sync_bitset.rs85
-rw-r--r--library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs25
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/alloc.rs732
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/mod.rs323
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/raw.rs251
-rw-r--r--library/std/src/sys/sgx/abi/usercalls/tests.rs30
-rw-r--r--library/std/src/sys/sgx/alloc.rs98
-rw-r--r--library/std/src/sys/sgx/args.rs59
-rw-r--r--library/std/src/sys/sgx/condvar.rs45
-rw-r--r--library/std/src/sys/sgx/env.rs9
-rw-r--r--library/std/src/sys/sgx/fd.rs84
-rw-r--r--library/std/src/sys/sgx/memchr.rs1
-rw-r--r--library/std/src/sys/sgx/mod.rs167
-rw-r--r--library/std/src/sys/sgx/mutex.rs62
-rw-r--r--library/std/src/sys/sgx/net.rs541
-rw-r--r--library/std/src/sys/sgx/os.rs140
-rw-r--r--library/std/src/sys/sgx/path.rs25
-rw-r--r--library/std/src/sys/sgx/rwlock.rs212
-rw-r--r--library/std/src/sys/sgx/rwlock/tests.rs31
-rw-r--r--library/std/src/sys/sgx/stdio.rs88
-rw-r--r--library/std/src/sys/sgx/thread.rs152
-rw-r--r--library/std/src/sys/sgx/thread_local_key.rs28
-rw-r--r--library/std/src/sys/sgx/time.rs46
-rw-r--r--library/std/src/sys/sgx/waitqueue/mod.rs240
-rw-r--r--library/std/src/sys/sgx/waitqueue/spin_mutex.rs80
-rw-r--r--library/std/src/sys/sgx/waitqueue/spin_mutex/tests.rs23
-rw-r--r--library/std/src/sys/sgx/waitqueue/tests.rs20
-rw-r--r--library/std/src/sys/sgx/waitqueue/unsafe_list.rs156
-rw-r--r--library/std/src/sys/sgx/waitqueue/unsafe_list/tests.rs105
-rw-r--r--library/std/src/sys/solid/abi/fs.rs53
-rw-r--r--library/std/src/sys/solid/abi/mod.rs65
-rw-r--r--library/std/src/sys/solid/abi/sockets.rs277
-rw-r--r--library/std/src/sys/solid/alloc.rs32
-rw-r--r--library/std/src/sys/solid/env.rs9
-rw-r--r--library/std/src/sys/solid/error.rs55
-rw-r--r--library/std/src/sys/solid/fs.rs574
-rw-r--r--library/std/src/sys/solid/io.rs77
-rw-r--r--library/std/src/sys/solid/memchr.rs21
-rw-r--r--library/std/src/sys/solid/mod.rs92
-rw-r--r--library/std/src/sys/solid/net.rs469
-rw-r--r--library/std/src/sys/solid/os.rs193
-rw-r--r--library/std/src/sys/solid/path.rs25
-rw-r--r--library/std/src/sys/solid/rwlock.rs95
-rw-r--r--library/std/src/sys/solid/stdio.rs80
-rw-r--r--library/std/src/sys/solid/thread_local_dtor.rs50
-rw-r--r--library/std/src/sys/solid/thread_local_key.rs26
-rw-r--r--library/std/src/sys/solid/time.rs56
-rw-r--r--library/std/src/sys/unix/alloc.rs101
-rw-r--r--library/std/src/sys/unix/android.rs81
-rw-r--r--library/std/src/sys/unix/args.rs261
-rw-r--r--library/std/src/sys/unix/cmath.rs33
-rw-r--r--library/std/src/sys/unix/env.rs219
-rw-r--r--library/std/src/sys/unix/fd.rs330
-rw-r--r--library/std/src/sys/unix/fd/tests.rs10
-rw-r--r--library/std/src/sys/unix/fs.rs1878
-rw-r--r--library/std/src/sys/unix/futex.rs303
-rw-r--r--library/std/src/sys/unix/io.rs76
-rw-r--r--library/std/src/sys/unix/kernel_copy.rs686
-rw-r--r--library/std/src/sys/unix/kernel_copy/tests.rs270
-rw-r--r--library/std/src/sys/unix/l4re.rs551
-rw-r--r--library/std/src/sys/unix/locks/fuchsia_mutex.rs165
-rw-r--r--library/std/src/sys/unix/locks/futex_condvar.rs58
-rw-r--r--library/std/src/sys/unix/locks/futex_mutex.rs101
-rw-r--r--library/std/src/sys/unix/locks/futex_rwlock.rs322
-rw-r--r--library/std/src/sys/unix/locks/mod.rs31
-rw-r--r--library/std/src/sys/unix/locks/pthread_condvar.rs222
-rw-r--r--library/std/src/sys/unix/locks/pthread_mutex.rs135
-rw-r--r--library/std/src/sys/unix/locks/pthread_rwlock.rs173
-rw-r--r--library/std/src/sys/unix/memchr.rs40
-rw-r--r--library/std/src/sys/unix/mod.rs361
-rw-r--r--library/std/src/sys/unix/net.rs512
-rw-r--r--library/std/src/sys/unix/os.rs680
-rw-r--r--library/std/src/sys/unix/os/tests.rs23
-rw-r--r--library/std/src/sys/unix/os_str.rs266
-rw-r--r--library/std/src/sys/unix/os_str/tests.rs10
-rw-r--r--library/std/src/sys/unix/path.rs63
-rw-r--r--library/std/src/sys/unix/pipe.rs151
-rw-r--r--library/std/src/sys/unix/process/mod.rs24
-rw-r--r--library/std/src/sys/unix/process/process_common.rs523
-rw-r--r--library/std/src/sys/unix/process/process_common/tests.rs124
-rw-r--r--library/std/src/sys/unix/process/process_fuchsia.rs327
-rw-r--r--library/std/src/sys/unix/process/process_unix.rs836
-rw-r--r--library/std/src/sys/unix/process/process_unix/tests.rs62
-rw-r--r--library/std/src/sys/unix/process/process_unsupported.rs118
-rw-r--r--library/std/src/sys/unix/process/process_vxworks.rs262
-rw-r--r--library/std/src/sys/unix/process/zircon.rs309
-rw-r--r--library/std/src/sys/unix/rand.rs301
-rw-r--r--library/std/src/sys/unix/stack_overflow.rs208
-rw-r--r--library/std/src/sys/unix/stdio.rs141
-rw-r--r--library/std/src/sys/unix/thread.rs889
-rw-r--r--library/std/src/sys/unix/thread_local_dtor.rs100
-rw-r--r--library/std/src/sys/unix/thread_local_key.rs34
-rw-r--r--library/std/src/sys/unix/thread_parker.rs281
-rw-r--r--library/std/src/sys/unix/time.rs346
-rw-r--r--library/std/src/sys/unix/weak.rs205
-rw-r--r--library/std/src/sys/unsupported/alloc.rs22
-rw-r--r--library/std/src/sys/unsupported/args.rs36
-rw-r--r--library/std/src/sys/unsupported/common.rs36
-rw-r--r--library/std/src/sys/unsupported/env.rs9
-rw-r--r--library/std/src/sys/unsupported/fs.rs324
-rw-r--r--library/std/src/sys/unsupported/io.rs47
-rw-r--r--library/std/src/sys/unsupported/locks/condvar.rs27
-rw-r--r--library/std/src/sys/unsupported/locks/mod.rs6
-rw-r--r--library/std/src/sys/unsupported/locks/mutex.rs36
-rw-r--r--library/std/src/sys/unsupported/locks/rwlock.rs66
-rw-r--r--library/std/src/sys/unsupported/mod.rs27
-rw-r--r--library/std/src/sys/unsupported/net.rs366
-rw-r--r--library/std/src/sys/unsupported/os.rs105
-rw-r--r--library/std/src/sys/unsupported/pipe.rs37
-rw-r--r--library/std/src/sys/unsupported/process.rs211
-rw-r--r--library/std/src/sys/unsupported/stdio.rs59
-rw-r--r--library/std/src/sys/unsupported/thread.rs46
-rw-r--r--library/std/src/sys/unsupported/thread_local_dtor.rs9
-rw-r--r--library/std/src/sys/unsupported/thread_local_key.rs26
-rw-r--r--library/std/src/sys/unsupported/time.rs45
-rw-r--r--library/std/src/sys/wasi/args.rs62
-rw-r--r--library/std/src/sys/wasi/env.rs9
-rw-r--r--library/std/src/sys/wasi/fd.rs307
-rw-r--r--library/std/src/sys/wasi/fs.rs798
-rw-r--r--library/std/src/sys/wasi/io.rs73
-rw-r--r--library/std/src/sys/wasi/mod.rs100
-rw-r--r--library/std/src/sys/wasi/net.rs527
-rw-r--r--library/std/src/sys/wasi/os.rs243
-rw-r--r--library/std/src/sys/wasi/stdio.rs112
-rw-r--r--library/std/src/sys/wasi/thread.rs81
-rw-r--r--library/std/src/sys/wasi/time.rs65
-rw-r--r--library/std/src/sys/wasm/alloc.rs166
-rw-r--r--library/std/src/sys/wasm/atomics/futex.rs34
-rw-r--r--library/std/src/sys/wasm/atomics/thread.rs55
-rw-r--r--library/std/src/sys/wasm/env.rs9
-rw-r--r--library/std/src/sys/wasm/mod.rs77
-rw-r--r--library/std/src/sys/windows/alloc.rs246
-rw-r--r--library/std/src/sys/windows/alloc/tests.rs9
-rw-r--r--library/std/src/sys/windows/args.rs406
-rw-r--r--library/std/src/sys/windows/args/tests.rs91
-rw-r--r--library/std/src/sys/windows/c.rs1340
-rw-r--r--library/std/src/sys/windows/c/errors.rs1883
-rw-r--r--library/std/src/sys/windows/cmath.rs92
-rw-r--r--library/std/src/sys/windows/compat.rs273
-rw-r--r--library/std/src/sys/windows/env.rs9
-rw-r--r--library/std/src/sys/windows/fs.rs1399
-rw-r--r--library/std/src/sys/windows/handle.rs335
-rw-r--r--library/std/src/sys/windows/handle/tests.rs22
-rw-r--r--library/std/src/sys/windows/io.rs80
-rw-r--r--library/std/src/sys/windows/locks/condvar.rs52
-rw-r--r--library/std/src/sys/windows/locks/mod.rs6
-rw-r--r--library/std/src/sys/windows/locks/mutex.rs57
-rw-r--r--library/std/src/sys/windows/locks/rwlock.rs42
-rw-r--r--library/std/src/sys/windows/memchr.rs5
-rw-r--r--library/std/src/sys/windows/mod.rs323
-rw-r--r--library/std/src/sys/windows/net.rs476
-rw-r--r--library/std/src/sys/windows/os.rs328
-rw-r--r--library/std/src/sys/windows/os/tests.rs13
-rw-r--r--library/std/src/sys/windows/os_str.rs226
-rw-r--r--library/std/src/sys/windows/path.rs333
-rw-r--r--library/std/src/sys/windows/path/tests.rs137
-rw-r--r--library/std/src/sys/windows/pipe.rs538
-rw-r--r--library/std/src/sys/windows/process.rs847
-rw-r--r--library/std/src/sys/windows/process/tests.rs222
-rw-r--r--library/std/src/sys/windows/rand.rs35
-rw-r--r--library/std/src/sys/windows/stack_overflow.rs42
-rw-r--r--library/std/src/sys/windows/stack_overflow_uwp.rs11
-rw-r--r--library/std/src/sys/windows/stdio.rs422
-rw-r--r--library/std/src/sys/windows/stdio_uwp.rs87
-rw-r--r--library/std/src/sys/windows/thread.rs125
-rw-r--r--library/std/src/sys/windows/thread_local_dtor.rs28
-rw-r--r--library/std/src/sys/windows/thread_local_key.rs238
-rw-r--r--library/std/src/sys/windows/thread_parker.rs255
-rw-r--r--library/std/src/sys/windows/time.rs224
-rw-r--r--library/std/src/sys_common/backtrace.rs183
-rw-r--r--library/std/src/sys_common/condvar.rs56
-rw-r--r--library/std/src/sys_common/condvar/check.rs57
-rw-r--r--library/std/src/sys_common/fs.rs51
-rw-r--r--library/std/src/sys_common/io.rs49
-rw-r--r--library/std/src/sys_common/lazy_box.rs90
-rw-r--r--library/std/src/sys_common/memchr.rs51
-rw-r--r--library/std/src/sys_common/memchr/tests.rs86
-rw-r--r--library/std/src/sys_common/mod.rs89
-rw-r--r--library/std/src/sys_common/mutex.rs93
-rw-r--r--library/std/src/sys_common/net.rs737
-rw-r--r--library/std/src/sys_common/net/tests.rs19
-rw-r--r--library/std/src/sys_common/process.rs119
-rw-r--r--library/std/src/sys_common/remutex.rs200
-rw-r--r--library/std/src/sys_common/remutex/tests.rs77
-rw-r--r--library/std/src/sys_common/rwlock.rs130
-rw-r--r--library/std/src/sys_common/tests.rs6
-rw-r--r--library/std/src/sys_common/thread.rs18
-rw-r--r--library/std/src/sys_common/thread_info.rs47
-rw-r--r--library/std/src/sys_common/thread_local_dtor.rs49
-rw-r--r--library/std/src/sys_common/thread_local_key.rs237
-rw-r--r--library/std/src/sys_common/thread_local_key/tests.rs34
-rw-r--r--library/std/src/sys_common/thread_parker/futex.rs97
-rw-r--r--library/std/src/sys_common/thread_parker/generic.rs125
-rw-r--r--library/std/src/sys_common/thread_parker/mod.rs22
-rw-r--r--library/std/src/sys_common/thread_parker/wait_flag.rs102
-rw-r--r--library/std/src/sys_common/wtf8.rs926
-rw-r--r--library/std/src/sys_common/wtf8/tests.rs409
-rw-r--r--library/std/src/thread/local.rs1141
-rw-r--r--library/std/src/thread/local/dynamic_tests.rs40
-rw-r--r--library/std/src/thread/local/tests.rs317
-rw-r--r--library/std/src/thread/mod.rs1621
-rw-r--r--library/std/src/thread/scoped.rs343
-rw-r--r--library/std/src/thread/tests.rs331
-rw-r--r--library/std/src/time.rs694
-rw-r--r--library/std/src/time/tests.rs236
-rw-r--r--library/std/tests/env.rs140
-rw-r--r--library/std/tests/run-time-detect.rs153
-rw-r--r--library/std/tests/thread.rs16
477 files changed, 130750 insertions, 0 deletions
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
new file mode 100644
index 000000000..229e546e0
--- /dev/null
+++ b/library/std/Cargo.toml
@@ -0,0 +1,86 @@
+[package]
+name = "std"
+version = "0.0.0"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/rust.git"
+description = "The Rust Standard Library"
+edition = "2021"
+
+[lib]
+crate-type = ["dylib", "rlib"]
+
+[dependencies]
+alloc = { path = "../alloc" }
+cfg-if = { version = "0.1.8", features = ['rustc-dep-of-std'] }
+panic_unwind = { path = "../panic_unwind", optional = true }
+panic_abort = { path = "../panic_abort" }
+core = { path = "../core" }
+libc = { version = "0.2.126", default-features = false, features = ['rustc-dep-of-std'] }
+compiler_builtins = { version = "0.1.73" }
+profiler_builtins = { path = "../profiler_builtins", optional = true }
+unwind = { path = "../unwind" }
+hashbrown = { version = "0.12", default-features = false, features = ['rustc-dep-of-std'] }
+std_detect = { path = "../stdarch/crates/std_detect", default-features = false, features = ['rustc-dep-of-std'] }
+
+# Dependencies of the `backtrace` crate
+addr2line = { version = "0.16.0", optional = true, default-features = false }
+rustc-demangle = { version = "0.1.21", features = ['rustc-dep-of-std'] }
+miniz_oxide = { version = "0.4.0", optional = true, default-features = false }
+[dependencies.object]
+version = "0.26.1"
+optional = true
+default-features = false
+features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
+
+[dev-dependencies]
+rand = "0.7"
+
+[target.'cfg(any(all(target_family = "wasm", not(target_os = "emscripten")), all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
+dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] }
+
+[target.x86_64-fortanix-unknown-sgx.dependencies]
+fortanix-sgx-abi = { version = "0.3.2", features = ['rustc-dep-of-std'] }
+
+[target.'cfg(target_os = "hermit")'.dependencies]
+hermit-abi = { version = "0.2.0", features = ['rustc-dep-of-std'] }
+
+[target.wasm32-wasi.dependencies]
+wasi = { version = "0.11.0", features = ['rustc-dep-of-std'], default-features = false }
+
+[features]
+backtrace = [
+ "gimli-symbolize",
+ 'addr2line/rustc-dep-of-std',
+ 'object/rustc-dep-of-std',
+ 'miniz_oxide/rustc-dep-of-std',
+]
+gimli-symbolize = []
+
+panic-unwind = ["panic_unwind"]
+profiler = ["profiler_builtins"]
+compiler-builtins-c = ["alloc/compiler-builtins-c"]
+compiler-builtins-mem = ["alloc/compiler-builtins-mem"]
+compiler-builtins-no-asm = ["alloc/compiler-builtins-no-asm"]
+compiler-builtins-mangled-names = ["alloc/compiler-builtins-mangled-names"]
+llvm-libunwind = ["unwind/llvm-libunwind"]
+system-llvm-libunwind = ["unwind/system-llvm-libunwind"]
+
+# Make panics and failed asserts immediately abort without formatting any message
+panic_immediate_abort = ["core/panic_immediate_abort"]
+
+# Enable std_detect default features for stdarch/crates/std_detect:
+# https://github.com/rust-lang/stdarch/blob/master/crates/std_detect/Cargo.toml
+std_detect_file_io = ["std_detect/std_detect_file_io"]
+std_detect_dlsym_getauxval = ["std_detect/std_detect_dlsym_getauxval"]
+std_detect_env_override = ["std_detect/std_detect_env_override"]
+
+[package.metadata.fortanix-sgx]
+# Maximum possible number of threads when testing
+threads = 125
+# Maximum heap size
+heap_size = 0x8000000
+
+[[bench]]
+name = "stdbenches"
+path = "benches/lib.rs"
+test = true
diff --git a/library/std/benches/hash/map.rs b/library/std/benches/hash/map.rs
new file mode 100644
index 000000000..bf646cbae
--- /dev/null
+++ b/library/std/benches/hash/map.rs
@@ -0,0 +1,103 @@
+#![cfg(test)]
+
+use std::collections::HashMap;
+use test::Bencher;
+
+#[bench]
+fn new_drop(b: &mut Bencher) {
+ b.iter(|| {
+ let m: HashMap<i32, i32> = HashMap::new();
+ assert_eq!(m.len(), 0);
+ })
+}
+
+#[bench]
+fn new_insert_drop(b: &mut Bencher) {
+ b.iter(|| {
+ let mut m = HashMap::new();
+ m.insert(0, 0);
+ assert_eq!(m.len(), 1);
+ })
+}
+
+#[bench]
+fn grow_by_insertion(b: &mut Bencher) {
+ let mut m = HashMap::new();
+
+ for i in 1..1001 {
+ m.insert(i, i);
+ }
+
+ let mut k = 1001;
+
+ b.iter(|| {
+ m.insert(k, k);
+ k += 1;
+ });
+}
+
+#[bench]
+fn find_existing(b: &mut Bencher) {
+ let mut m = HashMap::new();
+
+ for i in 1..1001 {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in 1..1001 {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn find_nonexisting(b: &mut Bencher) {
+ let mut m = HashMap::new();
+
+ for i in 1..1001 {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in 1001..2001 {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn hashmap_as_queue(b: &mut Bencher) {
+ let mut m = HashMap::new();
+
+ for i in 1..1001 {
+ m.insert(i, i);
+ }
+
+ let mut k = 1;
+
+ b.iter(|| {
+ m.remove(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ });
+}
+
+#[bench]
+fn get_remove_insert(b: &mut Bencher) {
+ let mut m = HashMap::new();
+
+ for i in 1..1001 {
+ m.insert(i, i);
+ }
+
+ let mut k = 1;
+
+ b.iter(|| {
+ m.get(&(k + 400));
+ m.get(&(k + 2000));
+ m.remove(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ })
+}
diff --git a/library/std/benches/hash/mod.rs b/library/std/benches/hash/mod.rs
new file mode 100644
index 000000000..42401a21b
--- /dev/null
+++ b/library/std/benches/hash/mod.rs
@@ -0,0 +1,2 @@
+mod map;
+mod set_ops;
diff --git a/library/std/benches/hash/set_ops.rs b/library/std/benches/hash/set_ops.rs
new file mode 100644
index 000000000..1a4c4a66e
--- /dev/null
+++ b/library/std/benches/hash/set_ops.rs
@@ -0,0 +1,42 @@
+use std::collections::HashSet;
+use test::Bencher;
+
+#[bench]
+fn set_difference(b: &mut Bencher) {
+ let small: HashSet<_> = (0..10).collect();
+ let large: HashSet<_> = (0..100).collect();
+
+ b.iter(|| small.difference(&large).count());
+}
+
+#[bench]
+fn set_is_subset(b: &mut Bencher) {
+ let small: HashSet<_> = (0..10).collect();
+ let large: HashSet<_> = (0..100).collect();
+
+ b.iter(|| small.is_subset(&large));
+}
+
+#[bench]
+fn set_intersection(b: &mut Bencher) {
+ let small: HashSet<_> = (0..10).collect();
+ let large: HashSet<_> = (0..100).collect();
+
+ b.iter(|| small.intersection(&large).count());
+}
+
+#[bench]
+fn set_symmetric_difference(b: &mut Bencher) {
+ let small: HashSet<_> = (0..10).collect();
+ let large: HashSet<_> = (0..100).collect();
+
+ b.iter(|| small.symmetric_difference(&large).count());
+}
+
+#[bench]
+fn set_union(b: &mut Bencher) {
+ let small: HashSet<_> = (0..10).collect();
+ let large: HashSet<_> = (0..100).collect();
+
+ b.iter(|| small.union(&large).count());
+}
diff --git a/library/std/benches/lib.rs b/library/std/benches/lib.rs
new file mode 100644
index 000000000..4d1cf7fab
--- /dev/null
+++ b/library/std/benches/lib.rs
@@ -0,0 +1,5 @@
+#![feature(test)]
+
+extern crate test;
+
+mod hash;
diff --git a/library/std/build.rs b/library/std/build.rs
new file mode 100644
index 000000000..8b1a06ee7
--- /dev/null
+++ b/library/std/build.rs
@@ -0,0 +1,52 @@
+use std::env;
+
+fn main() {
+ println!("cargo:rerun-if-changed=build.rs");
+ let target = env::var("TARGET").expect("TARGET was not set");
+ if target.contains("freebsd") {
+ if env::var("RUST_STD_FREEBSD_12_ABI").is_ok() {
+ println!("cargo:rustc-cfg=freebsd12");
+ }
+ } else if target.contains("linux")
+ || target.contains("netbsd")
+ || target.contains("dragonfly")
+ || target.contains("openbsd")
+ || target.contains("solaris")
+ || target.contains("illumos")
+ || target.contains("apple-darwin")
+ || target.contains("apple-ios")
+ || target.contains("apple-watchos")
+ || target.contains("uwp")
+ || target.contains("windows")
+ || target.contains("fuchsia")
+ || (target.contains("sgx") && target.contains("fortanix"))
+ || target.contains("hermit")
+ || target.contains("l4re")
+ || target.contains("redox")
+ || target.contains("haiku")
+ || target.contains("vxworks")
+ || target.contains("wasm32")
+ || target.contains("wasm64")
+ || target.contains("asmjs")
+ || target.contains("espidf")
+ || target.contains("solid")
+ || target.contains("nintendo-3ds")
+ {
+ // These platforms don't have any special requirements.
+ } else {
+ // This is for Cargo's build-std support, to mark std as unstable for
+ // typically no_std platforms.
+ // This covers:
+ // - os=none ("bare metal" targets)
+ // - mipsel-sony-psp
+ // - nvptx64-nvidia-cuda
+ // - arch=avr
+ // - tvos (aarch64-apple-tvos, x86_64-apple-tvos)
+ // - uefi (x86_64-unknown-uefi, i686-unknown-uefi)
+ // - JSON targets
+ // - Any new targets that have not been explicitly added above.
+ println!("cargo:rustc-cfg=feature=\"restricted-std\"");
+ }
+ println!("cargo:rustc-env=STD_ENV_ARCH={}", env::var("CARGO_CFG_TARGET_ARCH").unwrap());
+ println!("cargo:rustc-cfg=backtrace_in_libstd");
+}
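
Editor's note: the build script above talks to the rest of the crate only through `cargo:rustc-cfg` and `cargo:rustc-env` lines. The sketch below is not part of the patch; it is a minimal, hypothetical consumer showing how such build-script outputs are typically read from Rust code (the function names are illustrative, and `env!` only compiles when the build script actually emitted the variable).

```rust
// Hypothetical consumer of the cfgs/env emitted by the build script above
// (illustrative only; not taken from libstd).

// `cargo:rustc-cfg=backtrace_in_libstd` makes this custom cfg available.
#[cfg(backtrace_in_libstd)]
fn backtrace_support_note() -> &'static str {
    "backtrace machinery is compiled as part of libstd"
}

fn target_arch_from_build_script() -> &'static str {
    // `cargo:rustc-env=STD_ENV_ARCH=...` makes the value visible to `env!`;
    // this fails to compile if the build script did not set it.
    env!("STD_ENV_ARCH")
}
```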
diff --git a/library/std/primitive_docs/box_into_raw.md b/library/std/primitive_docs/box_into_raw.md
new file mode 100644
index 000000000..307b9c85b
--- /dev/null
+++ b/library/std/primitive_docs/box_into_raw.md
@@ -0,0 +1 @@
+Box::into_raw
diff --git a/library/std/primitive_docs/fs_file.md b/library/std/primitive_docs/fs_file.md
new file mode 100644
index 000000000..13e454083
--- /dev/null
+++ b/library/std/primitive_docs/fs_file.md
@@ -0,0 +1 @@
+fs::File
diff --git a/library/std/primitive_docs/io_bufread.md b/library/std/primitive_docs/io_bufread.md
new file mode 100644
index 000000000..bb688e3a5
--- /dev/null
+++ b/library/std/primitive_docs/io_bufread.md
@@ -0,0 +1 @@
+io::BufRead
diff --git a/library/std/primitive_docs/io_read.md b/library/std/primitive_docs/io_read.md
new file mode 100644
index 000000000..5118d7c48
--- /dev/null
+++ b/library/std/primitive_docs/io_read.md
@@ -0,0 +1 @@
+io::Read
diff --git a/library/std/primitive_docs/io_seek.md b/library/std/primitive_docs/io_seek.md
new file mode 100644
index 000000000..122e6df77
--- /dev/null
+++ b/library/std/primitive_docs/io_seek.md
@@ -0,0 +1 @@
+io::Seek
diff --git a/library/std/primitive_docs/io_write.md b/library/std/primitive_docs/io_write.md
new file mode 100644
index 000000000..15dfc907a
--- /dev/null
+++ b/library/std/primitive_docs/io_write.md
@@ -0,0 +1 @@
+io::Write
diff --git a/library/std/primitive_docs/net_tosocketaddrs.md b/library/std/primitive_docs/net_tosocketaddrs.md
new file mode 100644
index 000000000..a01f318e8
--- /dev/null
+++ b/library/std/primitive_docs/net_tosocketaddrs.md
@@ -0,0 +1 @@
+net::ToSocketAddrs
diff --git a/library/std/primitive_docs/process_exit.md b/library/std/primitive_docs/process_exit.md
new file mode 100644
index 000000000..565a71375
--- /dev/null
+++ b/library/std/primitive_docs/process_exit.md
@@ -0,0 +1 @@
+process::exit
diff --git a/library/std/primitive_docs/string_string.md b/library/std/primitive_docs/string_string.md
new file mode 100644
index 000000000..ce7815ff9
--- /dev/null
+++ b/library/std/primitive_docs/string_string.md
@@ -0,0 +1 @@
+string::String
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
new file mode 100644
index 000000000..a05e0db3a
--- /dev/null
+++ b/library/std/src/alloc.rs
@@ -0,0 +1,413 @@
+//! Memory allocation APIs.
+//!
+//! In a given program, the standard library has one “global” memory allocator
+//! that is used for example by `Box<T>` and `Vec<T>`.
+//!
+//! Currently the default global allocator is unspecified. Libraries, however,
+//! like `cdylib`s and `staticlib`s are guaranteed to use the [`System`] by
+//! default.
+//!
+//! # The `#[global_allocator]` attribute
+//!
+//! This attribute allows configuring the choice of global allocator.
+//! You can use this to implement a completely custom global allocator
+//! to route all default allocation requests to a custom object.
+//!
+//! ```rust
+//! use std::alloc::{GlobalAlloc, System, Layout};
+//!
+//! struct MyAllocator;
+//!
+//! unsafe impl GlobalAlloc for MyAllocator {
+//! unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+//! System.alloc(layout)
+//! }
+//!
+//! unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+//! System.dealloc(ptr, layout)
+//! }
+//! }
+//!
+//! #[global_allocator]
+//! static GLOBAL: MyAllocator = MyAllocator;
+//!
+//! fn main() {
+//! // This `Vec` will allocate memory through `GLOBAL` above
+//! let mut v = Vec::new();
+//! v.push(1);
+//! }
+//! ```
+//!
+//! The attribute is used on a `static` item whose type implements the
+//! [`GlobalAlloc`] trait. This type can be provided by an external library:
+//!
+//! ```rust,ignore (demonstrates crates.io usage)
+//! use jemallocator::Jemalloc;
+//!
+//! #[global_allocator]
+//! static GLOBAL: Jemalloc = Jemalloc;
+//!
+//! fn main() {}
+//! ```
+//!
+//! The `#[global_allocator]` can only be used once in a crate
+//! or its recursive dependencies.
+
+#![deny(unsafe_op_in_unsafe_fn)]
+#![stable(feature = "alloc_module", since = "1.28.0")]
+
+use core::intrinsics;
+use core::ptr::NonNull;
+use core::sync::atomic::{AtomicPtr, Ordering};
+use core::{mem, ptr};
+
+#[stable(feature = "alloc_module", since = "1.28.0")]
+#[doc(inline)]
+pub use alloc_crate::alloc::*;
+
+/// The default memory allocator provided by the operating system.
+///
+/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
+/// plus related functions.
+///
+/// This type implements the `GlobalAlloc` trait and Rust programs by default
+/// work as if they had this definition:
+///
+/// ```rust
+/// use std::alloc::System;
+///
+/// #[global_allocator]
+/// static A: System = System;
+///
+/// fn main() {
+/// let a = Box::new(4); // Allocates from the system allocator.
+/// println!("{a}");
+/// }
+/// ```
+///
+/// You can also define your own wrapper around `System` if you'd like, such as
+/// keeping track of the number of all bytes allocated:
+///
+/// ```rust
+/// use std::alloc::{System, GlobalAlloc, Layout};
+/// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+///
+/// struct Counter;
+///
+/// static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
+///
+/// unsafe impl GlobalAlloc for Counter {
+/// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+/// let ret = System.alloc(layout);
+/// if !ret.is_null() {
+/// ALLOCATED.fetch_add(layout.size(), SeqCst);
+/// }
+/// ret
+/// }
+///
+/// unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+/// System.dealloc(ptr, layout);
+/// ALLOCATED.fetch_sub(layout.size(), SeqCst);
+/// }
+/// }
+///
+/// #[global_allocator]
+/// static A: Counter = Counter;
+///
+/// fn main() {
+/// println!("allocated bytes before main: {}", ALLOCATED.load(SeqCst));
+/// }
+/// ```
+///
+/// It can also be used directly to allocate memory independently of whatever
+/// global allocator has been selected for a Rust program. For example if a Rust
+/// program opts in to using jemalloc as the global allocator, `System` will
+/// still allocate memory using `malloc` and `HeapAlloc`.
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct System;
+
+impl System {
+ #[inline]
+ fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+ match layout.size() {
+ 0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+ // SAFETY: `layout` is non-zero in size,
+ size => unsafe {
+ let raw_ptr = if zeroed {
+ GlobalAlloc::alloc_zeroed(self, layout)
+ } else {
+ GlobalAlloc::alloc(self, layout)
+ };
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, size))
+ },
+ }
+ }
+
+ // SAFETY: Same as `Allocator::grow`
+ #[inline]
+ unsafe fn grow_impl(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ zeroed: bool,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ match old_layout.size() {
+ 0 => self.alloc_impl(new_layout, zeroed),
+
+ // SAFETY: `new_size` is non-zero as `new_size` is greater than or equal to `old_size`
+ // as required by safety conditions and the `old_size == 0` case was handled in the
+ // previous match arm. Other conditions must be upheld by the caller
+ old_size if old_layout.align() == new_layout.align() => unsafe {
+ let new_size = new_layout.size();
+
+ // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
+ intrinsics::assume(new_size >= old_layout.size());
+
+ let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ if zeroed {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
+ // both the old and new memory allocation are valid for reads and writes for `old_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ old_size => unsafe {
+ let new_ptr = self.alloc_impl(new_layout, zeroed)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
+ Allocator::deallocate(self, ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+// The Allocator impl checks the layout size to be non-zero and forwards to the GlobalAlloc impl,
+// which is in `std::sys::*::alloc`.
+#[unstable(feature = "allocator_api", issue = "32838")]
+unsafe impl Allocator for System {
+ #[inline]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, false)
+ }
+
+ #[inline]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, true)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() != 0 {
+ // SAFETY: `layout` is non-zero in size,
+ // other conditions must be upheld by the caller
+ unsafe { GlobalAlloc::dealloc(self, ptr.as_ptr(), layout) }
+ }
+ }
+
+ #[inline]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
+ }
+
+ #[inline]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
+ }
+
+ #[inline]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ match new_layout.size() {
+ // SAFETY: conditions must be upheld by the caller
+ 0 => unsafe {
+ Allocator::deallocate(self, ptr, old_layout);
+ Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
+ },
+
+ // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
+ new_size if old_layout.align() == new_layout.align() => unsafe {
+ // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
+ intrinsics::assume(new_size <= old_layout.size());
+
+ let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
+ // both the old and new memory allocation are valid for reads and writes for `new_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ new_size => unsafe {
+ let new_ptr = Allocator::allocate(self, new_layout)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
+ Allocator::deallocate(self, ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
+
+/// Registers a custom allocation error hook, replacing any that was previously registered.
+///
+/// The allocation error hook is invoked when an infallible memory allocation fails, before
+/// the runtime aborts. The default hook prints a message to standard error,
+/// but this behavior can be customized with the [`set_alloc_error_hook`] and
+/// [`take_alloc_error_hook`] functions.
+///
+/// The hook is provided with a `Layout` struct which contains information
+/// about the allocation that failed.
+///
+/// The allocation error hook is a global resource.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(alloc_error_hook)]
+///
+/// use std::alloc::{Layout, set_alloc_error_hook};
+///
+/// fn custom_alloc_error_hook(layout: Layout) {
+/// panic!("memory allocation of {} bytes failed", layout.size());
+/// }
+///
+/// set_alloc_error_hook(custom_alloc_error_hook);
+/// ```
+#[unstable(feature = "alloc_error_hook", issue = "51245")]
+pub fn set_alloc_error_hook(hook: fn(Layout)) {
+ HOOK.store(hook as *mut (), Ordering::SeqCst);
+}
+
+/// Unregisters the current allocation error hook, returning it.
+///
+/// *See also the function [`set_alloc_error_hook`].*
+///
+/// If no custom hook is registered, the default hook will be returned.
+#[unstable(feature = "alloc_error_hook", issue = "51245")]
+pub fn take_alloc_error_hook() -> fn(Layout) {
+ let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
+ if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } }
+}
+
+fn default_alloc_error_hook(layout: Layout) {
+ extern "Rust" {
+ // This symbol is emitted by rustc next to __rust_alloc_error_handler.
+ // Its value depends on the -Zoom={panic,abort} compiler option.
+ static __rust_alloc_error_handler_should_panic: u8;
+ }
+
+ #[allow(unused_unsafe)]
+ if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+ panic!("memory allocation of {} bytes failed\n", layout.size());
+ } else {
+ rtprintpanic!("memory allocation of {} bytes failed\n", layout.size());
+ }
+}
+
+#[cfg(not(test))]
+#[doc(hidden)]
+#[alloc_error_handler]
+#[unstable(feature = "alloc_internals", issue = "none")]
+pub fn rust_oom(layout: Layout) -> ! {
+ let hook = HOOK.load(Ordering::SeqCst);
+ let hook: fn(Layout) =
+ if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
+ hook(layout);
+ crate::process::abort()
+}
+
+#[cfg(not(test))]
+#[doc(hidden)]
+#[allow(unused_attributes)]
+#[unstable(feature = "alloc_internals", issue = "none")]
+pub mod __default_lib_allocator {
+ use super::{GlobalAlloc, Layout, System};
+ // These magic symbol names are used as a fallback for implementing the
+ // `__rust_alloc` etc symbols (see `src/liballoc/alloc.rs`) when there is
+ // no `#[global_allocator]` attribute.
+
+ // for symbol names src/librustc_ast/expand/allocator.rs
+ // for signatures src/librustc_allocator/lib.rs
+
+ // linkage directives are provided as part of the current compiler allocator
+ // ABI
+
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C" fn __rdl_alloc(size: usize, align: usize) -> *mut u8 {
+ // SAFETY: see the guarantees expected by `Layout::from_size_align` and
+ // `GlobalAlloc::alloc`.
+ unsafe {
+ let layout = Layout::from_size_align_unchecked(size, align);
+ System.alloc(layout)
+ }
+ }
+
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C" fn __rdl_dealloc(ptr: *mut u8, size: usize, align: usize) {
+ // SAFETY: see the guarantees expected by `Layout::from_size_align` and
+ // `GlobalAlloc::dealloc`.
+ unsafe { System.dealloc(ptr, Layout::from_size_align_unchecked(size, align)) }
+ }
+
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C" fn __rdl_realloc(
+ ptr: *mut u8,
+ old_size: usize,
+ align: usize,
+ new_size: usize,
+ ) -> *mut u8 {
+ // SAFETY: see the guarantees expected by `Layout::from_size_align` and
+ // `GlobalAlloc::realloc`.
+ unsafe {
+ let old_layout = Layout::from_size_align_unchecked(old_size, align);
+ System.realloc(ptr, old_layout, new_size)
+ }
+ }
+
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C" fn __rdl_alloc_zeroed(size: usize, align: usize) -> *mut u8 {
+ // SAFETY: see the guarantees expected by `Layout::from_size_align` and
+ // `GlobalAlloc::alloc_zeroed`.
+ unsafe {
+ let layout = Layout::from_size_align_unchecked(size, align);
+ System.alloc_zeroed(layout)
+ }
+ }
+}
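
Editor's note: the documentation above points out that `System` can be used directly, independently of whichever `#[global_allocator]` a program registers. The following is a minimal, self-contained sketch of that direct use through the stable `GlobalAlloc` trait; it is not taken from the patch, and the variable names are illustrative.

```rust
use std::alloc::{GlobalAlloc, Layout, System};

fn main() {
    // Request space for one u64 straight from the system allocator,
    // bypassing whatever global allocator the program has selected.
    let layout = Layout::new::<u64>();
    unsafe {
        let ptr = System.alloc(layout) as *mut u64;
        assert!(!ptr.is_null(), "system allocation failed");
        ptr.write(42);
        assert_eq!(ptr.read(), 42);
        // Every successful `alloc` must be paired with a `dealloc`
        // using the same layout.
        System.dealloc(ptr as *mut u8, layout);
    }
}
```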
diff --git a/library/std/src/ascii.rs b/library/std/src/ascii.rs
new file mode 100644
index 000000000..c29f01577
--- /dev/null
+++ b/library/std/src/ascii.rs
@@ -0,0 +1,208 @@
+//! Operations on ASCII strings and characters.
+//!
+//! Most string operations in Rust act on UTF-8 strings. However, at times it
+//! makes more sense to only consider the ASCII character set for a specific
+//! operation.
+//!
+//! The [`AsciiExt`] trait provides methods that allow for character
+//! operations that only act on the ASCII subset and leave non-ASCII characters
+//! alone.
+//!
+//! The [`escape_default`] function provides an iterator over the bytes of an
+//! escaped version of the character given.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::ascii::{escape_default, EscapeDefault};
+
+/// Extension methods for ASCII-subset only operations.
+///
+/// Be aware that operations on seemingly non-ASCII characters can sometimes
+/// have unexpected results. Consider this example:
+///
+/// ```
+/// use std::ascii::AsciiExt;
+///
+/// assert_eq!(AsciiExt::to_ascii_uppercase("café"), "CAFÉ");
+/// assert_eq!(AsciiExt::to_ascii_uppercase("café"), "CAFé");
+/// ```
+///
+/// In the first example, the lowercased string is represented `"cafe\u{301}"`
+/// (the last character is an acute accent [combining character]). Unlike the
+/// other characters in the string, the combining character will not get mapped
+/// to an uppercase variant, resulting in `"CAFE\u{301}"`. In the second
+/// example, the lowercased string is represented `"caf\u{e9}"` (the last
+/// character is a single Unicode character representing an 'e' with an acute
+/// accent). Since the last character is defined outside the scope of ASCII,
+/// it will not get mapped to an uppercase variant, resulting in `"CAF\u{e9}"`.
+///
+/// [combining character]: https://en.wikipedia.org/wiki/Combining_character
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "1.26.0", note = "use inherent methods instead")]
+pub trait AsciiExt {
+ /// Container type for copied ASCII characters.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Owned;
+
+ /// Checks if the value is within the ASCII range.
+ ///
+ /// # Note
+ ///
+ /// This method is deprecated in favor of the identically-named
+ /// inherent methods on `u8`, `char`, `[u8]` and `str`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn is_ascii(&self) -> bool;
+
+ /// Makes a copy of the value in its ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// To uppercase ASCII characters in addition to non-ASCII characters, use
+ /// [`str::to_uppercase`].
+ ///
+ /// # Note
+ ///
+ /// This method is deprecated in favor of the identically-named
+ /// inherent methods on `u8`, `char`, `[u8]` and `str`.
+ ///
+ /// [`make_ascii_uppercase`]: AsciiExt::make_ascii_uppercase
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(deprecated)]
+ fn to_ascii_uppercase(&self) -> Self::Owned;
+
+ /// Makes a copy of the value in its ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// To lowercase ASCII characters in addition to non-ASCII characters, use
+ /// [`str::to_lowercase`].
+ ///
+ /// # Note
+ ///
+ /// This method is deprecated in favor of the identically-named
+ /// inherent methods on `u8`, `char`, `[u8]` and `str`.
+ ///
+ /// [`make_ascii_lowercase`]: AsciiExt::make_ascii_lowercase
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(deprecated)]
+ fn to_ascii_lowercase(&self) -> Self::Owned;
+
+ /// Checks that two values are an ASCII case-insensitive match.
+ ///
+ /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
+ /// but without allocating and copying temporaries.
+ ///
+ /// # Note
+ ///
+ /// This method is deprecated in favor of the identically-named
+ /// inherent methods on `u8`, `char`, `[u8]` and `str`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn eq_ignore_ascii_case(&self, other: &Self) -> bool;
+
+ /// Converts this type to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase`].
+ ///
+ /// # Note
+ ///
+ /// This method is deprecated in favor of the identically-named
+ /// inherent methods on `u8`, `char`, `[u8]` and `str`.
+ ///
+ /// [`to_ascii_uppercase`]: AsciiExt::to_ascii_uppercase
+ #[stable(feature = "ascii", since = "1.9.0")]
+ fn make_ascii_uppercase(&mut self);
+
+ /// Converts this type to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase`].
+ ///
+ /// # Note
+ ///
+ /// This method is deprecated in favor of the identically-named
+ /// inherent methods on `u8`, `char`, `[u8]` and `str`.
+ ///
+ /// [`to_ascii_lowercase`]: AsciiExt::to_ascii_lowercase
+ #[stable(feature = "ascii", since = "1.9.0")]
+ fn make_ascii_lowercase(&mut self);
+}
+
+macro_rules! delegating_ascii_methods {
+ () => {
+ #[inline]
+ fn is_ascii(&self) -> bool {
+ self.is_ascii()
+ }
+
+ #[inline]
+ fn to_ascii_uppercase(&self) -> Self::Owned {
+ self.to_ascii_uppercase()
+ }
+
+ #[inline]
+ fn to_ascii_lowercase(&self) -> Self::Owned {
+ self.to_ascii_lowercase()
+ }
+
+ #[inline]
+ fn eq_ignore_ascii_case(&self, o: &Self) -> bool {
+ self.eq_ignore_ascii_case(o)
+ }
+
+ #[inline]
+ fn make_ascii_uppercase(&mut self) {
+ self.make_ascii_uppercase();
+ }
+
+ #[inline]
+ fn make_ascii_lowercase(&mut self) {
+ self.make_ascii_lowercase();
+ }
+ };
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+impl AsciiExt for u8 {
+ type Owned = u8;
+
+ delegating_ascii_methods!();
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+impl AsciiExt for char {
+ type Owned = char;
+
+ delegating_ascii_methods!();
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+impl AsciiExt for [u8] {
+ type Owned = Vec<u8>;
+
+ delegating_ascii_methods!();
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+impl AsciiExt for str {
+ type Owned = String;
+
+ delegating_ascii_methods!();
+}
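
Editor's note: `AsciiExt` is deprecated in favor of the identically-named inherent methods on `u8`, `char`, `[u8]`, and `str`. The sketch below (illustrative only, mirroring the module's own `café` example) shows those inherent replacements.

```rust
fn main() {
    // Inherent ASCII methods replace the deprecated AsciiExt trait.
    assert!(b'a'.is_ascii());
    assert_eq!(b'a'.to_ascii_uppercase(), b'A');
    assert!('Z'.eq_ignore_ascii_case(&'z'));

    let mut s = String::from("café");
    s.make_ascii_uppercase(); // only ASCII letters are changed
    assert_eq!(s, "CAFé");
}
```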
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
new file mode 100644
index 000000000..05e9b2eb6
--- /dev/null
+++ b/library/std/src/backtrace.rs
@@ -0,0 +1,499 @@
+//! Support for capturing a stack backtrace of an OS thread
+//!
+//! This module contains the support necessary to capture a stack backtrace of a
+//! running OS thread from the OS thread itself. The `Backtrace` type supports
+//! capturing a stack trace via the `Backtrace::capture` and
+//! `Backtrace::force_capture` functions.
+//!
+//! A backtrace is typically quite handy to attach to errors (e.g. types
+//! implementing `std::error::Error`) to get a causal chain of where an error
+//! was generated.
+//!
+//! > **Note**: this module is unstable and is designed in [RFC 2504], and you
+//! > can learn more about its status in the [tracking issue].
+//!
+//! [RFC 2504]: https://github.com/rust-lang/rfcs/blob/master/text/2504-fix-error.md
+//! [tracking issue]: https://github.com/rust-lang/rust/issues/53487
+//!
+//! ## Accuracy
+//!
+//! Backtraces are attempted to be as accurate as possible, but no guarantees
+//! are provided about the exact accuracy of a backtrace. Instruction pointers,
+//! symbol names, filenames, line numbers, etc, may all be incorrect when
+//! reported. Accuracy is attempted on a best-effort basis, however, and bugs
+//! are always welcome to indicate areas of improvement!
+//!
+//! For most platforms a backtrace with a filename/line number requires that
+//! programs be compiled with debug information. Without debug information
+//! filenames/line numbers will not be reported.
+//!
+//! ## Platform support
+//!
+//! Not all platforms that libstd compiles for support capturing backtraces.
+//! Some platforms simply do nothing when capturing a backtrace. To check
+//! whether the platform supports capturing backtraces you can consult the
+//! `BacktraceStatus` enum as a result of `Backtrace::status`.
+//!
+//! Like above with accuracy platform support is done on a best effort basis.
+//! Sometimes libraries might not be available at runtime or something may go
+//! wrong which would cause a backtrace to not be captured. Please feel free to
+//! report issues with platforms where a backtrace cannot be captured though!
+//!
+//! ## Environment Variables
+//!
+//! The `Backtrace::capture` function might not actually capture a backtrace by
+//! default. Its behavior is governed by two environment variables:
+//!
+//! * `RUST_LIB_BACKTRACE` - if this is set to `0` then `Backtrace::capture`
+//! will never capture a backtrace. Any other value enables
+//! `Backtrace::capture`.
+//!
+//! * `RUST_BACKTRACE` - if `RUST_LIB_BACKTRACE` is not set, then this variable
+//! is consulted with the same rules as `RUST_LIB_BACKTRACE`.
+//!
+//! * If neither of the above environment variables is set, then
+//! `Backtrace::capture` will be disabled.
+//!
+//! Capturing a backtrace can be quite an expensive runtime operation, so the
+//! environment variables allow either forcibly disabling this runtime
+//! performance hit or selectively enabling it in some programs.
+//!
+//! Note that the `Backtrace::force_capture` function can be used to ignore
+//! these environment variables. Also note that the state of environment
+//! variables is cached once the first backtrace is created, so altering
+//! `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` at runtime might not actually change
+//! how backtraces are captured.
+
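The interaction described above can be summarized with a short usage sketch. This is illustrative only: it assumes a nightly toolchain with the unstable `backtrace` feature enabled (as required while this module is unstable) and uses only items defined in this module (`Backtrace::capture`, `Backtrace::force_capture`, `Backtrace::status`, and `BacktraceStatus`).

```rust
#![feature(backtrace)]

use std::backtrace::{Backtrace, BacktraceStatus};

fn main() {
    // `capture` honors RUST_LIB_BACKTRACE / RUST_BACKTRACE and may return a
    // disabled (empty) backtrace if neither enables capturing.
    let bt = Backtrace::capture();
    match bt.status() {
        BacktraceStatus::Captured => println!("captured:\n{bt}"),
        BacktraceStatus::Disabled => println!("capture disabled by the environment"),
        // `BacktraceStatus` is non-exhaustive; treat anything else as unsupported.
        _ => println!("backtraces are not supported on this platform"),
    }

    // `force_capture` ignores the environment variables and always attempts
    // a (potentially expensive) capture.
    let forced = Backtrace::force_capture();
    println!("{forced}");
}
```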
+#![unstable(feature = "backtrace", issue = "53487")]
+
+#[cfg(test)]
+mod tests;
+
+// NB: A note on resolution of a backtrace:
+//
+// Backtraces primarily happen in two steps, one is where we actually capture
+// the stack backtrace, giving us a list of instruction pointers corresponding
+// to stack frames. Next we take these instruction pointers and, one-by-one,
+// turn them into a human readable name (like `main`).
+//
+// The first phase can be somewhat expensive (walking the stack), especially
+// on MSVC where debug information is consulted to return inline frames each as
+// their own frame. The second phase, however, is almost always extremely
+// expensive (on the order of milliseconds sometimes) when it's consulting debug
+// information.
+//
+// We attempt to amortize this cost as much as possible by delaying resolution
+// of an address to a human readable name for as long as possible. When
+// `Backtrace::create` is called to capture a backtrace it doesn't actually
+// perform any symbol resolution, but rather we lazily resolve symbols only just
+// before they're needed for printing. This way we can make capturing a
+// backtrace and throwing it away much cheaper, but actually printing a
+// backtrace is still basically the same cost.
+//
+// This strategy comes at the cost of some synchronization required inside of a
+// `Backtrace`, but that's a relatively small price to pay relative to capturing
+// a backtrace or actually symbolizing it.
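A small usage sketch of the strategy described in the note above (illustrative only; it assumes the unstable `backtrace` feature and relies only on behavior implemented later in this file): capturing is cheap because no symbols are resolved yet, the first time the backtrace is formatted the symbols are resolved exactly once, and subsequent formatting reuses that work.

```rust
use std::backtrace::Backtrace;

// Capturing walks the stack but performs no symbol resolution yet.
let bt = Backtrace::force_capture();

// The first formatting call resolves symbols (the expensive phase) once...
let first = format!("{bt}");
// ...and later calls reuse the already-resolved frames.
let second = format!("{bt}");
assert_eq!(first, second);
```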
+
+use crate::backtrace_rs::{self, BytesOrWideString};
+use crate::cell::UnsafeCell;
+use crate::env;
+use crate::ffi::c_void;
+use crate::fmt;
+use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use crate::sync::Once;
+use crate::sys_common::backtrace::{lock, output_filename};
+use crate::vec::Vec;
+
+/// A captured OS thread stack backtrace.
+///
+/// This type represents a stack backtrace for an OS thread captured at a
+/// previous point in time. In some instances the `Backtrace` type may
+/// internally be empty due to configuration. For more information see
+/// `Backtrace::capture`.
+#[must_use]
+pub struct Backtrace {
+ inner: Inner,
+}
+
+/// The current status of a backtrace, indicating whether it was captured or
+/// whether it is empty for some other reason.
+#[non_exhaustive]
+#[derive(Debug, PartialEq, Eq)]
+pub enum BacktraceStatus {
+ /// Capturing a backtrace is not supported, likely because it's not
+ /// implemented for the current platform.
+ Unsupported,
+ /// Capturing a backtrace has been disabled through either the
+ /// `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` environment variables.
+ Disabled,
+ /// A backtrace has been captured and the `Backtrace` should print
+ /// reasonable information when rendered.
+ Captured,
+}
+
+enum Inner {
+ Unsupported,
+ Disabled,
+ Captured(LazilyResolvedCapture),
+}
+
+struct Capture {
+ actual_start: usize,
+ resolved: bool,
+ frames: Vec<BacktraceFrame>,
+}
+
+fn _assert_send_sync() {
+ fn _assert<T: Send + Sync>() {}
+ _assert::<Backtrace>();
+}
+
+/// A single frame of a backtrace.
+#[unstable(feature = "backtrace_frames", issue = "79676")]
+pub struct BacktraceFrame {
+ frame: RawFrame,
+ symbols: Vec<BacktraceSymbol>,
+}
+
+#[derive(Debug)]
+enum RawFrame {
+ Actual(backtrace_rs::Frame),
+ #[cfg(test)]
+ Fake,
+}
+
+struct BacktraceSymbol {
+ name: Option<Vec<u8>>,
+ filename: Option<BytesOrWide>,
+ lineno: Option<u32>,
+ colno: Option<u32>,
+}
+
+enum BytesOrWide {
+ Bytes(Vec<u8>),
+ Wide(Vec<u16>),
+}
+
+impl fmt::Debug for Backtrace {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let capture = match &self.inner {
+ Inner::Unsupported => return fmt.write_str("<unsupported>"),
+ Inner::Disabled => return fmt.write_str("<disabled>"),
+ Inner::Captured(c) => c.force(),
+ };
+
+ let frames = &capture.frames[capture.actual_start..];
+
+ write!(fmt, "Backtrace ")?;
+
+ let mut dbg = fmt.debug_list();
+
+ for frame in frames {
+ if frame.frame.ip().is_null() {
+ continue;
+ }
+
+ dbg.entries(&frame.symbols);
+ }
+
+ dbg.finish()
+ }
+}
+
+impl fmt::Debug for BacktraceFrame {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut dbg = fmt.debug_list();
+ dbg.entries(&self.symbols);
+ dbg.finish()
+ }
+}
+
+impl fmt::Debug for BacktraceSymbol {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // FIXME: improve formatting: https://github.com/rust-lang/rust/issues/65280
+ // FIXME: Also, include column numbers into the debug format as Display already has them.
+ // Until there are stable per-frame accessors, the format shouldn't be changed:
+ // https://github.com/rust-lang/rust/issues/65280#issuecomment-638966585
+ write!(fmt, "{{ ")?;
+
+ if let Some(fn_name) = self.name.as_ref().map(|b| backtrace_rs::SymbolName::new(b)) {
+ write!(fmt, "fn: \"{:#}\"", fn_name)?;
+ } else {
+ write!(fmt, "fn: <unknown>")?;
+ }
+
+ if let Some(fname) = self.filename.as_ref() {
+ write!(fmt, ", file: \"{:?}\"", fname)?;
+ }
+
+ if let Some(line) = self.lineno {
+ write!(fmt, ", line: {:?}", line)?;
+ }
+
+ write!(fmt, " }}")
+ }
+}
+
+impl fmt::Debug for BytesOrWide {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ output_filename(
+ fmt,
+ match self {
+ BytesOrWide::Bytes(w) => BytesOrWideString::Bytes(w),
+ BytesOrWide::Wide(w) => BytesOrWideString::Wide(w),
+ },
+ backtrace_rs::PrintFmt::Short,
+ crate::env::current_dir().as_ref().ok(),
+ )
+ }
+}
+
+impl Backtrace {
+ /// Returns whether backtrace captures are enabled through environment
+ /// variables.
+ fn enabled() -> bool {
+ // Cache the result of reading the environment variables to make
+ // backtrace captures speedy, because otherwise reading environment
+ // variables every time can be somewhat slow.
+ static ENABLED: AtomicUsize = AtomicUsize::new(0);
+ match ENABLED.load(Relaxed) {
+ 0 => {}
+ 1 => return false,
+ _ => return true,
+ }
+ let enabled = match env::var("RUST_LIB_BACKTRACE") {
+ Ok(s) => s != "0",
+ Err(_) => match env::var("RUST_BACKTRACE") {
+ Ok(s) => s != "0",
+ Err(_) => false,
+ },
+ };
+ ENABLED.store(enabled as usize + 1, Relaxed);
+ enabled
+ }
+
+ /// Capture a stack backtrace of the current thread.
+ ///
+ /// This function will capture a stack backtrace of the current OS thread of
+ /// execution, returning a `Backtrace` type which can be later used to print
+ /// the entire stack trace or render it to a string.
+ ///
+ /// This function will be a noop if the `RUST_BACKTRACE` or
+ /// `RUST_LIB_BACKTRACE` backtrace variables are both not set. If either
+ /// environment variable is set and enabled then this function will actually
+ /// capture a backtrace. Capturing a backtrace can be both memory intensive
+ /// and slow, so these environment variables allow liberally using
+ /// `Backtrace::capture` and only incurring a slowdown when the environment
+ /// variables are set.
+ ///
+ /// To forcibly capture a backtrace regardless of environment variables, use
+ /// the `Backtrace::force_capture` function.
+ #[inline(never)] // want to make sure there's a frame here to remove
+ pub fn capture() -> Backtrace {
+ if !Backtrace::enabled() {
+ return Backtrace { inner: Inner::Disabled };
+ }
+ Backtrace::create(Backtrace::capture as usize)
+ }
+
+ /// Forcibly captures a full backtrace, regardless of environment variable
+ /// configuration.
+ ///
+ /// This function behaves the same as `capture` except that it ignores the
+ /// values of the `RUST_BACKTRACE` and `RUST_LIB_BACKTRACE` environment
+ /// variables, always capturing a backtrace.
+ ///
+ /// Note that capturing a backtrace can be an expensive operation on some
+ /// platforms, so this should be used with caution in performance-sensitive
+ /// parts of code.
+ #[inline(never)] // want to make sure there's a frame here to remove
+ pub fn force_capture() -> Backtrace {
+ Backtrace::create(Backtrace::force_capture as usize)
+ }
+
+ /// Forcibly captures a disabled backtrace, regardless of environment
+ /// variable configuration.
+ pub const fn disabled() -> Backtrace {
+ Backtrace { inner: Inner::Disabled }
+ }
+
+ // Capture a backtrace which starts just before the function addressed by
+ // `ip`.
+ fn create(ip: usize) -> Backtrace {
+ // SAFETY: We don't attempt to lock this reentrantly.
+ let _lock = unsafe { lock() };
+ let mut frames = Vec::new();
+ let mut actual_start = None;
+ unsafe {
+ backtrace_rs::trace_unsynchronized(|frame| {
+ frames.push(BacktraceFrame {
+ frame: RawFrame::Actual(frame.clone()),
+ symbols: Vec::new(),
+ });
+ if frame.symbol_address().addr() == ip && actual_start.is_none() {
+ actual_start = Some(frames.len());
+ }
+ true
+ });
+ }
+
+ // If no frames came out assume that this is an unsupported platform
+ // since `backtrace` doesn't provide a way of learning this right now,
+ // and this should be a good enough approximation.
+ let inner = if frames.is_empty() {
+ Inner::Unsupported
+ } else {
+ Inner::Captured(LazilyResolvedCapture::new(Capture {
+ actual_start: actual_start.unwrap_or(0),
+ frames,
+ resolved: false,
+ }))
+ };
+
+ Backtrace { inner }
+ }
+
+ /// Returns the status of this backtrace, indicating whether this backtrace
+ /// request was unsupported, disabled, or a stack trace was actually
+ /// captured.
+ #[must_use]
+ pub fn status(&self) -> BacktraceStatus {
+ match self.inner {
+ Inner::Unsupported => BacktraceStatus::Unsupported,
+ Inner::Disabled => BacktraceStatus::Disabled,
+ Inner::Captured(_) => BacktraceStatus::Captured,
+ }
+ }
+}
+
+impl<'a> Backtrace {
+ /// Returns an iterator over the backtrace frames.
+ #[must_use]
+ #[unstable(feature = "backtrace_frames", issue = "79676")]
+ pub fn frames(&'a self) -> &'a [BacktraceFrame] {
+ if let Inner::Captured(c) = &self.inner { &c.force().frames } else { &[] }
+ }
+}
+
+impl fmt::Display for Backtrace {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let capture = match &self.inner {
+ Inner::Unsupported => return fmt.write_str("unsupported backtrace"),
+ Inner::Disabled => return fmt.write_str("disabled backtrace"),
+ Inner::Captured(c) => c.force(),
+ };
+
+ let full = fmt.alternate();
+ let (frames, style) = if full {
+ (&capture.frames[..], backtrace_rs::PrintFmt::Full)
+ } else {
+ (&capture.frames[capture.actual_start..], backtrace_rs::PrintFmt::Short)
+ };
+
+ // When printing paths we try to strip the cwd if it exists, otherwise
+ // we just print the path as-is. Note that we also only do this for the
+ // short format, because if it's full we presumably want to print
+ // everything.
+ let cwd = crate::env::current_dir();
+ let mut print_path = move |fmt: &mut fmt::Formatter<'_>, path: BytesOrWideString<'_>| {
+ output_filename(fmt, path, style, cwd.as_ref().ok())
+ };
+
+ let mut f = backtrace_rs::BacktraceFmt::new(fmt, style, &mut print_path);
+ f.add_context()?;
+ for frame in frames {
+ if frame.symbols.is_empty() {
+ f.frame().print_raw(frame.frame.ip(), None, None, None)?;
+ } else {
+ for symbol in frame.symbols.iter() {
+ f.frame().print_raw_with_column(
+ frame.frame.ip(),
+ symbol.name.as_ref().map(|b| backtrace_rs::SymbolName::new(b)),
+ symbol.filename.as_ref().map(|b| match b {
+ BytesOrWide::Bytes(w) => BytesOrWideString::Bytes(w),
+ BytesOrWide::Wide(w) => BytesOrWideString::Wide(w),
+ }),
+ symbol.lineno,
+ symbol.colno,
+ )?;
+ }
+ }
+ }
+ f.finish()?;
+ Ok(())
+ }
+}
+
+struct LazilyResolvedCapture {
+ sync: Once,
+ capture: UnsafeCell<Capture>,
+}
+
+impl LazilyResolvedCapture {
+ fn new(capture: Capture) -> Self {
+ LazilyResolvedCapture { sync: Once::new(), capture: UnsafeCell::new(capture) }
+ }
+
+ fn force(&self) -> &Capture {
+ self.sync.call_once(|| {
+ // SAFETY: This exclusive reference can't overlap with any others
+ // `Once` guarantees callers will block until this closure returns
+ // `Once` also guarantees only a single caller will enter this closure
+ unsafe { &mut *self.capture.get() }.resolve();
+ });
+
+ // SAFETY: This shared reference can't overlap with the exclusive reference above
+ unsafe { &*self.capture.get() }
+ }
+}
+
+// SAFETY: Access to the inner value is synchronized using a thread-safe `Once`
+// So long as `Capture` is `Sync`, `LazilyResolvedCapture` is too
+unsafe impl Sync for LazilyResolvedCapture where Capture: Sync {}
+
+impl Capture {
+ fn resolve(&mut self) {
+ // If we're already resolved, nothing to do!
+ if self.resolved {
+ return;
+ }
+ self.resolved = true;
+
+ // Use the global backtrace lock to synchronize this as it's a
+ // requirement of the `backtrace` crate, and then actually resolve
+ // everything.
+ // SAFETY: We don't attempt to lock this reentrantly.
+ let _lock = unsafe { lock() };
+ for frame in self.frames.iter_mut() {
+ let symbols = &mut frame.symbols;
+ let frame = match &frame.frame {
+ RawFrame::Actual(frame) => frame,
+ #[cfg(test)]
+ RawFrame::Fake => unimplemented!(),
+ };
+ unsafe {
+ backtrace_rs::resolve_frame_unsynchronized(frame, |symbol| {
+ symbols.push(BacktraceSymbol {
+ name: symbol.name().map(|m| m.as_bytes().to_vec()),
+ filename: symbol.filename_raw().map(|b| match b {
+ BytesOrWideString::Bytes(b) => BytesOrWide::Bytes(b.to_owned()),
+ BytesOrWideString::Wide(b) => BytesOrWide::Wide(b.to_owned()),
+ }),
+ lineno: symbol.lineno(),
+ colno: symbol.colno(),
+ });
+ });
+ }
+ }
+ }
+}
+
+impl RawFrame {
+ fn ip(&self) -> *mut c_void {
+ match self {
+ RawFrame::Actual(frame) => frame.ip(),
+ #[cfg(test)]
+ RawFrame::Fake => crate::ptr::invalid_mut(1),
+ }
+ }
+}
diff --git a/library/std/src/backtrace/tests.rs b/library/std/src/backtrace/tests.rs
new file mode 100644
index 000000000..4dfbf88e8
--- /dev/null
+++ b/library/std/src/backtrace/tests.rs
@@ -0,0 +1,95 @@
+use super::*;
+
+fn generate_fake_frames() -> Vec<BacktraceFrame> {
+ vec![
+ BacktraceFrame {
+ frame: RawFrame::Fake,
+ symbols: vec![BacktraceSymbol {
+ name: Some(b"std::backtrace::Backtrace::create".to_vec()),
+ filename: Some(BytesOrWide::Bytes(b"rust/backtrace.rs".to_vec())),
+ lineno: Some(100),
+ colno: None,
+ }],
+ },
+ BacktraceFrame {
+ frame: RawFrame::Fake,
+ symbols: vec![BacktraceSymbol {
+ name: Some(b"__rust_maybe_catch_panic".to_vec()),
+ filename: None,
+ lineno: None,
+ colno: None,
+ }],
+ },
+ BacktraceFrame {
+ frame: RawFrame::Fake,
+ symbols: vec![
+ BacktraceSymbol {
+ name: Some(b"std::rt::lang_start_internal".to_vec()),
+ filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())),
+ lineno: Some(300),
+ colno: Some(5),
+ },
+ BacktraceSymbol {
+ name: Some(b"std::rt::lang_start".to_vec()),
+ filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())),
+ lineno: Some(400),
+ colno: None,
+ },
+ ],
+ },
+ ]
+}
+
+#[test]
+fn test_debug() {
+ let backtrace = Backtrace {
+ inner: Inner::Captured(LazilyResolvedCapture::new(Capture {
+ actual_start: 1,
+ resolved: true,
+ frames: generate_fake_frames(),
+ })),
+ };
+
+ #[rustfmt::skip]
+ let expected = "Backtrace [\
+ \n { fn: \"__rust_maybe_catch_panic\" },\
+ \n { fn: \"std::rt::lang_start_internal\", file: \"rust/rt.rs\", line: 300 },\
+ \n { fn: \"std::rt::lang_start\", file: \"rust/rt.rs\", line: 400 },\
+ \n]";
+
+ assert_eq!(format!("{backtrace:#?}"), expected);
+
+ // Format the backtrace a second time, just to make sure lazily resolved state is stable
+ assert_eq!(format!("{backtrace:#?}"), expected);
+}
+
+#[test]
+fn test_frames() {
+ let backtrace = Backtrace {
+ inner: Inner::Captured(LazilyResolvedCapture::new(Capture {
+ actual_start: 1,
+ resolved: true,
+ frames: generate_fake_frames(),
+ })),
+ };
+
+ let frames = backtrace.frames();
+
+ #[rustfmt::skip]
+ let expected = vec![
+ "[
+ { fn: \"std::backtrace::Backtrace::create\", file: \"rust/backtrace.rs\", line: 100 },
+]",
+ "[
+ { fn: \"__rust_maybe_catch_panic\" },
+]",
+ "[
+ { fn: \"std::rt::lang_start_internal\", file: \"rust/rt.rs\", line: 300 },
+ { fn: \"std::rt::lang_start\", file: \"rust/rt.rs\", line: 400 },
+]"
+ ];
+
+ let mut iter = frames.iter().zip(expected.iter());
+
+ assert!(iter.all(|(f, e)| format!("{f:#?}") == *e));
+}
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
new file mode 100644
index 000000000..db811343f
--- /dev/null
+++ b/library/std/src/collections/hash/map.rs
@@ -0,0 +1,3276 @@
+#[cfg(test)]
+mod tests;
+
+use self::Entry::*;
+
+use hashbrown::hash_map as base;
+
+use crate::borrow::Borrow;
+use crate::cell::Cell;
+use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind;
+use crate::fmt::{self, Debug};
+#[allow(deprecated)]
+use crate::hash::{BuildHasher, Hash, Hasher, SipHasher13};
+use crate::iter::FusedIterator;
+use crate::ops::Index;
+use crate::sys;
+
+/// A [hash map] implemented with quadratic probing and SIMD lookup.
+///
+/// By default, `HashMap` uses a hashing algorithm selected to provide
+/// resistance against HashDoS attacks. The algorithm is randomly seeded, and a
+/// reasonable best-effort is made to generate this seed from a high quality,
+/// secure source of randomness provided by the host without blocking the
+/// program. Because of this, the randomness of the seed depends on the output
+/// quality of the system's random number generator when the seed is created.
+/// In particular, seeds generated when the system's entropy pool is abnormally
+/// low, such as during system boot, may be of lower quality.
+///
+/// The default hashing algorithm is currently SipHash 1-3, though this is
+/// subject to change at any point in the future. While its performance is very
+/// competitive for medium sized keys, other hashing algorithms will outperform
+/// it for small keys such as integers as well as large keys such as long
+/// strings, though those algorithms will typically *not* protect against
+/// attacks such as HashDoS.
+///
+/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
+/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods.
+/// There are many alternative [hashing algorithms available on crates.io].
+///
+/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although
+/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`.
+/// If you implement these yourself, it is important that the following
+/// property holds:
+///
+/// ```text
+/// k1 == k2 -> hash(k1) == hash(k2)
+/// ```
+///
+/// In other words, if two keys are equal, their hashes must be equal.
+///
+/// It is a logic error for a key to be modified in such a way that the key's
+/// hash, as determined by the [`Hash`] trait, or its equality, as determined by
+/// the [`Eq`] trait, changes while it is in the map. This is normally only
+/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
+/// The behavior resulting from such a logic error is not specified, but will
+/// be encapsulated to the `HashMap` that observed the logic error and not
+/// result in undefined behavior. This could include panics, incorrect results,
+/// aborts, memory leaks, and non-termination.
+///
+/// The hash table implementation is a Rust port of Google's [SwissTable].
+/// The original C++ version of SwissTable can be found [here], and this
+/// [CppCon talk] gives an overview of how the algorithm works.
+///
+/// [hash map]: crate::collections#use-a-hashmap-when
+/// [hashing algorithms available on crates.io]: https://crates.io/keywords/hasher
+/// [SwissTable]: https://abseil.io/blog/20180927-swisstables
+/// [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
+/// [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `HashMap<String, String>` in this example).
+/// let mut book_reviews = HashMap::new();
+///
+/// // Review some books.
+/// book_reviews.insert(
+/// "Adventures of Huckleberry Finn".to_string(),
+/// "My favorite book.".to_string(),
+/// );
+/// book_reviews.insert(
+/// "Grimms' Fairy Tales".to_string(),
+/// "Masterpiece.".to_string(),
+/// );
+/// book_reviews.insert(
+/// "Pride and Prejudice".to_string(),
+/// "Very enjoyable.".to_string(),
+/// );
+/// book_reviews.insert(
+/// "The Adventures of Sherlock Holmes".to_string(),
+/// "Eye lyked it alot.".to_string(),
+/// );
+///
+/// // Check for a specific one.
+/// // When collections store owned values (String), they can still be
+/// // queried using references (&str).
+/// if !book_reviews.contains_key("Les Misérables") {
+/// println!("We've got {} reviews, but Les Misérables ain't one.",
+/// book_reviews.len());
+/// }
+///
+/// // oops, this review has a lot of spelling mistakes, let's delete it.
+/// book_reviews.remove("The Adventures of Sherlock Holmes");
+///
+/// // Look up the values associated with some keys.
+/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
+/// for &book in &to_find {
+/// match book_reviews.get(book) {
+/// Some(review) => println!("{book}: {review}"),
+/// None => println!("{book} is unreviewed.")
+/// }
+/// }
+///
+/// // Look up the value for a key (will panic if the key is not found).
+/// println!("Review for Jane: {}", book_reviews["Pride and Prejudice"]);
+///
+/// // Iterate over everything.
+/// for (book, review) in &book_reviews {
+/// println!("{book}: \"{review}\"");
+/// }
+/// ```
+///
+/// A `HashMap` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let solar_distance = HashMap::from([
+/// ("Mercury", 0.4),
+/// ("Venus", 0.7),
+/// ("Earth", 1.0),
+/// ("Mars", 1.5),
+/// ]);
+/// ```
+///
+/// `HashMap` implements an [`Entry` API](#method.entry), which allows
+/// for complex methods of getting, setting, updating and removing keys and
+/// their values:
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `HashMap<&str, u8>` in this example).
+/// let mut player_stats = HashMap::new();
+///
+/// fn random_stat_buff() -> u8 {
+/// // could actually return some random value here - let's just return
+/// // some fixed value for now
+/// 42
+/// }
+///
+/// // insert a key only if it doesn't already exist
+/// player_stats.entry("health").or_insert(100);
+///
+/// // insert a key using a function that provides a new value only if it
+/// // doesn't already exist
+/// player_stats.entry("defence").or_insert_with(random_stat_buff);
+///
+/// // update a key, guarding against the key possibly not being set
+/// let stat = player_stats.entry("attack").or_insert(100);
+/// *stat += random_stat_buff();
+///
+/// // modify an entry before an insert with in-place mutation
+/// player_stats.entry("mana").and_modify(|mana| *mana += 200).or_insert(100);
+/// ```
+///
+/// The easiest way to use `HashMap` with a custom key type is to derive [`Eq`] and [`Hash`].
+/// We must also derive [`PartialEq`].
+///
+/// [`RefCell`]: crate::cell::RefCell
+/// [`Cell`]: crate::cell::Cell
+/// [`default`]: Default::default
+/// [`with_hasher`]: Self::with_hasher
+/// [`with_capacity_and_hasher`]: Self::with_capacity_and_hasher
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// #[derive(Hash, Eq, PartialEq, Debug)]
+/// struct Viking {
+/// name: String,
+/// country: String,
+/// }
+///
+/// impl Viking {
+/// /// Creates a new Viking.
+/// fn new(name: &str, country: &str) -> Viking {
+/// Viking { name: name.to_string(), country: country.to_string() }
+/// }
+/// }
+///
+/// // Use a HashMap to store the vikings' health points.
+/// let vikings = HashMap::from([
+/// (Viking::new("Einar", "Norway"), 25),
+/// (Viking::new("Olaf", "Denmark"), 24),
+/// (Viking::new("Harald", "Iceland"), 12),
+/// ]);
+///
+/// // Use derived implementation to print the status of the vikings.
+/// for (viking, health) in &vikings {
+/// println!("{viking:?} has {health} hp");
+/// }
+/// ```
+
+#[cfg_attr(not(test), rustc_diagnostic_item = "HashMap")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct HashMap<K, V, S = RandomState> {
+ base: base::HashMap<K, V, S>,
+}
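As a hedged illustration of the per-map hasher replacement mentioned in the documentation above: the sketch below only shows the type plumbing, using `std::hash::BuildHasherDefault` with the standard `DefaultHasher`. It is not a performance recommendation, and it gives up the per-map random seeding that `RandomState` provides.

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;

// A map whose hasher is chosen via the type parameter instead of `RandomState`.
type FixedSeedMap<K, V> = HashMap<K, V, BuildHasherDefault<DefaultHasher>>;

let mut map: FixedSeedMap<&str, u32> = FixedSeedMap::default();
map.insert("answer", 42);
assert_eq!(map["answer"], 42);
```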
+
+impl<K, V> HashMap<K, V, RandomState> {
+ /// Creates an empty `HashMap`.
+ ///
+ /// The hash map is initially created with a capacity of 0, so it will not allocate until it
+ /// is first inserted into.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, i32> = HashMap::new();
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new() -> HashMap<K, V, RandomState> {
+ Default::default()
+ }
+
+ /// Creates an empty `HashMap` with at least the specified capacity.
+ ///
+ /// The hash map will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the hash map will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, i32> = HashMap::with_capacity(10);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_capacity(capacity: usize) -> HashMap<K, V, RandomState> {
+ HashMap::with_capacity_and_hasher(capacity, Default::default())
+ }
+}
+
+impl<K, V, S> HashMap<K, V, S> {
+ /// Creates an empty `HashMap` which will use the given hash builder to hash
+ /// keys.
+ ///
+ /// The created map has the default initial capacity.
+ ///
+ /// Warning: `hash_builder` is normally randomly generated, and
+ /// is designed to allow HashMaps to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+ /// the HashMap to be useful, see its documentation for details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// let s = RandomState::new();
+ /// let mut map = HashMap::with_hasher(s);
+ /// map.insert(1, 2);
+ /// ```
+ #[inline]
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub fn with_hasher(hash_builder: S) -> HashMap<K, V, S> {
+ HashMap { base: base::HashMap::with_hasher(hash_builder) }
+ }
+
+ /// Creates an empty `HashMap` with at least the specified capacity, using
+ /// `hasher` to hash the keys.
+ ///
+ /// The hash map will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the hash map will not allocate.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow HashMaps to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// The `hasher` passed should implement the [`BuildHasher`] trait for
+ /// the HashMap to be useful, see its documentation for details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// let s = RandomState::new();
+ /// let mut map = HashMap::with_capacity_and_hasher(10, s);
+ /// map.insert(1, 2);
+ /// ```
+ #[inline]
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> HashMap<K, V, S> {
+ HashMap { base: base::HashMap::with_capacity_and_hasher(capacity, hasher) }
+ }
+
+ /// Returns the number of elements the map can hold without reallocating.
+ ///
+ /// This number is a lower bound; the `HashMap<K, V>` might be able to hold
+ /// more, but is guaranteed to be able to hold at least this many.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let map: HashMap<i32, i32> = HashMap::with_capacity(100);
+ /// assert!(map.capacity() >= 100);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.base.capacity()
+ }
+
+ /// An iterator visiting all keys in arbitrary order.
+ /// The iterator element type is `&'a K`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// for key in map.keys() {
+ /// println!("{key}");
+ /// }
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over keys takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn keys(&self) -> Keys<'_, K, V> {
+ Keys { inner: self.iter() }
+ }
+
+ /// Creates a consuming iterator visiting all the keys in arbitrary order.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `K`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// let mut vec: Vec<&str> = map.into_keys().collect();
+ /// // The `IntoKeys` iterator produces keys in arbitrary order, so the
+ /// // keys must be sorted to test them against a sorted array.
+ /// vec.sort_unstable();
+ /// assert_eq!(vec, ["a", "b", "c"]);
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over keys takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
+ pub fn into_keys(self) -> IntoKeys<K, V> {
+ IntoKeys { inner: self.into_iter() }
+ }
+
+ /// An iterator visiting all values in arbitrary order.
+ /// The iterator element type is `&'a V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// for val in map.values() {
+ /// println!("{val}");
+ /// }
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over values takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn values(&self) -> Values<'_, K, V> {
+ Values { inner: self.iter() }
+ }
+
+ /// An iterator visiting all values mutably in arbitrary order.
+ /// The iterator element type is `&'a mut V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// for val in map.values_mut() {
+ /// *val = *val + 10;
+ /// }
+ ///
+ /// for val in map.values() {
+ /// println!("{val}");
+ /// }
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over values takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[stable(feature = "map_values_mut", since = "1.10.0")]
+ pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
+ ValuesMut { inner: self.iter_mut() }
+ }
+
+ /// Creates a consuming iterator visiting all the values in arbitrary order.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// let mut vec: Vec<i32> = map.into_values().collect();
+ /// // The `IntoValues` iterator produces values in arbitrary order, so
+ /// // the values must be sorted to test them against a sorted array.
+ /// vec.sort_unstable();
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over values takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
+ pub fn into_values(self) -> IntoValues<K, V> {
+ IntoValues { inner: self.into_iter() }
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order.
+ /// The iterator element type is `(&'a K, &'a V)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// for (key, val) in map.iter() {
+ /// println!("key: {key} val: {val}");
+ /// }
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over the map takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[rustc_lint_query_instability]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter { base: self.base.iter() }
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order,
+ /// with mutable references to the values.
+ /// The iterator element type is `(&'a K, &'a mut V)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// // Update all values
+ /// for (_, val) in map.iter_mut() {
+ /// *val *= 2;
+ /// }
+ ///
+ /// for (key, val) in &map {
+ /// println!("key: {key} val: {val}");
+ /// }
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over the map takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[rustc_lint_query_instability]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut { base: self.base.iter_mut() }
+ }
+
+ /// Returns the number of elements in the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut a = HashMap::new();
+ /// assert_eq!(a.len(), 0);
+ /// a.insert(1, "a");
+ /// assert_eq!(a.len(), 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.base.len()
+ }
+
+ /// Returns `true` if the map contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut a = HashMap::new();
+ /// assert!(a.is_empty());
+ /// a.insert(1, "a");
+ /// assert!(!a.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.base.is_empty()
+ }
+
+ /// Clears the map, returning all key-value pairs as an iterator. Keeps the
+ /// allocated memory for reuse.
+ ///
+ /// If the returned iterator is dropped before being fully consumed, it
+ /// drops the remaining key-value pairs. The returned iterator keeps a
+ /// mutable borrow on the map to optimize its implementation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut a = HashMap::new();
+ /// a.insert(1, "a");
+ /// a.insert(2, "b");
+ ///
+ /// for (k, v) in a.drain().take(1) {
+ /// assert!(k == 1 || k == 2);
+ /// assert!(v == "a" || v == "b");
+ /// }
+ ///
+ /// assert!(a.is_empty());
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain(&mut self) -> Drain<'_, K, V> {
+ Drain { base: self.base.drain() }
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns true, the element is removed from the map and yielded.
+ /// If the closure returns false, or panics, the element remains in the map and will not be
+ /// yielded.
+ ///
+ /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of
+ /// whether you choose to keep or remove it.
+ ///
+ /// If the iterator is only partially consumed or not consumed at all, each of the remaining
+ /// elements will still be subjected to the closure and removed and dropped if it returns true.
+ ///
+ /// It is unspecified how many more elements will be subjected to the closure
+ /// if a panic occurs in the closure, or a panic occurs while dropping an element,
+ /// or if the `DrainFilter` value is leaked.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a map into even and odd keys, reusing the original map:
+ ///
+ /// ```
+ /// #![feature(hash_drain_filter)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
+ /// let drained: HashMap<i32, i32> = map.drain_filter(|k, _v| k % 2 == 0).collect();
+ ///
+ /// let mut evens = drained.keys().copied().collect::<Vec<_>>();
+ /// let mut odds = map.keys().copied().collect::<Vec<_>>();
+ /// evens.sort();
+ /// odds.sort();
+ ///
+ /// assert_eq!(evens, vec![0, 2, 4, 6]);
+ /// assert_eq!(odds, vec![1, 3, 5, 7]);
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[unstable(feature = "hash_drain_filter", issue = "59618")]
+ pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, K, V, F>
+ where
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ DrainFilter { base: self.base.drain_filter(pred) }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all pairs `(k, v)` for which `f(&k, &mut v)` returns `false`.
+ /// The elements are visited in unsorted (and unspecified) order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x*10)).collect();
+ /// map.retain(|&k, _| k % 2 == 0);
+ /// assert_eq!(map.len(), 4);
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, this operation takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "retain_hash_collection", since = "1.18.0")]
+ pub fn retain<F>(&mut self, f: F)
+ where
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ self.base.retain(f)
+ }
+
+ /// Clears the map, removing all key-value pairs. Keeps the allocated memory
+ /// for reuse.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut a = HashMap::new();
+ /// a.insert(1, "a");
+ /// a.clear();
+ /// assert!(a.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.base.clear();
+ }
+
+ /// Returns a reference to the map's [`BuildHasher`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// let hasher = RandomState::new();
+ /// let map: HashMap<i32, i32> = HashMap::with_hasher(hasher);
+ /// let hasher: &RandomState = map.hasher();
+ /// ```
+ #[inline]
+ #[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
+ pub fn hasher(&self) -> &S {
+ self.base.hasher()
+ }
+}
+
+impl<K, V, S> HashMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher,
+{
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the `HashMap`. The collection may reserve more space to speculatively
+ /// avoid frequent reallocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new allocation size overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, i32> = HashMap::new();
+ /// map.reserve(10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.base.reserve(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the `HashMap`. The collection may reserve more space to speculatively
+ /// avoid frequent reallocations. After calling `try_reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional` if
+ /// it returns `Ok(())`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, isize> = HashMap::new();
+ /// map.try_reserve(10).expect("why is the test harness OOMing on a handful of bytes?");
+ /// ```
+ #[inline]
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.base.try_reserve(additional).map_err(map_try_reserve_error)
+ }
+
+ /// Shrinks the capacity of the map as much as possible. It will drop
+ /// down as much as possible while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<i32, i32> = HashMap::with_capacity(100);
+ /// map.insert(1, 2);
+ /// map.insert(3, 4);
+ /// assert!(map.capacity() >= 100);
+ /// map.shrink_to_fit();
+ /// assert!(map.capacity() >= 2);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.base.shrink_to_fit();
+ }
+
+ /// Shrinks the capacity of the map with a lower limit. It will drop
+ /// down no lower than the supplied limit while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<i32, i32> = HashMap::with_capacity(100);
+ /// map.insert(1, 2);
+ /// map.insert(3, 4);
+ /// assert!(map.capacity() >= 100);
+ /// map.shrink_to(10);
+ /// assert!(map.capacity() >= 10);
+ /// map.shrink_to(0);
+ /// assert!(map.capacity() >= 2);
+ /// ```
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.base.shrink_to(min_capacity);
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut letters = HashMap::new();
+ ///
+ /// for ch in "a short treatise on fungi".chars() {
+ /// letters.entry(ch).and_modify(|counter| *counter += 1).or_insert(1);
+ /// }
+ ///
+ /// assert_eq!(letters[&'s'], 2);
+ /// assert_eq!(letters[&'t'], 3);
+ /// assert_eq!(letters[&'u'], 1);
+ /// assert_eq!(letters.get(&'y'), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ map_entry(self.base.rustc_entry(key))
+ }
+
+ /// Returns a reference to the value corresponding to the key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.get(&1), Some(&"a"));
+ /// assert_eq!(map.get(&2), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.get(k)
+ }
+
+ /// Returns the key-value pair corresponding to the supplied key.
+ ///
+ /// The supplied key may be any borrowed form of the map's key type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.get_key_value(&1), Some((&1, &"a")));
+ /// assert_eq!(map.get_key_value(&2), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "map_get_key_value", since = "1.40.0")]
+ pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.get_key_value(k)
+ }
+
+ /// Attempts to get mutable references to `N` values in the map at once.
+ ///
+ /// Returns an array of length `N` with the results of each query. For soundness, at most one
+ /// mutable reference will be returned to any value. `None` will be returned if any of the
+ /// keys are duplicates or missing.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_many_mut)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut libraries = HashMap::new();
+ /// libraries.insert("Bodleian Library".to_string(), 1602);
+ /// libraries.insert("Athenæum".to_string(), 1807);
+ /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691);
+ /// libraries.insert("Library of Congress".to_string(), 1800);
+ ///
+ /// let got = libraries.get_many_mut([
+ /// "Athenæum",
+ /// "Library of Congress",
+ /// ]);
+ /// assert_eq!(
+ /// got,
+ /// Some([
+ /// &mut 1807,
+ /// &mut 1800,
+ /// ]),
+ /// );
+ ///
+ /// // Missing keys result in None
+ /// let got = libraries.get_many_mut([
+ /// "Athenæum",
+ /// "New York Public Library",
+ /// ]);
+ /// assert_eq!(got, None);
+ ///
+ /// // Duplicate keys result in None
+ /// let got = libraries.get_many_mut([
+ /// "Athenæum",
+ /// "Athenæum",
+ /// ]);
+ /// assert_eq!(got, None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "map_many_mut", issue = "97601")]
+ pub fn get_many_mut<Q: ?Sized, const N: usize>(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.get_many_mut(ks)
+ }
+
+ /// Attempts to get mutable references to `N` values in the map at once, without validating that
+ /// the values are unique.
+ ///
+ /// Returns an array of length `N` with the results of each query. `None` will be returned if
+ /// any of the keys are missing.
+ ///
+ /// For a safe alternative see [`get_many_mut`](Self::get_many_mut).
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting
+ /// references are not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_many_mut)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut libraries = HashMap::new();
+ /// libraries.insert("Bodleian Library".to_string(), 1602);
+ /// libraries.insert("Athenæum".to_string(), 1807);
+ /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691);
+ /// libraries.insert("Library of Congress".to_string(), 1800);
+ ///
+ /// let got = unsafe { libraries.get_many_unchecked_mut([
+ /// "Athenæum",
+ /// "Library of Congress",
+ /// ]) };
+ /// assert_eq!(
+ /// got,
+ /// Some([
+ /// &mut 1807,
+ /// &mut 1800,
+ /// ]),
+ /// );
+ ///
+ /// // Missing keys result in None
+ /// let got = unsafe { libraries.get_many_unchecked_mut([
+ /// "Athenæum",
+ /// "New York Public Library",
+ /// ]) };
+ /// assert_eq!(got, None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "map_many_mut", issue = "97601")]
+ pub unsafe fn get_many_unchecked_mut<Q: ?Sized, const N: usize>(
+ &mut self,
+ ks: [&Q; N],
+ ) -> Option<[&'_ mut V; N]>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.get_many_unchecked_mut(ks)
+ }
+
+ /// Returns `true` if the map contains a value for the specified key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.contains_key(&1), true);
+ /// assert_eq!(map.contains_key(&2), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.contains_key(k)
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(1, "a");
+ /// if let Some(x) = map.get_mut(&1) {
+ /// *x = "b";
+ /// }
+ /// assert_eq!(map[&1], "b");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.get_mut(k)
+ }
+
+ /// Inserts a key-value pair into the map.
+ ///
+ /// If the map did not have this key present, [`None`] is returned.
+ ///
+ /// If the map did have this key present, the value is updated, and the old
+ /// value is returned. The key is not updated, though; this matters for
+ /// types that can be `==` without being identical. See the [module-level
+ /// documentation] for more.
+ ///
+ /// [module-level documentation]: crate::collections#insert-and-complex-keys
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// assert_eq!(map.insert(37, "a"), None);
+ /// assert_eq!(map.is_empty(), false);
+ ///
+ /// map.insert(37, "b");
+ /// assert_eq!(map.insert(37, "c"), Some("b"));
+ /// assert_eq!(map[&37], "c");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, k: K, v: V) -> Option<V> {
+ self.base.insert(k, v)
+ }
+
+ /// Tries to insert a key-value pair into the map, and returns
+ /// a mutable reference to the value in the entry.
+ ///
+ /// If the map already had this key present, nothing is updated, and
+ /// an error containing the occupied entry and the value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_try_insert)]
+ ///
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// assert_eq!(map.try_insert(37, "a").unwrap(), &"a");
+ ///
+ /// let err = map.try_insert(37, "b").unwrap_err();
+ /// assert_eq!(err.entry.key(), &37);
+ /// assert_eq!(err.entry.get(), &"a");
+ /// assert_eq!(err.value, "b");
+ /// ```
+ #[unstable(feature = "map_try_insert", issue = "82766")]
+ pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, OccupiedError<'_, K, V>> {
+ match self.entry(key) {
+ Occupied(entry) => Err(OccupiedError { entry, value }),
+ Vacant(entry) => Ok(entry.insert(value)),
+ }
+ }
+
+ /// Removes a key from the map, returning the value at the key if the key
+ /// was previously in the map.
+ ///
+ /// The key may be any borrowed form of the map's key type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.remove(&1), Some("a"));
+ /// assert_eq!(map.remove(&1), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.remove(k)
+ }
+
+ /// Removes a key from the map, returning the stored key and value if the
+ /// key was previously in the map.
+ ///
+ /// The key may be any borrowed form of the map's key type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// # fn main() {
+ /// let mut map = HashMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.remove_entry(&1), Some((1, "a")));
+ /// assert_eq!(map.remove(&1), None);
+ /// # }
+ /// ```
+ #[inline]
+ #[stable(feature = "hash_map_remove_entry", since = "1.27.0")]
+ pub fn remove_entry<Q: ?Sized>(&mut self, k: &Q) -> Option<(K, V)>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.remove_entry(k)
+ }
+}
+
+impl<K, V, S> HashMap<K, V, S>
+where
+ S: BuildHasher,
+{
+ /// Creates a raw entry builder for the HashMap.
+ ///
+ /// Raw entries provide the lowest level of control for searching and
+ /// manipulating a map. They must be manually initialized with a hash and
+ /// then manually searched. After this, insertions into a vacant entry
+ /// still require an owned key to be provided.
+ ///
+ /// Raw entries are useful for such exotic situations as:
+ ///
+ /// * Hash memoization
+ /// * Deferring the creation of an owned key until it is known to be required
+ /// * Using a search key that doesn't work with the Borrow trait
+ /// * Using custom comparison logic without newtype wrappers
+ ///
+ /// Because raw entries provide much more low-level control, it's much easier
+ /// to put the HashMap into an inconsistent state which, while memory-safe,
+ /// will cause the map to produce seemingly random results. Higher-level and
+ /// more foolproof APIs like `entry` should be preferred when possible.
+ ///
+ /// In particular, the hash used to initialize the raw entry must still be
+ /// consistent with the hash of the key that is ultimately stored in the entry.
+ /// This is because implementations of HashMap may need to recompute hashes
+ /// when resizing, at which point only the keys are available.
+ ///
+ /// Raw entries give mutable access to the keys. This must not be used
+ /// to modify how the key would compare or hash, as the map will not re-evaluate
+ /// where the key should go, meaning the keys may become "lost" if their
+ /// location does not reflect their state. For instance, if you change a key
+ /// so that the map now contains keys which compare equal, search may start
+ /// acting erratically, with two keys randomly masking each other. Implementations
+ /// are free to assume this doesn't happen (within the limits of memory-safety).
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, S> {
+ RawEntryBuilderMut { map: self }
+ }
+
+ /// Creates a raw immutable entry builder for the HashMap.
+ ///
+ /// Raw entries provide the lowest level of control for searching and
+ /// manipulating a map. They must be manually initialized with a hash and
+ /// then manually searched.
+ ///
+ /// This is useful for
+ /// * Hash memoization
+ /// * Using a search key that doesn't work with the Borrow trait
+ /// * Using custom comparison logic without newtype wrappers
+ ///
+ /// Unless you are in such a situation, higher-level and more foolproof APIs like
+ /// `get` should be preferred.
+ ///
+ /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, S> {
+ RawEntryBuilder { map: self }
+ }
+}
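A sketch of the "deferring the creation of an owned key" use case described above. It assumes the unstable `hash_raw_entry` feature and the raw-entry methods `from_key` and `or_insert_with` on the builder/entry types (not shown in this excerpt); the owned `String` key is only allocated when the looked-up word is not already present.

```rust
#![feature(hash_raw_entry)]
use std::collections::HashMap;

fn count_words<'a>(words: impl IntoIterator<Item = &'a str>) -> HashMap<String, u32> {
    let mut counts: HashMap<String, u32> = HashMap::new();
    for word in words {
        // Search with a borrowed `&str`; build the owned key only if vacant.
        let (_key, count) = counts
            .raw_entry_mut()
            .from_key(word)
            .or_insert_with(|| (word.to_string(), 0));
        *count += 1;
    }
    counts
}

let counts = count_words(["apple", "pear", "apple"]);
assert_eq!(counts["apple"], 2);
assert_eq!(counts["pear"], 1);
```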
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> Clone for HashMap<K, V, S>
+where
+ K: Clone,
+ V: Clone,
+ S: Clone,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self { base: self.base.clone() }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.base.clone_from(&other.base);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> PartialEq for HashMap<K, V, S>
+where
+ K: Eq + Hash,
+ V: PartialEq,
+ S: BuildHasher,
+{
+ fn eq(&self, other: &HashMap<K, V, S>) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ self.iter().all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> Eq for HashMap<K, V, S>
+where
+ K: Eq + Hash,
+ V: Eq,
+ S: BuildHasher,
+{
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> Debug for HashMap<K, V, S>
+where
+ K: Debug,
+ V: Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_map().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> Default for HashMap<K, V, S>
+where
+ S: Default,
+{
+ /// Creates an empty `HashMap<K, V, S>`, with the `Default` value for the hasher.
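+ ///
+ /// # Examples
+ ///
+ /// A brief sketch; with the default `RandomState` hasher this is equivalent
+ /// to calling `HashMap::new`.
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map: HashMap<&str, i32> = Default::default();
+ /// assert!(map.is_empty());
+ /// ```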
+ #[inline]
+ fn default() -> HashMap<K, V, S> {
+ HashMap::with_hasher(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, Q: ?Sized, V, S> Index<&Q> for HashMap<K, V, S>
+where
+ K: Eq + Hash + Borrow<Q>,
+ Q: Eq + Hash,
+ S: BuildHasher,
+{
+ type Output = V;
+
+ /// Returns a reference to the value corresponding to the supplied key.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key is not present in the `HashMap`.
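+ ///
+ /// # Examples
+ ///
+ /// A short sketch of indexing into a map; missing keys panic, so `get` is
+ /// the fallible alternative.
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map = HashMap::from([("a", 1)]);
+ ///
+ /// assert_eq!(map["a"], 1);
+ /// // `map["b"]` would panic; use `get` when the key may be absent.
+ /// assert_eq!(map.get("b"), None);
+ /// ```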
+ #[inline]
+ fn index(&self, key: &Q) -> &V {
+ self.get(key).expect("no entry found for key")
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+// Note: since this is currently the most convenient built-in way to construct
+// a HashMap, a simple usage of this function must not *require* the user
+// to provide a type annotation in order to infer the third type parameter
+// (the hasher parameter, conventionally "S").
+// To that end, this impl is defined using RandomState as the concrete
+// type of S, rather than being generic over `S: BuildHasher + Default`.
+// It is expected that users who want to specify a hasher will manually use
+// `with_capacity_and_hasher`.
+// If type parameter defaults worked on impls, and if type parameter
+// defaults could be mixed with const generics, then perhaps
+// this could be generalized.
+// See also the equivalent impl on HashSet.
+impl<K, V, const N: usize> From<[(K, V); N]> for HashMap<K, V, RandomState>
+where
+ K: Eq + Hash,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map1 = HashMap::from([(1, 2), (3, 4)]);
+ /// let map2: HashMap<_, _> = [(1, 2), (3, 4)].into();
+ /// assert_eq!(map1, map2);
+ /// ```
+ fn from(arr: [(K, V); N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+/// An iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: HashMap::iter
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter = map.iter();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, K: 'a, V: 'a> {
+ base: base::Iter<'a, K, V>,
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Iter<'_, K, V> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Iter { base: self.base.clone() }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K: Debug, V: Debug> fmt::Debug for Iter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: HashMap::iter_mut
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let mut map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter = map.iter_mut();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, K: 'a, V: 'a> {
+ base: base::IterMut<'a, K, V>,
+}
+
+impl<'a, K, V> IterMut<'a, K, V> {
+ /// Returns an iterator of references over the remaining items.
+ #[inline]
+ pub(super) fn iter(&self) -> Iter<'_, K, V> {
+ Iter { base: self.base.rustc_iter() }
+ }
+}
+
+/// An owning iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashMap`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: IntoIterator::into_iter
+/// [`IntoIterator`]: crate::iter::IntoIterator
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter = map.into_iter();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<K, V> {
+ base: base::IntoIter<K, V>,
+}
+
+impl<K, V> IntoIter<K, V> {
+ /// Returns an iterator of references over the remaining items.
+ #[inline]
+ pub(super) fn iter(&self) -> Iter<'_, K, V> {
+ Iter { base: self.base.rustc_iter() }
+ }
+}
+
+/// An iterator over the keys of a `HashMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: HashMap::keys
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter_keys = map.keys();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Keys<'a, K: 'a, V: 'a> {
+ inner: Iter<'a, K, V>,
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Keys<'_, K, V> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Keys { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K: Debug, V> fmt::Debug for Keys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// An iterator over the values of a `HashMap`.
+///
+/// This `struct` is created by the [`values`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: HashMap::values
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter_values = map.values();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Values<'a, K: 'a, V: 'a> {
+ inner: Iter<'a, K, V>,
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Values<'_, K, V> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Values { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K, V: Debug> fmt::Debug for Values<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A draining iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`drain`]: HashMap::drain
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let mut map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter = map.drain();
+/// ```
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<'a, K: 'a, V: 'a> {
+ base: base::Drain<'a, K, V>,
+}
+
+impl<'a, K, V> Drain<'a, K, V> {
+ /// Returns an iterator of references over the remaining items.
+ #[inline]
+ pub(super) fn iter(&self) -> Iter<'_, K, V> {
+ Iter { base: self.base.rustc_iter() }
+ }
+}
+
+/// A draining, filtering iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`drain_filter`] method on [`HashMap`].
+///
+/// [`drain_filter`]: HashMap::drain_filter
+///
+/// # Example
+///
+/// ```
+/// #![feature(hash_drain_filter)]
+///
+/// use std::collections::HashMap;
+///
+/// let mut map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter = map.drain_filter(|_k, v| *v % 2 == 0);
+/// ```
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+pub struct DrainFilter<'a, K, V, F>
+where
+ F: FnMut(&K, &mut V) -> bool,
+{
+ base: base::DrainFilter<'a, K, V, F>,
+}
+
+/// A mutable iterator over the values of a `HashMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: HashMap::values_mut
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let mut map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter_values = map.values_mut();
+/// ```
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+pub struct ValuesMut<'a, K: 'a, V: 'a> {
+ inner: IterMut<'a, K, V>,
+}
+
+/// An owning iterator over the keys of a `HashMap`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`HashMap`].
+/// See its documentation for more.
+///
+/// [`into_keys`]: HashMap::into_keys
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter_keys = map.into_keys();
+/// ```
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+pub struct IntoKeys<K, V> {
+ inner: IntoIter<K, V>,
+}
+
+/// An owning iterator over the values of a `HashMap`.
+///
+/// This `struct` is created by the [`into_values`] method on [`HashMap`].
+/// See its documentation for more.
+///
+/// [`into_values`]: HashMap::into_values
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let map = HashMap::from([
+/// ("a", 1),
+/// ]);
+/// let iter_values = map.into_values();
+/// ```
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+pub struct IntoValues<K, V> {
+ inner: IntoIter<K, V>,
+}
+
+/// A builder for computing where in a HashMap a key-value pair would be stored.
+///
+/// See the [`HashMap::raw_entry_mut`] docs for usage examples.
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+pub struct RawEntryBuilderMut<'a, K: 'a, V: 'a, S: 'a> {
+ map: &'a mut HashMap<K, V, S>,
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This is a lower-level version of [`Entry`].
+///
+/// This `enum` is constructed through the [`raw_entry_mut`] method on [`HashMap`],
+/// then calling one of the methods of that [`RawEntryBuilderMut`].
+///
+/// [`raw_entry_mut`]: HashMap::raw_entry_mut
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+pub enum RawEntryMut<'a, K: 'a, V: 'a, S: 'a> {
+ /// An occupied entry.
+ Occupied(RawOccupiedEntryMut<'a, K, V, S>),
+ /// A vacant entry.
+ Vacant(RawVacantEntryMut<'a, K, V, S>),
+}
+
+/// A view into an occupied entry in a `HashMap`.
+/// It is part of the [`RawEntryMut`] enum.
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+pub struct RawOccupiedEntryMut<'a, K: 'a, V: 'a, S: 'a> {
+ base: base::RawOccupiedEntryMut<'a, K, V, S>,
+}
+
+/// A view into a vacant entry in a `HashMap`.
+/// It is part of the [`RawEntryMut`] enum.
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+pub struct RawVacantEntryMut<'a, K: 'a, V: 'a, S: 'a> {
+ base: base::RawVacantEntryMut<'a, K, V, S>,
+}
+
+/// A builder for computing where in a HashMap a key-value pair would be stored.
+///
+/// See the [`HashMap::raw_entry`] docs for usage examples.
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+pub struct RawEntryBuilder<'a, K: 'a, V: 'a, S: 'a> {
+ map: &'a HashMap<K, V, S>,
+}
+
+impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S>
+where
+ S: BuildHasher,
+{
+ /// Creates a `RawEntryMut` from the given key.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn from_key<Q: ?Sized>(self, k: &Q) -> RawEntryMut<'a, K, V, S>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ map_raw_entry(self.map.base.raw_entry_mut().from_key(k))
+ }
+
+ /// Creates a `RawEntryMut` from the given key and its hash.
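+ ///
+ /// # Examples
+ ///
+ /// A sketch of hash memoization: the hash is computed once with the map's
+ /// own hasher and then reused. It must stay consistent with the key that is
+ /// ultimately stored.
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::HashMap;
+ /// use std::hash::{BuildHasher, Hash, Hasher};
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// let key = "poneyland";
+ ///
+ /// // Compute the hash up front with the map's `BuildHasher`.
+ /// let mut hasher = map.hasher().build_hasher();
+ /// key.hash(&mut hasher);
+ /// let hash = hasher.finish();
+ ///
+ /// map.raw_entry_mut()
+ ///     .from_key_hashed_nocheck(hash, key)
+ ///     .or_insert_with(|| (key.to_string(), 3));
+ /// assert_eq!(map["poneyland"], 3);
+ /// ```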
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S>
+ where
+ K: Borrow<Q>,
+ Q: Eq,
+ {
+ map_raw_entry(self.map.base.raw_entry_mut().from_key_hashed_nocheck(hash, k))
+ }
+
+ /// Creates a `RawEntryMut` from the given hash.
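+ ///
+ /// # Examples
+ ///
+ /// A sketch of searching by a precomputed hash plus a custom match closure;
+ /// the hash must come from the map's own hasher for the search to find anything.
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::hash_map::RawEntryMut;
+ /// use std::collections::HashMap;
+ /// use std::hash::{BuildHasher, Hash, Hasher};
+ ///
+ /// let mut map = HashMap::from([("a", 1)]);
+ ///
+ /// let mut hasher = map.hasher().build_hasher();
+ /// "a".hash(&mut hasher);
+ /// let hash = hasher.finish();
+ ///
+ /// if let RawEntryMut::Occupied(mut o) = map.raw_entry_mut().from_hash(hash, |k| *k == "a") {
+ ///     *o.get_mut() += 1;
+ /// }
+ /// assert_eq!(map["a"], 2);
+ /// ```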
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn from_hash<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
+ where
+ for<'b> F: FnMut(&'b K) -> bool,
+ {
+ map_raw_entry(self.map.base.raw_entry_mut().from_hash(hash, is_match))
+ }
+}
+
+impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S>
+where
+ S: BuildHasher,
+{
+ /// Access an entry by key.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn from_key<Q: ?Sized>(self, k: &Q) -> Option<(&'a K, &'a V)>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.map.base.raw_entry().from_key(k)
+ }
+
+ /// Access an entry by a key and its hash.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.map.base.raw_entry().from_key_hashed_nocheck(hash, k)
+ }
+
+ /// Access an entry by hash.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn from_hash<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
+ where
+ F: FnMut(&K) -> bool,
+ {
+ self.map.base.raw_entry().from_hash(hash, is_match)
+ }
+}
+
+impl<'a, K, V, S> RawEntryMut<'a, K, V, S> {
+ /// Ensures a value is in the entry by inserting the default if empty, and returns
+ /// mutable references to the key and value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 3);
+ /// assert_eq!(map["poneyland"], 3);
+ ///
+ /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2;
+ /// assert_eq!(map["poneyland"], 6);
+ /// ```
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V)
+ where
+ K: Hash,
+ S: BuildHasher,
+ {
+ match self {
+ RawEntryMut::Occupied(entry) => entry.into_key_value(),
+ RawEntryMut::Vacant(entry) => entry.insert(default_key, default_val),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty,
+ /// and returns mutable references to the key and value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, String> = HashMap::new();
+ ///
+ /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| {
+ /// ("poneyland", "hoho".to_string())
+ /// });
+ ///
+ /// assert_eq!(map["poneyland"], "hoho".to_string());
+ /// ```
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn or_insert_with<F>(self, default: F) -> (&'a mut K, &'a mut V)
+ where
+ F: FnOnce() -> (K, V),
+ K: Hash,
+ S: BuildHasher,
+ {
+ match self {
+ RawEntryMut::Occupied(entry) => entry.into_key_value(),
+ RawEntryMut::Vacant(entry) => {
+ let (k, v) = default();
+ entry.insert(k, v)
+ }
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// map.raw_entry_mut()
+ /// .from_key("poneyland")
+ /// .and_modify(|_k, v| { *v += 1 })
+ /// .or_insert("poneyland", 42);
+ /// assert_eq!(map["poneyland"], 42);
+ ///
+ /// map.raw_entry_mut()
+ /// .from_key("poneyland")
+ /// .and_modify(|_k, v| { *v += 1 })
+ /// .or_insert("poneyland", 0);
+ /// assert_eq!(map["poneyland"], 43);
+ /// ```
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut K, &mut V),
+ {
+ match self {
+ RawEntryMut::Occupied(mut entry) => {
+ {
+ let (k, v) = entry.get_key_value_mut();
+ f(k, v);
+ }
+ RawEntryMut::Occupied(entry)
+ }
+ RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry),
+ }
+ }
+}
+
+impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> {
+ /// Gets a reference to the key in the entry.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn key(&self) -> &K {
+ self.base.key()
+ }
+
+ /// Gets a mutable reference to the key in the entry.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn key_mut(&mut self) -> &mut K {
+ self.base.key_mut()
+ }
+
+ /// Converts the entry into a mutable reference to the key in the entry
+ /// with a lifetime bound to the map itself.
+ #[inline]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn into_key(self) -> &'a mut K {
+ self.base.into_key()
+ }
+
+ /// Gets a reference to the value in the entry.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn get(&self) -> &V {
+ self.base.get()
+ }
+
+ /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry
+ /// with a lifetime bound to the map itself.
+ #[inline]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn into_mut(self) -> &'a mut V {
+ self.base.into_mut()
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn get_mut(&mut self) -> &mut V {
+ self.base.get_mut()
+ }
+
+ /// Gets a reference to the key and value in the entry.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn get_key_value(&mut self) -> (&K, &V) {
+ self.base.get_key_value()
+ }
+
+ /// Gets a mutable reference to the key and value in the entry.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) {
+ self.base.get_key_value_mut()
+ }
+
+ /// Converts the `OccupiedEntry` into a mutable reference to the key and value in the entry
+ /// with a lifetime bound to the map itself.
+ #[inline]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn into_key_value(self) -> (&'a mut K, &'a mut V) {
+ self.base.into_key_value()
+ }
+
+ /// Sets the value of the entry, and returns the entry's old value.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn insert(&mut self, value: V) -> V {
+ self.base.insert(value)
+ }
+
+ /// Sets the key of the entry, and returns the entry's old key.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn insert_key(&mut self, key: K) -> K {
+ self.base.insert_key(key)
+ }
+
+ /// Takes the value out of the entry, and returns it.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn remove(self) -> V {
+ self.base.remove()
+ }
+
+ /// Takes ownership of the key and value from the map.
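+ ///
+ /// # Examples
+ ///
+ /// A short sketch of removing an entry found through the raw entry API.
+ ///
+ /// ```
+ /// #![feature(hash_raw_entry)]
+ /// use std::collections::hash_map::RawEntryMut;
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::from([("a", 1)]);
+ ///
+ /// if let RawEntryMut::Occupied(o) = map.raw_entry_mut().from_key("a") {
+ ///     assert_eq!(o.remove_entry(), ("a", 1));
+ /// }
+ /// assert!(!map.contains_key("a"));
+ /// ```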
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn remove_entry(self) -> (K, V) {
+ self.base.remove_entry()
+ }
+}
+
+impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> {
+ /// Inserts the given key and value into the map,
+ /// and returns mutable references to them.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V)
+ where
+ K: Hash,
+ S: BuildHasher,
+ {
+ self.base.insert(key, value)
+ }
+
+ /// Inserts the given key and value into the map, using the provided hash for the key,
+ /// and returns mutable references to them.
+ #[inline]
+ #[unstable(feature = "hash_raw_entry", issue = "56167")]
+ pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V)
+ where
+ K: Hash,
+ S: BuildHasher,
+ {
+ self.base.insert_hashed_nocheck(hash, key, value)
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+impl<K, V, S> Debug for RawEntryBuilderMut<'_, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RawEntryBuilder").finish_non_exhaustive()
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+impl<K: Debug, V: Debug, S> Debug for RawEntryMut<'_, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(),
+ RawEntryMut::Occupied(ref o) => f.debug_tuple("RawEntry").field(o).finish(),
+ }
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+impl<K: Debug, V: Debug, S> Debug for RawOccupiedEntryMut<'_, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RawOccupiedEntryMut")
+ .field("key", self.key())
+ .field("value", self.get())
+ .finish_non_exhaustive()
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+impl<K, V, S> Debug for RawVacantEntryMut<'_, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RawVacantEntryMut").finish_non_exhaustive()
+ }
+}
+
+#[unstable(feature = "hash_raw_entry", issue = "56167")]
+impl<K, V, S> Debug for RawEntryBuilder<'_, K, V, S> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RawEntryBuilder").finish_non_exhaustive()
+ }
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
+///
+/// [`entry`]: HashMap::entry
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "HashMapEntry")]
+pub enum Entry<'a, K: 'a, V: 'a> {
+ /// An occupied entry.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Occupied(#[stable(feature = "rust1", since = "1.0.0")] OccupiedEntry<'a, K, V>),
+
+ /// A vacant entry.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Vacant(#[stable(feature = "rust1", since = "1.0.0")] VacantEntry<'a, K, V>),
+}
+
+#[stable(feature = "debug_hash_map", since = "1.12.0")]
+impl<K: Debug, V: Debug> Debug for Entry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
+ Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
+ }
+ }
+}
+
+/// A view into an occupied entry in a `HashMap`.
+/// It is part of the [`Entry`] enum.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
+ base: base::RustcOccupiedEntry<'a, K, V>,
+}
+
+#[stable(feature = "debug_hash_map", since = "1.12.0")]
+impl<K: Debug, V: Debug> Debug for OccupiedEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedEntry")
+ .field("key", self.key())
+ .field("value", self.get())
+ .finish_non_exhaustive()
+ }
+}
+
+/// A view into a vacant entry in a `HashMap`.
+/// It is part of the [`Entry`] enum.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct VacantEntry<'a, K: 'a, V: 'a> {
+ base: base::RustcVacantEntry<'a, K, V>,
+}
+
+#[stable(feature = "debug_hash_map", since = "1.12.0")]
+impl<K: Debug, V> Debug for VacantEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("VacantEntry").field(self.key()).finish()
+ }
+}
+
+/// The error returned by [`try_insert`](HashMap::try_insert) when the key already exists.
+///
+/// Contains the occupied entry, and the value that was not inserted.
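+///
+/// # Examples
+///
+/// A sketch of how this error is produced and inspected through
+/// [`try_insert`](HashMap::try_insert).
+///
+/// ```
+/// #![feature(map_try_insert)]
+/// use std::collections::HashMap;
+///
+/// let mut map = HashMap::from([("a", 1)]);
+///
+/// let err = map.try_insert("a", 2).unwrap_err();
+/// assert_eq!(err.entry.key(), &"a");
+/// assert_eq!(err.entry.get(), &1);
+/// assert_eq!(err.value, 2);
+/// ```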
+#[unstable(feature = "map_try_insert", issue = "82766")]
+pub struct OccupiedError<'a, K: 'a, V: 'a> {
+ /// The entry in the map that was already occupied.
+ pub entry: OccupiedEntry<'a, K, V>,
+ /// The value which was not inserted, because the entry was already occupied.
+ pub value: V,
+}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<K: Debug, V: Debug> Debug for OccupiedError<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedError")
+ .field("key", self.entry.key())
+ .field("old_value", self.entry.get())
+ .field("new_value", &self.value)
+ .finish_non_exhaustive()
+ }
+}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: Debug, V: Debug> fmt::Display for OccupiedError<'a, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "failed to insert {:?}, key {:?} already exists with value {:?}",
+ self.value,
+ self.entry.key(),
+ self.entry.get(),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V, S> IntoIterator for &'a HashMap<K, V, S> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ #[inline]
+ #[rustc_lint_query_instability]
+ fn into_iter(self) -> Iter<'a, K, V> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V, S> IntoIterator for &'a mut HashMap<K, V, S> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ #[inline]
+ #[rustc_lint_query_instability]
+ fn into_iter(self) -> IterMut<'a, K, V> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> IntoIterator for HashMap<K, V, S> {
+ type Item = (K, V);
+ type IntoIter = IntoIter<K, V>;
+
+ /// Creates a consuming iterator, that is, one that moves each key-value
+ /// pair out of the map in arbitrary order. The map cannot be used after
+ /// calling this.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let map = HashMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// // Not possible with .iter()
+ /// let vec: Vec<(&str, i32)> = map.into_iter().collect();
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ fn into_iter(self) -> IntoIter<K, V> {
+ IntoIter { base: self.base.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.base.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Iter<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.base.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for IterMut<'_, K, V> {}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K, V> fmt::Debug for IterMut<'_, K, V>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Iterator for IntoIter<K, V> {
+ type Item = (K, V);
+
+ #[inline]
+ fn next(&mut self) -> Option<(K, V)> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for IntoIter<K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.base.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for IntoIter<K, V> {}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K: Debug, V: Debug> fmt::Debug for IntoIter<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for Keys<'a, K, V> {
+ type Item = &'a K;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a K> {
+ self.inner.next().map(|(k, _)| k)
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Keys<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for Values<'a, K, V> {
+ type Item = &'a V;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Values<'_, K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Values<'_, K, V> {}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V> Iterator for IntoKeys<K, V> {
+ type Item = K;
+
+ #[inline]
+ fn next(&mut self) -> Option<K> {
+ self.inner.next().map(|(k, _)| k)
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V> ExactSizeIterator for IntoKeys<K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V> FusedIterator for IntoKeys<K, V> {}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K: Debug, V> fmt::Debug for IntoKeys<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(k, _)| k)).finish()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V> Iterator for IntoValues<K, V> {
+ type Item = V;
+
+ #[inline]
+ fn next(&mut self) -> Option<V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V> ExactSizeIterator for IntoValues<K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V> FusedIterator for IntoValues<K, V> {}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V: Debug> fmt::Debug for IntoValues<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(_, v)| v)).finish()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<'a, K, V> Iterator for Drain<'a, K, V> {
+ type Item = (K, V);
+
+ #[inline]
+ fn next(&mut self) -> Option<(K, V)> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+#[stable(feature = "drain", since = "1.6.0")]
+impl<K, V> ExactSizeIterator for Drain<'_, K, V> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.base.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Drain<'_, K, V> {}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K, V> fmt::Debug for Drain<'_, K, V>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+impl<K, V, F> Iterator for DrainFilter<'_, K, V, F>
+where
+ F: FnMut(&K, &mut V) -> bool,
+{
+ type Item = (K, V);
+
+ #[inline]
+ fn next(&mut self) -> Option<(K, V)> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {}
+
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+impl<'a, K, V, F> fmt::Debug for DrainFilter<'a, K, V, F>
+where
+ F: FnMut(&K, &mut V) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DrainFilter").finish_non_exhaustive()
+ }
+}
+
+impl<'a, K, V> Entry<'a, K, V> {
+ /// Ensures a value is in the entry by inserting the default if empty, and returns
+ /// a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// map.entry("poneyland").or_insert(3);
+ /// assert_eq!(map["poneyland"], 3);
+ ///
+ /// *map.entry("poneyland").or_insert(10) *= 2;
+ /// assert_eq!(map["poneyland"], 6);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_insert(self, default: V) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, String> = HashMap::new();
+ /// let s = "hoho".to_string();
+ ///
+ /// map.entry("poneyland").or_insert_with(|| s);
+ ///
+ /// assert_eq!(map["poneyland"], "hoho".to_string());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(default()),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting, if empty, the result of the default function.
+ /// This method allows for generating key-derived values for insertion by providing the default
+ /// function a reference to the key that was moved during the `.entry(key)` method call.
+ ///
+ /// The reference to the moved key is provided so that cloning or copying the key is
+ /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, usize> = HashMap::new();
+ ///
+ /// map.entry("poneyland").or_insert_with_key(|key| key.chars().count());
+ ///
+ /// assert_eq!(map["poneyland"], 9);
+ /// ```
+ #[inline]
+ #[stable(feature = "or_insert_with_key", since = "1.50.0")]
+ pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => {
+ let value = default(entry.key());
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Returns a reference to this entry's key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[inline]
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ match *self {
+ Occupied(ref entry) => entry.key(),
+ Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// map.entry("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 42);
+ ///
+ /// map.entry("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 43);
+ /// ```
+ #[inline]
+ #[stable(feature = "entry_and_modify", since = "1.26.0")]
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ Occupied(mut entry) => {
+ f(entry.get_mut());
+ Occupied(entry)
+ }
+ Vacant(entry) => Vacant(entry),
+ }
+ }
+
+ /// Sets the value of the entry, and returns an `OccupiedEntry`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(entry_insert)]
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, String> = HashMap::new();
+ /// let entry = map.entry("poneyland").insert_entry("hoho".to_string());
+ ///
+ /// assert_eq!(entry.key(), &"poneyland");
+ /// ```
+ #[inline]
+ #[unstable(feature = "entry_insert", issue = "65225")]
+ pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V> {
+ match self {
+ Occupied(mut entry) => {
+ entry.insert(value);
+ entry
+ }
+ Vacant(entry) => entry.insert_entry(value),
+ }
+ }
+}
+
+impl<'a, K, V: Default> Entry<'a, K, V> {
+ /// Ensures a value is in the entry by inserting the default value if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn main() {
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
+ /// map.entry("poneyland").or_default();
+ ///
+ /// assert_eq!(map["poneyland"], None);
+ /// # }
+ /// ```
+ #[inline]
+ #[stable(feature = "entry_or_default", since = "1.28.0")]
+ pub fn or_default(self) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(Default::default()),
+ }
+ }
+}
+
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the key in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[inline]
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ self.base.key()
+ }
+
+ /// Takes ownership of the key and value from the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// // We delete the entry from the map.
+ /// o.remove_entry();
+ /// }
+ ///
+ /// assert_eq!(map.contains_key("poneyland"), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
+ pub fn remove_entry(self) -> (K, V) {
+ self.base.remove_entry()
+ }
+
+ /// Gets a reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// assert_eq!(o.get(), &12);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get(&self) -> &V {
+ self.base.get()
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ ///
+ /// If you need a reference to the `OccupiedEntry` which may outlive the
+ /// destruction of the `Entry` value, see [`into_mut`].
+ ///
+ /// [`into_mut`]: Self::into_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+ /// *o.get_mut() += 10;
+ /// assert_eq!(*o.get(), 22);
+ ///
+ /// // We can use the same Entry multiple times.
+ /// *o.get_mut() += 2;
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 24);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self) -> &mut V {
+ self.base.get_mut()
+ }
+
+ /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry
+ /// with a lifetime bound to the map itself.
+ ///
+ /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
+ ///
+ /// [`get_mut`]: Self::get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// *o.into_mut() += 10;
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 22);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_mut(self) -> &'a mut V {
+ self.base.into_mut()
+ }
+
+ /// Sets the value of the entry, and returns the entry's old value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+ /// assert_eq!(o.insert(15), 12);
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 15);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, value: V) -> V {
+ self.base.insert(value)
+ }
+
+ /// Takes the value out of the entry, and returns it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// assert_eq!(o.remove(), 12);
+ /// }
+ ///
+ /// assert_eq!(map.contains_key("poneyland"), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(self) -> V {
+ self.base.remove()
+ }
+
+ /// Replaces the entry, returning the old key and value. The new key in the hash map will be
+ /// the key used to create this entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_entry_replace)]
+ /// use std::collections::hash_map::{Entry, HashMap};
+ /// use std::rc::Rc;
+ ///
+ /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
+ /// map.insert(Rc::new("Stringthing".to_string()), 15);
+ ///
+ /// let my_key = Rc::new("Stringthing".to_string());
+ ///
+ /// if let Entry::Occupied(entry) = map.entry(my_key) {
+ /// // Also replace the key with a handle to our other key.
+ /// let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
+ /// }
+ ///
+ /// ```
+ #[inline]
+ #[unstable(feature = "map_entry_replace", issue = "44286")]
+ pub fn replace_entry(self, value: V) -> (K, V) {
+ self.base.replace_entry(value)
+ }
+
+ /// Replaces the key in the hash map with the key used to create this entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_entry_replace)]
+ /// use std::collections::hash_map::{Entry, HashMap};
+ /// use std::rc::Rc;
+ ///
+ /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
+ /// let known_strings: Vec<Rc<String>> = Vec::new();
+ ///
+ /// // Initialise known strings, run program, etc.
+ ///
+ /// reclaim_memory(&mut map, &known_strings);
+ ///
+ /// fn reclaim_memory(map: &mut HashMap<Rc<String>, u32>, known_strings: &[Rc<String>]) {
+ /// for s in known_strings {
+ /// if let Entry::Occupied(entry) = map.entry(Rc::clone(s)) {
+ /// // Replaces the entry's key with our version of it in `known_strings`.
+ /// entry.replace_key();
+ /// }
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "map_entry_replace", issue = "44286")]
+ pub fn replace_key(self) -> K {
+ self.base.replace_key()
+ }
+}
+
+impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> {
+ /// Gets a reference to the key that would be used when inserting a value
+ /// through the `VacantEntry`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[inline]
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ self.base.key()
+ }
+
+ /// Takes ownership of the key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// if let Entry::Vacant(v) = map.entry("poneyland") {
+ /// v.into_key();
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
+ pub fn into_key(self) -> K {
+ self.base.into_key()
+ }
+
+ /// Sets the value of the entry with the `VacantEntry`'s key,
+ /// and returns a mutable reference to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// if let Entry::Vacant(o) = map.entry("poneyland") {
+ /// o.insert(37);
+ /// }
+ /// assert_eq!(map["poneyland"], 37);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(self, value: V) -> &'a mut V {
+ self.base.insert(value)
+ }
+
+ /// Sets the value of the entry with the `VacantEntry`'s key,
+ /// and returns an `OccupiedEntry`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(entry_insert)]
+ /// use std::collections::HashMap;
+ /// use std::collections::hash_map::Entry;
+ ///
+ /// let mut map: HashMap<&str, u32> = HashMap::new();
+ ///
+ /// if let Entry::Vacant(o) = map.entry("poneyland") {
+ /// o.insert_entry(37);
+ /// }
+ /// assert_eq!(map["poneyland"], 37);
+ /// ```
+ #[inline]
+ #[unstable(feature = "entry_insert", issue = "65225")]
+ pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V> {
+ let base = self.base.insert_entry(value);
+ OccupiedEntry { base }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> FromIterator<(K, V)> for HashMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher + Default,
+{
+ fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> HashMap<K, V, S> {
+ let mut map = HashMap::with_hasher(Default::default());
+ map.extend(iter);
+ map
+ }
+}
+
+/// Inserts all new key-value pairs from the iterator and replaces the values of
+/// existing keys with the new values returned from the iterator.
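+///
+/// # Examples
+///
+/// A small sketch of the replacement behavior described above.
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// let mut map = HashMap::from([("a", 1)]);
+/// map.extend([("a", 10), ("b", 2)]);
+///
+/// assert_eq!(map["a"], 10);
+/// assert_eq!(map["b"], 2);
+/// ```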
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, S> Extend<(K, V)> for HashMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+ self.base.extend(iter)
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (k, v): (K, V)) {
+ self.base.insert(k, v);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.base.extend_reserve(additional);
+ }
+}
+
+#[stable(feature = "hash_extend_copy", since = "1.4.0")]
+impl<'a, K, V, S> Extend<(&'a K, &'a V)> for HashMap<K, V, S>
+where
+ K: Eq + Hash + Copy,
+ V: Copy,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
+ self.base.extend(iter)
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (&k, &v): (&'a K, &'a V)) {
+ self.base.insert(k, v);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ Extend::<(K, V)>::extend_reserve(self, additional)
+ }
+}
+
+/// `RandomState` is the default state for [`HashMap`] types.
+///
+/// A particular instance of `RandomState` will create the same instances of
+/// [`Hasher`], but the hashers created by two different `RandomState`
+/// instances are unlikely to produce the same result for the same values.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashMap;
+/// use std::collections::hash_map::RandomState;
+///
+/// let s = RandomState::new();
+/// let mut map = HashMap::with_hasher(s);
+/// map.insert(1, 2);
+/// ```
+#[derive(Clone)]
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+pub struct RandomState {
+ k0: u64,
+ k1: u64,
+}
+
+impl RandomState {
+ /// Constructs a new `RandomState` that is initialized with random keys.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// let s = RandomState::new();
+ /// ```
+ #[inline]
+ #[allow(deprecated)]
+ // rand
+ #[must_use]
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub fn new() -> RandomState {
+ // Historically this function did not cache keys from the OS and instead
+ // simply always called `rand::thread_rng().gen()` twice. In #31356 it
+ // was discovered, however, that because we re-seed the thread-local RNG
+ // from the OS periodically that this can cause excessive slowdown when
+ // many hash maps are created on a thread. To solve this performance
+ // trap we cache the first set of randomly generated keys per-thread.
+ //
+ // Later in #36481 it was discovered that exposing a deterministic
+ // iteration order allows a form of DoS attack. To counter that we
+ // increment one of the seeds on every RandomState creation, giving
+ // every corresponding HashMap a different iteration order.
+ thread_local!(static KEYS: Cell<(u64, u64)> = {
+ Cell::new(sys::hashmap_random_keys())
+ });
+
+ KEYS.with(|keys| {
+ let (k0, k1) = keys.get();
+ keys.set((k0.wrapping_add(1), k1));
+ RandomState { k0, k1 }
+ })
+ }
+}
+
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+impl BuildHasher for RandomState {
+ type Hasher = DefaultHasher;
+ #[inline]
+ #[allow(deprecated)]
+ fn build_hasher(&self) -> DefaultHasher {
+ DefaultHasher(SipHasher13::new_with_keys(self.k0, self.k1))
+ }
+}
+
+/// The default [`Hasher`] used by [`RandomState`].
+///
+/// The internal algorithm is not specified, and so it and its hashes should
+/// not be relied upon over releases.
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+#[allow(deprecated)]
+#[derive(Clone, Debug)]
+pub struct DefaultHasher(SipHasher13);
+
+impl DefaultHasher {
+ /// Creates a new `DefaultHasher`.
+ ///
+ /// Hashers created through `new` or `default` are all identical to one
+ /// another, but they are not guaranteed to match `DefaultHasher` instances
+ /// obtained in other ways, such as from a [`RandomState`].
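+ ///
+ /// # Examples
+ ///
+ /// A brief sketch of hashing a few bytes by hand; the resulting value is
+ /// unspecified and may change between releases.
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::Hasher;
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// hasher.write(b"turtles all the way down");
+ /// let _hash: u64 = hasher.finish();
+ /// ```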
+ #[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+ #[inline]
+ #[allow(deprecated)]
+ #[must_use]
+ pub fn new() -> DefaultHasher {
+ DefaultHasher(SipHasher13::new_with_keys(0, 0))
+ }
+}
+
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+impl Default for DefaultHasher {
+ /// Creates a new `DefaultHasher` using [`new`].
+ /// See its documentation for more.
+ ///
+ /// [`new`]: DefaultHasher::new
+ #[inline]
+ fn default() -> DefaultHasher {
+ DefaultHasher::new()
+ }
+}
+
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+impl Hasher for DefaultHasher {
+ // The underlying `SipHasher13` doesn't override the other
+ // `write_*` methods, so it's ok not to forward them here.
+
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.0.write(msg)
+ }
+
+ #[inline]
+ fn write_str(&mut self, s: &str) {
+ self.0.write_str(s);
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.0.finish()
+ }
+}
+
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+impl Default for RandomState {
+ /// Constructs a new `RandomState`.
+ #[inline]
+ fn default() -> RandomState {
+ RandomState::new()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for RandomState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RandomState").finish_non_exhaustive()
+ }
+}
+
+#[inline]
+fn map_entry<'a, K: 'a, V: 'a>(raw: base::RustcEntry<'a, K, V>) -> Entry<'a, K, V> {
+ match raw {
+ base::RustcEntry::Occupied(base) => Entry::Occupied(OccupiedEntry { base }),
+ base::RustcEntry::Vacant(base) => Entry::Vacant(VacantEntry { base }),
+ }
+}
+
+#[inline]
+pub(super) fn map_try_reserve_error(err: hashbrown::TryReserveError) -> TryReserveError {
+ match err {
+ hashbrown::TryReserveError::CapacityOverflow => {
+ TryReserveErrorKind::CapacityOverflow.into()
+ }
+ hashbrown::TryReserveError::AllocError { layout } => {
+ TryReserveErrorKind::AllocError { layout, non_exhaustive: () }.into()
+ }
+ }
+}
+
+#[inline]
+fn map_raw_entry<'a, K: 'a, V: 'a, S: 'a>(
+ raw: base::RawEntryMut<'a, K, V, S>,
+) -> RawEntryMut<'a, K, V, S> {
+ match raw {
+ base::RawEntryMut::Occupied(base) => RawEntryMut::Occupied(RawOccupiedEntryMut { base }),
+ base::RawEntryMut::Vacant(base) => RawEntryMut::Vacant(RawVacantEntryMut { base }),
+ }
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> {
+ v
+ }
+ fn map_val<'new>(v: HashMap<u8, &'static str>) -> HashMap<u8, &'new str> {
+ v
+ }
+ fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> {
+ v
+ }
+ fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> {
+ v
+ }
+ fn into_iter_key<'new>(v: IntoIter<&'static str, u8>) -> IntoIter<&'new str, u8> {
+ v
+ }
+ fn into_iter_val<'new>(v: IntoIter<u8, &'static str>) -> IntoIter<u8, &'new str> {
+ v
+ }
+ fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> {
+ v
+ }
+ fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> {
+ v
+ }
+ fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> {
+ v
+ }
+ fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> {
+ v
+ }
+ fn drain<'new>(
+ d: Drain<'static, &'static str, &'static str>,
+ ) -> Drain<'new, &'new str, &'new str> {
+ d
+ }
+}
diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs
new file mode 100644
index 000000000..7ebc41588
--- /dev/null
+++ b/library/std/src/collections/hash/map/tests.rs
@@ -0,0 +1,1113 @@
+use super::Entry::{Occupied, Vacant};
+use super::HashMap;
+use super::RandomState;
+use crate::assert_matches::assert_matches;
+use crate::cell::RefCell;
+use rand::{thread_rng, Rng};
+use realstd::collections::TryReserveErrorKind::*;
+
+// https://github.com/rust-lang/rust/issues/62301
+fn _assert_hashmap_is_unwind_safe() {
+ fn assert_unwind_safe<T: crate::panic::UnwindSafe>() {}
+ assert_unwind_safe::<HashMap<(), crate::cell::UnsafeCell<()>>>();
+}
+
+#[test]
+fn test_zero_capacities() {
+ type HM = HashMap<i32, i32>;
+
+ let m = HM::new();
+ assert_eq!(m.capacity(), 0);
+
+ let m = HM::default();
+ assert_eq!(m.capacity(), 0);
+
+ let m = HM::with_hasher(RandomState::new());
+ assert_eq!(m.capacity(), 0);
+
+ let m = HM::with_capacity(0);
+ assert_eq!(m.capacity(), 0);
+
+ let m = HM::with_capacity_and_hasher(0, RandomState::new());
+ assert_eq!(m.capacity(), 0);
+
+ let mut m = HM::new();
+ m.insert(1, 1);
+ m.insert(2, 2);
+ m.remove(&1);
+ m.remove(&2);
+ m.shrink_to_fit();
+ assert_eq!(m.capacity(), 0);
+
+ let mut m = HM::new();
+ m.reserve(0);
+ assert_eq!(m.capacity(), 0);
+}
+
+#[test]
+fn test_create_capacity_zero() {
+ let mut m = HashMap::with_capacity(0);
+
+ assert!(m.insert(1, 1).is_none());
+
+ assert!(m.contains_key(&1));
+ assert!(!m.contains_key(&0));
+}
+
+#[test]
+fn test_insert() {
+ let mut m = HashMap::new();
+ assert_eq!(m.len(), 0);
+ assert!(m.insert(1, 2).is_none());
+ assert_eq!(m.len(), 1);
+ assert!(m.insert(2, 4).is_none());
+ assert_eq!(m.len(), 2);
+ assert_eq!(*m.get(&1).unwrap(), 2);
+ assert_eq!(*m.get(&2).unwrap(), 4);
+}
+
+#[test]
+fn test_clone() {
+ let mut m = HashMap::new();
+ assert_eq!(m.len(), 0);
+ assert!(m.insert(1, 2).is_none());
+ assert_eq!(m.len(), 1);
+ assert!(m.insert(2, 4).is_none());
+ assert_eq!(m.len(), 2);
+ let m2 = m.clone();
+ assert_eq!(*m2.get(&1).unwrap(), 2);
+ assert_eq!(*m2.get(&2).unwrap(), 4);
+ assert_eq!(m2.len(), 2);
+}
+
+thread_local! { static DROP_VECTOR: RefCell<Vec<i32>> = RefCell::new(Vec::new()) }
+
+#[derive(Hash, PartialEq, Eq)]
+struct Droppable {
+ k: usize,
+}
+
+impl Droppable {
+ fn new(k: usize) -> Droppable {
+ DROP_VECTOR.with(|slot| {
+ slot.borrow_mut()[k] += 1;
+ });
+
+ Droppable { k }
+ }
+}
+
+impl Drop for Droppable {
+ fn drop(&mut self) {
+ DROP_VECTOR.with(|slot| {
+ slot.borrow_mut()[self.k] -= 1;
+ });
+ }
+}
+
+impl Clone for Droppable {
+ fn clone(&self) -> Droppable {
+ Droppable::new(self.k)
+ }
+}
+
+#[test]
+fn test_drops() {
+ DROP_VECTOR.with(|slot| {
+ *slot.borrow_mut() = vec![0; 200];
+ });
+
+ {
+ let mut m = HashMap::new();
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..200 {
+ assert_eq!(v.borrow()[i], 0);
+ }
+ });
+
+ for i in 0..100 {
+ let d1 = Droppable::new(i);
+ let d2 = Droppable::new(i + 100);
+ m.insert(d1, d2);
+ }
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..200 {
+ assert_eq!(v.borrow()[i], 1);
+ }
+ });
+
+ for i in 0..50 {
+ let k = Droppable::new(i);
+ let v = m.remove(&k);
+
+ assert!(v.is_some());
+
+ DROP_VECTOR.with(|v| {
+ assert_eq!(v.borrow()[i], 1);
+ assert_eq!(v.borrow()[i + 100], 1);
+ });
+ }
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..50 {
+ assert_eq!(v.borrow()[i], 0);
+ assert_eq!(v.borrow()[i + 100], 0);
+ }
+
+ for i in 50..100 {
+ assert_eq!(v.borrow()[i], 1);
+ assert_eq!(v.borrow()[i + 100], 1);
+ }
+ });
+ }
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..200 {
+ assert_eq!(v.borrow()[i], 0);
+ }
+ });
+}
+
+#[test]
+fn test_into_iter_drops() {
+ DROP_VECTOR.with(|v| {
+ *v.borrow_mut() = vec![0; 200];
+ });
+
+ let hm = {
+ let mut hm = HashMap::new();
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..200 {
+ assert_eq!(v.borrow()[i], 0);
+ }
+ });
+
+ for i in 0..100 {
+ let d1 = Droppable::new(i);
+ let d2 = Droppable::new(i + 100);
+ hm.insert(d1, d2);
+ }
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..200 {
+ assert_eq!(v.borrow()[i], 1);
+ }
+ });
+
+ hm
+ };
+
+ // By the way, ensure that cloning doesn't screw up the dropping.
+ drop(hm.clone());
+
+ {
+ let mut half = hm.into_iter().take(50);
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..200 {
+ assert_eq!(v.borrow()[i], 1);
+ }
+ });
+
+ for _ in half.by_ref() {}
+
+ DROP_VECTOR.with(|v| {
+ let nk = (0..100).filter(|&i| v.borrow()[i] == 1).count();
+
+ let nv = (0..100).filter(|&i| v.borrow()[i + 100] == 1).count();
+
+ assert_eq!(nk, 50);
+ assert_eq!(nv, 50);
+ });
+ };
+
+ DROP_VECTOR.with(|v| {
+ for i in 0..200 {
+ assert_eq!(v.borrow()[i], 0);
+ }
+ });
+}
+
+#[test]
+fn test_empty_remove() {
+ let mut m: HashMap<i32, bool> = HashMap::new();
+ assert_eq!(m.remove(&0), None);
+}
+
+#[test]
+fn test_empty_entry() {
+ let mut m: HashMap<i32, bool> = HashMap::new();
+ match m.entry(0) {
+ Occupied(_) => panic!(),
+ Vacant(_) => {}
+ }
+ assert!(*m.entry(0).or_insert(true));
+ assert_eq!(m.len(), 1);
+}
+
+#[test]
+fn test_empty_iter() {
+ let mut m: HashMap<i32, bool> = HashMap::new();
+ assert_eq!(m.drain().next(), None);
+ assert_eq!(m.keys().next(), None);
+ assert_eq!(m.values().next(), None);
+ assert_eq!(m.values_mut().next(), None);
+ assert_eq!(m.iter().next(), None);
+ assert_eq!(m.iter_mut().next(), None);
+ assert_eq!(m.len(), 0);
+ assert!(m.is_empty());
+ assert_eq!(m.into_iter().next(), None);
+}
+
+#[test]
+fn test_lots_of_insertions() {
+ let mut m = HashMap::new();
+
+ // Try this a few times to make sure we never screw up the hashmap's
+ // internal state.
+ for _ in 0..10 {
+ assert!(m.is_empty());
+
+ for i in 1..1001 {
+ assert!(m.insert(i, i).is_none());
+
+ for j in 1..=i {
+ let r = m.get(&j);
+ assert_eq!(r, Some(&j));
+ }
+
+ for j in i + 1..1001 {
+ let r = m.get(&j);
+ assert_eq!(r, None);
+ }
+ }
+
+ for i in 1001..2001 {
+ assert!(!m.contains_key(&i));
+ }
+
+ // remove forwards
+ for i in 1..1001 {
+ assert!(m.remove(&i).is_some());
+
+ for j in 1..=i {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in i + 1..1001 {
+ assert!(m.contains_key(&j));
+ }
+ }
+
+ for i in 1..1001 {
+ assert!(!m.contains_key(&i));
+ }
+
+ for i in 1..1001 {
+ assert!(m.insert(i, i).is_none());
+ }
+
+ // remove backwards
+ for i in (1..1001).rev() {
+ assert!(m.remove(&i).is_some());
+
+ for j in i..1001 {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in 1..i {
+ assert!(m.contains_key(&j));
+ }
+ }
+ }
+}
+
+#[test]
+fn test_find_mut() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1, 12).is_none());
+ assert!(m.insert(2, 8).is_none());
+ assert!(m.insert(5, 14).is_none());
+ let new = 100;
+ match m.get_mut(&5) {
+ None => panic!(),
+ Some(x) => *x = new,
+ }
+ assert_eq!(m.get(&5), Some(&new));
+}
+
+#[test]
+fn test_insert_overwrite() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1, 2).is_none());
+ assert_eq!(*m.get(&1).unwrap(), 2);
+ assert!(m.insert(1, 3).is_some());
+ assert_eq!(*m.get(&1).unwrap(), 3);
+}
+
+#[test]
+fn test_insert_conflicts() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1, 2).is_none());
+ assert!(m.insert(5, 3).is_none());
+ assert!(m.insert(9, 4).is_none());
+ assert_eq!(*m.get(&9).unwrap(), 4);
+ assert_eq!(*m.get(&5).unwrap(), 3);
+ assert_eq!(*m.get(&1).unwrap(), 2);
+}
+
+#[test]
+fn test_conflict_remove() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1, 2).is_none());
+ assert_eq!(*m.get(&1).unwrap(), 2);
+ assert!(m.insert(5, 3).is_none());
+ assert_eq!(*m.get(&1).unwrap(), 2);
+ assert_eq!(*m.get(&5).unwrap(), 3);
+ assert!(m.insert(9, 4).is_none());
+ assert_eq!(*m.get(&1).unwrap(), 2);
+ assert_eq!(*m.get(&5).unwrap(), 3);
+ assert_eq!(*m.get(&9).unwrap(), 4);
+ assert!(m.remove(&1).is_some());
+ assert_eq!(*m.get(&9).unwrap(), 4);
+ assert_eq!(*m.get(&5).unwrap(), 3);
+}
+
+#[test]
+fn test_is_empty() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1, 2).is_none());
+ assert!(!m.is_empty());
+ assert!(m.remove(&1).is_some());
+ assert!(m.is_empty());
+}
+
+#[test]
+fn test_remove() {
+ let mut m = HashMap::new();
+ m.insert(1, 2);
+ assert_eq!(m.remove(&1), Some(2));
+ assert_eq!(m.remove(&1), None);
+}
+
+#[test]
+fn test_remove_entry() {
+ let mut m = HashMap::new();
+ m.insert(1, 2);
+ assert_eq!(m.remove_entry(&1), Some((1, 2)));
+ assert_eq!(m.remove(&1), None);
+}
+
+#[test]
+fn test_iterate() {
+ let mut m = HashMap::with_capacity(4);
+ for i in 0..32 {
+ assert!(m.insert(i, i * 2).is_none());
+ }
+ assert_eq!(m.len(), 32);
+
+ let mut observed: u32 = 0;
+
+ for (k, v) in &m {
+ assert_eq!(*v, *k * 2);
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+}
+
+#[test]
+fn test_keys() {
+ let pairs = [(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: HashMap<_, _> = pairs.into_iter().collect();
+ let keys: Vec<_> = map.keys().cloned().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn test_values() {
+ let pairs = [(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: HashMap<_, _> = pairs.into_iter().collect();
+ let values: Vec<_> = map.values().cloned().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+fn test_values_mut() {
+ let pairs = [(1, 1), (2, 2), (3, 3)];
+ let mut map: HashMap<_, _> = pairs.into_iter().collect();
+ for value in map.values_mut() {
+ *value = (*value) * 2
+ }
+ let values: Vec<_> = map.values().cloned().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&2));
+ assert!(values.contains(&4));
+ assert!(values.contains(&6));
+}
+
+#[test]
+fn test_into_keys() {
+ let pairs = [(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: HashMap<_, _> = pairs.into_iter().collect();
+ let keys: Vec<_> = map.into_keys().collect();
+
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn test_into_values() {
+ let pairs = [(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: HashMap<_, _> = pairs.into_iter().collect();
+ let values: Vec<_> = map.into_values().collect();
+
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+fn test_find() {
+ let mut m = HashMap::new();
+ assert!(m.get(&1).is_none());
+ m.insert(1, 2);
+ match m.get(&1) {
+ None => panic!(),
+ Some(v) => assert_eq!(*v, 2),
+ }
+}
+
+#[test]
+fn test_eq() {
+ let mut m1 = HashMap::new();
+ m1.insert(1, 2);
+ m1.insert(2, 3);
+ m1.insert(3, 4);
+
+ let mut m2 = HashMap::new();
+ m2.insert(1, 2);
+ m2.insert(2, 3);
+
+ assert!(m1 != m2);
+
+ m2.insert(3, 4);
+
+ assert_eq!(m1, m2);
+}
+
+#[test]
+fn test_show() {
+ let mut map = HashMap::new();
+ let empty: HashMap<i32, i32> = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(3, 4);
+
+ let map_str = format!("{map:?}");
+
+ assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}");
+ assert_eq!(format!("{empty:?}"), "{}");
+}
+
+#[test]
+fn test_reserve_shrink_to_fit() {
+ let mut m = HashMap::new();
+ m.insert(0, 0);
+ m.remove(&0);
+ assert!(m.capacity() >= m.len());
+ for i in 0..128 {
+ m.insert(i, i);
+ }
+ m.reserve(256);
+
+ let usable_cap = m.capacity();
+ for i in 128..(128 + 256) {
+ m.insert(i, i);
+ assert_eq!(m.capacity(), usable_cap);
+ }
+
+ for i in 100..(128 + 256) {
+ assert_eq!(m.remove(&i), Some(i));
+ }
+ m.shrink_to_fit();
+
+ assert_eq!(m.len(), 100);
+ assert!(!m.is_empty());
+ assert!(m.capacity() >= m.len());
+
+ for i in 0..100 {
+ assert_eq!(m.remove(&i), Some(i));
+ }
+ m.shrink_to_fit();
+ m.insert(0, 0);
+
+ assert_eq!(m.len(), 1);
+ assert!(m.capacity() >= m.len());
+ assert_eq!(m.remove(&0), Some(0));
+}
+
+#[test]
+fn test_from_iter() {
+ let xs = [(1, 1), (2, 2), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ for &(k, v) in &xs {
+ assert_eq!(map.get(&k), Some(&v));
+ }
+
+ assert_eq!(map.iter().len(), xs.len() - 1);
+}
+
+#[test]
+fn test_size_hint() {
+ let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ let mut iter = map.iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+}
+
+#[test]
+fn test_iter_len() {
+ let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ let mut iter = map.iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.len(), 3);
+}
+
+#[test]
+fn test_mut_size_hint() {
+ let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ let mut iter = map.iter_mut();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+}
+
+#[test]
+fn test_iter_mut_len() {
+ let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ let mut iter = map.iter_mut();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.len(), 3);
+}
+
+#[test]
+fn test_index() {
+ let mut map = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ assert_eq!(map[&2], 1);
+}
+
+#[test]
+#[should_panic]
+fn test_index_nonexistent() {
+ let mut map = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ map[&4];
+}
+
+#[test]
+fn test_entry() {
+ let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
+
+ let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ // Existing key (insert)
+ match map.entry(1) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ assert_eq!(view.get(), &10);
+ assert_eq!(view.insert(100), 10);
+ }
+ }
+ assert_eq!(map.get(&1).unwrap(), &100);
+ assert_eq!(map.len(), 6);
+
+ // Existing key (update)
+ match map.entry(2) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ let v = view.get_mut();
+ let new_v = (*v) * 10;
+ *v = new_v;
+ }
+ }
+ assert_eq!(map.get(&2).unwrap(), &200);
+ assert_eq!(map.len(), 6);
+
+ // Existing key (take)
+ match map.entry(3) {
+ Vacant(_) => unreachable!(),
+ Occupied(view) => {
+ assert_eq!(view.remove(), 30);
+ }
+ }
+ assert_eq!(map.get(&3), None);
+ assert_eq!(map.len(), 5);
+
+ // Nonexistent key (insert)
+ match map.entry(10) {
+ Occupied(_) => unreachable!(),
+ Vacant(view) => {
+ assert_eq!(*view.insert(1000), 1000);
+ }
+ }
+ assert_eq!(map.get(&10).unwrap(), &1000);
+ assert_eq!(map.len(), 6);
+}
+
+#[test]
+fn test_entry_take_doesnt_corrupt() {
+ #![allow(deprecated)] //rand
+ // Test for #19292
+ fn check(m: &HashMap<i32, ()>) {
+ for k in m.keys() {
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
+ }
+ }
+
+ let mut m = HashMap::new();
+ let mut rng = thread_rng();
+
+ // Populate the map with some items.
+ for _ in 0..50 {
+ let x = rng.gen_range(-10, 10);
+ m.insert(x, ());
+ }
+
+ for _ in 0..1000 {
+ let x = rng.gen_range(-10, 10);
+ match m.entry(x) {
+ Vacant(_) => {}
+ Occupied(e) => {
+ e.remove();
+ }
+ }
+
+ check(&m);
+ }
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = HashMap::new();
+ a.insert(1, "one");
+ let mut b = HashMap::new();
+ b.insert(2, "two");
+ b.insert(3, "three");
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 3);
+ assert_eq!(a[&1], "one");
+ assert_eq!(a[&2], "two");
+ assert_eq!(a[&3], "three");
+}
+
+#[test]
+fn test_capacity_not_less_than_len() {
+ let mut a = HashMap::new();
+ let mut item = 0;
+
+ for _ in 0..116 {
+ a.insert(item, 0);
+ item += 1;
+ }
+
+ assert!(a.capacity() > a.len());
+
+ let free = a.capacity() - a.len();
+ for _ in 0..free {
+ a.insert(item, 0);
+ item += 1;
+ }
+
+ assert_eq!(a.len(), a.capacity());
+
+ // Insert at capacity should cause allocation.
+ a.insert(item, 0);
+ assert!(a.capacity() > a.len());
+}
+
+#[test]
+fn test_occupied_entry_key() {
+ let mut a = HashMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+ assert!(a.is_empty());
+ a.insert(key, value);
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+
+ match a.entry(key) {
+ Vacant(_) => panic!(),
+ Occupied(e) => assert_eq!(key, *e.key()),
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+}
+
+#[test]
+fn test_vacant_entry_key() {
+ let mut a = HashMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+
+ assert!(a.is_empty());
+ match a.entry(key) {
+ Occupied(_) => panic!(),
+ Vacant(e) => {
+ assert_eq!(key, *e.key());
+ e.insert(value);
+ }
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+}
+
+#[test]
+fn test_retain() {
+ let mut map: HashMap<i32, i32> = (0..100).map(|x| (x, x * 10)).collect();
+
+ map.retain(|&k, _| k % 2 == 0);
+ assert_eq!(map.len(), 50);
+ assert_eq!(map[&2], 20);
+ assert_eq!(map[&4], 40);
+ assert_eq!(map[&6], 60);
+}
+
+#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
+fn test_try_reserve() {
+ let mut empty_bytes: HashMap<u8, u8> = HashMap::new();
+
+ const MAX_USIZE: usize = usize::MAX;
+
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+
+ if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16).map_err(|e| e.kind()) {
+ } else {
+ // This may succeed if there is enough free memory. Attempt to
+ // allocate a few more hashmaps to ensure the allocation will fail.
+ let mut empty_bytes2: HashMap<u8, u8> = HashMap::new();
+ let _ = empty_bytes2.try_reserve(MAX_USIZE / 16);
+ let mut empty_bytes3: HashMap<u8, u8> = HashMap::new();
+ let _ = empty_bytes3.try_reserve(MAX_USIZE / 16);
+ let mut empty_bytes4: HashMap<u8, u8> = HashMap::new();
+ assert_matches!(
+ empty_bytes4.try_reserve(MAX_USIZE / 16).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "usize::MAX / 16 should trigger an OOM!"
+ );
+ }
+}
+
+#[test]
+fn test_raw_entry() {
+ use super::RawEntryMut::{Occupied, Vacant};
+
+ let xs = [(1i32, 10i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
+
+ let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ let compute_hash = |map: &HashMap<i32, i32>, k: i32| -> u64 {
+ use core::hash::{BuildHasher, Hash, Hasher};
+
+ let mut hasher = map.hasher().build_hasher();
+ k.hash(&mut hasher);
+ hasher.finish()
+ };
+
+ // Existing key (insert)
+ match map.raw_entry_mut().from_key(&1) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ assert_eq!(view.get(), &10);
+ assert_eq!(view.insert(100), 10);
+ }
+ }
+ let hash1 = compute_hash(&map, 1);
+ assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100));
+ assert_eq!(map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), (&1, &100));
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), (&1, &100));
+ assert_eq!(map.len(), 6);
+
+ // Existing key (update)
+ match map.raw_entry_mut().from_key(&2) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ let v = view.get_mut();
+ let new_v = (*v) * 10;
+ *v = new_v;
+ }
+ }
+ let hash2 = compute_hash(&map, 2);
+ assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200));
+ assert_eq!(map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), (&2, &200));
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), (&2, &200));
+ assert_eq!(map.len(), 6);
+
+ // Existing key (take)
+ let hash3 = compute_hash(&map, 3);
+ match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) {
+ Vacant(_) => unreachable!(),
+ Occupied(view) => {
+ assert_eq!(view.remove_entry(), (3, 30));
+ }
+ }
+ assert_eq!(map.raw_entry().from_key(&3), None);
+ assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None);
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None);
+ assert_eq!(map.len(), 5);
+
+ // Nonexistent key (insert)
+ match map.raw_entry_mut().from_key(&10) {
+ Occupied(_) => unreachable!(),
+ Vacant(view) => {
+ assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000));
+ }
+ }
+ assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000));
+ assert_eq!(map.len(), 6);
+
+ // Ensure all lookup methods produce equivalent results.
+ for k in 0..12 {
+ let hash = compute_hash(&map, k);
+ let v = map.get(&k).cloned();
+ let kv = v.as_ref().map(|v| (&k, v));
+
+ assert_eq!(map.raw_entry().from_key(&k), kv);
+ assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv);
+ assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv);
+
+ match map.raw_entry_mut().from_key(&k) {
+ Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
+ Vacant(_) => assert_eq!(v, None),
+ }
+ match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) {
+ Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
+ Vacant(_) => assert_eq!(v, None),
+ }
+ match map.raw_entry_mut().from_hash(hash, |q| *q == k) {
+ Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv),
+ Vacant(_) => assert_eq!(v, None),
+ }
+ }
+}
+
+mod test_drain_filter {
+ use super::*;
+
+ use crate::panic::{catch_unwind, AssertUnwindSafe};
+ use crate::sync::atomic::{AtomicUsize, Ordering};
+
+ trait EqSorted: Iterator {
+ fn eq_sorted<I: IntoIterator<Item = Self::Item>>(self, other: I) -> bool;
+ }
+
+ impl<T: Iterator> EqSorted for T
+ where
+ T::Item: Eq + Ord,
+ {
+ fn eq_sorted<I: IntoIterator<Item = Self::Item>>(self, other: I) -> bool {
+ let mut v: Vec<_> = self.collect();
+ v.sort_unstable();
+ v.into_iter().eq(other)
+ }
+ }
+
+ #[test]
+ fn empty() {
+ let mut map: HashMap<i32, i32> = HashMap::new();
+ map.drain_filter(|_, _| unreachable!("there's nothing to decide on"));
+ assert!(map.is_empty());
+ }
+
+ #[test]
+ fn consuming_nothing() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map: HashMap<_, _> = pairs.collect();
+ assert!(map.drain_filter(|_, _| false).eq_sorted(crate::iter::empty()));
+ assert_eq!(map.len(), 3);
+ }
+
+ #[test]
+ fn consuming_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map: HashMap<_, _> = pairs.clone().collect();
+ assert!(map.drain_filter(|_, _| true).eq_sorted(pairs));
+ assert!(map.is_empty());
+ }
+
+ #[test]
+ fn mutating_and_keeping() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map: HashMap<_, _> = pairs.collect();
+ assert!(
+ map.drain_filter(|_, v| {
+ *v += 6;
+ false
+ })
+ .eq_sorted(crate::iter::empty())
+ );
+ assert!(map.keys().copied().eq_sorted(0..3));
+ assert!(map.values().copied().eq_sorted(6..9));
+ }
+
+ #[test]
+ fn mutating_and_removing() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map: HashMap<_, _> = pairs.collect();
+ assert!(
+ map.drain_filter(|_, v| {
+ *v += 6;
+ true
+ })
+ .eq_sorted((0..3).map(|i| (i, i + 6)))
+ );
+ assert!(map.is_empty());
+ }
+
+ #[test]
+ fn drop_panic_leak() {
+ static PREDS: AtomicUsize = AtomicUsize::new(0);
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+ struct D;
+ impl Drop for D {
+ fn drop(&mut self) {
+ if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut map = (0..3).map(|i| (i, D)).collect::<HashMap<_, _>>();
+
+ catch_unwind(move || {
+ drop(map.drain_filter(|_, _| {
+ PREDS.fetch_add(1, Ordering::SeqCst);
+ true
+ }))
+ })
+ .unwrap_err();
+
+ assert_eq!(PREDS.load(Ordering::SeqCst), 3);
+ assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+ }
+
+ #[test]
+ fn pred_panic_leak() {
+ static PREDS: AtomicUsize = AtomicUsize::new(0);
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+ struct D;
+ impl Drop for D {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let mut map = (0..3).map(|i| (i, D)).collect::<HashMap<_, _>>();
+
+ catch_unwind(AssertUnwindSafe(|| {
+ drop(map.drain_filter(|_, _| match PREDS.fetch_add(1, Ordering::SeqCst) {
+ 0 => true,
+ _ => panic!(),
+ }))
+ }))
+ .unwrap_err();
+
+ assert_eq!(PREDS.load(Ordering::SeqCst), 2);
+ assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+ assert_eq!(map.len(), 2);
+ }
+
+ // Same as above, but attempt to use the iterator again after the panic in the predicate
+ #[test]
+ fn pred_panic_reuse() {
+ static PREDS: AtomicUsize = AtomicUsize::new(0);
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+ struct D;
+ impl Drop for D {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let mut map = (0..3).map(|i| (i, D)).collect::<HashMap<_, _>>();
+
+ {
+ let mut it = map.drain_filter(|_, _| match PREDS.fetch_add(1, Ordering::SeqCst) {
+ 0 => true,
+ _ => panic!(),
+ });
+ catch_unwind(AssertUnwindSafe(|| while it.next().is_some() {})).unwrap_err();
+ // Iterator behavior after a panic is explicitly unspecified,
+ // so this is just the current implementation:
+ let result = catch_unwind(AssertUnwindSafe(|| it.next()));
+ assert!(result.is_err());
+ }
+
+ assert_eq!(PREDS.load(Ordering::SeqCst), 3);
+ assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+ assert_eq!(map.len(), 2);
+ }
+}
+
+#[test]
+fn from_array() {
+ let map = HashMap::from([(1, 2), (3, 4)]);
+ let unordered_duplicates = HashMap::from([(3, 4), (1, 2), (1, 2)]);
+ assert_eq!(map, unordered_duplicates);
+
+ // This next line must infer the hasher type parameter.
+ // If you make a change that causes this line to no longer infer,
+ // that's a problem!
+ let _must_not_require_type_annotation = HashMap::from([(1, 2)]);
+}
diff --git a/library/std/src/collections/hash/mod.rs b/library/std/src/collections/hash/mod.rs
new file mode 100644
index 000000000..348820af5
--- /dev/null
+++ b/library/std/src/collections/hash/mod.rs
@@ -0,0 +1,4 @@
+//! Unordered containers, implemented as hash-tables
+
+pub mod map;
+pub mod set;
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
new file mode 100644
index 000000000..abff82788
--- /dev/null
+++ b/library/std/src/collections/hash/set.rs
@@ -0,0 +1,1844 @@
+#[cfg(test)]
+mod tests;
+
+use hashbrown::hash_set as base;
+
+use crate::borrow::Borrow;
+use crate::collections::TryReserveError;
+use crate::fmt;
+use crate::hash::{BuildHasher, Hash};
+use crate::iter::{Chain, FusedIterator};
+use crate::ops::{BitAnd, BitOr, BitXor, Sub};
+
+use super::map::{map_try_reserve_error, RandomState};
+
+// Future Optimization (FIXME!)
+// ============================
+//
+// Iteration over zero-sized values is a no-op. There is no need
+// for `bucket.val` in the case of HashSet. I suppose we would need HKT
+// to get rid of it properly.
+
+/// A [hash set] implemented as a `HashMap` where the value is `()`.
+///
+/// As with the [`HashMap`] type, a `HashSet` requires that the elements
+/// implement the [`Eq`] and [`Hash`] traits. This can frequently be achieved by
+/// using `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself,
+/// it is important that the following property holds:
+///
+/// ```text
+/// k1 == k2 -> hash(k1) == hash(k2)
+/// ```
+///
+/// In other words, if two keys are equal, their hashes must be equal.
+///
+/// It is a logic error for a key to be modified in such a way that the key's
+/// hash, as determined by the [`Hash`] trait, or its equality, as determined by
+/// the [`Eq`] trait, changes while it is in the map. This is normally only
+/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
+/// The behavior resulting from such a logic error is not specified, but will
+/// be encapsulated to the `HashSet` that observed the logic error and not
+/// result in undefined behavior. This could include panics, incorrect results,
+/// aborts, memory leaks, and non-termination.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `HashSet<String>` in this example).
+/// let mut books = HashSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons".to_string());
+/// books.insert("To Kill a Mockingbird".to_string());
+/// books.insert("The Odyssey".to_string());
+/// books.insert("The Great Gatsby".to_string());
+///
+/// // Check for a specific one.
+/// if !books.contains("The Winds of Winter") {
+/// println!("We have {} books, but The Winds of Winter ain't one.",
+/// books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove("The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in &books {
+/// println!("{book}");
+/// }
+/// ```
+///
+/// The easiest way to use `HashSet` with a custom type is to derive
+/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`]; in the future this
+/// will be implied by [`Eq`].
+///
+/// ```
+/// use std::collections::HashSet;
+/// #[derive(Hash, Eq, PartialEq, Debug)]
+/// struct Viking {
+/// name: String,
+/// power: usize,
+/// }
+///
+/// let mut vikings = HashSet::new();
+///
+/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 });
+/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 });
+/// vikings.insert(Viking { name: "Olaf".to_string(), power: 4 });
+/// vikings.insert(Viking { name: "Harald".to_string(), power: 8 });
+///
+/// // Use derived implementation to print the vikings.
+/// for x in &vikings {
+/// println!("{x:?}");
+/// }
+/// ```
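+///
+/// If [`Eq`] and [`Hash`] are implemented by hand rather than derived, both
+/// must look at the same data so that the property above holds. A minimal
+/// sketch, using a hypothetical case-insensitive key type:
+///
+/// ```
+/// use std::collections::HashSet;
+/// use std::hash::{Hash, Hasher};
+///
+/// struct CaseInsensitive(String);
+///
+/// impl PartialEq for CaseInsensitive {
+///     fn eq(&self, other: &Self) -> bool {
+///         self.0.eq_ignore_ascii_case(&other.0)
+///     }
+/// }
+/// impl Eq for CaseInsensitive {}
+///
+/// impl Hash for CaseInsensitive {
+///     fn hash<H: Hasher>(&self, state: &mut H) {
+///         // Hash the same normalized form that `eq` compares, so that
+///         // equal keys always produce equal hashes.
+///         for byte in self.0.bytes() {
+///             byte.to_ascii_lowercase().hash(state);
+///         }
+///     }
+/// }
+///
+/// let mut set = HashSet::new();
+/// set.insert(CaseInsensitive("Einar".to_string()));
+/// assert!(set.contains(&CaseInsensitive("EINAR".to_string())));
+/// ```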
+///
+/// A `HashSet` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let viking_names = HashSet::from(["Einar", "Olaf", "Harald"]);
+/// ```
+///
+/// [hash set]: crate::collections#use-the-set-variant-of-any-of-these-maps-when
+/// [`HashMap`]: crate::collections::HashMap
+/// [`RefCell`]: crate::cell::RefCell
+/// [`Cell`]: crate::cell::Cell
+#[cfg_attr(not(test), rustc_diagnostic_item = "HashSet")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct HashSet<T, S = RandomState> {
+ base: base::HashSet<T, S>,
+}
+
+impl<T> HashSet<T, RandomState> {
+ /// Creates an empty `HashSet`.
+ ///
+ /// The hash set is initially created with a capacity of 0, so it will not allocate until it
+ /// is first inserted into.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let set: HashSet<i32> = HashSet::new();
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new() -> HashSet<T, RandomState> {
+ Default::default()
+ }
+
+ /// Creates an empty `HashSet` with at least the specified capacity.
+ ///
+ /// The hash set will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the hash set will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let set: HashSet<i32> = HashSet::with_capacity(10);
+ /// assert!(set.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_capacity(capacity: usize) -> HashSet<T, RandomState> {
+ HashSet { base: base::HashSet::with_capacity_and_hasher(capacity, Default::default()) }
+ }
+}
+
+impl<T, S> HashSet<T, S> {
+ /// Returns the number of elements the set can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let set: HashSet<i32> = HashSet::with_capacity(100);
+ /// assert!(set.capacity() >= 100);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.base.capacity()
+ }
+
+ /// An iterator visiting all elements in arbitrary order.
+ /// The iterator element type is `&'a T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a");
+ /// set.insert("b");
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in set.iter() {
+ /// println!("{x}");
+ /// }
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, iterating over the set takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { base: self.base.iter() }
+ }
+
+ /// Returns the number of elements in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut v = HashSet::new();
+ /// assert_eq!(v.len(), 0);
+ /// v.insert(1);
+ /// assert_eq!(v.len(), 1);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.base.len()
+ }
+
+ /// Returns `true` if the set contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut v = HashSet::new();
+ /// assert!(v.is_empty());
+ /// v.insert(1);
+ /// assert!(!v.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.base.is_empty()
+ }
+
+ /// Clears the set, returning all elements as an iterator. Keeps the
+ /// allocated memory for reuse.
+ ///
+ /// If the returned iterator is dropped before being fully consumed, it
+ /// drops the remaining elements. The returned iterator keeps a mutable
+ /// borrow on the set to optimize its implementation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::from([1, 2, 3]);
+ /// assert!(!set.is_empty());
+ ///
+ /// // print 1, 2, 3 in an arbitrary order
+ /// for i in set.drain() {
+ /// println!("{i}");
+ /// }
+ ///
+ /// assert!(set.is_empty());
+ /// ```
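+ ///
+ /// Dropping the iterator before it is exhausted still removes (and drops) the
+ /// remaining elements; a small sketch:
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::from([1, 2, 3]);
+ ///
+ /// // Take only one element; the rest are dropped when the temporary
+ /// // `Drain` iterator goes out of scope at the end of the statement.
+ /// let _one = set.drain().next();
+ /// assert!(set.is_empty());
+ /// ```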
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain(&mut self) -> Drain<'_, T> {
+ Drain { base: self.base.drain() }
+ }
+
+ /// Creates an iterator which uses a closure to determine if a value should be removed.
+ ///
+ /// If the closure returns true, then the value is removed and yielded.
+ /// If the closure returns false, the value will remain in the set and will not be yielded
+ /// by the iterator.
+ ///
+ /// If the iterator is only partially consumed or not consumed at all, each of the remaining
+ /// values will still be subjected to the closure and removed and dropped if it returns true.
+ ///
+ /// It is unspecified how many more values will be subjected to the closure
+ /// if a panic occurs in the closure, or if a panic occurs while dropping a value, or if the
+ /// `DrainFilter` itself is leaked.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a set into even and odd values, reusing the original set:
+ ///
+ /// ```
+ /// #![feature(hash_drain_filter)]
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set: HashSet<i32> = (0..8).collect();
+ /// let drained: HashSet<i32> = set.drain_filter(|v| v % 2 == 0).collect();
+ ///
+ /// let mut evens = drained.into_iter().collect::<Vec<_>>();
+ /// let mut odds = set.into_iter().collect::<Vec<_>>();
+ /// evens.sort();
+ /// odds.sort();
+ ///
+ /// assert_eq!(evens, vec![0, 2, 4, 6]);
+ /// assert_eq!(odds, vec![1, 3, 5, 7]);
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[unstable(feature = "hash_drain_filter", issue = "59618")]
+ pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ DrainFilter { base: self.base.drain_filter(pred) }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
+ /// The elements are visited in unsorted (and unspecified) order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::from([1, 2, 3, 4, 5, 6]);
+ /// set.retain(|&k| k % 2 == 0);
+ /// assert_eq!(set.len(), 3);
+ /// ```
+ ///
+ /// # Performance
+ ///
+ /// In the current implementation, this operation takes O(capacity) time
+ /// instead of O(len) because it internally visits empty buckets too.
+ #[rustc_lint_query_instability]
+ #[stable(feature = "retain_hash_collection", since = "1.18.0")]
+ pub fn retain<F>(&mut self, f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.base.retain(f)
+ }
+
+ /// Clears the set, removing all values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut v = HashSet::new();
+ /// v.insert(1);
+ /// v.clear();
+ /// assert!(v.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.base.clear()
+ }
+
+ /// Creates a new empty hash set which will use the given hasher to hash
+ /// keys.
+ ///
+ /// The hash set is initially created with a capacity of 0, so it will not
+ /// allocate until it is first inserted into.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow `HashSet`s to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// The `hasher` passed should implement the [`BuildHasher`] trait for
+ /// the HashSet to be useful; see its documentation for details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// let s = RandomState::new();
+ /// let mut set = HashSet::with_hasher(s);
+ /// set.insert(2);
+ /// ```
+ #[inline]
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub fn with_hasher(hasher: S) -> HashSet<T, S> {
+ HashSet { base: base::HashSet::with_hasher(hasher) }
+ }
+
+ /// Creates an empty `HashSet` with at least the specified capacity, using
+ /// `hasher` to hash the keys.
+ ///
+ /// The hash set will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the hash set will not allocate.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow `HashSet`s to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// The `hasher` passed should implement the [`BuildHasher`] trait for
+ /// the HashSet to be useful; see its documentation for details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// let s = RandomState::new();
+ /// let mut set = HashSet::with_capacity_and_hasher(10, s);
+ /// set.insert(1);
+ /// ```
+ #[inline]
+ #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
+ pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> HashSet<T, S> {
+ HashSet { base: base::HashSet::with_capacity_and_hasher(capacity, hasher) }
+ }
+
+ /// Returns a reference to the set's [`BuildHasher`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// let hasher = RandomState::new();
+ /// let set: HashSet<i32> = HashSet::with_hasher(hasher);
+ /// let hasher: &RandomState = set.hasher();
+ /// ```
+ #[inline]
+ #[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
+ pub fn hasher(&self) -> &S {
+ self.base.hasher()
+ }
+}
+
+impl<T, S> HashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the `HashSet`. The collection may reserve more space to speculatively
+ /// avoid frequent reallocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new allocation size overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<i32> = HashSet::new();
+ /// set.reserve(10);
+ /// assert!(set.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.base.reserve(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the `HashSet`. The collection may reserve more space to speculatively
+ /// avoid frequent reallocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional` if
+ /// it returns `Ok(())`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<i32> = HashSet::new();
+ /// set.try_reserve(10).expect("why is the test harness OOMing on a handful of bytes?");
+ /// ```
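+ ///
+ /// Requesting an impossibly large capacity reports the failure instead of
+ /// panicking (a minimal sketch of the error path):
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set: HashSet<u8> = HashSet::new();
+ ///
+ /// // More elements than the address space can hold: the internal
+ /// // capacity computation overflows and an error is returned.
+ /// assert!(set.try_reserve(usize::MAX).is_err());
+ /// ```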
+ #[inline]
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.base.try_reserve(additional).map_err(map_try_reserve_error)
+ }
+
+ /// Shrinks the capacity of the set as much as possible. It will drop
+ /// down as much as possible while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::with_capacity(100);
+ /// set.insert(1);
+ /// set.insert(2);
+ /// assert!(set.capacity() >= 100);
+ /// set.shrink_to_fit();
+ /// assert!(set.capacity() >= 2);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.base.shrink_to_fit()
+ }
+
+ /// Shrinks the capacity of the set with a lower limit. It will drop
+ /// down no lower than the supplied limit while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::with_capacity(100);
+ /// set.insert(1);
+ /// set.insert(2);
+ /// assert!(set.capacity() >= 100);
+ /// set.shrink_to(10);
+ /// assert!(set.capacity() >= 10);
+ /// set.shrink_to(0);
+ /// assert!(set.capacity() >= 2);
+ /// ```
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.base.shrink_to(min_capacity)
+ }
+
+ /// Visits the values representing the difference,
+ /// i.e., the values that are in `self` but not in `other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([4, 2, 3, 4]);
+ ///
+ /// // Can be seen as `a - b`.
+ /// for x in a.difference(&b) {
+ /// println!("{x}"); // Print 1
+ /// }
+ ///
+ /// let diff: HashSet<_> = a.difference(&b).collect();
+ /// assert_eq!(diff, [1].iter().collect());
+ ///
+ /// // Note that difference is not symmetric,
+ /// // and `b - a` means something else:
+ /// let diff: HashSet<_> = b.difference(&a).collect();
+ /// assert_eq!(diff, [4].iter().collect());
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn difference<'a>(&'a self, other: &'a HashSet<T, S>) -> Difference<'a, T, S> {
+ Difference { iter: self.iter(), other }
+ }
+
+ /// Visits the values representing the symmetric difference,
+ /// i.e., the values that are in `self` or in `other` but not in both.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([4, 2, 3, 4]);
+ ///
+ /// // Print 1, 4 in arbitrary order.
+ /// for x in a.symmetric_difference(&b) {
+ /// println!("{x}");
+ /// }
+ ///
+ /// let diff1: HashSet<_> = a.symmetric_difference(&b).collect();
+ /// let diff2: HashSet<_> = b.symmetric_difference(&a).collect();
+ ///
+ /// assert_eq!(diff1, diff2);
+ /// assert_eq!(diff1, [1, 4].iter().collect());
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn symmetric_difference<'a>(
+ &'a self,
+ other: &'a HashSet<T, S>,
+ ) -> SymmetricDifference<'a, T, S> {
+ SymmetricDifference { iter: self.difference(other).chain(other.difference(self)) }
+ }
+
+ /// Visits the values representing the intersection,
+ /// i.e., the values that are both in `self` and `other`.
+ ///
+ /// When an equal element is present in both `self` and `other`,
+ /// the resulting `Intersection` may yield references to
+ /// one or the other. This can be relevant if `T` contains fields which
+ /// are not compared by its `Eq` implementation, and may hold different
+ /// values between the two equal copies of `T` in the two sets.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([4, 2, 3, 4]);
+ ///
+ /// // Print 2, 3 in arbitrary order.
+ /// for x in a.intersection(&b) {
+ /// println!("{x}");
+ /// }
+ ///
+ /// let intersection: HashSet<_> = a.intersection(&b).collect();
+ /// assert_eq!(intersection, [2, 3].iter().collect());
+ /// ```
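+ ///
+ /// If `T` carries data that its `Eq` implementation ignores, which of the two
+ /// equal copies is yielded is unspecified. A minimal sketch with a
+ /// hypothetical `Tagged` type whose equality and hash use only the first field:
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::{Hash, Hasher};
+ ///
+ /// struct Tagged(i32, &'static str);
+ ///
+ /// impl PartialEq for Tagged {
+ ///     fn eq(&self, other: &Self) -> bool { self.0 == other.0 }
+ /// }
+ /// impl Eq for Tagged {}
+ /// impl Hash for Tagged {
+ ///     fn hash<H: Hasher>(&self, state: &mut H) { self.0.hash(state) }
+ /// }
+ ///
+ /// let a = HashSet::from([Tagged(1, "from a")]);
+ /// let b = HashSet::from([Tagged(1, "from b")]);
+ ///
+ /// for x in a.intersection(&b) {
+ ///     // `x.1` may be either tag; only the compared field is guaranteed.
+ ///     assert_eq!(x.0, 1);
+ /// }
+ /// ```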
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn intersection<'a>(&'a self, other: &'a HashSet<T, S>) -> Intersection<'a, T, S> {
+ if self.len() <= other.len() {
+ Intersection { iter: self.iter(), other }
+ } else {
+ Intersection { iter: other.iter(), other: self }
+ }
+ }
+
+ /// Visits the values representing the union,
+ /// i.e., all the values in `self` or `other`, without duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([4, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order.
+ /// for x in a.union(&b) {
+ /// println!("{x}");
+ /// }
+ ///
+ /// let union: HashSet<_> = a.union(&b).collect();
+ /// assert_eq!(union, [1, 2, 3, 4].iter().collect());
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn union<'a>(&'a self, other: &'a HashSet<T, S>) -> Union<'a, T, S> {
+ if self.len() >= other.len() {
+ Union { iter: self.iter().chain(other.difference(self)) }
+ } else {
+ Union { iter: other.iter().chain(self.difference(other)) }
+ }
+ }
+
+ /// Returns `true` if the set contains a value.
+ ///
+ /// The value may be any borrowed form of the set's value type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the value type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let set = HashSet::from([1, 2, 3]);
+ /// assert_eq!(set.contains(&1), true);
+ /// assert_eq!(set.contains(&4), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
+ where
+ T: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.contains(value)
+ }
+
+ /// Returns a reference to the value in the set, if any, that is equal to the given value.
+ ///
+ /// The value may be any borrowed form of the set's value type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the value type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let set = HashSet::from([1, 2, 3]);
+ /// assert_eq!(set.get(&2), Some(&2));
+ /// assert_eq!(set.get(&4), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
+ where
+ T: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.get(value)
+ }
+
+ /// Inserts the given `value` into the set if it is not present, then
+ /// returns a reference to the value in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_set_entry)]
+ ///
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::from([1, 2, 3]);
+ /// assert_eq!(set.len(), 3);
+ /// assert_eq!(set.get_or_insert(2), &2);
+ /// assert_eq!(set.get_or_insert(100), &100);
+ /// assert_eq!(set.len(), 4); // 100 was inserted
+ /// ```
+ #[inline]
+ #[unstable(feature = "hash_set_entry", issue = "60896")]
+ pub fn get_or_insert(&mut self, value: T) -> &T {
+ // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
+ // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
+ self.base.get_or_insert(value)
+ }
+
+ /// Inserts an owned copy of the given `value` into the set if it is not
+ /// present, then returns a reference to the value in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_set_entry)]
+ ///
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set: HashSet<String> = ["cat", "dog", "horse"]
+ /// .iter().map(|&pet| pet.to_owned()).collect();
+ ///
+ /// assert_eq!(set.len(), 3);
+ /// for &pet in &["cat", "dog", "fish"] {
+ /// let value = set.get_or_insert_owned(pet);
+ /// assert_eq!(value, pet);
+ /// }
+ /// assert_eq!(set.len(), 4); // a new "fish" was inserted
+ /// ```
+ #[inline]
+ #[unstable(feature = "hash_set_entry", issue = "60896")]
+ pub fn get_or_insert_owned<Q: ?Sized>(&mut self, value: &Q) -> &T
+ where
+ T: Borrow<Q>,
+ Q: Hash + Eq + ToOwned<Owned = T>,
+ {
+ // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
+ // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
+ self.base.get_or_insert_owned(value)
+ }
+
+ /// Inserts a value computed from `f` into the set if the given `value` is
+ /// not present, then returns a reference to the value in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(hash_set_entry)]
+ ///
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set: HashSet<String> = ["cat", "dog", "horse"]
+ /// .iter().map(|&pet| pet.to_owned()).collect();
+ ///
+ /// assert_eq!(set.len(), 3);
+ /// for &pet in &["cat", "dog", "fish"] {
+ /// let value = set.get_or_insert_with(pet, str::to_owned);
+ /// assert_eq!(value, pet);
+ /// }
+ /// assert_eq!(set.len(), 4); // a new "fish" was inserted
+ /// ```
+ #[inline]
+ #[unstable(feature = "hash_set_entry", issue = "60896")]
+ pub fn get_or_insert_with<Q: ?Sized, F>(&mut self, value: &Q, f: F) -> &T
+ where
+ T: Borrow<Q>,
+ Q: Hash + Eq,
+ F: FnOnce(&Q) -> T,
+ {
+ // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
+ // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
+ self.base.get_or_insert_with(value, f)
+ }
+
+ /// Returns `true` if `self` has no elements in common with `other`.
+ /// This is equivalent to checking for an empty intersection.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let mut b = HashSet::new();
+ ///
+ /// assert_eq!(a.is_disjoint(&b), true);
+ /// b.insert(4);
+ /// assert_eq!(a.is_disjoint(&b), true);
+ /// b.insert(1);
+ /// assert_eq!(a.is_disjoint(&b), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_disjoint(&self, other: &HashSet<T, S>) -> bool {
+ if self.len() <= other.len() {
+ self.iter().all(|v| !other.contains(v))
+ } else {
+ other.iter().all(|v| !self.contains(v))
+ }
+ }
+
+ /// Returns `true` if the set is a subset of another,
+ /// i.e., `other` contains at least all the values in `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let sup = HashSet::from([1, 2, 3]);
+ /// let mut set = HashSet::new();
+ ///
+ /// assert_eq!(set.is_subset(&sup), true);
+ /// set.insert(2);
+ /// assert_eq!(set.is_subset(&sup), true);
+ /// set.insert(4);
+ /// assert_eq!(set.is_subset(&sup), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_subset(&self, other: &HashSet<T, S>) -> bool {
+ if self.len() <= other.len() { self.iter().all(|v| other.contains(v)) } else { false }
+ }
+
+ /// Returns `true` if the set is a superset of another,
+ /// i.e., `self` contains at least all the values in `other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let sub = HashSet::from([1, 2]);
+ /// let mut set = HashSet::new();
+ ///
+ /// assert_eq!(set.is_superset(&sub), false);
+ ///
+ /// set.insert(0);
+ /// set.insert(1);
+ /// assert_eq!(set.is_superset(&sub), false);
+ ///
+ /// set.insert(2);
+ /// assert_eq!(set.is_superset(&sub), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_superset(&self, other: &HashSet<T, S>) -> bool {
+ other.is_subset(self)
+ }
+
+ /// Adds a value to the set.
+ ///
+ /// Returns whether the value was newly inserted. That is:
+ ///
+ /// - If the set did not previously contain this value, `true` is returned.
+ /// - If the set already contained this value, `false` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::new();
+ ///
+ /// assert_eq!(set.insert(2), true);
+ /// assert_eq!(set.insert(2), false);
+ /// assert_eq!(set.len(), 1);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, value: T) -> bool {
+ self.base.insert(value)
+ }
+
+ /// Adds a value to the set, replacing the existing value, if any, that is equal to the given
+ /// one. Returns the replaced value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::new();
+ /// set.insert(Vec::<i32>::new());
+ ///
+ /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 0);
+ /// set.replace(Vec::with_capacity(10));
+ /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ self.base.replace(value)
+ }
+
+ /// Removes a value from the set. Returns whether the value was
+ /// present in the set.
+ ///
+ /// The value may be any borrowed form of the set's value type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the value type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::new();
+ ///
+ /// set.insert(2);
+ /// assert_eq!(set.remove(&2), true);
+ /// assert_eq!(set.remove(&2), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ T: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.remove(value)
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the given one.
+ ///
+ /// The value may be any borrowed form of the set's value type, but
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+ /// the value type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let mut set = HashSet::from([1, 2, 3]);
+ /// assert_eq!(set.take(&2), Some(2));
+ /// assert_eq!(set.take(&2), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ T: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.base.take(value)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Clone for HashSet<T, S>
+where
+ T: Clone,
+ S: Clone,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Self { base: self.base.clone() }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.base.clone_from(&other.base);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> PartialEq for HashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn eq(&self, other: &HashSet<T, S>) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ self.iter().all(|key| other.contains(key))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Eq for HashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> fmt::Debug for HashSet<T, S>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> FromIterator<T> for HashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher + Default,
+{
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> HashSet<T, S> {
+ let mut set = HashSet::with_hasher(Default::default());
+ set.extend(iter);
+ set
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+// Note: as what is currently the most convenient built-in way to construct
+// a HashSet, a simple usage of this function must not *require* the user
+// to provide a type annotation in order to infer the third type parameter
+// (the hasher parameter, conventionally "S").
+// To that end, this impl is defined using RandomState as the concrete
+// type of S, rather than being generic over `S: BuildHasher + Default`.
+// It is expected that users who want to specify a hasher will manually use
+// `with_capacity_and_hasher`.
+// If type parameter defaults worked on impls, and if type parameter
+// defaults could be mixed with const generics, then perhaps
+// this could be generalized.
+// See also the equivalent impl on HashMap.
+impl<T, const N: usize> From<[T; N]> for HashSet<T, RandomState>
+where
+ T: Eq + Hash,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let set1 = HashSet::from([1, 2, 3, 4]);
+ /// let set2: HashSet<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(set1, set2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Extend<T> for HashSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ self.base.extend(iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.base.insert(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.base.extend_reserve(additional);
+ }
+}
+
+#[stable(feature = "hash_extend_copy", since = "1.4.0")]
+impl<'a, T, S> Extend<&'a T> for HashSet<T, S>
+where
+ T: 'a + Eq + Hash + Copy,
+ S: BuildHasher,
+{
+ #[inline]
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.base.insert(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ Extend::<T>::extend_reserve(self, additional)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Default for HashSet<T, S>
+where
+ S: Default,
+{
+ /// Creates an empty `HashSet<T, S>` with the `Default` value for the hasher.
+ #[inline]
+ fn default() -> HashSet<T, S> {
+ HashSet { base: Default::default() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> BitOr<&HashSet<T, S>> for &HashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = HashSet<T, S>;
+
+ /// Returns the union of `self` and `rhs` as a new `HashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([3, 4, 5]);
+ ///
+ /// let set = &a | &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [1, 2, 3, 4, 5];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn bitor(self, rhs: &HashSet<T, S>) -> HashSet<T, S> {
+ self.union(rhs).cloned().collect()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> BitAnd<&HashSet<T, S>> for &HashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = HashSet<T, S>;
+
+ /// Returns the intersection of `self` and `rhs` as a new `HashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([2, 3, 4]);
+ ///
+ /// let set = &a & &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [2, 3];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn bitand(self, rhs: &HashSet<T, S>) -> HashSet<T, S> {
+ self.intersection(rhs).cloned().collect()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> BitXor<&HashSet<T, S>> for &HashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = HashSet<T, S>;
+
+ /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([3, 4, 5]);
+ ///
+ /// let set = &a ^ &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [1, 2, 4, 5];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn bitxor(self, rhs: &HashSet<T, S>) -> HashSet<T, S> {
+ self.symmetric_difference(rhs).cloned().collect()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Sub<&HashSet<T, S>> for &HashSet<T, S>
+where
+ T: Eq + Hash + Clone,
+ S: BuildHasher + Default,
+{
+ type Output = HashSet<T, S>;
+
+ /// Returns the difference of `self` and `rhs` as a new `HashSet<T, S>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ ///
+ /// let a = HashSet::from([1, 2, 3]);
+ /// let b = HashSet::from([3, 4, 5]);
+ ///
+ /// let set = &a - &b;
+ ///
+ /// let mut i = 0;
+ /// let expected = [1, 2];
+ /// for x in &set {
+ /// assert!(expected.contains(x));
+ /// i += 1;
+ /// }
+ /// assert_eq!(i, expected.len());
+ /// ```
+ fn sub(self, rhs: &HashSet<T, S>) -> HashSet<T, S> {
+ self.difference(rhs).cloned().collect()
+ }
+}
+
+/// An iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`iter`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`iter`]: HashSet::iter
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let a = HashSet::from([1, 2, 3]);
+///
+/// let mut iter = a.iter();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, K: 'a> {
+ base: base::Iter<'a, K>,
+}
+
+/// An owning iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashSet`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: IntoIterator::into_iter
+/// [`IntoIterator`]: crate::iter::IntoIterator
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let a = HashSet::from([1, 2, 3]);
+///
+/// let mut iter = a.into_iter();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<K> {
+ base: base::IntoIter<K>,
+}
+
+/// A draining iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`drain`]: HashSet::drain
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let mut a = HashSet::from([1, 2, 3]);
+///
+/// let mut drain = a.drain();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Drain<'a, K: 'a> {
+ base: base::Drain<'a, K>,
+}
+
+/// A draining, filtering iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`drain_filter`] method on [`HashSet`].
+///
+/// [`drain_filter`]: HashSet::drain_filter
+///
+/// # Examples
+///
+/// ```
+/// #![feature(hash_drain_filter)]
+///
+/// use std::collections::HashSet;
+///
+/// let mut a = HashSet::from([1, 2, 3]);
+///
+/// let mut drain_filtered = a.drain_filter(|v| v % 2 == 0);
+/// ```
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+pub struct DrainFilter<'a, K, F>
+where
+ F: FnMut(&K) -> bool,
+{
+ base: base::DrainFilter<'a, K, F>,
+}
+
+/// A lazy iterator producing elements in the intersection of `HashSet`s.
+///
+/// This `struct` is created by the [`intersection`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`intersection`]: HashSet::intersection
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let a = HashSet::from([1, 2, 3]);
+/// let b = HashSet::from([4, 2, 3, 4]);
+///
+/// let mut intersection = a.intersection(&b);
+/// ```
+#[must_use = "this returns the intersection as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Intersection<'a, T: 'a, S: 'a> {
+ // iterator of the first set
+ iter: Iter<'a, T>,
+ // the second set
+ other: &'a HashSet<T, S>,
+}
+
+/// A lazy iterator producing elements in the difference of `HashSet`s.
+///
+/// This `struct` is created by the [`difference`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`difference`]: HashSet::difference
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let a = HashSet::from([1, 2, 3]);
+/// let b = HashSet::from([4, 2, 3, 4]);
+///
+/// let mut difference = a.difference(&b);
+/// ```
+#[must_use = "this returns the difference as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Difference<'a, T: 'a, S: 'a> {
+ // iterator of the first set
+ iter: Iter<'a, T>,
+ // the second set
+ other: &'a HashSet<T, S>,
+}
+
+/// A lazy iterator producing elements in the symmetric difference of `HashSet`s.
+///
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`HashSet`]. See its documentation for more.
+///
+/// [`symmetric_difference`]: HashSet::symmetric_difference
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let a = HashSet::from([1, 2, 3]);
+/// let b = HashSet::from([4, 2, 3, 4]);
+///
+/// let mut sym_diff = a.symmetric_difference(&b);
+/// ```
+#[must_use = "this returns the difference as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SymmetricDifference<'a, T: 'a, S: 'a> {
+ iter: Chain<Difference<'a, T, S>, Difference<'a, T, S>>,
+}
+
+/// A lazy iterator producing elements in the union of `HashSet`s.
+///
+/// This `struct` is created by the [`union`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`union`]: HashSet::union
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// let a = HashSet::from([1, 2, 3]);
+/// let b = HashSet::from([4, 2, 3, 4]);
+///
+/// let mut union_iter = a.union(&b);
+/// ```
+#[must_use = "this returns the union as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Union<'a, T: 'a, S: 'a> {
+ iter: Chain<Iter<'a, T>, Difference<'a, T, S>>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, S> IntoIterator for &'a HashSet<T, S> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ #[inline]
+ #[rustc_lint_query_instability]
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> IntoIterator for HashSet<T, S> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out
+ /// of the set in arbitrary order. The set cannot be used after calling
+ /// this.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a".to_string());
+ /// set.insert("b".to_string());
+ ///
+ /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
+ /// let v: Vec<String> = set.into_iter().collect();
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in &v {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[inline]
+ #[rustc_lint_query_instability]
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { base: self.base.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K> Clone for Iter<'_, K> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Iter { base: self.base.clone() }
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K> Iterator for Iter<'a, K> {
+ type Item = &'a K;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a K> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K> ExactSizeIterator for Iter<'_, K> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.base.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K> FusedIterator for Iter<'_, K> {}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K: fmt::Debug> fmt::Debug for Iter<'_, K> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K> Iterator for IntoIter<K> {
+ type Item = K;
+
+ #[inline]
+ fn next(&mut self) -> Option<K> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K> ExactSizeIterator for IntoIter<K> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.base.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K> FusedIterator for IntoIter<K> {}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K: fmt::Debug> fmt::Debug for IntoIter<K> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.base, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K> Iterator for Drain<'a, K> {
+ type Item = K;
+
+ #[inline]
+ fn next(&mut self) -> Option<K> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K> ExactSizeIterator for Drain<'_, K> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.base.len()
+ }
+}
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K> FusedIterator for Drain<'_, K> {}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<K: fmt::Debug> fmt::Debug for Drain<'_, K> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.base, f)
+ }
+}
+
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+impl<K, F> Iterator for DrainFilter<'_, K, F>
+where
+ F: FnMut(&K) -> bool,
+{
+ type Item = K;
+
+ #[inline]
+ fn next(&mut self) -> Option<K> {
+ self.base.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.base.size_hint()
+ }
+}
+
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+impl<K, F> FusedIterator for DrainFilter<'_, K, F> where F: FnMut(&K) -> bool {}
+
+#[unstable(feature = "hash_drain_filter", issue = "59618")]
+impl<'a, K, F> fmt::Debug for DrainFilter<'a, K, F>
+where
+ F: FnMut(&K) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DrainFilter").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Clone for Intersection<'_, T, S> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Intersection { iter: self.iter.clone(), ..*self }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, S> Iterator for Intersection<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ loop {
+ let elt = self.iter.next()?;
+ if self.other.contains(elt) {
+ return Some(elt);
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T, S> fmt::Debug for Intersection<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, S> FusedIterator for Intersection<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Clone for Difference<'_, T, S> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Difference { iter: self.iter.clone(), ..*self }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, S> Iterator for Difference<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ loop {
+ let elt = self.iter.next()?;
+ if !self.other.contains(elt) {
+ return Some(elt);
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper)
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, S> FusedIterator for Difference<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T, S> fmt::Debug for Difference<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Clone for SymmetricDifference<'_, T, S> {
+ #[inline]
+ fn clone(&self) -> Self {
+ SymmetricDifference { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, S> Iterator for SymmetricDifference<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, S> FusedIterator for SymmetricDifference<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T, S> fmt::Debug for SymmetricDifference<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, S> Clone for Union<'_, T, S> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Union { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, S> FusedIterator for Union<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T, S> fmt::Debug for Union<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, S> Iterator for Union<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn set<'new>(v: HashSet<&'static str>) -> HashSet<&'new str> {
+ v
+ }
+ fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> {
+ v
+ }
+ fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> {
+ v
+ }
+ fn difference<'a, 'new>(
+ v: Difference<'a, &'static str, RandomState>,
+ ) -> Difference<'a, &'new str, RandomState> {
+ v
+ }
+ fn symmetric_difference<'a, 'new>(
+ v: SymmetricDifference<'a, &'static str, RandomState>,
+ ) -> SymmetricDifference<'a, &'new str, RandomState> {
+ v
+ }
+ fn intersection<'a, 'new>(
+ v: Intersection<'a, &'static str, RandomState>,
+ ) -> Intersection<'a, &'new str, RandomState> {
+ v
+ }
+ fn union<'a, 'new>(
+ v: Union<'a, &'static str, RandomState>,
+ ) -> Union<'a, &'new str, RandomState> {
+ v
+ }
+ fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> {
+ d
+ }
+}
diff --git a/library/std/src/collections/hash/set/tests.rs b/library/std/src/collections/hash/set/tests.rs
new file mode 100644
index 000000000..233db276b
--- /dev/null
+++ b/library/std/src/collections/hash/set/tests.rs
@@ -0,0 +1,498 @@
+use super::super::map::RandomState;
+use super::HashSet;
+
+use crate::panic::{catch_unwind, AssertUnwindSafe};
+use crate::sync::atomic::{AtomicU32, Ordering};
+
+#[test]
+fn test_zero_capacities() {
+ type HS = HashSet<i32>;
+
+ let s = HS::new();
+ assert_eq!(s.capacity(), 0);
+
+ let s = HS::default();
+ assert_eq!(s.capacity(), 0);
+
+ let s = HS::with_hasher(RandomState::new());
+ assert_eq!(s.capacity(), 0);
+
+ let s = HS::with_capacity(0);
+ assert_eq!(s.capacity(), 0);
+
+ let s = HS::with_capacity_and_hasher(0, RandomState::new());
+ assert_eq!(s.capacity(), 0);
+
+ let mut s = HS::new();
+ s.insert(1);
+ s.insert(2);
+ s.remove(&1);
+ s.remove(&2);
+ s.shrink_to_fit();
+ assert_eq!(s.capacity(), 0);
+
+ let mut s = HS::new();
+ s.reserve(0);
+ assert_eq!(s.capacity(), 0);
+}
+
+#[test]
+fn test_disjoint() {
+ let mut xs = HashSet::new();
+ let mut ys = HashSet::new();
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(5));
+ assert!(ys.insert(11));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(7));
+ assert!(xs.insert(19));
+ assert!(xs.insert(4));
+ assert!(ys.insert(2));
+ assert!(ys.insert(-11));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(ys.insert(7));
+ assert!(!xs.is_disjoint(&ys));
+ assert!(!ys.is_disjoint(&xs));
+}
+
+#[test]
+fn test_subset_and_superset() {
+ let mut a = HashSet::new();
+ assert!(a.insert(0));
+ assert!(a.insert(5));
+ assert!(a.insert(11));
+ assert!(a.insert(7));
+
+ let mut b = HashSet::new();
+ assert!(b.insert(0));
+ assert!(b.insert(7));
+ assert!(b.insert(19));
+ assert!(b.insert(250));
+ assert!(b.insert(11));
+ assert!(b.insert(200));
+
+ assert!(!a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(!b.is_superset(&a));
+
+ assert!(b.insert(5));
+
+ assert!(a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(b.is_superset(&a));
+}
+
+#[test]
+fn test_iterate() {
+ let mut a = HashSet::new();
+ for i in 0..32 {
+ assert!(a.insert(i));
+ }
+ let mut observed: u32 = 0;
+ for k in &a {
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+}
+
+#[test]
+fn test_intersection() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+ assert!(a.intersection(&b).next().is_none());
+
+ assert!(a.insert(11));
+ assert!(a.insert(1));
+ assert!(a.insert(3));
+ assert!(a.insert(77));
+ assert!(a.insert(103));
+ assert!(a.insert(5));
+ assert!(a.insert(-5));
+
+ assert!(b.insert(2));
+ assert!(b.insert(11));
+ assert!(b.insert(77));
+ assert!(b.insert(-9));
+ assert!(b.insert(-42));
+ assert!(b.insert(5));
+ assert!(b.insert(3));
+
+ let mut i = 0;
+ let expected = [3, 5, 11, 77];
+ for x in a.intersection(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+
+ assert!(a.insert(9)); // make a bigger than b
+
+ i = 0;
+ for x in a.intersection(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+
+ i = 0;
+ for x in b.intersection(&a) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+}
+
+#[test]
+fn test_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(3));
+ assert!(b.insert(9));
+
+ let mut i = 0;
+ let expected = [1, 5, 11];
+ for x in a.difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+}
+
+#[test]
+fn test_symmetric_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(-2));
+ assert!(b.insert(3));
+ assert!(b.insert(9));
+ assert!(b.insert(14));
+ assert!(b.insert(22));
+
+ let mut i = 0;
+ let expected = [-2, 1, 5, 11, 14, 22];
+ for x in a.symmetric_difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+}
+
+#[test]
+fn test_union() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+ assert!(a.union(&b).next().is_none());
+ assert!(b.union(&a).next().is_none());
+
+ assert!(a.insert(1));
+ assert!(a.insert(3));
+ assert!(a.insert(11));
+ assert!(a.insert(16));
+ assert!(a.insert(19));
+ assert!(a.insert(24));
+
+ assert!(b.insert(-2));
+ assert!(b.insert(1));
+ assert!(b.insert(5));
+ assert!(b.insert(9));
+ assert!(b.insert(13));
+ assert!(b.insert(19));
+
+ let mut i = 0;
+ let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
+ for x in a.union(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+
+ assert!(a.insert(9)); // make a bigger than b
+ assert!(a.insert(5));
+
+ i = 0;
+ for x in a.union(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+
+ i = 0;
+ for x in b.union(&a) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+}
+
+#[test]
+fn test_from_iter() {
+ let xs = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9];
+
+ let set: HashSet<_> = xs.iter().cloned().collect();
+
+ for x in &xs {
+ assert!(set.contains(x));
+ }
+
+ assert_eq!(set.iter().len(), xs.len() - 1);
+}
+
+#[test]
+fn test_move_iter() {
+ let hs = {
+ let mut hs = HashSet::new();
+
+ hs.insert('a');
+ hs.insert('b');
+
+ hs
+ };
+
+ let v = hs.into_iter().collect::<Vec<char>>();
+ assert!(v == ['a', 'b'] || v == ['b', 'a']);
+}
+
+#[test]
+fn test_eq() {
+ // These constants once happened to expose a bug in insert().
+ // I'm keeping them around to prevent a regression.
+ let mut s1 = HashSet::new();
+
+ s1.insert(1);
+ s1.insert(2);
+ s1.insert(3);
+
+ let mut s2 = HashSet::new();
+
+ s2.insert(1);
+ s2.insert(2);
+
+ assert!(s1 != s2);
+
+ s2.insert(3);
+
+ assert_eq!(s1, s2);
+}
+
+#[test]
+fn test_show() {
+ let mut set = HashSet::new();
+ let empty = HashSet::<i32>::new();
+
+ set.insert(1);
+ set.insert(2);
+
+ let set_str = format!("{set:?}");
+
+ assert!(set_str == "{1, 2}" || set_str == "{2, 1}");
+ assert_eq!(format!("{empty:?}"), "{}");
+}
+
+#[test]
+fn test_trivial_drain() {
+ let mut s = HashSet::<i32>::new();
+ for _ in s.drain() {}
+ assert!(s.is_empty());
+ drop(s);
+
+ let mut s = HashSet::<i32>::new();
+ drop(s.drain());
+ assert!(s.is_empty());
+}
+
+#[test]
+fn test_drain() {
+ let mut s: HashSet<_> = (1..100).collect();
+
+ // try this a bunch of times to make sure we don't screw up internal state.
+ for _ in 0..20 {
+ assert_eq!(s.len(), 99);
+
+ {
+ let mut last_i = 0;
+ let mut d = s.drain();
+ for (i, x) in d.by_ref().take(50).enumerate() {
+ last_i = i;
+ assert!(x != 0);
+ }
+ assert_eq!(last_i, 49);
+ }
+
+ for _ in &s {
+ panic!("s should be empty!");
+ }
+
+ // reset to try again.
+ s.extend(1..100);
+ }
+}
+
+#[test]
+fn test_replace() {
+ use crate::hash;
+
+ #[derive(Debug)]
+ struct Foo(&'static str, i32);
+
+ impl PartialEq for Foo {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+ }
+
+ impl Eq for Foo {}
+
+ impl hash::Hash for Foo {
+ fn hash<H: hash::Hasher>(&self, h: &mut H) {
+ self.0.hash(h);
+ }
+ }
+
+ let mut s = HashSet::new();
+ assert_eq!(s.replace(Foo("a", 1)), None);
+ assert_eq!(s.len(), 1);
+ assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1)));
+ assert_eq!(s.len(), 1);
+
+ let mut it = s.iter();
+ assert_eq!(it.next(), Some(&Foo("a", 2)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = HashSet::new();
+ a.insert(1);
+
+ a.extend(&[2, 3, 4]);
+
+ assert_eq!(a.len(), 4);
+ assert!(a.contains(&1));
+ assert!(a.contains(&2));
+ assert!(a.contains(&3));
+ assert!(a.contains(&4));
+
+ let mut b = HashSet::new();
+ b.insert(5);
+ b.insert(6);
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 6);
+ assert!(a.contains(&1));
+ assert!(a.contains(&2));
+ assert!(a.contains(&3));
+ assert!(a.contains(&4));
+ assert!(a.contains(&5));
+ assert!(a.contains(&6));
+}
+
+#[test]
+fn test_retain() {
+ let xs = [1, 2, 3, 4, 5, 6];
+ let mut set: HashSet<i32> = xs.iter().cloned().collect();
+ set.retain(|&k| k % 2 == 0);
+ assert_eq!(set.len(), 3);
+ assert!(set.contains(&2));
+ assert!(set.contains(&4));
+ assert!(set.contains(&6));
+}
+
+#[test]
+fn test_drain_filter() {
+ let mut x: HashSet<_> = [1].iter().copied().collect();
+ let mut y: HashSet<_> = [1].iter().copied().collect();
+
+ x.drain_filter(|_| true);
+ y.drain_filter(|_| false);
+ assert_eq!(x.len(), 0);
+ assert_eq!(y.len(), 1);
+}
+
+#[test]
+fn test_drain_filter_drop_panic_leak() {
+ static PREDS: AtomicU32 = AtomicU32::new(0);
+ static DROPS: AtomicU32 = AtomicU32::new(0);
+
+ #[derive(PartialEq, Eq, PartialOrd, Hash)]
+ struct D(i32);
+ impl Drop for D {
+ fn drop(&mut self) {
+ if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut set = (0..3).map(|i| D(i)).collect::<HashSet<_>>();
+
+ catch_unwind(move || {
+ drop(set.drain_filter(|_| {
+ PREDS.fetch_add(1, Ordering::SeqCst);
+ true
+ }))
+ })
+ .ok();
+
+ assert_eq!(PREDS.load(Ordering::SeqCst), 3);
+ assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+}
+
+#[test]
+fn test_drain_filter_pred_panic_leak() {
+ static PREDS: AtomicU32 = AtomicU32::new(0);
+ static DROPS: AtomicU32 = AtomicU32::new(0);
+
+ #[derive(PartialEq, Eq, PartialOrd, Hash)]
+ struct D;
+ impl Drop for D {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let mut set: HashSet<_> = (0..3).map(|_| D).collect();
+
+ catch_unwind(AssertUnwindSafe(|| {
+ drop(set.drain_filter(|_| match PREDS.fetch_add(1, Ordering::SeqCst) {
+ 0 => true,
+ _ => panic!(),
+ }))
+ }))
+ .ok();
+
+ assert_eq!(PREDS.load(Ordering::SeqCst), 1);
+ assert_eq!(DROPS.load(Ordering::SeqCst), 3);
+ assert_eq!(set.len(), 0);
+}
+
+#[test]
+fn from_array() {
+ let set = HashSet::from([1, 2, 3, 4]);
+ let unordered_duplicates = HashSet::from([4, 1, 4, 3, 2]);
+ assert_eq!(set, unordered_duplicates);
+
+ // This next line must infer the hasher type parameter.
+ // If you make a change that causes this line to no longer infer,
+ // that's a problem!
+ let _must_not_require_type_annotation = HashSet::from([1, 2]);
+}
diff --git a/library/std/src/collections/mod.rs b/library/std/src/collections/mod.rs
new file mode 100644
index 000000000..ae2baba09
--- /dev/null
+++ b/library/std/src/collections/mod.rs
@@ -0,0 +1,446 @@
+//! Collection types.
+//!
+//! Rust's standard collection library provides efficient implementations of the
+//! most common general purpose programming data structures. By using the
+//! standard implementations, it should be possible for two libraries to
+//! communicate without significant data conversion.
+//!
+//! To get this out of the way: you should probably just use [`Vec`] or [`HashMap`].
+//! These two collections cover most use cases for generic data storage and
+//! processing. They are exceptionally good at doing what they do. All the other
+//! collections in the standard library have specific use cases where they are
+//! the optimal choice, but these cases are borderline *niche* in comparison.
+//! Even when `Vec` and `HashMap` are technically suboptimal, they're probably a
+//! good enough choice to get started.
+//!
+//! Rust's collections can be grouped into four major categories:
+//!
+//! * Sequences: [`Vec`], [`VecDeque`], [`LinkedList`]
+//! * Maps: [`HashMap`], [`BTreeMap`]
+//! * Sets: [`HashSet`], [`BTreeSet`]
+//! * Misc: [`BinaryHeap`]
+//!
+//! # When Should You Use Which Collection?
+//!
+//! These are fairly high-level and quick breakdowns of when each collection
+//! should be considered. Detailed discussions of strengths and weaknesses of
+//! individual collections can be found on their own documentation pages.
+//!
+//! ### Use a `Vec` when:
+//! * You want to collect items up to be processed or sent elsewhere later, and
+//! don't care about any properties of the actual values being stored.
+//! * You want a sequence of elements in a particular order, and will only be
+//! appending to (or near) the end.
+//! * You want a stack.
+//! * You want a resizable array.
+//! * You want a heap-allocated array.
+//!
+//! ### Use a `VecDeque` when:
+//! * You want a [`Vec`] that supports efficient insertion at both ends of the
+//! sequence.
+//! * You want a queue.
+//! * You want a double-ended queue (deque).
+//!
+//! ### Use a `LinkedList` when:
+//! * You want a [`Vec`] or [`VecDeque`] of unknown size, and can't tolerate
+//! amortization.
+//! * You want to efficiently split and append lists.
+//! * You are *absolutely* certain you *really*, *truly*, want a doubly linked
+//! list.
+//!
+//! ### Use a `HashMap` when:
+//! * You want to associate arbitrary keys with arbitrary values.
+//! * You want a cache.
+//! * You want a map, with no extra functionality.
+//!
+//! ### Use a `BTreeMap` when:
+//! * You want a map sorted by its keys.
+//! * You want to be able to get a range of entries on-demand.
+//! * You're interested in what the smallest or largest key-value pair is.
+//! * You want to find the largest or smallest key that is smaller or larger
+//! than something.
+//!
+//! ### Use the `Set` variant of any of these `Map`s when:
+//! * You just want to remember which keys you've seen.
+//! * There is no meaningful value to associate with your keys.
+//! * You just want a set.
+//!
+//! ### Use a `BinaryHeap` when:
+//!
+//! * You want to store a bunch of elements, but only ever want to process the
+//! "biggest" or "most important" one at any given time.
+//! * You want a priority queue.
+//!
+//! # Performance
+//!
+//! Choosing the right collection for the job requires an understanding of what
+//! each collection is good at. Here we briefly summarize the performance of
+//! different collections for certain important operations. For further details,
+//! see each type's documentation, and note that the names of actual methods may
+//! differ from the tables below on certain collections.
+//!
+//! Throughout the documentation, we will follow a few conventions. For all
+//! operations, the collection's size is denoted by n. If another collection is
+//! involved in the operation, it contains m elements. Operations which have an
+//! *amortized* cost are suffixed with a `*`. Operations with an *expected*
+//! cost are suffixed with a `~`.
+//!
+//! All amortized costs are for the potential need to resize when capacity is
+//! exhausted. If a resize occurs it will take *O*(*n*) time. Our collections never
+//! automatically shrink, so removal operations aren't amortized. Over a
+//! sufficiently large series of operations, the average cost per operation will
+//! deterministically equal the given cost.
+//!
+//! Only [`HashMap`] has expected costs, due to the probabilistic nature of hashing.
+//! It is theoretically possible, though very unlikely, for [`HashMap`] to
+//! experience worse performance.
+//!
+//! ## Sequences
+//!
+//! | | get(i) | insert(i) | remove(i) | append | split_off(i) |
+//! |----------------|------------------------|-------------------------|------------------------|-----------|------------------------|
+//! | [`Vec`] | *O*(1) | *O*(*n*-*i*)* | *O*(*n*-*i*) | *O*(*m*)* | *O*(*n*-*i*) |
+//! | [`VecDeque`] | *O*(1) | *O*(min(*i*, *n*-*i*))* | *O*(min(*i*, *n*-*i*)) | *O*(*m*)* | *O*(min(*i*, *n*-*i*)) |
+//! | [`LinkedList`] | *O*(min(*i*, *n*-*i*)) | *O*(min(*i*, *n*-*i*)) | *O*(min(*i*, *n*-*i*)) | *O*(1) | *O*(min(*i*, *n*-*i*)) |
+//!
+//! Note that where ties occur, [`Vec`] is generally going to be faster than [`VecDeque`], and
+//! [`VecDeque`] is generally going to be faster than [`LinkedList`].
+//!
+//! ## Maps
+//!
+//! For Sets, all operations have the cost of the equivalent Map operation.
+//!
+//! | | get | insert | remove | range | append |
+//! |--------------|---------------|---------------|---------------|---------------|--------------|
+//! | [`HashMap`] | *O*(1)~ | *O*(1)~* | *O*(1)~ | N/A | N/A |
+//! | [`BTreeMap`] | *O*(log(*n*)) | *O*(log(*n*)) | *O*(log(*n*)) | *O*(log(*n*)) | *O*(*n*+*m*) |
+//!
+//! # Correct and Efficient Usage of Collections
+//!
+//! Of course, knowing which collection is the right one for the job doesn't
+//! instantly permit you to use it correctly. Here are some quick tips for
+//! efficient and correct usage of the standard collections in general. If
+//! you're interested in how to use a specific collection in particular, consult
+//! its documentation for detailed discussion and code examples.
+//!
+//! ## Capacity Management
+//!
+//! Many collections provide several constructors and methods that refer to
+//! "capacity". These collections are generally built on top of an array.
+//! Optimally, this array would be exactly the right size to fit only the
+//! elements stored in the collection, but for the collection to do this would
+//! be very inefficient. If the backing array was exactly the right size at all
+//! times, then every time an element is inserted, the collection would have to
+//! grow the array to fit it. Due to the way memory is allocated and managed on
+//! most computers, this would almost surely require allocating an entirely new
+//! array and copying every single element from the old one into the new one.
+//! Hopefully you can see that this wouldn't be very efficient to do on every
+//! operation.
+//!
+//! Most collections therefore use an *amortized* allocation strategy. They
+//! generally let themselves have a fair amount of unoccupied space so that they
+//! only have to grow on occasion. When they do grow, they allocate a
+//! substantially larger array to move the elements into so that it will take a
+//! while for another grow to be required. While this strategy is great in
+//! general, it would be even better if the collection *never* had to resize its
+//! backing array. Unfortunately, the collection itself doesn't have enough
+//! information to do this on its own. Therefore, it is up to us programmers to give
+//! it hints.
+//!
+//! Any `with_capacity` constructor will instruct the collection to allocate
+//! enough space for the specified number of elements. Ideally this will be for
+//! exactly that many elements, but some implementation details may prevent
+//! this. See collection-specific documentation for details. In general, use
+//! `with_capacity` when you know exactly how many elements will be inserted, or
+//! at least have a reasonable upper-bound on that number.
+//!
+//! When anticipating a large influx of elements, the `reserve` family of
+//! methods can be used to hint to the collection how much room it should make
+//! for the coming items. As with `with_capacity`, the precise behavior of
+//! these methods will be specific to the collection of interest.
+//!
+//! For optimal performance, collections will generally avoid shrinking
+//! themselves. If you believe that a collection will not soon contain any more
+//! elements, or just really need the memory, the `shrink_to_fit` method prompts
+//! the collection to shrink the backing array to the minimum size capable of
+//! holding its elements.
+//!
+//! Finally, if ever you're interested in what the actual capacity of the
+//! collection is, most collections provide a `capacity` method to query this
+//! information on demand. This can be useful for debugging purposes, or for
+//! use with the `reserve` methods.
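+//!
+//! As a brief sketch of these tools used together (shown here with [`Vec`],
+//! though most collections expose the same methods):
+//!
+//! ```
+//! let mut v: Vec<u32> = Vec::with_capacity(10);
+//! assert!(v.capacity() >= 10);
+//!
+//! v.extend(0..10); // fits in the preallocated space, no resize needed
+//! v.reserve(90); // hint that roughly 90 more elements are coming
+//! assert!(v.capacity() >= 100);
+//!
+//! v.shrink_to_fit(); // give the unused space back
+//! assert!(v.capacity() >= v.len());
+//! ```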
+//!
+//! ## Iterators
+//!
+//! Iterators are a powerful and robust mechanism used throughout Rust's
+//! standard libraries. Iterators provide a sequence of values in a generic,
+//! safe, efficient and convenient way. The contents of an iterator are usually
+//! *lazily* evaluated, so that only the values that are actually needed are
+//! ever actually produced, and no allocation need be done to temporarily store
+//! them. Iterators are primarily consumed using a `for` loop, although many
+//! functions also take iterators where a collection or sequence of values is
+//! desired.
+//!
+//! All of the standard collections provide several iterators for performing
+//! bulk manipulation of their contents. The three primary iterators almost
+//! every collection should provide are `iter`, `iter_mut`, and `into_iter`.
+//! Some of these are not provided on collections where it would be unsound or
+//! unreasonable to provide them.
+//!
+//! `iter` provides an iterator of immutable references to all the contents of a
+//! collection in the most "natural" order. For sequence collections like [`Vec`],
+//! this means the items will be yielded in increasing order of index starting
+//! at 0. For ordered collections like [`BTreeMap`], this means that the items
+//! will be yielded in sorted order. For unordered collections like [`HashMap`],
+//! the items will be yielded in whatever order the internal representation made
+//! most convenient. This is great for reading through all the contents of the
+//! collection.
+//!
+//! ```
+//! let vec = vec![1, 2, 3, 4];
+//! for x in vec.iter() {
+//! println!("vec contained {x:?}");
+//! }
+//! ```
+//!
+//! `iter_mut` provides an iterator of *mutable* references in the same order as
+//! `iter`. This is great for mutating all the contents of the collection.
+//!
+//! ```
+//! let mut vec = vec![1, 2, 3, 4];
+//! for x in vec.iter_mut() {
+//! *x += 1;
+//! }
+//! ```
+//!
+//! `into_iter` transforms the actual collection into an iterator over its
+//! contents by-value. This is great when the collection itself is no longer
+//! needed, and the values are needed elsewhere. Using `extend` with `into_iter`
+//! is the main way that contents of one collection are moved into another.
+//! `extend` automatically calls `into_iter`, and takes any <code>T: [IntoIterator]</code>.
+//! Calling `collect` on an iterator itself is also a great way to convert one
+//! collection into another. Both of these methods should internally use the
+//! capacity management tools discussed in the previous section to do this as
+//! efficiently as possible.
+//!
+//! ```
+//! let mut vec1 = vec![1, 2, 3, 4];
+//! let vec2 = vec![10, 20, 30, 40];
+//! vec1.extend(vec2);
+//! ```
+//!
+//! ```
+//! use std::collections::VecDeque;
+//!
+//! let vec = [1, 2, 3, 4];
+//! let buf: VecDeque<_> = vec.into_iter().collect();
+//! ```
+//!
+//! Iterators also provide a series of *adapter* methods for performing common
+//! operations on sequences. Among the adapters are functional favorites like `map`,
+//! `fold`, `skip` and `take`. Of particular interest to collections is the
+//! `rev` adapter, which reverses any iterator that supports this operation. Most
+//! collections provide reversible iterators as the way to iterate over them in
+//! reverse order.
+//!
+//! ```
+//! let vec = vec![1, 2, 3, 4];
+//! for x in vec.iter().rev() {
+//! println!("vec contained {x:?}");
+//! }
+//! ```
+//!
+//! Several other collection methods also return iterators to yield a sequence
+//! of results but avoid allocating an entire collection to store the result in.
+//! This provides maximum flexibility as `collect` or `extend` can be called to
+//! "pipe" the sequence into any collection if desired. Otherwise, the sequence
+//! can be looped over with a `for` loop. The iterator can also be discarded
+//! after partial use, preventing the computation of the unused items.
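+//!
+//! One small sketch of this pattern, using `drain` on a [`Vec`]:
+//!
+//! ```
+//! use std::collections::VecDeque;
+//!
+//! let mut source = vec![1, 2, 3, 4, 5];
+//! // Move the first three elements out lazily and pipe them into a VecDeque,
+//! // without building an intermediate collection.
+//! let moved: VecDeque<i32> = source.drain(..3).collect();
+//! assert_eq!(moved, VecDeque::from([1, 2, 3]));
+//! assert_eq!(source, [4, 5]);
+//! ```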
+//!
+//! ## Entries
+//!
+//! The `entry` API is intended to provide an efficient mechanism for
+//! manipulating the contents of a map conditionally on the presence of a key or
+//! not. The primary motivating use case for this is to provide efficient
+//! accumulator maps. For instance, if one wishes to maintain a count of the
+//! number of times each key has been seen, they will have to perform some
+//! conditional logic on whether this is the first time the key has been seen or
+//! not. Normally, this would require a `find` followed by an `insert`,
+//! effectively duplicating the search effort on each insertion.
+//!
+//! When a user calls `map.entry(key)`, the map will search for the key and
+//! then yield a variant of the `Entry` enum.
+//!
+//! If a `Vacant(entry)` is yielded, then the key *was not* found. In this case
+//! the only valid operation is to `insert` a value into the entry. When this is
+//! done, the vacant entry is consumed and converted into a mutable reference to
+//! the value that was inserted. This allows for further manipulation of the
+//! value beyond the lifetime of the search itself. This is useful if complex
+//! logic needs to be performed on the value regardless of whether the value was
+//! just inserted.
+//!
+//! If an `Occupied(entry)` is yielded, then the key *was* found. In this case,
+//! the user has several options: they can `get`, `insert` or `remove` the
+//! value of the occupied entry. Additionally, they can convert the occupied
+//! entry into a mutable reference to its value, providing symmetry to the
+//! vacant `insert` case.
+//!
+//! ### Examples
+//!
+//! Here are the two primary ways in which `entry` is used. First, a simple
+//! example where the logic performed on the values is trivial.
+//!
+//! #### Counting the number of times each character in a string occurs
+//!
+//! ```
+//! use std::collections::btree_map::BTreeMap;
+//!
+//! let mut count = BTreeMap::new();
+//! let message = "she sells sea shells by the sea shore";
+//!
+//! for c in message.chars() {
+//! *count.entry(c).or_insert(0) += 1;
+//! }
+//!
+//! assert_eq!(count.get(&'s'), Some(&8));
+//!
+//! println!("Number of occurrences of each character");
+//! for (char, count) in &count {
+//! println!("{char}: {count}");
+//! }
+//! ```
+//!
+//! When the logic to be performed on the value is more complex, we may simply
+//! use the `entry` API to ensure that the value is initialized and perform the
+//! logic afterwards.
+//!
+//! #### Tracking the inebriation of customers at a bar
+//!
+//! ```
+//! use std::collections::btree_map::BTreeMap;
+//!
+//! // A client of the bar. They have a blood alcohol level.
+//! struct Person { blood_alcohol: f32 }
+//!
+//! // All the orders made to the bar, by client ID.
+//! let orders = vec![1, 2, 1, 2, 3, 4, 1, 2, 2, 3, 4, 1, 1, 1];
+//!
+//! // Our clients.
+//! let mut blood_alcohol = BTreeMap::new();
+//!
+//! for id in orders {
+//! // If this is the first time we've seen this customer, initialize them
+//! // with no blood alcohol. Otherwise, just retrieve them.
+//! let person = blood_alcohol.entry(id).or_insert(Person { blood_alcohol: 0.0 });
+//!
+//! // Reduce their blood alcohol level. It takes time to order and drink a beer!
+//! person.blood_alcohol *= 0.9;
+//!
+//! // Check if they're sober enough to have another beer.
+//! if person.blood_alcohol > 0.3 {
+//! // Too drunk... for now.
+//! println!("Sorry {id}, I have to cut you off");
+//! } else {
+//! // Have another!
+//! person.blood_alcohol += 0.1;
+//! }
+//! }
+//! ```
+//!
+//! # Insert and complex keys
+//!
+//! If we have a more complex key, calls to `insert` will
+//! not update the value of the key. For example:
+//!
+//! ```
+//! use std::cmp::Ordering;
+//! use std::collections::BTreeMap;
+//! use std::hash::{Hash, Hasher};
+//!
+//! #[derive(Debug)]
+//! struct Foo {
+//! a: u32,
+//! b: &'static str,
+//! }
+//!
+//! // we will compare `Foo`s by their `a` value only.
+//! impl PartialEq for Foo {
+//! fn eq(&self, other: &Self) -> bool { self.a == other.a }
+//! }
+//!
+//! impl Eq for Foo {}
+//!
+//! // we will hash `Foo`s by their `a` value only.
+//! impl Hash for Foo {
+//! fn hash<H: Hasher>(&self, h: &mut H) { self.a.hash(h); }
+//! }
+//!
+//! impl PartialOrd for Foo {
+//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.a.partial_cmp(&other.a) }
+//! }
+//!
+//! impl Ord for Foo {
+//! fn cmp(&self, other: &Self) -> Ordering { self.a.cmp(&other.a) }
+//! }
+//!
+//! let mut map = BTreeMap::new();
+//! map.insert(Foo { a: 1, b: "baz" }, 99);
+//!
+//! // We already have a Foo with `a == 1`, so this will just update the value.
+//! map.insert(Foo { a: 1, b: "xyz" }, 100);
+//!
+//! // The value has been updated...
+//! assert_eq!(map.values().next().unwrap(), &100);
+//!
+//! // ...but the key hasn't changed. b is still "baz", not "xyz".
+//! assert_eq!(map.keys().next().unwrap().b, "baz");
+//! ```
+//!
+//! [IntoIterator]: crate::iter::IntoIterator "iter::IntoIterator"
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// FIXME(#82080) The deprecation here is only theoretical, and does not actually produce a warning.
+#[deprecated(note = "moved to `std::ops::Bound`", since = "1.26.0")]
+#[doc(hidden)]
+pub use crate::ops::Bound;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::collections::{binary_heap, btree_map, btree_set};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::collections::{linked_list, vec_deque};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::collections::{BTreeMap, BTreeSet, BinaryHeap};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::collections::{LinkedList, VecDeque};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::hash_map::HashMap;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::hash_set::HashSet;
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+pub use alloc_crate::collections::TryReserveError;
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+pub use alloc_crate::collections::TryReserveErrorKind;
+
+mod hash;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod hash_map {
+ //! A hash map implemented with quadratic probing and SIMD lookup.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::hash::map::*;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod hash_set {
+ //! A hash set implemented as a `HashMap` where the value is `()`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::hash::set::*;
+}
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
new file mode 100644
index 000000000..463f71406
--- /dev/null
+++ b/library/std/src/env.rs
@@ -0,0 +1,982 @@
+//! Inspection and manipulation of the process's environment.
+//!
+//! This module contains functions to inspect various aspects of the running
+//! process, such as environment variables, process arguments, the current
+//! directory, and other important directories.
+//!
+//! There are several functions and structs in this module that have a
+//! counterpart ending in `os`. Those ending in `os` will return an [`OsString`]
+//! and those without will return a [`String`].
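+//!
+//! For example, a quick sketch of the two flavors:
+//!
+//! ```
+//! use std::env;
+//! use std::ffi::OsString;
+//!
+//! // `var` yields a `String` (erroring on non-Unicode data),
+//! // while `var_os` yields an `OsString`.
+//! let _path: Result<String, env::VarError> = env::var("PATH");
+//! let _path_os: Option<OsString> = env::var_os("PATH");
+//! ```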
+
+#![stable(feature = "env", since = "1.0.0")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::error::Error;
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::path::{Path, PathBuf};
+use crate::sys;
+use crate::sys::os as os_imp;
+
+/// Returns the current working directory as a [`PathBuf`].
+///
+/// # Platform-specific behavior
+///
+/// This function [currently] corresponds to the `getcwd` function on Unix
+/// and the `GetCurrentDirectoryW` function on Windows.
+///
+/// [currently]: crate::io#platform-specific-behavior
+///
+/// # Errors
+///
+/// Returns an [`Err`] if the current working directory value is invalid.
+/// Possible cases:
+///
+/// * Current directory does not exist.
+/// * There are insufficient permissions to access the current directory.
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// fn main() -> std::io::Result<()> {
+/// let path = env::current_dir()?;
+/// println!("The current directory is {}", path.display());
+/// Ok(())
+/// }
+/// ```
+#[doc(alias = "pwd")]
+#[doc(alias = "getcwd")]
+#[doc(alias = "GetCurrentDirectory")]
+#[stable(feature = "env", since = "1.0.0")]
+pub fn current_dir() -> io::Result<PathBuf> {
+ os_imp::getcwd()
+}
+
+/// Changes the current working directory to the specified path.
+///
+/// # Platform-specific behavior
+///
+/// This function [currently] corresponds to the `chdir` function on Unix
+/// and the `SetCurrentDirectoryW` function on Windows.
+///
+/// # Errors
+///
+/// Returns an [`Err`] if the operation fails.
+///
+/// [currently]: crate::io#platform-specific-behavior
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+/// use std::path::Path;
+///
+/// let root = Path::new("/");
+/// assert!(env::set_current_dir(&root).is_ok());
+/// println!("Successfully changed working directory to {}!", root.display());
+/// ```
+#[doc(alias = "chdir")]
+#[stable(feature = "env", since = "1.0.0")]
+pub fn set_current_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
+ os_imp::chdir(path.as_ref())
+}
+
+/// An iterator over a snapshot of the environment variables of this process.
+///
+/// This structure is created by [`env::vars()`]. See its documentation for more.
+///
+/// [`env::vars()`]: vars
+#[stable(feature = "env", since = "1.0.0")]
+pub struct Vars {
+ inner: VarsOs,
+}
+
+/// An iterator over a snapshot of the environment variables of this process.
+///
+/// This structure is created by [`env::vars_os()`]. See its documentation for more.
+///
+/// [`env::vars_os()`]: vars_os
+#[stable(feature = "env", since = "1.0.0")]
+pub struct VarsOs {
+ inner: os_imp::Env,
+}
+
+/// Returns an iterator of (variable, value) pairs of strings, for all the
+/// environment variables of the current process.
+///
+/// The returned iterator contains a snapshot of the process's environment
+/// variables at the time of this invocation. Modifications to environment
+/// variables afterwards will not be reflected in the returned iterator.
+///
+/// # Panics
+///
+/// While iterating, the returned iterator will panic if any key or value in the
+/// environment is not valid Unicode. If this is not desired, consider using
+/// [`env::vars_os()`].
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// // Print every (key, value) pair returned by env::vars().
+/// for (key, value) in env::vars() {
+/// println!("{key}: {value}");
+/// }
+/// ```
+///
+/// [`env::vars_os()`]: vars_os
+#[must_use]
+#[stable(feature = "env", since = "1.0.0")]
+pub fn vars() -> Vars {
+ Vars { inner: vars_os() }
+}
+
+/// Returns an iterator of (variable, value) pairs of OS strings, for all the
+/// environment variables of the current process.
+///
+/// The returned iterator contains a snapshot of the process's environment
+/// variables at the time of this invocation. Modifications to environment
+/// variables afterwards will not be reflected in the returned iterator.
+///
+/// Note that the returned iterator will not check if the environment variables
+/// are valid Unicode. If you want to panic on invalid UTF-8,
+/// use the [`vars`] function instead.
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// // Print every (key, value) pair returned by env::vars_os().
+/// for (key, value) in env::vars_os() {
+/// println!("{key:?}: {value:?}");
+/// }
+/// ```
+#[must_use]
+#[stable(feature = "env", since = "1.0.0")]
+pub fn vars_os() -> VarsOs {
+ VarsOs { inner: os_imp::env() }
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl Iterator for Vars {
+ type Item = (String, String);
+ fn next(&mut self) -> Option<(String, String)> {
+ self.inner.next().map(|(a, b)| (a.into_string().unwrap(), b.into_string().unwrap()))
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Vars {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Vars").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl Iterator for VarsOs {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for VarsOs {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("VarOs").finish_non_exhaustive()
+ }
+}
+
+/// Fetches the environment variable `key` from the current process.
+///
+/// # Errors
+///
+/// This function will return an error if the environment variable isn't set.
+///
+/// This function may return an error if the environment variable's name contains
+/// the equal sign character (`=`) or the NUL character.
+///
+/// This function will return an error if the environment variable's value is
+/// not valid Unicode. If this is not desired, consider using [`var_os`].
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// let key = "HOME";
+/// match env::var(key) {
+/// Ok(val) => println!("{key}: {val:?}"),
+/// Err(e) => println!("couldn't interpret {key}: {e}"),
+/// }
+/// ```
+#[stable(feature = "env", since = "1.0.0")]
+pub fn var<K: AsRef<OsStr>>(key: K) -> Result<String, VarError> {
+ _var(key.as_ref())
+}
+
+fn _var(key: &OsStr) -> Result<String, VarError> {
+ match var_os(key) {
+ Some(s) => s.into_string().map_err(VarError::NotUnicode),
+ None => Err(VarError::NotPresent),
+ }
+}
+
+/// Fetches the environment variable `key` from the current process, returning
+/// [`None`] if the variable isn't set or there's another error.
+///
+/// Note that the method will not check if the environment variable
+/// is valid Unicode. If you want to have an error on invalid UTF-8,
+/// use the [`var`] function instead.
+///
+/// # Errors
+///
+/// This function returns [`None`] if the environment variable isn't set.
+///
+/// This function may return [`None`] if the environment variable's name contains
+/// the equal sign character (`=`) or the NUL character.
+///
+/// This function may return [`None`] if the environment variable's value contains
+/// the NUL character.
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// let key = "HOME";
+/// match env::var_os(key) {
+/// Some(val) => println!("{key}: {val:?}"),
+/// None => println!("{key} is not defined in the environment.")
+/// }
+/// ```
+#[must_use]
+#[stable(feature = "env", since = "1.0.0")]
+pub fn var_os<K: AsRef<OsStr>>(key: K) -> Option<OsString> {
+ _var_os(key.as_ref())
+}
+
+fn _var_os(key: &OsStr) -> Option<OsString> {
+ os_imp::getenv(key)
+}
+
+/// The error type for operations interacting with environment variables.
+/// Possibly returned from [`env::var()`].
+///
+/// [`env::var()`]: var
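+///
+/// # Examples
+///
+/// A small sketch of matching on the two variants (`SOME_VAR` is just a
+/// placeholder name):
+///
+/// ```
+/// use std::env::{self, VarError};
+///
+/// match env::var("SOME_VAR") {
+///     Ok(val) => println!("SOME_VAR: {val}"),
+///     Err(VarError::NotPresent) => println!("SOME_VAR is not set"),
+///     Err(VarError::NotUnicode(raw)) => println!("SOME_VAR is not valid unicode: {raw:?}"),
+/// }
+/// ```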
+#[derive(Debug, PartialEq, Eq, Clone)]
+#[stable(feature = "env", since = "1.0.0")]
+pub enum VarError {
+ /// The specified environment variable was not present in the current
+ /// process's environment.
+ #[stable(feature = "env", since = "1.0.0")]
+ NotPresent,
+
+ /// The specified environment variable was found, but it did not contain
+ /// valid Unicode data. The found data is returned as a payload of this
+ /// variant.
+ #[stable(feature = "env", since = "1.0.0")]
+ NotUnicode(#[stable(feature = "env", since = "1.0.0")] OsString),
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl fmt::Display for VarError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ VarError::NotPresent => write!(f, "environment variable not found"),
+ VarError::NotUnicode(ref s) => {
+ write!(f, "environment variable was not valid unicode: {:?}", s)
+ }
+ }
+ }
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl Error for VarError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ match *self {
+ VarError::NotPresent => "environment variable not found",
+ VarError::NotUnicode(..) => "environment variable was not valid unicode",
+ }
+ }
+}
+
+/// Sets the environment variable `key` to the value `value` for the currently running
+/// process.
+///
+/// Note that while concurrent access to environment variables is safe in Rust,
+/// some platforms only expose inherently unsafe non-threadsafe APIs for
+/// inspecting the environment. As a result, extra care needs to be taken when
+/// auditing calls to unsafe external FFI functions to ensure that any external
+/// environment accesses are properly synchronized with accesses in Rust.
+///
+/// Discussion of this unsafety on Unix may be found in:
+///
+/// - [Austin Group Bugzilla](https://austingroupbugs.net/view.php?id=188)
+/// - [GNU C library Bugzilla](https://sourceware.org/bugzilla/show_bug.cgi?id=15607#c2)
+///
+/// # Panics
+///
+/// This function may panic if `key` is empty, contains an ASCII equals sign `'='`
+/// or the NUL character `'\0'`, or when `value` contains the NUL character.
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// let key = "KEY";
+/// env::set_var(key, "VALUE");
+/// assert_eq!(env::var(key), Ok("VALUE".to_string()));
+/// ```
+#[stable(feature = "env", since = "1.0.0")]
+pub fn set_var<K: AsRef<OsStr>, V: AsRef<OsStr>>(key: K, value: V) {
+ _set_var(key.as_ref(), value.as_ref())
+}
+
+fn _set_var(key: &OsStr, value: &OsStr) {
+ os_imp::setenv(key, value).unwrap_or_else(|e| {
+ panic!("failed to set environment variable `{key:?}` to `{value:?}`: {e}")
+ })
+}
+
+/// Removes an environment variable from the environment of the currently running process.
+///
+/// Note that while concurrent access to environment variables is safe in Rust,
+/// some platforms only expose inherently unsafe non-threadsafe APIs for
+/// inspecting the environment. As a result, extra care needs to be taken when
+/// auditing calls to unsafe external FFI functions to ensure that any external
+/// environment accesses are properly synchronized with accesses in Rust.
+///
+/// Discussion of this unsafety on Unix may be found in:
+///
+/// - [Austin Group Bugzilla](https://austingroupbugs.net/view.php?id=188)
+/// - [GNU C library Bugzilla](https://sourceware.org/bugzilla/show_bug.cgi?id=15607#c2)
+///
+/// # Panics
+///
+/// This function may panic if `key` is empty, or if it contains an ASCII equals
+/// sign `'='` or the NUL character `'\0'`.
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// let key = "KEY";
+/// env::set_var(key, "VALUE");
+/// assert_eq!(env::var(key), Ok("VALUE".to_string()));
+///
+/// env::remove_var(key);
+/// assert!(env::var(key).is_err());
+/// ```
+#[stable(feature = "env", since = "1.0.0")]
+pub fn remove_var<K: AsRef<OsStr>>(key: K) {
+ _remove_var(key.as_ref())
+}
+
+fn _remove_var(key: &OsStr) {
+ os_imp::unsetenv(key)
+ .unwrap_or_else(|e| panic!("failed to remove environment variable `{key:?}`: {e}"))
+}
+
+/// An iterator that splits an environment variable into paths according to
+/// platform-specific conventions.
+///
+/// The iterator element type is [`PathBuf`].
+///
+/// This structure is created by [`env::split_paths()`]. See its
+/// documentation for more.
+///
+/// [`env::split_paths()`]: split_paths
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "env", since = "1.0.0")]
+pub struct SplitPaths<'a> {
+ inner: os_imp::SplitPaths<'a>,
+}
+
+/// Parses input according to platform conventions for the `PATH`
+/// environment variable.
+///
+/// Returns an iterator over the paths contained in `unparsed`. The iterator
+/// element type is [`PathBuf`].
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// let key = "PATH";
+/// match env::var_os(key) {
+/// Some(paths) => {
+/// for path in env::split_paths(&paths) {
+/// println!("'{}'", path.display());
+/// }
+/// }
+/// None => println!("{key} is not defined in the environment.")
+/// }
+/// ```
+#[stable(feature = "env", since = "1.0.0")]
+pub fn split_paths<T: AsRef<OsStr> + ?Sized>(unparsed: &T) -> SplitPaths<'_> {
+ SplitPaths { inner: os_imp::split_paths(unparsed.as_ref()) }
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for SplitPaths<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitPaths").finish_non_exhaustive()
+ }
+}
+
+/// The error type for operations on the `PATH` variable. Possibly returned from
+/// [`env::join_paths()`].
+///
+/// [`env::join_paths()`]: join_paths
+#[derive(Debug)]
+#[stable(feature = "env", since = "1.0.0")]
+pub struct JoinPathsError {
+ inner: os_imp::JoinPathsError,
+}
+
+/// Joins a collection of [`Path`]s appropriately for the `PATH`
+/// environment variable.
+///
+/// # Errors
+///
+/// Returns an [`Err`] (containing an error message) if one of the input
+/// [`Path`]s contains an invalid character for constructing the `PATH`
+/// variable (a double quote on Windows or a colon on Unix).
+///
+/// # Examples
+///
+/// Joining paths on a Unix-like platform:
+///
+/// ```
+/// use std::env;
+/// use std::ffi::OsString;
+/// use std::path::Path;
+///
+/// fn main() -> Result<(), env::JoinPathsError> {
+/// # if cfg!(unix) {
+/// let paths = [Path::new("/bin"), Path::new("/usr/bin")];
+/// let path_os_string = env::join_paths(paths.iter())?;
+/// assert_eq!(path_os_string, OsString::from("/bin:/usr/bin"));
+/// # }
+/// Ok(())
+/// }
+/// ```
+///
+/// Joining a path containing a colon on a Unix-like platform results in an
+/// error:
+///
+/// ```
+/// # if cfg!(unix) {
+/// use std::env;
+/// use std::path::Path;
+///
+/// let paths = [Path::new("/bin"), Path::new("/usr/bi:n")];
+/// assert!(env::join_paths(paths.iter()).is_err());
+/// # }
+/// ```
+///
+/// Using `env::join_paths()` with [`env::split_paths()`] to append an item to
+/// the `PATH` environment variable:
+///
+/// ```
+/// use std::env;
+/// use std::path::PathBuf;
+///
+/// fn main() -> Result<(), env::JoinPathsError> {
+/// if let Some(path) = env::var_os("PATH") {
+/// let mut paths = env::split_paths(&path).collect::<Vec<_>>();
+/// paths.push(PathBuf::from("/home/xyz/bin"));
+/// let new_path = env::join_paths(paths)?;
+/// env::set_var("PATH", &new_path);
+/// }
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// [`env::split_paths()`]: split_paths
+#[stable(feature = "env", since = "1.0.0")]
+pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: IntoIterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ os_imp::join_paths(paths.into_iter()).map_err(|e| JoinPathsError { inner: e })
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl Error for JoinPathsError {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ self.inner.description()
+ }
+}
+
+/// Returns the path of the current user's home directory if known.
+///
+/// # Unix
+///
+/// - Returns the value of the 'HOME' environment variable if it is set
+/// (including to an empty string).
+/// - Otherwise, it tries to determine the home directory by invoking the `getpwuid_r` function
+/// using the UID of the current user. An empty home directory field returned from the
+/// `getpwuid_r` function is considered to be a valid value.
+/// - Returns `None` if the current user has no entry in the `/etc/passwd` file.
+///
+/// # Windows
+///
+/// - Returns the value of the 'HOME' environment variable if it is set
+/// (including to an empty string).
+/// - Otherwise, returns the value of the 'USERPROFILE' environment variable if it is set
+/// (including to an empty string).
+/// - If neither of those variables is set, [`GetUserProfileDirectory`][msdn] is used to return the path.
+///
+/// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/userenv/nf-userenv-getuserprofiledirectorya
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// match env::home_dir() {
+/// Some(path) => println!("Your home directory, probably: {}", path.display()),
+/// None => println!("Impossible to get your home dir!"),
+/// }
+/// ```
+#[deprecated(
+ since = "1.29.0",
+ note = "This function's behavior is unexpected and probably not what you want. \
+ Consider using a crate from crates.io instead."
+)]
+#[must_use]
+#[stable(feature = "env", since = "1.0.0")]
+pub fn home_dir() -> Option<PathBuf> {
+ os_imp::home_dir()
+}
+
+/// Returns the path of a temporary directory.
+///
+/// The temporary directory may be shared among users, or between processes
+/// with different privileges; thus, the creation of any files or directories
+/// in the temporary directory must use a secure method to create a uniquely
+/// named file. Creating a file or directory with a fixed or predictable name
+/// may result in "insecure temporary file" security vulnerabilities. Consider
+/// using a crate that securely creates temporary files or directories.
+///
+/// # Platform-specific behavior
+///
+/// On Unix, returns the value of the `TMPDIR` environment variable if it is
+/// set, otherwise it returns `/tmp`. On Android, since there is no global
+/// temporary folder (it is usually allocated per-app), `/data/local/tmp` is
+/// returned instead of `/tmp`.
+/// On Windows, the behavior is equivalent to that of [`GetTempPath2`][GetTempPath2] /
+/// [`GetTempPath`][GetTempPath], which this function uses internally.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+/// [GetTempPath2]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-gettemppath2a
+/// [GetTempPath]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-gettemppatha
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::env;
+///
+/// fn main() {
+/// let dir = env::temp_dir();
+/// println!("Temporary directory: {}", dir.display());
+/// }
+/// ```
+#[must_use]
+#[stable(feature = "env", since = "1.0.0")]
+pub fn temp_dir() -> PathBuf {
+ os_imp::temp_dir()
+}
+
+/// Returns the full filesystem path of the current running executable.
+///
+/// # Platform-specific behavior
+///
+/// If the executable was invoked through a symbolic link, some platforms will
+/// return the path of the symbolic link and other platforms will return the
+/// path of the symbolic link’s target.
+///
+/// If the executable is renamed while it is running, platforms may return the
+/// path at the time it was loaded instead of the new path.
+///
+/// # Errors
+///
+/// Acquiring the path of the current executable is a platform-specific operation
+/// that can fail for a good number of reasons. Possible causes include, but are
+/// not limited to, filesystem operations failing or general syscall failures.
+///
+/// # Security
+///
+/// The output of this function should not be trusted for anything
+/// that might have security implications. Basically, if users can run
+/// the executable, they can change the output arbitrarily.
+///
+/// As an example, you can easily introduce a race condition. It goes
+/// like this:
+///
+/// 1. You get the path to the current executable using `current_exe()`, and
+/// store it in a variable.
+/// 2. Time passes. A malicious actor removes the current executable, and
+/// replaces it with a malicious one.
+/// 3. You then use the stored path to re-execute the current
+/// executable.
+///
+/// You expected to safely execute the current executable, but you're
+/// instead executing something completely different. The code you
+/// just executed ran with your privileges.
+///
+/// This sort of behavior has been known to [lead to privilege escalation] when
+/// used incorrectly.
+///
+/// [lead to privilege escalation]: https://securityvulns.com/Wdocument183.html
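+///
+/// A minimal sketch of the problematic pattern described above (re-executing
+/// via `std::process::Command` here is illustrative; any use of the stored
+/// path has the same issue):
+///
+/// ```no_run
+/// use std::env;
+/// use std::process::Command;
+///
+/// let exe = env::current_exe().expect("couldn't get the current exe path");
+/// // ... time passes, during which the file at `exe` may be replaced ...
+/// let _status = Command::new(exe).status(); // runs whatever is at that path *now*
+/// ```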
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// match env::current_exe() {
+/// Ok(exe_path) => println!("Path of this executable is: {}",
+/// exe_path.display()),
+/// Err(e) => println!("failed to get current exe path: {e}"),
+/// };
+/// ```
+#[stable(feature = "env", since = "1.0.0")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ os_imp::current_exe()
+}
+
+/// An iterator over the arguments of a process, yielding a [`String`] value for
+/// each argument.
+///
+/// This struct is created by [`env::args()`]. See its documentation
+/// for more.
+///
+/// The first element is traditionally the path of the executable, but it can be
+/// set to arbitrary text, and might not even exist. This means this property
+/// should not be relied upon for security purposes.
+///
+/// [`env::args()`]: args
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "env", since = "1.0.0")]
+pub struct Args {
+ inner: ArgsOs,
+}
+
+/// An iterator over the arguments of a process, yielding an [`OsString`] value
+/// for each argument.
+///
+/// This struct is created by [`env::args_os()`]. See its documentation
+/// for more.
+///
+/// The first element is traditionally the path of the executable, but it can be
+/// set to arbitrary text, and might not even exist. This means this property
+/// should not be relied upon for security purposes.
+///
+/// [`env::args_os()`]: args_os
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "env", since = "1.0.0")]
+pub struct ArgsOs {
+ inner: sys::args::Args,
+}
+
+/// Returns the arguments that this program was started with (normally passed
+/// via the command line).
+///
+/// The first element is traditionally the path of the executable, but it can be
+/// set to arbitrary text, and might not even exist. This means this property should
+/// not be relied upon for security purposes.
+///
+/// On Unix systems the shell usually expands unquoted arguments with glob patterns
+/// (such as `*` and `?`). On Windows this is not done, and such arguments are
+/// passed as-is.
+///
+/// On glibc Linux systems, arguments are retrieved by placing a function in `.init_array`.
+/// glibc passes `argc`, `argv`, and `envp` to functions in `.init_array`, as a non-standard
+/// extension. This allows `std::env::args` to work even in a `cdylib` or `staticlib`, as it
+/// does on macOS and Windows.
+///
+/// # Panics
+///
+/// The returned iterator will panic during iteration if any argument to the
+/// process is not valid Unicode. If this is not desired,
+/// use the [`args_os`] function instead.
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// // Prints each argument on a separate line
+/// for argument in env::args() {
+/// println!("{argument}");
+/// }
+/// ```
+#[stable(feature = "env", since = "1.0.0")]
+pub fn args() -> Args {
+ Args { inner: args_os() }
+}
+
+/// Returns the arguments that this program was started with (normally passed
+/// via the command line).
+///
+/// The first element is traditionally the path of the executable, but it can be
+/// set to arbitrary text, and might not even exist. This means this property should
+/// not be relied upon for security purposes.
+///
+/// On Unix systems the shell usually expands unquoted arguments with glob patterns
+/// (such as `*` and `?`). On Windows this is not done, and such arguments are
+/// passed as-is.
+///
+/// On glibc Linux systems, arguments are retrieved by placing a function in `.init_array`.
+/// glibc passes `argc`, `argv`, and `envp` to functions in `.init_array`, as a non-standard
+/// extension. This allows `std::env::args_os` to work even in a `cdylib` or `staticlib`, as it
+/// does on macOS and Windows.
+///
+/// Note that the returned iterator will not check if the arguments to the
+/// process are valid Unicode. If you want to panic on invalid UTF-8,
+/// use the [`args`] function instead.
+///
+/// # Examples
+///
+/// ```
+/// use std::env;
+///
+/// // Prints each argument on a separate line
+/// for argument in env::args_os() {
+/// println!("{argument:?}");
+/// }
+/// ```
+#[stable(feature = "env", since = "1.0.0")]
+pub fn args_os() -> ArgsOs {
+ ArgsOs { inner: sys::args::args() }
+}
+
+#[stable(feature = "env_unimpl_send_sync", since = "1.26.0")]
+impl !Send for Args {}
+
+#[stable(feature = "env_unimpl_send_sync", since = "1.26.0")]
+impl !Sync for Args {}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl Iterator for Args {
+ type Item = String;
+ fn next(&mut self) -> Option<String> {
+ self.inner.next().map(|s| s.into_string().unwrap())
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+ fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+}
+
+#[stable(feature = "env_iterators", since = "1.12.0")]
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<String> {
+ self.inner.next_back().map(|s| s.into_string().unwrap())
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Args").field("inner", &self.inner.inner).finish()
+ }
+}
+
+#[stable(feature = "env_unimpl_send_sync", since = "1.26.0")]
+impl !Send for ArgsOs {}
+
+#[stable(feature = "env_unimpl_send_sync", since = "1.26.0")]
+impl !Sync for ArgsOs {}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl Iterator for ArgsOs {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ self.inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "env", since = "1.0.0")]
+impl ExactSizeIterator for ArgsOs {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+ fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+}
+
+#[stable(feature = "env_iterators", since = "1.12.0")]
+impl DoubleEndedIterator for ArgsOs {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for ArgsOs {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ArgsOs").field("inner", &self.inner).finish()
+ }
+}
+
+/// Constants associated with the current target
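+///
+/// A small illustration of reading these constants at runtime (the printed
+/// values depend on the target the program is compiled for):
+///
+/// ```
+/// use std::env::consts;
+///
+/// println!("arch: {}", consts::ARCH);
+/// println!("os: {} (family: {})", consts::OS, consts::FAMILY);
+/// println!("shared libraries look like {}foo{}", consts::DLL_PREFIX, consts::DLL_SUFFIX);
+/// ```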
+#[stable(feature = "env", since = "1.0.0")]
+pub mod consts {
+ use crate::sys::env::os;
+
+ /// A string describing the architecture of the CPU that is currently
+ /// in use.
+ ///
+ /// Some possible values:
+ ///
+ /// - x86
+ /// - x86_64
+ /// - arm
+ /// - aarch64
+ /// - m68k
+ /// - mips
+ /// - mips64
+ /// - powerpc
+ /// - powerpc64
+ /// - riscv64
+ /// - s390x
+ /// - sparc64
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const ARCH: &str = env!("STD_ENV_ARCH");
+
+ /// The family of the operating system. Example value is `unix`.
+ ///
+ /// Some possible values:
+ ///
+ /// - unix
+ /// - windows
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const FAMILY: &str = os::FAMILY;
+
+ /// A string describing the specific operating system in use.
+ /// Example value is `linux`.
+ ///
+ /// Some possible values:
+ ///
+ /// - linux
+ /// - macos
+ /// - ios
+ /// - freebsd
+ /// - dragonfly
+ /// - netbsd
+ /// - openbsd
+ /// - solaris
+ /// - android
+ /// - windows
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const OS: &str = os::OS;
+
+ /// Specifies the filename prefix used for shared libraries on this
+ /// platform. Example value is `lib`.
+ ///
+ /// Some possible values:
+ ///
+ /// - lib
+ /// - `""` (an empty string)
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const DLL_PREFIX: &str = os::DLL_PREFIX;
+
+ /// Specifies the filename suffix used for shared libraries on this
+ /// platform. Example value is `.so`.
+ ///
+ /// Some possible values:
+ ///
+ /// - .so
+ /// - .dylib
+ /// - .dll
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const DLL_SUFFIX: &str = os::DLL_SUFFIX;
+
+ /// Specifies the file extension used for shared libraries on this
+ /// platform that goes after the dot. Example value is `so`.
+ ///
+ /// Some possible values:
+ ///
+ /// - so
+ /// - dylib
+ /// - dll
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const DLL_EXTENSION: &str = os::DLL_EXTENSION;
+
+ /// Specifies the filename suffix used for executable binaries on this
+ /// platform. Example value is `.exe`.
+ ///
+ /// Some possible values:
+ ///
+ /// - .exe
+ /// - .nexe
+ /// - .pexe
+ /// - `""` (an empty string)
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const EXE_SUFFIX: &str = os::EXE_SUFFIX;
+
+ /// Specifies the file extension, if any, used for executable binaries
+ /// on this platform. Example value is `exe`.
+ ///
+ /// Some possible values:
+ ///
+ /// - exe
+ /// - `""` (an empty string)
+ #[stable(feature = "env", since = "1.0.0")]
+ pub const EXE_EXTENSION: &str = os::EXE_EXTENSION;
+}
diff --git a/library/std/src/env/tests.rs b/library/std/src/env/tests.rs
new file mode 100644
index 000000000..94cace03a
--- /dev/null
+++ b/library/std/src/env/tests.rs
@@ -0,0 +1,102 @@
+use super::*;
+
+use crate::path::Path;
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_env = "sgx"), ignore)]
+fn test_self_exe_path() {
+ let path = current_exe();
+ assert!(path.is_ok());
+ let path = path.unwrap();
+
+ // Hard to test this function
+ assert!(path.is_absolute());
+}
+
+#[test]
+fn test() {
+ assert!((!Path::new("test-path").is_absolute()));
+
+ #[cfg(not(target_env = "sgx"))]
+ current_dir().unwrap();
+}
+
+#[test]
+#[cfg(windows)]
+fn split_paths_windows() {
+ use crate::path::PathBuf;
+
+ fn check_parse(unparsed: &str, parsed: &[&str]) -> bool {
+ split_paths(unparsed).collect::<Vec<_>>()
+ == parsed.iter().map(|s| PathBuf::from(*s)).collect::<Vec<_>>()
+ }
+
+ assert!(check_parse("", &mut [""]));
+ assert!(check_parse(r#""""#, &mut [""]));
+ assert!(check_parse(";;", &mut ["", "", ""]));
+ assert!(check_parse(r"c:\", &mut [r"c:\"]));
+ assert!(check_parse(r"c:\;", &mut [r"c:\", ""]));
+ assert!(check_parse(r"c:\;c:\Program Files\", &mut [r"c:\", r"c:\Program Files\"]));
+ assert!(check_parse(r#"c:\;c:\"foo"\"#, &mut [r"c:\", r"c:\foo\"]));
+ assert!(check_parse(r#"c:\;c:\"foo;bar"\;c:\baz"#, &mut [r"c:\", r"c:\foo;bar\", r"c:\baz"]));
+}
+
+#[test]
+#[cfg(unix)]
+fn split_paths_unix() {
+ use crate::path::PathBuf;
+
+ fn check_parse(unparsed: &str, parsed: &[&str]) -> bool {
+ split_paths(unparsed).collect::<Vec<_>>()
+ == parsed.iter().map(|s| PathBuf::from(*s)).collect::<Vec<_>>()
+ }
+
+ assert!(check_parse("", &mut [""]));
+ assert!(check_parse("::", &mut ["", "", ""]));
+ assert!(check_parse("/", &mut ["/"]));
+ assert!(check_parse("/:", &mut ["/", ""]));
+ assert!(check_parse("/:/usr/local", &mut ["/", "/usr/local"]));
+}
+
+#[test]
+#[cfg(unix)]
+fn join_paths_unix() {
+ use crate::ffi::OsStr;
+
+ fn test_eq(input: &[&str], output: &str) -> bool {
+ &*join_paths(input.iter().cloned()).unwrap() == OsStr::new(output)
+ }
+
+ assert!(test_eq(&[], ""));
+ assert!(test_eq(&["/bin", "/usr/bin", "/usr/local/bin"], "/bin:/usr/bin:/usr/local/bin"));
+ assert!(test_eq(&["", "/bin", "", "", "/usr/bin", ""], ":/bin:::/usr/bin:"));
+ assert!(join_paths(["/te:st"].iter().cloned()).is_err());
+}
+
+#[test]
+#[cfg(windows)]
+fn join_paths_windows() {
+ use crate::ffi::OsStr;
+
+ fn test_eq(input: &[&str], output: &str) -> bool {
+ &*join_paths(input.iter().cloned()).unwrap() == OsStr::new(output)
+ }
+
+ assert!(test_eq(&[], ""));
+ assert!(test_eq(&[r"c:\windows", r"c:\"], r"c:\windows;c:\"));
+ assert!(test_eq(&["", r"c:\windows", "", "", r"c:\", ""], r";c:\windows;;;c:\;"));
+ assert!(test_eq(&[r"c:\te;st", r"c:\"], r#""c:\te;st";c:\"#));
+ assert!(join_paths([r#"c:\te"st"#].iter().cloned()).is_err());
+}
+
+#[test]
+fn args_debug() {
+ assert_eq!(
+ format!("Args {{ inner: {:?} }}", args().collect::<Vec<_>>()),
+ format!("{:?}", args())
+ );
+ assert_eq!(
+ format!("ArgsOs {{ inner: {:?} }}", args_os().collect::<Vec<_>>()),
+ format!("{:?}", args_os())
+ );
+}
diff --git a/library/std/src/error.rs b/library/std/src/error.rs
new file mode 100644
index 000000000..722df119d
--- /dev/null
+++ b/library/std/src/error.rs
@@ -0,0 +1,1746 @@
+//! Interfaces for working with Errors.
+//!
+//! # Error Handling In Rust
+//!
+//! The Rust language provides two complementary systems for constructing /
+//! representing, reporting, propagating, reacting to, and discarding errors.
+//! These responsibilities are collectively known as "error handling." The
+//! components of the first system, the panic runtime and interfaces, are most
+//! commonly used to represent bugs that have been detected in your program. The
+//! components of the second system, `Result`, the error traits, and user
+//! defined types, are used to represent anticipated runtime failure modes of
+//! your program.
+//!
+//! ## The Panic Interfaces
+//!
+//! The following are the primary interfaces of the panic system and the
+//! responsibilities they cover:
+//!
+//! * [`panic!`] and [`panic_any`] (Constructing, Propagated automatically)
+//! * [`PanicInfo`] (Reporting)
+//! * [`set_hook`], [`take_hook`], and [`#[panic_handler]`][panic-handler] (Reporting)
+//! * [`catch_unwind`] and [`resume_unwind`] (Discarding, Propagating)
+//!
+//! The following are the primary interfaces of the error system and the
+//! responsibilities they cover:
+//!
+//! * [`Result`] (Propagating, Reacting)
+//! * The [`Error`] trait (Reporting)
+//! * User defined types (Constructing / Representing)
+//! * [`match`] and [`downcast`] (Reacting)
+//! * The question mark operator ([`?`]) (Propagating)
+//! * The partially stable [`Try`] traits (Propagating, Constructing)
+//! * [`Termination`] (Reporting)
+//!
+//! ## Converting Errors into Panics
+//!
+//! The panic and error systems are not entirely distinct. Oftentimes errors
+//! that are anticipated runtime failures in an API might instead represent bugs
+//! to a caller. For these situations the standard library provides APIs for
+//! constructing panics with an `Error` as its source.
+//!
+//! * [`Result::unwrap`]
+//! * [`Result::expect`]
+//!
+//! These functions are equivalent: they either return the inner value if the
+//! `Result` is `Ok`, or panic if the `Result` is `Err`, printing the inner error
+//! as the source. The only difference between them is that with `expect` you
+//! provide a panic error message to be printed alongside the source, whereas
+//! `unwrap` has a default message indicating only that you unwrapped an `Err`.
+//!
+//! Of the two, `expect` is generally preferred since its `msg` field allows you
+//! to convey your intent and assumptions which makes tracking down the source
+//! of a panic easier. `unwrap` on the other hand can still be a good fit in
+//! situations where you can trivially show that a piece of code will never
+//! panic, such as `"127.0.0.1".parse::<std::net::IpAddr>().unwrap()` or early
+//! prototyping.
+//!
+//! # Common Message Styles
+//!
+//! There are two common styles for how people word `expect` messages. Using
+//! the message to present information to users encountering a panic
+//! ("expect as error message") or using the message to present information
+//! to developers debugging the panic ("expect as precondition").
+//!
+//! In the former case the expect message is used to describe the error that
+//! has occurred which is considered a bug. Consider the following example:
+//!
+//! ```should_panic
+//! // Read environment variable, panic if it is not present
+//! let path = std::env::var("IMPORTANT_PATH").unwrap();
+//! ```
+//!
+//! In the "expect as error message" style we would use expect to describe
+//! that the environment variable was not set when it should have been:
+//!
+//! ```should_panic
+//! let path = std::env::var("IMPORTANT_PATH")
+//! .expect("env variable `IMPORTANT_PATH` is not set");
+//! ```
+//!
+//! In the "expect as precondition" style, we would instead describe the
+//! reason we _expect_ the `Result` should be `Ok`. With this style we would
+//! prefer to write:
+//!
+//! ```should_panic
+//! let path = std::env::var("IMPORTANT_PATH")
+//! .expect("env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`");
+//! ```
+//!
+//! The "expect as error message" style does not work as well with the
+//! default output of the std panic hooks, and often ends up repeating
+//! information that is already communicated by the source error being
+//! unwrapped:
+//!
+//! ```text
+//! thread 'main' panicked at 'env variable `IMPORTANT_PATH` is not set: NotPresent', src/main.rs:4:6
+//! ```
+//!
+//! In this example we end up mentioning that an env variable is not set,
+//! followed by our source message that says the env is not present; the
+//! only additional information we're communicating is the name of the
+//! environment variable being checked.
+//!
+//! The "expect as precondition" style instead focuses on source code
+//! readability, making it easier to understand what must have gone wrong in
+//! situations where panics are being used to represent bugs exclusively.
+//! Also, by framing our expect in terms of what "SHOULD" have happened to
+//! prevent the source error, we end up introducing new information that is
+//! independent from our source error.
+//!
+//! ```text
+//! thread 'main' panicked at 'env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`: NotPresent', src/main.rs:4:6
+//! ```
+//!
+//! In this example we are communicating not only the name of the
+//! environment variable that should have been set, but also an explanation
+//! for why it should have been set, and we let the source error display as
+//! a clear contradiction to our expectation.
+//!
+//! **Hint**: If you're having trouble remembering how to phrase
+//! expect-as-precondition style error messages remember to focus on the word
+//! "should" as in "env variable should be set by blah" or "the given binary
+//! should be available and executable by the current user".
+//!
+//! [`panic_any`]: crate::panic::panic_any
+//! [`PanicInfo`]: crate::panic::PanicInfo
+//! [`catch_unwind`]: crate::panic::catch_unwind
+//! [`resume_unwind`]: crate::panic::resume_unwind
+//! [`downcast`]: crate::error::Error
+//! [`Termination`]: crate::process::Termination
+//! [`Try`]: crate::ops::Try
+//! [panic hook]: crate::panic::set_hook
+//! [`set_hook`]: crate::panic::set_hook
+//! [`take_hook`]: crate::panic::take_hook
+//! [panic-handler]: <https://doc.rust-lang.org/nomicon/panic-handler.html>
+//! [`match`]: ../../std/keyword.match.html
+//! [`?`]: ../../std/result/index.html#the-question-mark-operator-
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// A note about crates and the facade:
+//
+// Originally, the `Error` trait was defined in libcore, and the impls
+// were scattered about. However, coherence objected to this
+// arrangement, because to create the blanket impls for `Box` required
+// knowing that `&str: !Error`, and we have no means to deal with that
+// sort of conflict just now. Therefore, for the time being, we have
+// moved the `Error` trait into libstd. As we evolve a solution to the
+// coherence challenge (e.g., specialization, neg impls, etc) we can
+// reconsider what crate these items belong in.
+
+#[cfg(test)]
+mod tests;
+
+use core::array;
+use core::convert::Infallible;
+
+use crate::alloc::{AllocError, LayoutError};
+use crate::any::{Demand, Provider, TypeId};
+use crate::backtrace::Backtrace;
+use crate::borrow::Cow;
+use crate::cell;
+use crate::char;
+use crate::fmt::{self, Debug, Display, Write};
+use crate::io;
+use crate::mem::transmute;
+use crate::num;
+use crate::str;
+use crate::string;
+use crate::sync::Arc;
+use crate::time;
+
+/// `Error` is a trait representing the basic expectations for error values,
+/// i.e., values of type `E` in [`Result<T, E>`].
+///
+/// Errors must describe themselves through the [`Display`] and [`Debug`]
+/// traits. Error messages are typically concise lowercase sentences without
+/// trailing punctuation:
+///
+/// ```
+/// let err = "NaN".parse::<u32>().unwrap_err();
+/// assert_eq!(err.to_string(), "invalid digit found in string");
+/// ```
+///
+/// Errors may provide cause chain information. [`Error::source()`] is generally
+/// used when errors cross "abstraction boundaries". If one module must report
+/// an error that is caused by an error from a lower-level module, it can allow
+/// accessing that error via [`Error::source()`]. This makes it possible for the
+/// high-level module to provide its own errors while also revealing some of the
+/// implementation for debugging via `source` chains.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Error")]
+pub trait Error: Debug + Display {
+ /// The lower-level source of this error, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ ///
+ /// #[derive(Debug)]
+ /// struct SuperError {
+ /// source: SuperErrorSideKick,
+ /// }
+ ///
+ /// impl fmt::Display for SuperError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "SuperError is here!")
+ /// }
+ /// }
+ ///
+ /// impl Error for SuperError {
+ /// fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// Some(&self.source)
+ /// }
+ /// }
+ ///
+ /// #[derive(Debug)]
+ /// struct SuperErrorSideKick;
+ ///
+ /// impl fmt::Display for SuperErrorSideKick {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "SuperErrorSideKick is here!")
+ /// }
+ /// }
+ ///
+ /// impl Error for SuperErrorSideKick {}
+ ///
+ /// fn get_super_error() -> Result<(), SuperError> {
+ /// Err(SuperError { source: SuperErrorSideKick })
+ /// }
+ ///
+ /// fn main() {
+ /// match get_super_error() {
+ /// Err(e) => {
+ /// println!("Error: {e}");
+ /// println!("Caused by: {}", e.source().unwrap());
+ /// }
+ /// _ => println!("No error"),
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "error_source", since = "1.30.0")]
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ None
+ }
+
+ /// Gets the `TypeId` of `self`.
+ #[doc(hidden)]
+ #[unstable(
+ feature = "error_type_id",
+ reason = "this is memory-unsafe to override in user code",
+ issue = "60784"
+ )]
+ fn type_id(&self, _: private::Internal) -> TypeId
+ where
+ Self: 'static,
+ {
+ TypeId::of::<Self>()
+ }
+
+ /// ```
+ /// if let Err(e) = "xc".parse::<u32>() {
+ /// // Print `e` itself, no need for description().
+ /// eprintln!("Error: {e}");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.42.0", note = "use the Display impl or to_string()")]
+ fn description(&self) -> &str {
+ "description() is deprecated; use Display"
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.33.0",
+ note = "replaced by Error::source, which can support downcasting"
+ )]
+ #[allow(missing_docs)]
+ fn cause(&self) -> Option<&dyn Error> {
+ self.source()
+ }
+
+ /// Provides type based access to context intended for error reports.
+ ///
+ /// Used in conjunction with [`Demand::provide_value`] and [`Demand::provide_ref`] to extract
+ /// references to member variables from `dyn Error` trait objects.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// #![feature(provide_any)]
+ /// #![feature(error_generic_member_access)]
+ /// use core::fmt;
+ /// use core::any::Demand;
+ ///
+ /// #[derive(Debug)]
+ /// struct MyBacktrace {
+ /// // ...
+ /// }
+ ///
+ /// impl MyBacktrace {
+ /// fn new() -> MyBacktrace {
+ /// // ...
+ /// # MyBacktrace {}
+ /// }
+ /// }
+ ///
+ /// #[derive(Debug)]
+ /// struct SourceError {
+ /// // ...
+ /// }
+ ///
+ /// impl fmt::Display for SourceError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "Example Source Error")
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for SourceError {}
+ ///
+ /// #[derive(Debug)]
+ /// struct Error {
+ /// source: SourceError,
+ /// backtrace: MyBacktrace,
+ /// }
+ ///
+ /// impl fmt::Display for Error {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "Example Error")
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for Error {
+ /// fn provide<'a>(&'a self, req: &mut Demand<'a>) {
+ /// req
+ /// .provide_ref::<MyBacktrace>(&self.backtrace)
+ /// .provide_ref::<dyn std::error::Error + 'static>(&self.source);
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// let backtrace = MyBacktrace::new();
+ /// let source = SourceError {};
+ /// let error = Error { source, backtrace };
+ /// let dyn_error = &error as &dyn std::error::Error;
+ /// let backtrace_ref = dyn_error.request_ref::<MyBacktrace>().unwrap();
+ ///
+ /// assert!(core::ptr::eq(&error.backtrace, backtrace_ref));
+ /// }
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ #[allow(unused_variables)]
+ fn provide<'a>(&'a self, req: &mut Demand<'a>) {}
+}
+
+#[unstable(feature = "error_generic_member_access", issue = "99301")]
+impl<'b> Provider for dyn Error + 'b {
+ fn provide<'a>(&'a self, req: &mut Demand<'a>) {
+ self.provide(req)
+ }
+}
+
+mod private {
+ // This is a hack to prevent `type_id` from being overridden by `Error`
+ // implementations, since that can enable unsound downcasting.
+ #[unstable(feature = "error_type_id", issue = "60784")]
+ #[derive(Debug)]
+ pub struct Internal;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
+ /// Converts a type of [`Error`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error>::from(an_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + 'a> {
+ Box::new(err)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
+ /// dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// unsafe impl Send for AnError {}
+ ///
+ /// unsafe impl Sync for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
+ Box::new(err)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<String> for Box<dyn Error + Send + Sync> {
+ /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: String) -> Box<dyn Error + Send + Sync> {
+ struct StringError(String);
+
+ impl Error for StringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ &self.0
+ }
+ }
+
+ impl Display for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Display::fmt(&self.0, f)
+ }
+ }
+
+ // Purposefully skip printing "StringError(..)"
+ impl Debug for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Debug::fmt(&self.0, f)
+ }
+ }
+
+ Box::new(StringError(err))
+ }
+}
+
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<String> for Box<dyn Error> {
+ /// Converts a [`String`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(str_err: String) -> Box<dyn Error> {
+ let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
+ let err2: Box<dyn Error> = err1;
+ err2
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<&str> for Box<dyn Error> {
+ /// Converts a [`str`] into a box of dyn [`Error`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: &str) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'a, str>) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[unstable(feature = "never_type", issue = "35121")]
+impl Error for ! {}
+
+#[unstable(
+ feature = "allocator_api",
+ reason = "the precise API and guarantees it provides may be tweaked.",
+ issue = "32838"
+)]
+impl Error for AllocError {}
+
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+impl Error for LayoutError {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for str::ParseBoolError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "failed to parse bool"
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for str::Utf8Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-8: corrupt contents"
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for num::ParseIntError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl Error for num::TryFromIntError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl Error for array::TryFromSliceError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for num::ParseFloatError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for string::FromUtf8Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-8"
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for string::FromUtf16Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-16"
+ }
+}
+
+#[stable(feature = "str_parse_error2", since = "1.8.0")]
+impl Error for Infallible {
+ fn description(&self) -> &str {
+ match *self {}
+ }
+}
+
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl Error for char::DecodeUtf16Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "unpaired surrogate found"
+ }
+}
+
+#[stable(feature = "u8_from_char", since = "1.59.0")]
+impl Error for char::TryFromCharError {}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: Debug + Ord, V: Debug> Error
+ for crate::collections::btree_map::OccupiedError<'a, K, V>
+{
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "key already exists"
+ }
+}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: Debug, V: Debug> Error for crate::collections::hash_map::OccupiedError<'a, K, V> {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "key already exists"
+ }
+}
+
+#[stable(feature = "box_error", since = "1.8.0")]
+impl<T: Error> Error for Box<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn Error> {
+ Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ Error::source(&**self)
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized + crate::error::Error> crate::error::Error for crate::boxed::ThinBox<T> {
+ fn source(&self) -> Option<&(dyn crate::error::Error + 'static)> {
+ use core::ops::Deref;
+ self.deref().source()
+ }
+}
+
+#[stable(feature = "error_by_ref", since = "1.51.0")]
+impl<'a, T: Error + ?Sized> Error for &'a T {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn Error> {
+ Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ Error::source(&**self)
+ }
+
+ fn provide<'b>(&'b self, req: &mut Demand<'b>) {
+ Error::provide(&**self, req);
+ }
+}
+
+#[stable(feature = "arc_error", since = "1.52.0")]
+impl<T: Error + ?Sized> Error for Arc<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn Error> {
+ Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ Error::source(&**self)
+ }
+
+ fn provide<'a>(&'a self, req: &mut Demand<'a>) {
+ Error::provide(&**self, req);
+ }
+}
+
+#[stable(feature = "fmt_error", since = "1.11.0")]
+impl Error for fmt::Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "an error occurred when formatting an argument"
+ }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Error for cell::BorrowError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "already mutably borrowed"
+ }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Error for cell::BorrowMutError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "already borrowed"
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl Error for char::CharTryFromError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "converted integer out of range for `char`"
+ }
+}
+
+#[stable(feature = "char_from_str", since = "1.20.0")]
+impl Error for char::ParseCharError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl Error for alloc::collections::TryReserveError {}
+
+#[unstable(feature = "duration_checked_float", issue = "83400")]
+impl Error for time::FromFloatSecsError {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for alloc::ffi::NulError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "nul byte found in data"
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<alloc::ffi::NulError> for io::Error {
+ /// Converts a [`alloc::ffi::NulError`] into a [`io::Error`].
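+ ///
+ /// A minimal sketch of how the conversion is typically used, via the `?`
+ /// operator in a function returning `io::Result`:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ /// use std::io;
+ ///
+ /// fn cstring_from(s: &str) -> io::Result<CString> {
+ ///     // `?` converts the `NulError` into an `io::Error` via this `From` impl.
+ ///     Ok(CString::new(s)?)
+ /// }
+ ///
+ /// assert!(cstring_from("hello").is_ok());
+ /// assert_eq!(cstring_from("he\0llo").unwrap_err().kind(), io::ErrorKind::InvalidInput);
+ /// ```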
+ fn from(_: alloc::ffi::NulError) -> io::Error {
+ io::const_io_error!(io::ErrorKind::InvalidInput, "data provided contains a nul byte")
+ }
+}
+
+#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")]
+impl Error for core::ffi::FromBytesWithNulError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+impl Error for core::ffi::FromBytesUntilNulError {}
+
+#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+impl Error for alloc::ffi::FromVecWithNulError {}
+
+#[stable(feature = "cstring_into", since = "1.7.0")]
+impl Error for alloc::ffi::IntoStringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "C string contained non-utf8 bytes"
+ }
+
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ Some(self.__source())
+ }
+}
+
+impl<'a> dyn Error + 'a {
+ /// Request a reference of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_ref<T: ?Sized + 'static>(&'a self) -> Option<&'a T> {
+ core::any::request_ref(self)
+ }
+
+ /// Request a value of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_value<T: 'static>(&'a self) -> Option<T> {
+ core::any::request_value(self)
+ }
+}
+
+// Copied from `any.rs`.
+impl dyn Error + 'static {
+ /// Returns `true` if the inner type is the same as `T`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ // Get `TypeId` of the type this function is instantiated with.
+ let t = TypeId::of::<T>();
+
+ // Get `TypeId` of the type in the trait object (`self`).
+ let concrete = self.type_id(private::Internal);
+
+ // Compare both `TypeId`s on equality.
+ t == concrete
+ }
+
+ /// Returns some reference to the inner value if it is of type `T`, or
+ /// `None` if it isn't.
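+ ///
+ /// A minimal illustration (using `std::num::ParseIntError` as the concrete
+ /// type behind the `dyn Error`):
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::num::ParseIntError;
+ ///
+ /// let parse_err = "NaN".parse::<u32>().unwrap_err();
+ /// let err: &(dyn Error + 'static) = &parse_err;
+ /// assert!(err.downcast_ref::<ParseIntError>().is_some());
+ /// assert!(err.downcast_ref::<std::fmt::Error>().is_none());
+ /// ```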
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
+ if self.is::<T>() {
+ unsafe { Some(&*(self as *const dyn Error as *const T)) }
+ } else {
+ None
+ }
+ }
+
+ /// Returns some mutable reference to the inner value if it is of type `T`, or
+ /// `None` if it isn't.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
+ if self.is::<T>() {
+ unsafe { Some(&mut *(self as *mut dyn Error as *mut T)) }
+ } else {
+ None
+ }
+ }
+}
+
+impl dyn Error + 'static + Send {
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ <dyn Error + 'static>::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
+ <dyn Error + 'static>::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
+ <dyn Error + 'static>::downcast_mut::<T>(self)
+ }
+
+ /// Request a reference of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
+ <dyn Error>::request_ref(self)
+ }
+
+ /// Request a value of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_value<T: 'static>(&self) -> Option<T> {
+ <dyn Error>::request_value(self)
+ }
+}
+
+impl dyn Error + 'static + Send + Sync {
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn is<T: Error + 'static>(&self) -> bool {
+ <dyn Error + 'static>::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
+ <dyn Error + 'static>::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `dyn Error`.
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[inline]
+ pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
+ <dyn Error + 'static>::downcast_mut::<T>(self)
+ }
+
+ /// Request a reference of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
+ <dyn Error>::request_ref(self)
+ }
+
+ /// Request a value of type `T` as context about this error.
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn request_value<T: 'static>(&self) -> Option<T> {
+ <dyn Error>::request_value(self)
+ }
+}
+
+impl dyn Error {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ /// Attempts to downcast the box to a concrete type.
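+ ///
+ /// A minimal illustration (boxing a `std::num::ParseIntError` and then
+ /// recovering the concrete type):
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::num::ParseIntError;
+ ///
+ /// let boxed: Box<dyn Error> = Box::new("NaN".parse::<u32>().unwrap_err());
+ /// let concrete: Box<ParseIntError> = boxed.downcast().expect("should be a ParseIntError");
+ /// assert_eq!(concrete.to_string(), "invalid digit found in string");
+ /// ```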
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
+ if self.is::<T>() {
+ unsafe {
+ let raw: *mut dyn Error = Box::into_raw(self);
+ Ok(Box::from_raw(raw as *mut T))
+ }
+ } else {
+ Err(self)
+ }
+ }
+
+ /// Returns an iterator starting with the current error and continuing with
+ /// recursively calling [`Error::source`].
+ ///
+ /// If you want to omit the current error and only use its sources,
+ /// use `skip(1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(error_iter)]
+ /// use std::error::Error;
+ /// use std::fmt;
+ ///
+ /// #[derive(Debug)]
+ /// struct A;
+ ///
+ /// #[derive(Debug)]
+ /// struct B(Option<Box<dyn Error + 'static>>);
+ ///
+ /// impl fmt::Display for A {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "A")
+ /// }
+ /// }
+ ///
+ /// impl fmt::Display for B {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "B")
+ /// }
+ /// }
+ ///
+ /// impl Error for A {}
+ ///
+ /// impl Error for B {
+ /// fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// self.0.as_ref().map(|e| e.as_ref())
+ /// }
+ /// }
+ ///
+ /// let b = B(Some(Box::new(A)));
+ ///
+ /// // let err : Box<Error> = b.into(); // or
+ /// let err = &b as &(dyn Error);
+ ///
+ /// let mut iter = err.chain();
+ ///
+ /// assert_eq!("B".to_string(), iter.next().unwrap().to_string());
+ /// assert_eq!("A".to_string(), iter.next().unwrap().to_string());
+ /// assert!(iter.next().is_none());
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[unstable(feature = "error_iter", issue = "58520")]
+ #[inline]
+ pub fn chain(&self) -> Chain<'_> {
+ Chain { current: Some(self) }
+ }
+}
+
+/// An iterator over an [`Error`] and its sources.
+///
+/// If you want to omit the initial error and only process
+/// its sources, use `skip(1)`.
+#[unstable(feature = "error_iter", issue = "58520")]
+#[derive(Clone, Debug)]
+pub struct Chain<'a> {
+ current: Option<&'a (dyn Error + 'static)>,
+}
+
+#[unstable(feature = "error_iter", issue = "58520")]
+impl<'a> Iterator for Chain<'a> {
+ type Item = &'a (dyn Error + 'static);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let current = self.current;
+ self.current = self.current.and_then(Error::source);
+ current
+ }
+}
+
+impl dyn Error + Send {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send` marker.
+ transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
+ })
+ }
+}
+
+impl dyn Error + Send + Sync {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ /// Attempts to downcast the box to a concrete type.
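+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch; note that the `Err` branch keeps the `Send + Sync` markers:
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::io;
+ ///
+ /// let boxed: Box<dyn Error + Send + Sync> =
+ ///     Box::new(io::Error::new(io::ErrorKind::Other, "oops"));
+ ///
+ /// // The wrong target type returns the original box, still `Send + Sync`.
+ /// let boxed: Box<dyn Error + Send + Sync> = boxed.downcast::<fmt::Error>().unwrap_err();
+ ///
+ /// // The right target type recovers the concrete `io::Error`.
+ /// let concrete: Box<io::Error> = boxed.downcast::<io::Error>().unwrap();
+ /// assert_eq!(concrete.to_string(), "oops");
+ /// ```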
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send + Sync` marker.
+ transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
+ })
+ }
+}
+
+/// An error reporter that prints an error and its sources.
+///
+/// Report also exposes configuration options for formatting the error chain, either entirely on a
+/// single line, or in multi-line format with each cause in the error chain on a new line.
+///
+/// `Report` only requires that the wrapped error implement `Error`. It doesn't require that the
+/// wrapped error be `Send`, `Sync`, or `'static`.
+///
+/// # Examples
+///
+/// ```rust
+/// #![feature(error_reporter)]
+/// use std::error::{Error, Report};
+/// use std::fmt;
+///
+/// #[derive(Debug)]
+/// struct SuperError {
+/// source: SuperErrorSideKick,
+/// }
+///
+/// impl fmt::Display for SuperError {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// write!(f, "SuperError is here!")
+/// }
+/// }
+///
+/// impl Error for SuperError {
+/// fn source(&self) -> Option<&(dyn Error + 'static)> {
+/// Some(&self.source)
+/// }
+/// }
+///
+/// #[derive(Debug)]
+/// struct SuperErrorSideKick;
+///
+/// impl fmt::Display for SuperErrorSideKick {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// write!(f, "SuperErrorSideKick is here!")
+/// }
+/// }
+///
+/// impl Error for SuperErrorSideKick {}
+///
+/// fn get_super_error() -> Result<(), SuperError> {
+/// Err(SuperError { source: SuperErrorSideKick })
+/// }
+///
+/// fn main() {
+/// match get_super_error() {
+/// Err(e) => println!("Error: {}", Report::new(e)),
+/// _ => println!("No error"),
+/// }
+/// }
+/// ```
+///
+/// This example produces the following output:
+///
+/// ```console
+/// Error: SuperError is here!: SuperErrorSideKick is here!
+/// ```
+///
+/// ## Output consistency
+///
+/// Report prints the same output via `Display` and `Debug`, so it works well with
+/// [`Result::unwrap`]/[`Result::expect`] which print their `Err` variant via `Debug`:
+///
+/// ```should_panic
+/// #![feature(error_reporter)]
+/// use std::error::Report;
+/// # use std::error::Error;
+/// # use std::fmt;
+/// # #[derive(Debug)]
+/// # struct SuperError {
+/// # source: SuperErrorSideKick,
+/// # }
+/// # impl fmt::Display for SuperError {
+/// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// # write!(f, "SuperError is here!")
+/// # }
+/// # }
+/// # impl Error for SuperError {
+/// # fn source(&self) -> Option<&(dyn Error + 'static)> {
+/// # Some(&self.source)
+/// # }
+/// # }
+/// # #[derive(Debug)]
+/// # struct SuperErrorSideKick;
+/// # impl fmt::Display for SuperErrorSideKick {
+/// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// # write!(f, "SuperErrorSideKick is here!")
+/// # }
+/// # }
+/// # impl Error for SuperErrorSideKick {}
+/// # fn get_super_error() -> Result<(), SuperError> {
+/// # Err(SuperError { source: SuperErrorSideKick })
+/// # }
+///
+/// get_super_error().map_err(Report::new).unwrap();
+/// ```
+///
+/// This example produces the following output:
+///
+/// ```console
+/// thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: SuperError is here!: SuperErrorSideKick is here!', src/error.rs:34:40
+/// note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
+/// ```
+///
+/// ## Return from `main`
+///
+/// `Report` also implements `From` for all types that implement [`Error`]; this, combined with
+/// its `Debug` output, makes `Report` an ideal starting place for formatting errors returned
+/// from `main`.
+///
+/// ```should_panic
+/// #![feature(error_reporter)]
+/// use std::error::Report;
+/// # use std::error::Error;
+/// # use std::fmt;
+/// # #[derive(Debug)]
+/// # struct SuperError {
+/// # source: SuperErrorSideKick,
+/// # }
+/// # impl fmt::Display for SuperError {
+/// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// # write!(f, "SuperError is here!")
+/// # }
+/// # }
+/// # impl Error for SuperError {
+/// # fn source(&self) -> Option<&(dyn Error + 'static)> {
+/// # Some(&self.source)
+/// # }
+/// # }
+/// # #[derive(Debug)]
+/// # struct SuperErrorSideKick;
+/// # impl fmt::Display for SuperErrorSideKick {
+/// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// # write!(f, "SuperErrorSideKick is here!")
+/// # }
+/// # }
+/// # impl Error for SuperErrorSideKick {}
+/// # fn get_super_error() -> Result<(), SuperError> {
+/// # Err(SuperError { source: SuperErrorSideKick })
+/// # }
+///
+/// fn main() -> Result<(), Report> {
+/// get_super_error()?;
+/// Ok(())
+/// }
+/// ```
+///
+/// This example produces the following output:
+///
+/// ```console
+/// Error: SuperError is here!: SuperErrorSideKick is here!
+/// ```
+///
+/// **Note**: `Report`s constructed via `?` and `From` will be configured to use the single-line
+/// output format. If you want to make sure your `Report`s are pretty-printed and include a
+/// backtrace, you will need to convert them manually and enable those flags.
+///
+/// ```should_panic
+/// #![feature(error_reporter)]
+/// use std::error::Report;
+/// # use std::error::Error;
+/// # use std::fmt;
+/// # #[derive(Debug)]
+/// # struct SuperError {
+/// # source: SuperErrorSideKick,
+/// # }
+/// # impl fmt::Display for SuperError {
+/// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// # write!(f, "SuperError is here!")
+/// # }
+/// # }
+/// # impl Error for SuperError {
+/// # fn source(&self) -> Option<&(dyn Error + 'static)> {
+/// # Some(&self.source)
+/// # }
+/// # }
+/// # #[derive(Debug)]
+/// # struct SuperErrorSideKick;
+/// # impl fmt::Display for SuperErrorSideKick {
+/// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// # write!(f, "SuperErrorSideKick is here!")
+/// # }
+/// # }
+/// # impl Error for SuperErrorSideKick {}
+/// # fn get_super_error() -> Result<(), SuperError> {
+/// # Err(SuperError { source: SuperErrorSideKick })
+/// # }
+///
+/// fn main() -> Result<(), Report> {
+/// get_super_error()
+/// .map_err(Report::from)
+/// .map_err(|r| r.pretty(true).show_backtrace(true))?;
+/// Ok(())
+/// }
+/// ```
+///
+/// This example produces the following output:
+///
+/// ```console
+/// Error: SuperError is here!
+///
+/// Caused by:
+/// SuperErrorSideKick is here!
+/// ```
+#[unstable(feature = "error_reporter", issue = "90172")]
+pub struct Report<E = Box<dyn Error>> {
+ /// The error being reported.
+ error: E,
+ /// Whether a backtrace should be included as part of the report.
+ show_backtrace: bool,
+ /// Whether the report should be pretty-printed.
+ pretty: bool,
+}
+
+impl<E> Report<E>
+where
+ Report<E>: From<E>,
+{
+ /// Create a new `Report` from an input error.
+ #[unstable(feature = "error_reporter", issue = "90172")]
+ pub fn new(error: E) -> Report<E> {
+ Self::from(error)
+ }
+}
+
+impl<E> Report<E> {
+ /// Enable pretty-printing the report across multiple lines.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(error_reporter)]
+ /// use std::error::Report;
+ /// # use std::error::Error;
+ /// # use std::fmt;
+ /// # #[derive(Debug)]
+ /// # struct SuperError {
+ /// # source: SuperErrorSideKick,
+ /// # }
+ /// # impl fmt::Display for SuperError {
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # write!(f, "SuperError is here!")
+ /// # }
+ /// # }
+ /// # impl Error for SuperError {
+ /// # fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// # Some(&self.source)
+ /// # }
+ /// # }
+ /// # #[derive(Debug)]
+ /// # struct SuperErrorSideKick;
+ /// # impl fmt::Display for SuperErrorSideKick {
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # write!(f, "SuperErrorSideKick is here!")
+ /// # }
+ /// # }
+ /// # impl Error for SuperErrorSideKick {}
+ ///
+ /// let error = SuperError { source: SuperErrorSideKick };
+ /// let report = Report::new(error).pretty(true);
+ /// eprintln!("Error: {report:?}");
+ /// ```
+ ///
+ /// This example produces the following output:
+ ///
+ /// ```console
+ /// Error: SuperError is here!
+ ///
+ /// Caused by:
+ /// SuperErrorSideKick is here!
+ /// ```
+ ///
+ /// When there are multiple source errors, the causes will be numbered in order of iteration,
+ /// starting from the outermost error.
+ ///
+ /// ```rust
+ /// #![feature(error_reporter)]
+ /// use std::error::Report;
+ /// # use std::error::Error;
+ /// # use std::fmt;
+ /// # #[derive(Debug)]
+ /// # struct SuperError {
+ /// # source: SuperErrorSideKick,
+ /// # }
+ /// # impl fmt::Display for SuperError {
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # write!(f, "SuperError is here!")
+ /// # }
+ /// # }
+ /// # impl Error for SuperError {
+ /// # fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// # Some(&self.source)
+ /// # }
+ /// # }
+ /// # #[derive(Debug)]
+ /// # struct SuperErrorSideKick {
+ /// # source: SuperErrorSideKickSideKick,
+ /// # }
+ /// # impl fmt::Display for SuperErrorSideKick {
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # write!(f, "SuperErrorSideKick is here!")
+ /// # }
+ /// # }
+ /// # impl Error for SuperErrorSideKick {
+ /// # fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// # Some(&self.source)
+ /// # }
+ /// # }
+ /// # #[derive(Debug)]
+ /// # struct SuperErrorSideKickSideKick;
+ /// # impl fmt::Display for SuperErrorSideKickSideKick {
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # write!(f, "SuperErrorSideKickSideKick is here!")
+ /// # }
+ /// # }
+ /// # impl Error for SuperErrorSideKickSideKick { }
+ ///
+ /// let source = SuperErrorSideKickSideKick;
+ /// let source = SuperErrorSideKick { source };
+ /// let error = SuperError { source };
+ /// let report = Report::new(error).pretty(true);
+ /// eprintln!("Error: {report:?}");
+ /// ```
+ ///
+ /// This example produces the following output:
+ ///
+ /// ```console
+ /// Error: SuperError is here!
+ ///
+ /// Caused by:
+ /// 0: SuperErrorSideKick is here!
+ /// 1: SuperErrorSideKickSideKick is here!
+ /// ```
+ #[unstable(feature = "error_reporter", issue = "90172")]
+ pub fn pretty(mut self, pretty: bool) -> Self {
+ self.pretty = pretty;
+ self
+ }
+
+ /// Display backtrace if available when using pretty output format.
+ ///
+ /// # Examples
+ ///
+ /// **Note**: Report will search for the first `Backtrace` it can find starting from the
+ /// outermost error. In this example it will display the backtrace from the second error in the
+ /// chain, `SuperErrorSideKick`.
+ ///
+ /// ```rust
+ /// #![feature(error_reporter)]
+ /// #![feature(backtrace)]
+ /// #![feature(provide_any)]
+ /// #![feature(error_generic_member_access)]
+ /// # use std::error::Error;
+ /// # use std::fmt;
+ /// use std::any::Demand;
+ /// use std::error::Report;
+ /// use std::backtrace::Backtrace;
+ ///
+ /// # #[derive(Debug)]
+ /// # struct SuperError {
+ /// # source: SuperErrorSideKick,
+ /// # }
+ /// # impl fmt::Display for SuperError {
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # write!(f, "SuperError is here!")
+ /// # }
+ /// # }
+ /// # impl Error for SuperError {
+ /// # fn source(&self) -> Option<&(dyn Error + 'static)> {
+ /// # Some(&self.source)
+ /// # }
+ /// # }
+ /// #[derive(Debug)]
+ /// struct SuperErrorSideKick {
+ /// backtrace: Backtrace,
+ /// }
+ ///
+ /// impl SuperErrorSideKick {
+ /// fn new() -> SuperErrorSideKick {
+ /// SuperErrorSideKick { backtrace: Backtrace::force_capture() }
+ /// }
+ /// }
+ ///
+ /// impl Error for SuperErrorSideKick {
+ /// fn provide<'a>(&'a self, req: &mut Demand<'a>) {
+ /// req
+ /// .provide_ref::<Backtrace>(&self.backtrace);
+ /// }
+ /// }
+ ///
+ /// // The rest of the example is unchanged ...
+ /// # impl fmt::Display for SuperErrorSideKick {
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # write!(f, "SuperErrorSideKick is here!")
+ /// # }
+ /// # }
+ ///
+ /// let source = SuperErrorSideKick::new();
+ /// let error = SuperError { source };
+ /// let report = Report::new(error).pretty(true).show_backtrace(true);
+ /// eprintln!("Error: {report:?}");
+ /// ```
+ ///
+ /// This example produces something similar to the following output:
+ ///
+ /// ```console
+ /// Error: SuperError is here!
+ ///
+ /// Caused by:
+ /// SuperErrorSideKick is here!
+ ///
+ /// Stack backtrace:
+ /// 0: rust_out::main::_doctest_main_src_error_rs_1158_0::SuperErrorSideKick::new
+ /// 1: rust_out::main::_doctest_main_src_error_rs_1158_0
+ /// 2: rust_out::main
+ /// 3: core::ops::function::FnOnce::call_once
+ /// 4: std::sys_common::backtrace::__rust_begin_short_backtrace
+ /// 5: std::rt::lang_start::{{closure}}
+ /// 6: std::panicking::try
+ /// 7: std::rt::lang_start_internal
+ /// 8: std::rt::lang_start
+ /// 9: main
+ /// 10: __libc_start_main
+ /// 11: _start
+ /// ```
+ #[unstable(feature = "error_reporter", issue = "90172")]
+ pub fn show_backtrace(mut self, show_backtrace: bool) -> Self {
+ self.show_backtrace = show_backtrace;
+ self
+ }
+}
+
+impl<E> Report<E>
+where
+ E: Error,
+{
+ fn backtrace(&self) -> Option<&Backtrace> {
+ // We have to grab the backtrace from the first error directly, since that
+ // error may not be `'static`.
+ let backtrace = (&self.error as &dyn Error).request_ref();
+ let backtrace = backtrace.or_else(|| {
+ self.error
+ .source()
+ .map(|source| source.chain().find_map(|source| source.request_ref()))
+ .flatten()
+ });
+ backtrace
+ }
+
+ /// Format the report as a single line.
+ #[unstable(feature = "error_reporter", issue = "90172")]
+ fn fmt_singleline(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.error)?;
+
+ let sources = self.error.source().into_iter().flat_map(<dyn Error>::chain);
+
+ for cause in sources {
+ write!(f, ": {cause}")?;
+ }
+
+ Ok(())
+ }
+
+ /// Format the report as multiple lines, with each error cause on its own line.
+ #[unstable(feature = "error_reporter", issue = "90172")]
+ fn fmt_multiline(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let error = &self.error;
+
+ write!(f, "{error}")?;
+
+ if let Some(cause) = error.source() {
+ write!(f, "\n\nCaused by:")?;
+
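+ // With more than one cause in the chain, number each entry; a single
+ // cause is just indented under "Caused by:".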
+ let multiple = cause.source().is_some();
+
+ for (ind, error) in cause.chain().enumerate() {
+ writeln!(f)?;
+ let mut indented = Indented { inner: f };
+ if multiple {
+ write!(indented, "{ind: >4}: {error}")?;
+ } else {
+ write!(indented, " {error}")?;
+ }
+ }
+ }
+
+ if self.show_backtrace {
+ let backtrace = self.backtrace();
+
+ if let Some(backtrace) = backtrace {
+ let backtrace = backtrace.to_string();
+
+ f.write_str("\n\nStack backtrace:\n")?;
+ f.write_str(backtrace.trim_end())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl Report<Box<dyn Error>> {
+ fn backtrace(&self) -> Option<&Backtrace> {
+ // We have to grab the backtrace from the first error directly, since that
+ // error may not be `'static`.
+ let backtrace = self.error.request_ref();
+ let backtrace = backtrace.or_else(|| {
+ self.error
+ .source()
+ .map(|source| source.chain().find_map(|source| source.request_ref()))
+ .flatten()
+ });
+ backtrace
+ }
+
+ /// Format the report as a single line.
+ #[unstable(feature = "error_reporter", issue = "90172")]
+ fn fmt_singleline(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.error)?;
+
+ let sources = self.error.source().into_iter().flat_map(<dyn Error>::chain);
+
+ for cause in sources {
+ write!(f, ": {cause}")?;
+ }
+
+ Ok(())
+ }
+
+ /// Format the report as multiple lines, with each error cause on its own line.
+ #[unstable(feature = "error_reporter", issue = "90172")]
+ fn fmt_multiline(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let error = &self.error;
+
+ write!(f, "{error}")?;
+
+ if let Some(cause) = error.source() {
+ write!(f, "\n\nCaused by:")?;
+
+ let multiple = cause.source().is_some();
+
+ for (ind, error) in cause.chain().enumerate() {
+ writeln!(f)?;
+ let mut indented = Indented { inner: f };
+ if multiple {
+ write!(indented, "{ind: >4}: {error}")?;
+ } else {
+ write!(indented, " {error}")?;
+ }
+ }
+ }
+
+ if self.show_backtrace {
+ let backtrace = self.backtrace();
+
+ if let Some(backtrace) = backtrace {
+ let backtrace = backtrace.to_string();
+
+ f.write_str("\n\nStack backtrace:\n")?;
+ f.write_str(backtrace.trim_end())?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+#[unstable(feature = "error_reporter", issue = "90172")]
+impl<E> From<E> for Report<E>
+where
+ E: Error,
+{
+ fn from(error: E) -> Self {
+ Report { error, show_backtrace: false, pretty: false }
+ }
+}
+
+#[unstable(feature = "error_reporter", issue = "90172")]
+impl<'a, E> From<E> for Report<Box<dyn Error + 'a>>
+where
+ E: Error + 'a,
+{
+ fn from(error: E) -> Self {
+ let error = box error;
+ Report { error, show_backtrace: false, pretty: false }
+ }
+}
+
+#[unstable(feature = "error_reporter", issue = "90172")]
+impl<E> fmt::Display for Report<E>
+where
+ E: Error,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.pretty { self.fmt_multiline(f) } else { self.fmt_singleline(f) }
+ }
+}
+
+#[unstable(feature = "error_reporter", issue = "90172")]
+impl fmt::Display for Report<Box<dyn Error>> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.pretty { self.fmt_multiline(f) } else { self.fmt_singleline(f) }
+ }
+}
+
+// This type intentionally outputs the same format for `Display` and `Debug` for
+// situations where you unwrap a `Report` or return it from `main`.
+#[unstable(feature = "error_reporter", issue = "90172")]
+impl<E> fmt::Debug for Report<E>
+where
+ Report<E>: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+/// Wrapper type for indenting the inner source.
+struct Indented<'a, D> {
+ inner: &'a mut D,
+}
+
+impl<T> Write for Indented<'_, T>
+where
+ T: Write,
+{
+ fn write_str(&mut self, s: &str) -> fmt::Result {
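+ // Emit the input line by line; every line after the first is prefixed
+ // with a newline and an indent, so multi-line causes stay aligned under
+ // their entry in the report.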
+ for (i, line) in s.split('\n').enumerate() {
+ if i > 0 {
+ self.inner.write_char('\n')?;
+ self.inner.write_str(" ")?;
+ }
+
+ self.inner.write_str(line)?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/library/std/src/error/tests.rs b/library/std/src/error/tests.rs
new file mode 100644
index 000000000..ee999bd65
--- /dev/null
+++ b/library/std/src/error/tests.rs
@@ -0,0 +1,443 @@
+use super::Error;
+use crate::fmt;
+use core::any::Demand;
+
+#[derive(Debug, PartialEq)]
+struct A;
+#[derive(Debug, PartialEq)]
+struct B;
+
+impl fmt::Display for A {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "A")
+ }
+}
+impl fmt::Display for B {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "B")
+ }
+}
+
+impl Error for A {}
+impl Error for B {}
+
+#[test]
+fn downcasting() {
+ let mut a = A;
+ let a = &mut a as &mut (dyn Error + 'static);
+ assert_eq!(a.downcast_ref::<A>(), Some(&A));
+ assert_eq!(a.downcast_ref::<B>(), None);
+ assert_eq!(a.downcast_mut::<A>(), Some(&mut A));
+ assert_eq!(a.downcast_mut::<B>(), None);
+
+ let a: Box<dyn Error> = Box::new(A);
+ match a.downcast::<B>() {
+ Ok(..) => panic!("expected error"),
+ Err(e) => assert_eq!(*e.downcast::<A>().unwrap(), A),
+ }
+}
+
+use crate::backtrace::Backtrace;
+use crate::error::Report;
+
+#[derive(Debug)]
+struct SuperError {
+ source: SuperErrorSideKick,
+}
+
+impl fmt::Display for SuperError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "SuperError is here!")
+ }
+}
+
+impl Error for SuperError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ Some(&self.source)
+ }
+}
+
+#[derive(Debug)]
+struct SuperErrorSideKick;
+
+impl fmt::Display for SuperErrorSideKick {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "SuperErrorSideKick is here!")
+ }
+}
+
+impl Error for SuperErrorSideKick {}
+
+#[test]
+fn single_line_formatting() {
+ let error = SuperError { source: SuperErrorSideKick };
+ let report = Report::new(&error);
+ let actual = report.to_string();
+ let expected = String::from("SuperError is here!: SuperErrorSideKick is here!");
+
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn multi_line_formatting() {
+ let error = SuperError { source: SuperErrorSideKick };
+ let report = Report::new(&error).pretty(true);
+ let actual = report.to_string();
+ let expected = String::from(
+ "\
+SuperError is here!
+
+Caused by:
+ SuperErrorSideKick is here!",
+ );
+
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn error_with_no_sources_formats_single_line_correctly() {
+ let report = Report::new(SuperErrorSideKick);
+ let actual = report.to_string();
+ let expected = String::from("SuperErrorSideKick is here!");
+
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn error_with_no_sources_formats_multi_line_correctly() {
+ let report = Report::new(SuperErrorSideKick).pretty(true);
+ let actual = report.to_string();
+ let expected = String::from("SuperErrorSideKick is here!");
+
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn error_with_backtrace_outputs_correctly_with_one_source() {
+ let trace = Backtrace::force_capture();
+ let expected = format!(
+ "\
+The source of the error
+
+Caused by:
+ Error with backtrace
+
+Stack backtrace:
+{}",
+ trace
+ );
+ let error = GenericError::new("Error with backtrace");
+ let mut error = GenericError::new_with_source("The source of the error", error);
+ error.backtrace = Some(trace);
+ let report = Report::new(error).pretty(true).show_backtrace(true);
+
+ println!("Error: {report}");
+ assert_eq!(expected.trim_end(), report.to_string());
+}
+
+#[test]
+fn error_with_backtrace_outputs_correctly_with_two_sources() {
+ let trace = Backtrace::force_capture();
+ let expected = format!(
+ "\
+Error with two sources
+
+Caused by:
+ 0: The source of the error
+ 1: Error with backtrace
+
+Stack backtrace:
+{}",
+ trace
+ );
+ let mut error = GenericError::new("Error with backtrace");
+ error.backtrace = Some(trace);
+ let error = GenericError::new_with_source("The source of the error", error);
+ let error = GenericError::new_with_source("Error with two sources", error);
+ let report = Report::new(error).pretty(true).show_backtrace(true);
+
+ println!("Error: {report}");
+ assert_eq!(expected.trim_end(), report.to_string());
+}
+
+#[derive(Debug)]
+struct GenericError<D> {
+ message: D,
+ backtrace: Option<Backtrace>,
+ source: Option<Box<dyn Error + 'static>>,
+}
+
+impl<D> GenericError<D> {
+ fn new(message: D) -> GenericError<D> {
+ Self { message, backtrace: None, source: None }
+ }
+
+ fn new_with_source<E>(message: D, source: E) -> GenericError<D>
+ where
+ E: Error + 'static,
+ {
+ let source: Box<dyn Error + 'static> = Box::new(source);
+ let source = Some(source);
+ GenericError { message, backtrace: None, source }
+ }
+}
+
+impl<D> fmt::Display for GenericError<D>
+where
+ D: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.message, f)
+ }
+}
+
+impl<D> Error for GenericError<D>
+where
+ D: fmt::Debug + fmt::Display,
+{
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ self.source.as_deref()
+ }
+
+ fn provide<'a>(&'a self, req: &mut Demand<'a>) {
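+ // Only provide a backtrace if one was actually captured for this error.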
+ self.backtrace.as_ref().map(|bt| req.provide_ref::<Backtrace>(bt));
+ }
+}
+
+#[test]
+fn error_formats_single_line_with_rude_display_impl() {
+ #[derive(Debug)]
+ struct MyMessage;
+
+ impl fmt::Display for MyMessage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("line 1\nline 2")?;
+ f.write_str("\nline 3\nline 4\n")?;
+ f.write_str("line 5\nline 6")?;
+ Ok(())
+ }
+ }
+
+ let error = GenericError::new(MyMessage);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let report = Report::new(error);
+ let expected = "\
+line 1
+line 2
+line 3
+line 4
+line 5
+line 6: line 1
+line 2
+line 3
+line 4
+line 5
+line 6: line 1
+line 2
+line 3
+line 4
+line 5
+line 6: line 1
+line 2
+line 3
+line 4
+line 5
+line 6";
+
+ let actual = report.to_string();
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn error_formats_multi_line_with_rude_display_impl() {
+ #[derive(Debug)]
+ struct MyMessage;
+
+ impl fmt::Display for MyMessage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("line 1\nline 2")?;
+ f.write_str("\nline 3\nline 4\n")?;
+ f.write_str("line 5\nline 6")?;
+ Ok(())
+ }
+ }
+
+ let error = GenericError::new(MyMessage);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let report = Report::new(error).pretty(true);
+ let expected = "line 1
+line 2
+line 3
+line 4
+line 5
+line 6
+
+Caused by:
+ 0: line 1
+ line 2
+ line 3
+ line 4
+ line 5
+ line 6
+ 1: line 1
+ line 2
+ line 3
+ line 4
+ line 5
+ line 6
+ 2: line 1
+ line 2
+ line 3
+ line 4
+ line 5
+ line 6";
+
+ let actual = report.to_string();
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn errors_that_start_with_newline_formats_correctly() {
+ #[derive(Debug)]
+ struct MyMessage;
+
+ impl fmt::Display for MyMessage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("\nThe message\n")
+ }
+ }
+
+ let error = GenericError::new(MyMessage);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let report = Report::new(error).pretty(true);
+ let expected = "
+The message
+
+
+Caused by:
+ 0: \
+\n The message
+ \
+\n 1: \
+\n The message
+ ";
+
+ let actual = report.to_string();
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn errors_with_multiple_writes_on_same_line_dont_insert_erroneous_newlines() {
+ #[derive(Debug)]
+ struct MyMessage;
+
+ impl fmt::Display for MyMessage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("The message")?;
+ f.write_str(" goes on")?;
+ f.write_str(" and on.")
+ }
+ }
+
+ let error = GenericError::new(MyMessage);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let report = Report::new(error).pretty(true);
+ let expected = "\
+The message goes on and on.
+
+Caused by:
+ 0: The message goes on and on.
+ 1: The message goes on and on.";
+
+ let actual = report.to_string();
+ println!("{actual}");
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn errors_with_string_interpolation_formats_correctly() {
+ #[derive(Debug)]
+ struct MyMessage(usize);
+
+ impl fmt::Display for MyMessage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Got an error code: ({}). ", self.0)?;
+ write!(f, "What would you like to do in response?")
+ }
+ }
+
+ let error = GenericError::new(MyMessage(10));
+ let error = GenericError::new_with_source(MyMessage(20), error);
+ let report = Report::new(error).pretty(true);
+ let expected = "\
+Got an error code: (20). What would you like to do in response?
+
+Caused by:
+ Got an error code: (10). What would you like to do in response?";
+ let actual = report.to_string();
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn empty_lines_mid_message() {
+ #[derive(Debug)]
+ struct MyMessage;
+
+ impl fmt::Display for MyMessage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("line 1\n\nline 2")
+ }
+ }
+
+ let error = GenericError::new(MyMessage);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let report = Report::new(error).pretty(true);
+ let expected = "\
+line 1
+
+line 2
+
+Caused by:
+ 0: line 1
+ \
+\n line 2
+ 1: line 1
+ \
+\n line 2";
+
+ let actual = report.to_string();
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn only_one_source() {
+ #[derive(Debug)]
+ struct MyMessage;
+
+ impl fmt::Display for MyMessage {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("line 1\nline 2")
+ }
+ }
+
+ let error = GenericError::new(MyMessage);
+ let error = GenericError::new_with_source(MyMessage, error);
+ let report = Report::new(error).pretty(true);
+ let expected = "\
+line 1
+line 2
+
+Caused by:
+ line 1
+ line 2";
+
+ let actual = report.to_string();
+ assert_eq!(expected, actual);
+}
diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs
new file mode 100644
index 000000000..933b52b4d
--- /dev/null
+++ b/library/std/src/f32.rs
@@ -0,0 +1,923 @@
+//! Constants specific to the `f32` single-precision floating point type.
+//!
+//! *[See also the `f32` primitive type](primitive@f32).*
+//!
+//! Mathematically significant numbers are provided in the `consts` sub-module.
+//!
+//! For the constants defined directly in this module
+//! (as distinct from those defined in the `consts` sub-module),
+//! new code should instead use the associated constants
+//! defined directly on the `f32` type.
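+//!
+//! For example, the preferred spelling for the maximum finite value (a minimal sketch):
+//!
+//! ```
+//! # #![allow(deprecated, deprecated_in_future)]
+//! // The associated constant on the primitive type (preferred) and the
+//! // module-level constant re-exported here have the same value.
+//! assert_eq!(f32::MAX, std::f32::MAX);
+//! ```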
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![allow(missing_docs)]
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(not(test))]
+use crate::intrinsics;
+#[cfg(not(test))]
+use crate::sys::cmath;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::f32::{
+ consts, DIGITS, EPSILON, INFINITY, MANTISSA_DIGITS, MAX, MAX_10_EXP, MAX_EXP, MIN, MIN_10_EXP,
+ MIN_EXP, MIN_POSITIVE, NAN, NEG_INFINITY, RADIX,
+};
+
+#[cfg(not(test))]
+impl f32 {
+ /// Returns the largest integer less than or equal to `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.7_f32;
+ /// let g = 3.0_f32;
+ /// let h = -3.7_f32;
+ ///
+ /// assert_eq!(f.floor(), 3.0);
+ /// assert_eq!(g.floor(), 3.0);
+ /// assert_eq!(h.floor(), -4.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn floor(self) -> f32 {
+ unsafe { intrinsics::floorf32(self) }
+ }
+
+ /// Returns the smallest integer greater than or equal to `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.01_f32;
+ /// let g = 4.0_f32;
+ ///
+ /// assert_eq!(f.ceil(), 4.0);
+ /// assert_eq!(g.ceil(), 4.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn ceil(self) -> f32 {
+ unsafe { intrinsics::ceilf32(self) }
+ }
+
+ /// Returns the nearest integer to `self`. Rounds half-way cases away from
+ /// `0.0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.3_f32;
+ /// let g = -3.3_f32;
+ ///
+ /// assert_eq!(f.round(), 3.0);
+ /// assert_eq!(g.round(), -3.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn round(self) -> f32 {
+ unsafe { intrinsics::roundf32(self) }
+ }
+
+ /// Returns the integer part of `self`.
+ /// This means that non-integer numbers are always truncated towards zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.7_f32;
+ /// let g = 3.0_f32;
+ /// let h = -3.7_f32;
+ ///
+ /// assert_eq!(f.trunc(), 3.0);
+ /// assert_eq!(g.trunc(), 3.0);
+ /// assert_eq!(h.trunc(), -3.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn trunc(self) -> f32 {
+ unsafe { intrinsics::truncf32(self) }
+ }
+
+ /// Returns the fractional part of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 3.6_f32;
+ /// let y = -3.6_f32;
+ /// let abs_difference_x = (x.fract() - 0.6).abs();
+ /// let abs_difference_y = (y.fract() - (-0.6)).abs();
+ ///
+ /// assert!(abs_difference_x <= f32::EPSILON);
+ /// assert!(abs_difference_y <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn fract(self) -> f32 {
+ self - self.trunc()
+ }
+
+ /// Computes the absolute value of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 3.5_f32;
+ /// let y = -3.5_f32;
+ ///
+ /// let abs_difference_x = (x.abs() - x).abs();
+ /// let abs_difference_y = (y.abs() - (-y)).abs();
+ ///
+ /// assert!(abs_difference_x <= f32::EPSILON);
+ /// assert!(abs_difference_y <= f32::EPSILON);
+ ///
+ /// assert!(f32::NAN.abs().is_nan());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn abs(self) -> f32 {
+ unsafe { intrinsics::fabsf32(self) }
+ }
+
+ /// Returns a number that represents the sign of `self`.
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
+ /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
+ /// - NaN if the number is NaN
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.5_f32;
+ ///
+ /// assert_eq!(f.signum(), 1.0);
+ /// assert_eq!(f32::NEG_INFINITY.signum(), -1.0);
+ ///
+ /// assert!(f32::NAN.signum().is_nan());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn signum(self) -> f32 {
+ if self.is_nan() { Self::NAN } else { 1.0_f32.copysign(self) }
+ }
+
+ /// Returns a number composed of the magnitude of `self` and the sign of
+ /// `sign`.
+ ///
+ /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise
+ /// equal to `-self`. If `self` is a NaN, then a NaN with the sign bit of
+ /// `sign` is returned. Note, however, that conserving the sign bit on NaN
+ /// across arithmetical operations is not generally guaranteed.
+ /// See [explanation of NaN as a special value](primitive@f32) for more info.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.5_f32;
+ ///
+ /// assert_eq!(f.copysign(0.42), 3.5_f32);
+ /// assert_eq!(f.copysign(-0.42), -3.5_f32);
+ /// assert_eq!((-f).copysign(0.42), 3.5_f32);
+ /// assert_eq!((-f).copysign(-0.42), -3.5_f32);
+ ///
+ /// assert!(f32::NAN.copysign(1.0).is_nan());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[inline]
+ #[stable(feature = "copysign", since = "1.35.0")]
+ pub fn copysign(self, sign: f32) -> f32 {
+ unsafe { intrinsics::copysignf32(self, sign) }
+ }
+
+ /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
+ /// error, yielding a more accurate result than an unfused multiply-add.
+ ///
+ /// Using `mul_add` *may* be more performant than an unfused multiply-add if
+ /// the target architecture has a dedicated `fma` CPU instruction. However,
+ /// this is not always true, and will be heavily dependent on designing
+ /// algorithms with specific target hardware in mind.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let m = 10.0_f32;
+ /// let x = 4.0_f32;
+ /// let b = 60.0_f32;
+ ///
+ /// // 100.0
+ /// let abs_difference = (m.mul_add(x, b) - ((m * x) + b)).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn mul_add(self, a: f32, b: f32) -> f32 {
+ unsafe { intrinsics::fmaf32(self, a, b) }
+ }
+
+ /// Calculates Euclidean division, the matching method for `rem_euclid`.
+ ///
+ /// This computes the integer `n` such that
+ /// `self = n * rhs + self.rem_euclid(rhs)`.
+ /// In other words, the result is `self / rhs` rounded to the integer `n`
+ /// such that `self >= n * rhs`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a: f32 = 7.0;
+ /// let b = 4.0;
+ /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
+ /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
+ /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
+ /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ pub fn div_euclid(self, rhs: f32) -> f32 {
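+ // Truncate the quotient, then correct it by one step when the remainder
+ // is negative, so the result pairs with a nonnegative `rem_euclid`.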
+ let q = (self / rhs).trunc();
+ if self % rhs < 0.0 {
+ return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
+ }
+ q
+ }
+
+ /// Calculates the least nonnegative remainder of `self (mod rhs)`.
+ ///
+ /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
+ /// most cases. However, due to a floating point round-off error it can
+ /// result in `r == rhs.abs()`, violating the mathematical definition, if
+ /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
+ /// This result is not an element of the function's codomain, but it is the
+ /// closest floating point number in the real numbers and thus fulfills the
+ /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
+ /// approximately.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a: f32 = 7.0;
+ /// let b = 4.0;
+ /// assert_eq!(a.rem_euclid(b), 3.0);
+ /// assert_eq!((-a).rem_euclid(b), 1.0);
+ /// assert_eq!(a.rem_euclid(-b), 3.0);
+ /// assert_eq!((-a).rem_euclid(-b), 1.0);
+ /// // limitation due to round-off error
+ /// assert!((-f32::EPSILON).rem_euclid(3.0) != 0.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ pub fn rem_euclid(self, rhs: f32) -> f32 {
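+ // Take the plain remainder and shift a negative result into `[0, rhs.abs())`.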
+ let r = self % rhs;
+ if r < 0.0 { r + rhs.abs() } else { r }
+ }
+
+ /// Raises a number to an integer power.
+ ///
+ /// Using this function is generally faster than using `powf`.
+ /// It might have a different sequence of rounding operations than `powf`,
+ /// so the results are not guaranteed to agree.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0_f32;
+ /// let abs_difference = (x.powi(2) - (x * x)).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn powi(self, n: i32) -> f32 {
+ unsafe { intrinsics::powif32(self, n) }
+ }
+
+ /// Raises a number to a floating point power.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0_f32;
+ /// let abs_difference = (x.powf(2.0) - (x * x)).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn powf(self, n: f32) -> f32 {
+ unsafe { intrinsics::powf32(self, n) }
+ }
+
+ /// Returns the square root of a number.
+ ///
+ /// Returns NaN if `self` is a negative number other than `-0.0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let positive = 4.0_f32;
+ /// let negative = -4.0_f32;
+ /// let negative_zero = -0.0_f32;
+ ///
+ /// let abs_difference = (positive.sqrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// assert!(negative.sqrt().is_nan());
+ /// assert!(negative_zero.sqrt() == negative_zero);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sqrt(self) -> f32 {
+ unsafe { intrinsics::sqrtf32(self) }
+ }
+
+ /// Returns `e^(self)` (the exponential function).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let one = 1.0f32;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn exp(self) -> f32 {
+ unsafe { intrinsics::expf32(self) }
+ }
+
+ /// Returns `2^(self)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 2.0f32;
+ ///
+ /// // 2^2 - 4 == 0
+ /// let abs_difference = (f.exp2() - 4.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn exp2(self) -> f32 {
+ unsafe { intrinsics::exp2f32(self) }
+ }
+
+ /// Returns the natural logarithm of the number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let one = 1.0f32;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn ln(self) -> f32 {
+ unsafe { intrinsics::logf32(self) }
+ }
+
+ /// Returns the logarithm of the number with respect to an arbitrary base.
+ ///
+ /// The result might not be correctly rounded owing to implementation details;
+ /// `self.log2()` can produce more accurate results for base 2, and
+ /// `self.log10()` can produce more accurate results for base 10.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let five = 5.0f32;
+ ///
+ /// // log5(5) - 1 == 0
+ /// let abs_difference = (five.log(5.0) - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn log(self, base: f32) -> f32 {
+ self.ln() / base.ln()
+ }
+
+ /// Returns the base 2 logarithm of the number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let two = 2.0f32;
+ ///
+ /// // log2(2) - 1 == 0
+ /// let abs_difference = (two.log2() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn log2(self) -> f32 {
+ #[cfg(target_os = "android")]
+ return crate::sys::android::log2f32(self);
+ #[cfg(not(target_os = "android"))]
+ return unsafe { intrinsics::log2f32(self) };
+ }
+
+ /// Returns the base 10 logarithm of the number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let ten = 10.0f32;
+ ///
+ /// // log10(10) - 1 == 0
+ /// let abs_difference = (ten.log10() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn log10(self) -> f32 {
+ unsafe { intrinsics::log10f32(self) }
+ }
+
+ /// The positive difference of two numbers.
+ ///
+ /// * If `self <= other`: `0.0`
+ /// * Else: `self - other`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 3.0f32;
+ /// let y = -3.0f32;
+ ///
+ /// let abs_difference_x = (x.abs_sub(1.0) - 2.0).abs();
+ /// let abs_difference_y = (y.abs_sub(1.0) - 0.0).abs();
+ ///
+ /// assert!(abs_difference_x <= f32::EPSILON);
+ /// assert!(abs_difference_y <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[deprecated(
+ since = "1.10.0",
+ note = "you probably meant `(self - other).abs()`: \
+ this operation is `(self - other).max(0.0)` \
+ except that `abs_sub` also propagates NaNs (also \
+ known as `fdimf` in C). If you truly need the positive \
+ difference, consider using that expression or the C function \
+ `fdimf`, depending on how you wish to handle NaN (please consider \
+ filing an issue describing your use-case too)."
+ )]
+ pub fn abs_sub(self, other: f32) -> f32 {
+ unsafe { cmath::fdimf(self, other) }
+ }
+
+ /// Returns the cube root of a number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 8.0f32;
+ ///
+ /// // x^(1/3) - 2 == 0
+ /// let abs_difference = (x.cbrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn cbrt(self) -> f32 {
+ unsafe { cmath::cbrtf(self) }
+ }
+
+ /// Calculates the length of the hypotenuse of a right-angle triangle given
+ /// legs of length `x` and `y`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0f32;
+ /// let y = 3.0f32;
+ ///
+ /// // sqrt(x^2 + y^2)
+ /// let abs_difference = (x.hypot(y) - (x.powi(2) + y.powi(2)).sqrt()).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn hypot(self, other: f32) -> f32 {
+ unsafe { cmath::hypotf(self, other) }
+ }
+
+ /// Computes the sine of a number (in radians).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = std::f32::consts::FRAC_PI_2;
+ ///
+ /// let abs_difference = (x.sin() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sin(self) -> f32 {
+ unsafe { intrinsics::sinf32(self) }
+ }
+
+ /// Computes the cosine of a number (in radians).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0 * std::f32::consts::PI;
+ ///
+ /// let abs_difference = (x.cos() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn cos(self) -> f32 {
+ unsafe { intrinsics::cosf32(self) }
+ }
+
+ /// Computes the tangent of a number (in radians).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = std::f32::consts::FRAC_PI_4;
+ /// let abs_difference = (x.tan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn tan(self) -> f32 {
+ unsafe { cmath::tanf(self) }
+ }
+
+ /// Computes the arcsine of a number. Return value is in radians in
+ /// the range [-pi/2, pi/2] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = std::f32::consts::FRAC_PI_2;
+ ///
+ /// // asin(sin(pi/2))
+ /// let abs_difference = (f.sin().asin() - std::f32::consts::FRAC_PI_2).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn asin(self) -> f32 {
+ unsafe { cmath::asinf(self) }
+ }
+
+ /// Computes the arccosine of a number. Return value is in radians in
+ /// the range [0, pi] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = std::f32::consts::FRAC_PI_4;
+ ///
+ /// // acos(cos(pi/4))
+ /// let abs_difference = (f.cos().acos() - std::f32::consts::FRAC_PI_4).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn acos(self) -> f32 {
+ unsafe { cmath::acosf(self) }
+ }
+
+ /// Computes the arctangent of a number. Return value is in radians in the
+ /// range [-pi/2, pi/2].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 1.0f32;
+ ///
+ /// // atan(tan(1))
+ /// let abs_difference = (f.tan().atan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn atan(self) -> f32 {
+ unsafe { cmath::atanf(self) }
+ }
+
+ /// Computes the four quadrant arctangent of `self` (`y`) and `other` (`x`) in radians.
+ ///
+ /// * `x = 0`, `y = 0`: `0`
+ /// * `x >= 0`: `arctan(y/x)` -> `[-pi/2, pi/2]`
+ /// * `y >= 0`: `arctan(y/x) + pi` -> `(pi/2, pi]`
+ /// * `y < 0`: `arctan(y/x) - pi` -> `(-pi, -pi/2)`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Positive angles measured counter-clockwise
+ /// // from positive x axis
+ /// // -pi/4 radians (45 deg clockwise)
+ /// let x1 = 3.0f32;
+ /// let y1 = -3.0f32;
+ ///
+ /// // 3pi/4 radians (135 deg counter-clockwise)
+ /// let x2 = -3.0f32;
+ /// let y2 = 3.0f32;
+ ///
+ /// let abs_difference_1 = (y1.atan2(x1) - (-std::f32::consts::FRAC_PI_4)).abs();
+ /// let abs_difference_2 = (y2.atan2(x2) - (3.0 * std::f32::consts::FRAC_PI_4)).abs();
+ ///
+ /// assert!(abs_difference_1 <= f32::EPSILON);
+ /// assert!(abs_difference_2 <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn atan2(self, other: f32) -> f32 {
+ unsafe { cmath::atan2f(self, other) }
+ }
+
+ /// Simultaneously computes the sine and cosine of the number, `x`. Returns
+ /// `(sin(x), cos(x))`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = std::f32::consts::FRAC_PI_4;
+ /// let f = x.sin_cos();
+ ///
+ /// let abs_difference_0 = (f.0 - x.sin()).abs();
+ /// let abs_difference_1 = (f.1 - x.cos()).abs();
+ ///
+ /// assert!(abs_difference_0 <= f32::EPSILON);
+ /// assert!(abs_difference_1 <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sin_cos(self) -> (f32, f32) {
+ (self.sin(), self.cos())
+ }
+
+ /// Returns `e^(self) - 1` in a way that is accurate even if the
+ /// number is close to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1e-8_f32;
+ ///
+ /// // for very small x, e^x is approximately 1 + x + x^2 / 2
+ /// let approx = x + x * x / 2.0;
+ /// let abs_difference = (x.exp_m1() - approx).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn exp_m1(self) -> f32 {
+ unsafe { cmath::expm1f(self) }
+ }
+
+ /// Returns `ln(1+n)` (natural logarithm) more accurately than if
+ /// the operations were performed separately.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1e-8_f32;
+ ///
+ /// // for very small x, ln(1 + x) is approximately x - x^2 / 2
+ /// let approx = x - x * x / 2.0;
+ /// let abs_difference = (x.ln_1p() - approx).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn ln_1p(self) -> f32 {
+ unsafe { cmath::log1pf(self) }
+ }
+
+ /// Hyperbolic sine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f32::consts::E;
+ /// let x = 1.0f32;
+ ///
+ /// let f = x.sinh();
+ /// // Solving sinh() at 1 gives `(e^2-1)/(2e)`
+ /// let g = ((e * e) - 1.0) / (2.0 * e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sinh(self) -> f32 {
+ unsafe { cmath::sinhf(self) }
+ }
+
+ /// Hyperbolic cosine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f32::consts::E;
+ /// let x = 1.0f32;
+ /// let f = x.cosh();
+ /// // Solving cosh() at 1 gives this result
+ /// let g = ((e * e) + 1.0) / (2.0 * e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// // Same result
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn cosh(self) -> f32 {
+ unsafe { cmath::coshf(self) }
+ }
+
+ /// Hyperbolic tangent function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f32::consts::E;
+ /// let x = 1.0f32;
+ ///
+ /// let f = x.tanh();
+ /// // Solving tanh() at 1 gives `(1 - e^(-2))/(1 + e^(-2))`
+ /// let g = (1.0 - e.powi(-2)) / (1.0 + e.powi(-2));
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn tanh(self) -> f32 {
+ unsafe { cmath::tanhf(self) }
+ }
+
+ /// Inverse hyperbolic sine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1.0f32;
+ /// let f = x.sinh().asinh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn asinh(self) -> f32 {
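+ // asinh(x) = ln(|x| + sqrt(x^2 + 1)), with the sign of `x` restored via
+ // `copysign` so that asinh(-x) == -asinh(x).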
+ (self.abs() + ((self * self) + 1.0).sqrt()).ln().copysign(self)
+ }
+
+ /// Inverse hyperbolic cosine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1.0f32;
+ /// let f = x.cosh().acosh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn acosh(self) -> f32 {
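+ // acosh(x) = ln(x + sqrt(x^2 - 1)), which is only defined for x >= 1;
+ // return NaN below that.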
+ if self < 1.0 { Self::NAN } else { (self + ((self * self) - 1.0).sqrt()).ln() }
+ }
+
+ /// Inverse hyperbolic tangent function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f32::consts::E;
+ /// let f = e.tanh().atanh();
+ ///
+ /// let abs_difference = (f - e).abs();
+ ///
+ /// assert!(abs_difference <= 1e-5);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn atanh(self) -> f32 {
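+ // atanh(x) = 0.5 * ln((1 + x) / (1 - x)), computed via `ln_1p` of
+ // 2x / (1 - x) for better accuracy near zero.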
+ 0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
+ }
+}
diff --git a/library/std/src/f32/tests.rs b/library/std/src/f32/tests.rs
new file mode 100644
index 000000000..69fa203ff
--- /dev/null
+++ b/library/std/src/f32/tests.rs
@@ -0,0 +1,771 @@
+use crate::f32::consts;
+use crate::num::FpCategory as Fp;
+use crate::num::*;
+
+#[test]
+fn test_num_f32() {
+ test_num(10f32, 2f32);
+}
+
+#[test]
+fn test_min_nan() {
+ assert_eq!(f32::NAN.min(2.0), 2.0);
+ assert_eq!(2.0f32.min(f32::NAN), 2.0);
+}
+
+#[test]
+fn test_max_nan() {
+ assert_eq!(f32::NAN.max(2.0), 2.0);
+ assert_eq!(2.0f32.max(f32::NAN), 2.0);
+}
+
+#[test]
+fn test_minimum() {
+ assert!(f32::NAN.minimum(2.0).is_nan());
+ assert!(2.0f32.minimum(f32::NAN).is_nan());
+}
+
+#[test]
+fn test_maximum() {
+ assert!(f32::NAN.maximum(2.0).is_nan());
+ assert!(2.0f32.maximum(f32::NAN).is_nan());
+}
+
+#[test]
+fn test_nan() {
+ let nan: f32 = f32::NAN;
+ assert!(nan.is_nan());
+ assert!(!nan.is_infinite());
+ assert!(!nan.is_finite());
+ assert!(!nan.is_normal());
+ assert!(nan.is_sign_positive());
+ assert!(!nan.is_sign_negative());
+ assert_eq!(Fp::Nan, nan.classify());
+}
+
+#[test]
+fn test_infinity() {
+ let inf: f32 = f32::INFINITY;
+ assert!(inf.is_infinite());
+ assert!(!inf.is_finite());
+ assert!(inf.is_sign_positive());
+ assert!(!inf.is_sign_negative());
+ assert!(!inf.is_nan());
+ assert!(!inf.is_normal());
+ assert_eq!(Fp::Infinite, inf.classify());
+}
+
+#[test]
+fn test_neg_infinity() {
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert!(neg_inf.is_infinite());
+ assert!(!neg_inf.is_finite());
+ assert!(!neg_inf.is_sign_positive());
+ assert!(neg_inf.is_sign_negative());
+ assert!(!neg_inf.is_nan());
+ assert!(!neg_inf.is_normal());
+ assert_eq!(Fp::Infinite, neg_inf.classify());
+}
+
+#[test]
+fn test_zero() {
+ let zero: f32 = 0.0f32;
+ assert_eq!(0.0, zero);
+ assert!(!zero.is_infinite());
+ assert!(zero.is_finite());
+ assert!(zero.is_sign_positive());
+ assert!(!zero.is_sign_negative());
+ assert!(!zero.is_nan());
+ assert!(!zero.is_normal());
+ assert_eq!(Fp::Zero, zero.classify());
+}
+
+#[test]
+fn test_neg_zero() {
+ let neg_zero: f32 = -0.0;
+ assert_eq!(0.0, neg_zero);
+ assert!(!neg_zero.is_infinite());
+ assert!(neg_zero.is_finite());
+ assert!(!neg_zero.is_sign_positive());
+ assert!(neg_zero.is_sign_negative());
+ assert!(!neg_zero.is_nan());
+ assert!(!neg_zero.is_normal());
+ assert_eq!(Fp::Zero, neg_zero.classify());
+}
+
+#[test]
+fn test_one() {
+ let one: f32 = 1.0f32;
+ assert_eq!(1.0, one);
+ assert!(!one.is_infinite());
+ assert!(one.is_finite());
+ assert!(one.is_sign_positive());
+ assert!(!one.is_sign_negative());
+ assert!(!one.is_nan());
+ assert!(one.is_normal());
+ assert_eq!(Fp::Normal, one.classify());
+}
+
+#[test]
+fn test_is_nan() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert!(nan.is_nan());
+ assert!(!0.0f32.is_nan());
+ assert!(!5.3f32.is_nan());
+ assert!(!(-10.732f32).is_nan());
+ assert!(!inf.is_nan());
+ assert!(!neg_inf.is_nan());
+}
+
+#[test]
+fn test_is_infinite() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert!(!nan.is_infinite());
+ assert!(inf.is_infinite());
+ assert!(neg_inf.is_infinite());
+ assert!(!0.0f32.is_infinite());
+ assert!(!42.8f32.is_infinite());
+ assert!(!(-109.2f32).is_infinite());
+}
+
+#[test]
+fn test_is_finite() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert!(!nan.is_finite());
+ assert!(!inf.is_finite());
+ assert!(!neg_inf.is_finite());
+ assert!(0.0f32.is_finite());
+ assert!(42.8f32.is_finite());
+ assert!((-109.2f32).is_finite());
+}
+
+#[test]
+fn test_is_normal() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ let zero: f32 = 0.0f32;
+ let neg_zero: f32 = -0.0;
+ assert!(!nan.is_normal());
+ assert!(!inf.is_normal());
+ assert!(!neg_inf.is_normal());
+ assert!(!zero.is_normal());
+ assert!(!neg_zero.is_normal());
+ assert!(1f32.is_normal());
+ assert!(1e-37f32.is_normal());
+ assert!(!1e-38f32.is_normal());
+}
+
+#[test]
+fn test_classify() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ let zero: f32 = 0.0f32;
+ let neg_zero: f32 = -0.0;
+ assert_eq!(nan.classify(), Fp::Nan);
+ assert_eq!(inf.classify(), Fp::Infinite);
+ assert_eq!(neg_inf.classify(), Fp::Infinite);
+ assert_eq!(zero.classify(), Fp::Zero);
+ assert_eq!(neg_zero.classify(), Fp::Zero);
+ assert_eq!(1f32.classify(), Fp::Normal);
+ assert_eq!(1e-37f32.classify(), Fp::Normal);
+ assert_eq!(1e-38f32.classify(), Fp::Subnormal);
+}
+
+#[test]
+fn test_floor() {
+ assert_approx_eq!(1.0f32.floor(), 1.0f32);
+ assert_approx_eq!(1.3f32.floor(), 1.0f32);
+ assert_approx_eq!(1.5f32.floor(), 1.0f32);
+ assert_approx_eq!(1.7f32.floor(), 1.0f32);
+ assert_approx_eq!(0.0f32.floor(), 0.0f32);
+ assert_approx_eq!((-0.0f32).floor(), -0.0f32);
+ assert_approx_eq!((-1.0f32).floor(), -1.0f32);
+ assert_approx_eq!((-1.3f32).floor(), -2.0f32);
+ assert_approx_eq!((-1.5f32).floor(), -2.0f32);
+ assert_approx_eq!((-1.7f32).floor(), -2.0f32);
+}
+
+#[test]
+fn test_ceil() {
+ assert_approx_eq!(1.0f32.ceil(), 1.0f32);
+ assert_approx_eq!(1.3f32.ceil(), 2.0f32);
+ assert_approx_eq!(1.5f32.ceil(), 2.0f32);
+ assert_approx_eq!(1.7f32.ceil(), 2.0f32);
+ assert_approx_eq!(0.0f32.ceil(), 0.0f32);
+ assert_approx_eq!((-0.0f32).ceil(), -0.0f32);
+ assert_approx_eq!((-1.0f32).ceil(), -1.0f32);
+ assert_approx_eq!((-1.3f32).ceil(), -1.0f32);
+ assert_approx_eq!((-1.5f32).ceil(), -1.0f32);
+ assert_approx_eq!((-1.7f32).ceil(), -1.0f32);
+}
+
+#[test]
+fn test_round() {
+ assert_approx_eq!(1.0f32.round(), 1.0f32);
+ assert_approx_eq!(1.3f32.round(), 1.0f32);
+ assert_approx_eq!(1.5f32.round(), 2.0f32);
+ assert_approx_eq!(1.7f32.round(), 2.0f32);
+ assert_approx_eq!(0.0f32.round(), 0.0f32);
+ assert_approx_eq!((-0.0f32).round(), -0.0f32);
+ assert_approx_eq!((-1.0f32).round(), -1.0f32);
+ assert_approx_eq!((-1.3f32).round(), -1.0f32);
+ assert_approx_eq!((-1.5f32).round(), -2.0f32);
+ assert_approx_eq!((-1.7f32).round(), -2.0f32);
+}
+
+#[test]
+fn test_trunc() {
+ assert_approx_eq!(1.0f32.trunc(), 1.0f32);
+ assert_approx_eq!(1.3f32.trunc(), 1.0f32);
+ assert_approx_eq!(1.5f32.trunc(), 1.0f32);
+ assert_approx_eq!(1.7f32.trunc(), 1.0f32);
+ assert_approx_eq!(0.0f32.trunc(), 0.0f32);
+ assert_approx_eq!((-0.0f32).trunc(), -0.0f32);
+ assert_approx_eq!((-1.0f32).trunc(), -1.0f32);
+ assert_approx_eq!((-1.3f32).trunc(), -1.0f32);
+ assert_approx_eq!((-1.5f32).trunc(), -1.0f32);
+ assert_approx_eq!((-1.7f32).trunc(), -1.0f32);
+}
+
+#[test]
+fn test_fract() {
+ assert_approx_eq!(1.0f32.fract(), 0.0f32);
+ assert_approx_eq!(1.3f32.fract(), 0.3f32);
+ assert_approx_eq!(1.5f32.fract(), 0.5f32);
+ assert_approx_eq!(1.7f32.fract(), 0.7f32);
+ assert_approx_eq!(0.0f32.fract(), 0.0f32);
+ assert_approx_eq!((-0.0f32).fract(), -0.0f32);
+ assert_approx_eq!((-1.0f32).fract(), -0.0f32);
+ assert_approx_eq!((-1.3f32).fract(), -0.3f32);
+ assert_approx_eq!((-1.5f32).fract(), -0.5f32);
+ assert_approx_eq!((-1.7f32).fract(), -0.7f32);
+}
+
+#[test]
+fn test_abs() {
+ assert_eq!(f32::INFINITY.abs(), f32::INFINITY);
+ assert_eq!(1f32.abs(), 1f32);
+ assert_eq!(0f32.abs(), 0f32);
+ assert_eq!((-0f32).abs(), 0f32);
+ assert_eq!((-1f32).abs(), 1f32);
+ assert_eq!(f32::NEG_INFINITY.abs(), f32::INFINITY);
+ assert_eq!((1f32 / f32::NEG_INFINITY).abs(), 0f32);
+ assert!(f32::NAN.abs().is_nan());
+}
+
+#[test]
+fn test_signum() {
+ assert_eq!(f32::INFINITY.signum(), 1f32);
+ assert_eq!(1f32.signum(), 1f32);
+ assert_eq!(0f32.signum(), 1f32);
+ assert_eq!((-0f32).signum(), -1f32);
+ assert_eq!((-1f32).signum(), -1f32);
+ assert_eq!(f32::NEG_INFINITY.signum(), -1f32);
+ assert_eq!((1f32 / f32::NEG_INFINITY).signum(), -1f32);
+ assert!(f32::NAN.signum().is_nan());
+}
+
+#[test]
+fn test_is_sign_positive() {
+ assert!(f32::INFINITY.is_sign_positive());
+ assert!(1f32.is_sign_positive());
+ assert!(0f32.is_sign_positive());
+ assert!(!(-0f32).is_sign_positive());
+ assert!(!(-1f32).is_sign_positive());
+ assert!(!f32::NEG_INFINITY.is_sign_positive());
+ assert!(!(1f32 / f32::NEG_INFINITY).is_sign_positive());
+ assert!(f32::NAN.is_sign_positive());
+ assert!(!(-f32::NAN).is_sign_positive());
+}
+
+#[test]
+fn test_is_sign_negative() {
+ assert!(!f32::INFINITY.is_sign_negative());
+ assert!(!1f32.is_sign_negative());
+ assert!(!0f32.is_sign_negative());
+ assert!((-0f32).is_sign_negative());
+ assert!((-1f32).is_sign_negative());
+ assert!(f32::NEG_INFINITY.is_sign_negative());
+ assert!((1f32 / f32::NEG_INFINITY).is_sign_negative());
+ assert!(!f32::NAN.is_sign_negative());
+ assert!((-f32::NAN).is_sign_negative());
+}
+
+#[test]
+fn test_mul_add() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_approx_eq!(12.3f32.mul_add(4.5, 6.7), 62.05);
+ assert_approx_eq!((-12.3f32).mul_add(-4.5, -6.7), 48.65);
+ assert_approx_eq!(0.0f32.mul_add(8.9, 1.2), 1.2);
+ assert_approx_eq!(3.4f32.mul_add(-0.0, 5.6), 5.6);
+ assert!(nan.mul_add(7.8, 9.0).is_nan());
+ assert_eq!(inf.mul_add(7.8, 9.0), inf);
+ assert_eq!(neg_inf.mul_add(7.8, 9.0), neg_inf);
+ assert_eq!(8.9f32.mul_add(inf, 3.2), inf);
+ assert_eq!((-3.2f32).mul_add(2.4, neg_inf), neg_inf);
+}
+
+#[test]
+fn test_recip() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_eq!(1.0f32.recip(), 1.0);
+ assert_eq!(2.0f32.recip(), 0.5);
+ assert_eq!((-0.4f32).recip(), -2.5);
+ assert_eq!(0.0f32.recip(), inf);
+ assert!(nan.recip().is_nan());
+ assert_eq!(inf.recip(), 0.0);
+ assert_eq!(neg_inf.recip(), 0.0);
+}
+
+#[test]
+fn test_powi() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_eq!(1.0f32.powi(1), 1.0);
+ assert_approx_eq!((-3.1f32).powi(2), 9.61);
+ assert_approx_eq!(5.9f32.powi(-2), 0.028727);
+ assert_eq!(8.3f32.powi(0), 1.0);
+ assert!(nan.powi(2).is_nan());
+ assert_eq!(inf.powi(3), inf);
+ assert_eq!(neg_inf.powi(2), inf);
+}
+
+#[test]
+fn test_powf() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_eq!(1.0f32.powf(1.0), 1.0);
+ assert_approx_eq!(3.4f32.powf(4.5), 246.408218);
+ assert_approx_eq!(2.7f32.powf(-3.2), 0.041652);
+ assert_approx_eq!((-3.1f32).powf(2.0), 9.61);
+ assert_approx_eq!(5.9f32.powf(-2.0), 0.028727);
+ assert_eq!(8.3f32.powf(0.0), 1.0);
+ assert!(nan.powf(2.0).is_nan());
+ assert_eq!(inf.powf(2.0), inf);
+ assert_eq!(neg_inf.powf(3.0), neg_inf);
+}
+
+#[test]
+fn test_sqrt_domain() {
+ assert!(f32::NAN.sqrt().is_nan());
+ assert!(f32::NEG_INFINITY.sqrt().is_nan());
+ assert!((-1.0f32).sqrt().is_nan());
+ assert_eq!((-0.0f32).sqrt(), -0.0);
+ assert_eq!(0.0f32.sqrt(), 0.0);
+ assert_eq!(1.0f32.sqrt(), 1.0);
+ assert_eq!(f32::INFINITY.sqrt(), f32::INFINITY);
+}
+
+#[test]
+fn test_exp() {
+ assert_eq!(1.0, 0.0f32.exp());
+ assert_approx_eq!(2.718282, 1.0f32.exp());
+ assert_approx_eq!(148.413162, 5.0f32.exp());
+
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ let nan: f32 = f32::NAN;
+ assert_eq!(inf, inf.exp());
+ assert_eq!(0.0, neg_inf.exp());
+ assert!(nan.exp().is_nan());
+}
+
+#[test]
+fn test_exp2() {
+ assert_eq!(32.0, 5.0f32.exp2());
+ assert_eq!(1.0, 0.0f32.exp2());
+
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ let nan: f32 = f32::NAN;
+ assert_eq!(inf, inf.exp2());
+ assert_eq!(0.0, neg_inf.exp2());
+ assert!(nan.exp2().is_nan());
+}
+
+#[test]
+fn test_ln() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_approx_eq!(1.0f32.exp().ln(), 1.0);
+ assert!(nan.ln().is_nan());
+ assert_eq!(inf.ln(), inf);
+ assert!(neg_inf.ln().is_nan());
+ assert!((-2.3f32).ln().is_nan());
+ assert_eq!((-0.0f32).ln(), neg_inf);
+ assert_eq!(0.0f32.ln(), neg_inf);
+ assert_approx_eq!(4.0f32.ln(), 1.386294);
+}
+
+#[test]
+fn test_log() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_eq!(10.0f32.log(10.0), 1.0);
+ assert_approx_eq!(2.3f32.log(3.5), 0.664858);
+ assert_eq!(1.0f32.exp().log(1.0f32.exp()), 1.0);
+ assert!(1.0f32.log(1.0).is_nan());
+ assert!(1.0f32.log(-13.9).is_nan());
+ assert!(nan.log(2.3).is_nan());
+ assert_eq!(inf.log(10.0), inf);
+ assert!(neg_inf.log(8.8).is_nan());
+ assert!((-2.3f32).log(0.1).is_nan());
+ assert_eq!((-0.0f32).log(2.0), neg_inf);
+ assert_eq!(0.0f32.log(7.0), neg_inf);
+}
+
+#[test]
+fn test_log2() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_approx_eq!(10.0f32.log2(), 3.321928);
+ assert_approx_eq!(2.3f32.log2(), 1.201634);
+ assert_approx_eq!(1.0f32.exp().log2(), 1.442695);
+ assert!(nan.log2().is_nan());
+ assert_eq!(inf.log2(), inf);
+ assert!(neg_inf.log2().is_nan());
+ assert!((-2.3f32).log2().is_nan());
+ assert_eq!((-0.0f32).log2(), neg_inf);
+ assert_eq!(0.0f32.log2(), neg_inf);
+}
+
+#[test]
+fn test_log10() {
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_eq!(10.0f32.log10(), 1.0);
+ assert_approx_eq!(2.3f32.log10(), 0.361728);
+ assert_approx_eq!(1.0f32.exp().log10(), 0.434294);
+ assert_eq!(1.0f32.log10(), 0.0);
+ assert!(nan.log10().is_nan());
+ assert_eq!(inf.log10(), inf);
+ assert!(neg_inf.log10().is_nan());
+ assert!((-2.3f32).log10().is_nan());
+ assert_eq!((-0.0f32).log10(), neg_inf);
+ assert_eq!(0.0f32.log10(), neg_inf);
+}
+
+#[test]
+fn test_to_degrees() {
+ let pi: f32 = consts::PI;
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_eq!(0.0f32.to_degrees(), 0.0);
+ assert_approx_eq!((-5.8f32).to_degrees(), -332.315521);
+ assert_eq!(pi.to_degrees(), 180.0);
+ assert!(nan.to_degrees().is_nan());
+ assert_eq!(inf.to_degrees(), inf);
+ assert_eq!(neg_inf.to_degrees(), neg_inf);
+ assert_eq!(1_f32.to_degrees(), 57.2957795130823208767981548141051703);
+}
+
+#[test]
+fn test_to_radians() {
+ let pi: f32 = consts::PI;
+ let nan: f32 = f32::NAN;
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ assert_eq!(0.0f32.to_radians(), 0.0);
+ assert_approx_eq!(154.6f32.to_radians(), 2.698279);
+ assert_approx_eq!((-332.31f32).to_radians(), -5.799903);
+ assert_eq!(180.0f32.to_radians(), pi);
+ assert!(nan.to_radians().is_nan());
+ assert_eq!(inf.to_radians(), inf);
+ assert_eq!(neg_inf.to_radians(), neg_inf);
+}
+
+#[test]
+fn test_asinh() {
+ assert_eq!(0.0f32.asinh(), 0.0f32);
+ assert_eq!((-0.0f32).asinh(), -0.0f32);
+
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ let nan: f32 = f32::NAN;
+ assert_eq!(inf.asinh(), inf);
+ assert_eq!(neg_inf.asinh(), neg_inf);
+ assert!(nan.asinh().is_nan());
+ assert!((-0.0f32).asinh().is_sign_negative()); // issue 63271
+ assert_approx_eq!(2.0f32.asinh(), 1.443635475178810342493276740273105f32);
+ assert_approx_eq!((-2.0f32).asinh(), -1.443635475178810342493276740273105f32);
+ // regression test for the catastrophic cancellation fixed in 72486
+ assert_approx_eq!((-3000.0f32).asinh(), -8.699514775987968673236893537700647f32);
+}
+
+#[test]
+fn test_acosh() {
+ assert_eq!(1.0f32.acosh(), 0.0f32);
+ assert!(0.999f32.acosh().is_nan());
+
+ let inf: f32 = f32::INFINITY;
+ let neg_inf: f32 = f32::NEG_INFINITY;
+ let nan: f32 = f32::NAN;
+ assert_eq!(inf.acosh(), inf);
+ assert!(neg_inf.acosh().is_nan());
+ assert!(nan.acosh().is_nan());
+ assert_approx_eq!(2.0f32.acosh(), 1.31695789692481670862504634730796844f32);
+ assert_approx_eq!(3.0f32.acosh(), 1.76274717403908605046521864995958461f32);
+}
+
+#[test]
+fn test_atanh() {
+ assert_eq!(0.0f32.atanh(), 0.0f32);
+ assert_eq!((-0.0f32).atanh(), -0.0f32);
+
+ let inf32: f32 = f32::INFINITY;
+ let neg_inf32: f32 = f32::NEG_INFINITY;
+ assert_eq!(1.0f32.atanh(), inf32);
+ assert_eq!((-1.0f32).atanh(), neg_inf32);
+
+ assert!(2f32.atanh().atanh().is_nan());
+ assert!((-2f32).atanh().atanh().is_nan());
+
+ let nan32: f32 = f32::NAN;
+ assert!(inf32.atanh().is_nan());
+ assert!(neg_inf32.atanh().is_nan());
+ assert!(nan32.atanh().is_nan());
+
+ assert_approx_eq!(0.5f32.atanh(), 0.54930614433405484569762261846126285f32);
+ assert_approx_eq!((-0.5f32).atanh(), -0.54930614433405484569762261846126285f32);
+}
+
+#[test]
+fn test_real_consts() {
+ use super::consts;
+
+ let pi: f32 = consts::PI;
+ let frac_pi_2: f32 = consts::FRAC_PI_2;
+ let frac_pi_3: f32 = consts::FRAC_PI_3;
+ let frac_pi_4: f32 = consts::FRAC_PI_4;
+ let frac_pi_6: f32 = consts::FRAC_PI_6;
+ let frac_pi_8: f32 = consts::FRAC_PI_8;
+ let frac_1_pi: f32 = consts::FRAC_1_PI;
+ let frac_2_pi: f32 = consts::FRAC_2_PI;
+ let frac_2_sqrtpi: f32 = consts::FRAC_2_SQRT_PI;
+ let sqrt2: f32 = consts::SQRT_2;
+ let frac_1_sqrt2: f32 = consts::FRAC_1_SQRT_2;
+ let e: f32 = consts::E;
+ let log2_e: f32 = consts::LOG2_E;
+ let log10_e: f32 = consts::LOG10_E;
+ let ln_2: f32 = consts::LN_2;
+ let ln_10: f32 = consts::LN_10;
+
+ assert_approx_eq!(frac_pi_2, pi / 2f32);
+ assert_approx_eq!(frac_pi_3, pi / 3f32);
+ assert_approx_eq!(frac_pi_4, pi / 4f32);
+ assert_approx_eq!(frac_pi_6, pi / 6f32);
+ assert_approx_eq!(frac_pi_8, pi / 8f32);
+ assert_approx_eq!(frac_1_pi, 1f32 / pi);
+ assert_approx_eq!(frac_2_pi, 2f32 / pi);
+ assert_approx_eq!(frac_2_sqrtpi, 2f32 / pi.sqrt());
+ assert_approx_eq!(sqrt2, 2f32.sqrt());
+ assert_approx_eq!(frac_1_sqrt2, 1f32 / 2f32.sqrt());
+ assert_approx_eq!(log2_e, e.log2());
+ assert_approx_eq!(log10_e, e.log10());
+ assert_approx_eq!(ln_2, 2f32.ln());
+ assert_approx_eq!(ln_10, 10f32.ln());
+}
+
+#[test]
+fn test_float_bits_conv() {
+ assert_eq!((1f32).to_bits(), 0x3f800000);
+ assert_eq!((12.5f32).to_bits(), 0x41480000);
+ assert_eq!((1337f32).to_bits(), 0x44a72000);
+ assert_eq!((-14.25f32).to_bits(), 0xc1640000);
+ assert_approx_eq!(f32::from_bits(0x3f800000), 1.0);
+ assert_approx_eq!(f32::from_bits(0x41480000), 12.5);
+ assert_approx_eq!(f32::from_bits(0x44a72000), 1337.0);
+ assert_approx_eq!(f32::from_bits(0xc1640000), -14.25);
+
+ // Check that NaNs roundtrip their bits regardless of signaling-ness
+ // 0xA is 0b1010; 0x5 is 0b0101 -- so these two masks together cover all the mantissa bits
+ let masked_nan1 = f32::NAN.to_bits() ^ 0x002A_AAAA;
+ let masked_nan2 = f32::NAN.to_bits() ^ 0x0055_5555;
+ assert!(f32::from_bits(masked_nan1).is_nan());
+ assert!(f32::from_bits(masked_nan2).is_nan());
+
+ assert_eq!(f32::from_bits(masked_nan1).to_bits(), masked_nan1);
+ assert_eq!(f32::from_bits(masked_nan2).to_bits(), masked_nan2);
+}
+
+#[test]
+#[should_panic]
+fn test_clamp_min_greater_than_max() {
+ let _ = 1.0f32.clamp(3.0, 1.0);
+}
+
+#[test]
+#[should_panic]
+fn test_clamp_min_is_nan() {
+ let _ = 1.0f32.clamp(f32::NAN, 1.0);
+}
+
+#[test]
+#[should_panic]
+fn test_clamp_max_is_nan() {
+ let _ = 1.0f32.clamp(3.0, f32::NAN);
+}
+
+#[test]
+fn test_total_cmp() {
+ use core::cmp::Ordering;
+
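+ // A brief sketch of the bit patterns used below: the highest explicit
+ // mantissa bit (1 << (MANTISSA_DIGITS - 2)) is the quiet-NaN flag, so q_nan()
+ // sets it while s_nan() clears it and adds a nonzero payload to stay a NaN;
+ // min_subnorm()/max_subnorm() bracket the subnormal range below MIN_POSITIVE.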
+ fn quiet_bit_mask() -> u32 {
+ 1 << (f32::MANTISSA_DIGITS - 2)
+ }
+
+ fn min_subnorm() -> f32 {
+ f32::MIN_POSITIVE / f32::powf(2.0, f32::MANTISSA_DIGITS as f32 - 1.0)
+ }
+
+ fn max_subnorm() -> f32 {
+ f32::MIN_POSITIVE - min_subnorm()
+ }
+
+ fn q_nan() -> f32 {
+ f32::from_bits(f32::NAN.to_bits() | quiet_bit_mask())
+ }
+
+ fn s_nan() -> f32 {
+ f32::from_bits((f32::NAN.to_bits() & !quiet_bit_mask()) + 42)
+ }
+
+ assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
+ assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Equal, (-f32::INFINITY).total_cmp(&-f32::INFINITY));
+ assert_eq!(Ordering::Equal, (-f32::MAX).total_cmp(&-f32::MAX));
+ assert_eq!(Ordering::Equal, (-2.5_f32).total_cmp(&-2.5));
+ assert_eq!(Ordering::Equal, (-1.0_f32).total_cmp(&-1.0));
+ assert_eq!(Ordering::Equal, (-1.5_f32).total_cmp(&-1.5));
+ assert_eq!(Ordering::Equal, (-0.5_f32).total_cmp(&-0.5));
+ assert_eq!(Ordering::Equal, (-f32::MIN_POSITIVE).total_cmp(&-f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Equal, (-0.0_f32).total_cmp(&-0.0));
+ assert_eq!(Ordering::Equal, 0.0_f32.total_cmp(&0.0));
+ assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Equal, f32::MIN_POSITIVE.total_cmp(&f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Equal, 0.5_f32.total_cmp(&0.5));
+ assert_eq!(Ordering::Equal, 1.0_f32.total_cmp(&1.0));
+ assert_eq!(Ordering::Equal, 1.5_f32.total_cmp(&1.5));
+ assert_eq!(Ordering::Equal, 2.5_f32.total_cmp(&2.5));
+ assert_eq!(Ordering::Equal, f32::MAX.total_cmp(&f32::MAX));
+ assert_eq!(Ordering::Equal, f32::INFINITY.total_cmp(&f32::INFINITY));
+ assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
+ assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));
+
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::INFINITY));
+ assert_eq!(Ordering::Less, (-f32::INFINITY).total_cmp(&-f32::MAX));
+ assert_eq!(Ordering::Less, (-f32::MAX).total_cmp(&-2.5));
+ assert_eq!(Ordering::Less, (-2.5_f32).total_cmp(&-1.5));
+ assert_eq!(Ordering::Less, (-1.5_f32).total_cmp(&-1.0));
+ assert_eq!(Ordering::Less, (-1.0_f32).total_cmp(&-0.5));
+ assert_eq!(Ordering::Less, (-0.5_f32).total_cmp(&-f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-f32::MIN_POSITIVE).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
+ assert_eq!(Ordering::Less, (-0.0_f32).total_cmp(&0.0));
+ assert_eq!(Ordering::Less, 0.0_f32.total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, f32::MIN_POSITIVE.total_cmp(&0.5));
+ assert_eq!(Ordering::Less, 0.5_f32.total_cmp(&1.0));
+ assert_eq!(Ordering::Less, 1.0_f32.total_cmp(&1.5));
+ assert_eq!(Ordering::Less, 1.5_f32.total_cmp(&2.5));
+ assert_eq!(Ordering::Less, 2.5_f32.total_cmp(&f32::MAX));
+ assert_eq!(Ordering::Less, f32::MAX.total_cmp(&f32::INFINITY));
+ assert_eq!(Ordering::Less, f32::INFINITY.total_cmp(&s_nan()));
+ assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));
+
+ assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
+ assert_eq!(Ordering::Greater, (-f32::INFINITY).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Greater, (-f32::MAX).total_cmp(&-f32::INFINITY));
+ assert_eq!(Ordering::Greater, (-2.5_f32).total_cmp(&-f32::MAX));
+ assert_eq!(Ordering::Greater, (-1.5_f32).total_cmp(&-2.5));
+ assert_eq!(Ordering::Greater, (-1.0_f32).total_cmp(&-1.5));
+ assert_eq!(Ordering::Greater, (-0.5_f32).total_cmp(&-1.0));
+ assert_eq!(Ordering::Greater, (-f32::MIN_POSITIVE).total_cmp(&-0.5));
+ assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Greater, (-0.0_f32).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Greater, 0.0_f32.total_cmp(&-0.0));
+ assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
+ assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Greater, f32::MIN_POSITIVE.total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Greater, 0.5_f32.total_cmp(&f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Greater, 1.0_f32.total_cmp(&0.5));
+ assert_eq!(Ordering::Greater, 1.5_f32.total_cmp(&1.0));
+ assert_eq!(Ordering::Greater, 2.5_f32.total_cmp(&1.5));
+ assert_eq!(Ordering::Greater, f32::MAX.total_cmp(&2.5));
+ assert_eq!(Ordering::Greater, f32::INFINITY.total_cmp(&f32::MAX));
+ assert_eq!(Ordering::Greater, s_nan().total_cmp(&f32::INFINITY));
+ assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));
+
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f32::INFINITY));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f32::MAX));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f32::MAX));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f32::INFINITY));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));
+
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::INFINITY));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::MAX));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::MAX));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::INFINITY));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
+}
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs
new file mode 100644
index 000000000..a9aa84f70
--- /dev/null
+++ b/library/std/src/f64.rs
@@ -0,0 +1,949 @@
+//! Constants specific to the `f64` double-precision floating point type.
+//!
+//! *[See also the `f64` primitive type](primitive@f64).*
+//!
+//! Mathematically significant numbers are provided in the `consts` sub-module.
+//!
+//! For the constants defined directly in this module
+//! (as distinct from those defined in the `consts` sub-module),
+//! new code should instead use the associated constants
+//! defined directly on the `f64` type.
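+//!
+//! For example, prefer the associated constant over the module re-export:
+//!
+//! ```
+//! // Both spell the same value; the associated constant is the recommended form.
+//! assert_eq!(std::f64::MAX, f64::MAX);
+//! ```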
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![allow(missing_docs)]
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(not(test))]
+use crate::intrinsics;
+#[cfg(not(test))]
+use crate::sys::cmath;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::f64::{
+ consts, DIGITS, EPSILON, INFINITY, MANTISSA_DIGITS, MAX, MAX_10_EXP, MAX_EXP, MIN, MIN_10_EXP,
+ MIN_EXP, MIN_POSITIVE, NAN, NEG_INFINITY, RADIX,
+};
+
+#[cfg(not(test))]
+impl f64 {
+ /// Returns the largest integer less than or equal to `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.7_f64;
+ /// let g = 3.0_f64;
+ /// let h = -3.7_f64;
+ ///
+ /// assert_eq!(f.floor(), 3.0);
+ /// assert_eq!(g.floor(), 3.0);
+ /// assert_eq!(h.floor(), -4.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn floor(self) -> f64 {
+ unsafe { intrinsics::floorf64(self) }
+ }
+
+ /// Returns the smallest integer greater than or equal to `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.01_f64;
+ /// let g = 4.0_f64;
+ ///
+ /// assert_eq!(f.ceil(), 4.0);
+ /// assert_eq!(g.ceil(), 4.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn ceil(self) -> f64 {
+ unsafe { intrinsics::ceilf64(self) }
+ }
+
+ /// Returns the nearest integer to `self`. Rounds half-way cases away from
+ /// `0.0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.3_f64;
+ /// let g = -3.3_f64;
+ ///
+ /// assert_eq!(f.round(), 3.0);
+ /// assert_eq!(g.round(), -3.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn round(self) -> f64 {
+ unsafe { intrinsics::roundf64(self) }
+ }
+
+ /// Returns the integer part of `self`.
+ /// This means that non-integer numbers are always truncated towards zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.7_f64;
+ /// let g = 3.0_f64;
+ /// let h = -3.7_f64;
+ ///
+ /// assert_eq!(f.trunc(), 3.0);
+ /// assert_eq!(g.trunc(), 3.0);
+ /// assert_eq!(h.trunc(), -3.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn trunc(self) -> f64 {
+ unsafe { intrinsics::truncf64(self) }
+ }
+
+ /// Returns the fractional part of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 3.6_f64;
+ /// let y = -3.6_f64;
+ /// let abs_difference_x = (x.fract() - 0.6).abs();
+ /// let abs_difference_y = (y.fract() - (-0.6)).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn fract(self) -> f64 {
+ self - self.trunc()
+ }
+
+ /// Computes the absolute value of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 3.5_f64;
+ /// let y = -3.5_f64;
+ ///
+ /// let abs_difference_x = (x.abs() - x).abs();
+ /// let abs_difference_y = (y.abs() - (-y)).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ ///
+ /// assert!(f64::NAN.abs().is_nan());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn abs(self) -> f64 {
+ unsafe { intrinsics::fabsf64(self) }
+ }
+
+ /// Returns a number that represents the sign of `self`.
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
+ /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
+ /// - NaN if the number is NaN
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.5_f64;
+ ///
+ /// assert_eq!(f.signum(), 1.0);
+ /// assert_eq!(f64::NEG_INFINITY.signum(), -1.0);
+ ///
+ /// assert!(f64::NAN.signum().is_nan());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn signum(self) -> f64 {
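+ // For non-NaN inputs, `1.0_f64.copysign(self)` returns ±1.0 according to the
+ // sign bit of `self`, which also covers ±0.0 and the infinities.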
+ if self.is_nan() { Self::NAN } else { 1.0_f64.copysign(self) }
+ }
+
+ /// Returns a number composed of the magnitude of `self` and the sign of
+ /// `sign`.
+ ///
+ /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise
+ /// equal to `-self`. If `self` is a NaN, then a NaN with the sign bit of
+ /// `sign` is returned. Note, however, that conserving the sign bit on NaN
+ /// across arithmetical operations is not generally guaranteed.
+ /// See [explanation of NaN as a special value](primitive@f32) for more info.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 3.5_f64;
+ ///
+ /// assert_eq!(f.copysign(0.42), 3.5_f64);
+ /// assert_eq!(f.copysign(-0.42), -3.5_f64);
+ /// assert_eq!((-f).copysign(0.42), 3.5_f64);
+ /// assert_eq!((-f).copysign(-0.42), -3.5_f64);
+ ///
+ /// assert!(f64::NAN.copysign(1.0).is_nan());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "copysign", since = "1.35.0")]
+ #[inline]
+ pub fn copysign(self, sign: f64) -> f64 {
+ unsafe { intrinsics::copysignf64(self, sign) }
+ }
+
+ /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
+ /// error, yielding a more accurate result than an unfused multiply-add.
+ ///
+ /// Using `mul_add` *may* be more performant than an unfused multiply-add if
+ /// the target architecture has a dedicated `fma` CPU instruction. However,
+ /// this is not always true, and will be heavily dependent on designing
+ /// algorithms with specific target hardware in mind.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let m = 10.0_f64;
+ /// let x = 4.0_f64;
+ /// let b = 60.0_f64;
+ ///
+ /// // 100.0
+ /// let abs_difference = (m.mul_add(x, b) - ((m * x) + b)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn mul_add(self, a: f64, b: f64) -> f64 {
+ unsafe { intrinsics::fmaf64(self, a, b) }
+ }
+
+ /// Calculates Euclidean division, the matching method for `rem_euclid`.
+ ///
+ /// This computes the integer `n` such that
+ /// `self = n * rhs + self.rem_euclid(rhs)`.
+ /// In other words, the result is `self / rhs` rounded to the integer `n`
+ /// such that `self >= n * rhs`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a: f64 = 7.0;
+ /// let b = 4.0;
+ /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
+ /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
+ /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
+ /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ pub fn div_euclid(self, rhs: f64) -> f64 {
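+ // Start from truncating division, then nudge the quotient by one whenever the
+ // remainder is negative so that `self - n * rhs` ends up nonnegative.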
+ let q = (self / rhs).trunc();
+ if self % rhs < 0.0 {
+ return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
+ }
+ q
+ }
+
+ /// Calculates the least nonnegative remainder of `self (mod rhs)`.
+ ///
+ /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
+ /// most cases. However, due to a floating point round-off error it can
+ /// result in `r == rhs.abs()`, violating the mathematical definition, if
+ /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
+ /// This result is not an element of the function's codomain, but it is the
+ /// closest floating point number in the real numbers and thus fulfills the
+ /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
+ /// approximately.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a: f64 = 7.0;
+ /// let b = 4.0;
+ /// assert_eq!(a.rem_euclid(b), 3.0);
+ /// assert_eq!((-a).rem_euclid(b), 1.0);
+ /// assert_eq!(a.rem_euclid(-b), 3.0);
+ /// assert_eq!((-a).rem_euclid(-b), 1.0);
+ /// // limitation due to round-off error
+ /// assert!((-f64::EPSILON).rem_euclid(3.0) != 0.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ pub fn rem_euclid(self, rhs: f64) -> f64 {
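+ // `%` keeps the sign of `self`; adding `rhs.abs()` moves a negative remainder
+ // into the expected `[0, rhs.abs())` range (up to rounding, as noted above).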
+ let r = self % rhs;
+ if r < 0.0 { r + rhs.abs() } else { r }
+ }
+
+ /// Raises a number to an integer power.
+ ///
+ /// Using this function is generally faster than using `powf`.
+ /// It might have a different sequence of rounding operations than `powf`,
+ /// so the results are not guaranteed to agree.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0_f64;
+ /// let abs_difference = (x.powi(2) - (x * x)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn powi(self, n: i32) -> f64 {
+ unsafe { intrinsics::powif64(self, n) }
+ }
+
+ /// Raises a number to a floating point power.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0_f64;
+ /// let abs_difference = (x.powf(2.0) - (x * x)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn powf(self, n: f64) -> f64 {
+ unsafe { intrinsics::powf64(self, n) }
+ }
+
+ /// Returns the square root of a number.
+ ///
+ /// Returns NaN if `self` is a negative number other than `-0.0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let positive = 4.0_f64;
+ /// let negative = -4.0_f64;
+ /// let negative_zero = -0.0_f64;
+ ///
+ /// let abs_difference = (positive.sqrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// assert!(negative.sqrt().is_nan());
+ /// assert!(negative_zero.sqrt() == negative_zero);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sqrt(self) -> f64 {
+ unsafe { intrinsics::sqrtf64(self) }
+ }
+
+ /// Returns `e^(self)` (the exponential function).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let one = 1.0_f64;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn exp(self) -> f64 {
+ unsafe { intrinsics::expf64(self) }
+ }
+
+ /// Returns `2^(self)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 2.0_f64;
+ ///
+ /// // 2^2 - 4 == 0
+ /// let abs_difference = (f.exp2() - 4.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn exp2(self) -> f64 {
+ unsafe { intrinsics::exp2f64(self) }
+ }
+
+ /// Returns the natural logarithm of the number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let one = 1.0_f64;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn ln(self) -> f64 {
+ self.log_wrapper(|n| unsafe { intrinsics::logf64(n) })
+ }
+
+ /// Returns the logarithm of the number with respect to an arbitrary base.
+ ///
+ /// The result might not be correctly rounded owing to implementation details;
+ /// `self.log2()` can produce more accurate results for base 2, and
+ /// `self.log10()` can produce more accurate results for base 10.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let twenty_five = 25.0_f64;
+ ///
+ /// // log5(25) - 2 == 0
+ /// let abs_difference = (twenty_five.log(5.0) - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn log(self, base: f64) -> f64 {
+ self.ln() / base.ln()
+ }
+
+ /// Returns the base 2 logarithm of the number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let four = 4.0_f64;
+ ///
+ /// // log2(4) - 2 == 0
+ /// let abs_difference = (four.log2() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn log2(self) -> f64 {
+ self.log_wrapper(|n| {
+ #[cfg(target_os = "android")]
+ return crate::sys::android::log2f64(n);
+ #[cfg(not(target_os = "android"))]
+ return unsafe { intrinsics::log2f64(n) };
+ })
+ }
+
+ /// Returns the base 10 logarithm of the number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let hundred = 100.0_f64;
+ ///
+ /// // log10(100) - 2 == 0
+ /// let abs_difference = (hundred.log10() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn log10(self) -> f64 {
+ self.log_wrapper(|n| unsafe { intrinsics::log10f64(n) })
+ }
+
+ /// The positive difference of two numbers.
+ ///
+ /// * If `self <= other`: `0.0`
+ /// * Else: `self - other`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 3.0_f64;
+ /// let y = -3.0_f64;
+ ///
+ /// let abs_difference_x = (x.abs_sub(1.0) - 2.0).abs();
+ /// let abs_difference_y = (y.abs_sub(1.0) - 0.0).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[deprecated(
+ since = "1.10.0",
+ note = "you probably meant `(self - other).abs()`: \
+ this operation is `(self - other).max(0.0)` \
+ except that `abs_sub` also propagates NaNs (also \
+ known as `fdim` in C). If you truly need the positive \
+ difference, consider using that expression or the C function \
+ `fdim`, depending on how you wish to handle NaN (please consider \
+ filing an issue describing your use-case too)."
+ )]
+ pub fn abs_sub(self, other: f64) -> f64 {
+ unsafe { cmath::fdim(self, other) }
+ }
+
+ /// Returns the cube root of a number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 8.0_f64;
+ ///
+ /// // x^(1/3) - 2 == 0
+ /// let abs_difference = (x.cbrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn cbrt(self) -> f64 {
+ unsafe { cmath::cbrt(self) }
+ }
+
+ /// Calculates the length of the hypotenuse of a right-angle triangle given
+ /// legs of length `x` and `y`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0_f64;
+ /// let y = 3.0_f64;
+ ///
+ /// // sqrt(x^2 + y^2)
+ /// let abs_difference = (x.hypot(y) - (x.powi(2) + y.powi(2)).sqrt()).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn hypot(self, other: f64) -> f64 {
+ unsafe { cmath::hypot(self, other) }
+ }
+
+ /// Computes the sine of a number (in radians).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = std::f64::consts::FRAC_PI_2;
+ ///
+ /// let abs_difference = (x.sin() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sin(self) -> f64 {
+ unsafe { intrinsics::sinf64(self) }
+ }
+
+ /// Computes the cosine of a number (in radians).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 2.0 * std::f64::consts::PI;
+ ///
+ /// let abs_difference = (x.cos() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn cos(self) -> f64 {
+ unsafe { intrinsics::cosf64(self) }
+ }
+
+ /// Computes the tangent of a number (in radians).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = std::f64::consts::FRAC_PI_4;
+ /// let abs_difference = (x.tan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-14);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn tan(self) -> f64 {
+ unsafe { cmath::tan(self) }
+ }
+
+ /// Computes the arcsine of a number. Return value is in radians in
+ /// the range [-pi/2, pi/2] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = std::f64::consts::FRAC_PI_2;
+ ///
+ /// // asin(sin(pi/2))
+ /// let abs_difference = (f.sin().asin() - std::f64::consts::FRAC_PI_2).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn asin(self) -> f64 {
+ unsafe { cmath::asin(self) }
+ }
+
+ /// Computes the arccosine of a number. Return value is in radians in
+ /// the range [0, pi] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = std::f64::consts::FRAC_PI_4;
+ ///
+ /// // acos(cos(pi/4))
+ /// let abs_difference = (f.cos().acos() - std::f64::consts::FRAC_PI_4).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn acos(self) -> f64 {
+ unsafe { cmath::acos(self) }
+ }
+
+ /// Computes the arctangent of a number. Return value is in radians in the
+ /// range [-pi/2, pi/2].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let f = 1.0_f64;
+ ///
+ /// // atan(tan(1))
+ /// let abs_difference = (f.tan().atan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn atan(self) -> f64 {
+ unsafe { cmath::atan(self) }
+ }
+
+ /// Computes the four quadrant arctangent of `self` (`y`) and `other` (`x`) in radians.
+ ///
+ /// * `x = 0`, `y = 0`: `0`
+ /// * `x >= 0`: `arctan(y/x)` -> `[-pi/2, pi/2]`
+ /// * `x < 0`, `y >= 0`: `arctan(y/x) + pi` -> `(pi/2, pi]`
+ /// * `x < 0`, `y < 0`: `arctan(y/x) - pi` -> `(-pi, -pi/2)`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Positive angles measured counter-clockwise
+ /// // from positive x axis
+ /// // -pi/4 radians (45 deg clockwise)
+ /// let x1 = 3.0_f64;
+ /// let y1 = -3.0_f64;
+ ///
+ /// // 3pi/4 radians (135 deg counter-clockwise)
+ /// let x2 = -3.0_f64;
+ /// let y2 = 3.0_f64;
+ ///
+ /// let abs_difference_1 = (y1.atan2(x1) - (-std::f64::consts::FRAC_PI_4)).abs();
+ /// let abs_difference_2 = (y2.atan2(x2) - (3.0 * std::f64::consts::FRAC_PI_4)).abs();
+ ///
+ /// assert!(abs_difference_1 < 1e-10);
+ /// assert!(abs_difference_2 < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn atan2(self, other: f64) -> f64 {
+ unsafe { cmath::atan2(self, other) }
+ }
+
+ /// Simultaneously computes the sine and cosine of the number, `x`. Returns
+ /// `(sin(x), cos(x))`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = std::f64::consts::FRAC_PI_4;
+ /// let f = x.sin_cos();
+ ///
+ /// let abs_difference_0 = (f.0 - x.sin()).abs();
+ /// let abs_difference_1 = (f.1 - x.cos()).abs();
+ ///
+ /// assert!(abs_difference_0 < 1e-10);
+ /// assert!(abs_difference_1 < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sin_cos(self) -> (f64, f64) {
+ (self.sin(), self.cos())
+ }
+
+ /// Returns `e^(self) - 1` in a way that is accurate even if the
+ /// number is close to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1e-16_f64;
+ ///
+ /// // for very small x, e^x is approximately 1 + x + x^2 / 2
+ /// let approx = x + x * x / 2.0;
+ /// let abs_difference = (x.exp_m1() - approx).abs();
+ ///
+ /// assert!(abs_difference < 1e-20);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn exp_m1(self) -> f64 {
+ unsafe { cmath::expm1(self) }
+ }
+
+ /// Returns `ln(1+n)` (natural logarithm) more accurately than if
+ /// the operations were performed separately.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1e-16_f64;
+ ///
+ /// // for very small x, ln(1 + x) is approximately x - x^2 / 2
+ /// let approx = x - x * x / 2.0;
+ /// let abs_difference = (x.ln_1p() - approx).abs();
+ ///
+ /// assert!(abs_difference < 1e-20);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn ln_1p(self) -> f64 {
+ unsafe { cmath::log1p(self) }
+ }
+
+ /// Hyperbolic sine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f64::consts::E;
+ /// let x = 1.0_f64;
+ ///
+ /// let f = x.sinh();
+ /// // Solving sinh() at 1 gives `(e^2-1)/(2e)`
+ /// let g = ((e * e) - 1.0) / (2.0 * e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sinh(self) -> f64 {
+ unsafe { cmath::sinh(self) }
+ }
+
+ /// Hyperbolic cosine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f64::consts::E;
+ /// let x = 1.0_f64;
+ /// let f = x.cosh();
+ /// // Solving cosh() at 1 gives `(e^2 + 1)/(2e)`
+ /// let g = ((e * e) + 1.0) / (2.0 * e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// // The two results agree to within 1e-10
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn cosh(self) -> f64 {
+ unsafe { cmath::cosh(self) }
+ }
+
+ /// Hyperbolic tangent function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f64::consts::E;
+ /// let x = 1.0_f64;
+ ///
+ /// let f = x.tanh();
+ /// // Solving tanh() at 1 gives `(1 - e^(-2))/(1 + e^(-2))`
+ /// let g = (1.0 - e.powi(-2)) / (1.0 + e.powi(-2));
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn tanh(self) -> f64 {
+ unsafe { cmath::tanh(self) }
+ }
+
+ /// Inverse hyperbolic sine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1.0_f64;
+ /// let f = x.sinh().asinh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn asinh(self) -> f64 {
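+ // Same identity as the `f32` version: asinh(x) = ln(x + sqrt(x^2 + 1)),
+ // evaluated on |x| with the sign restored by `copysign`.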
+ (self.abs() + ((self * self) + 1.0).sqrt()).ln().copysign(self)
+ }
+
+ /// Inverse hyperbolic cosine function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 1.0_f64;
+ /// let f = x.cosh().acosh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn acosh(self) -> f64 {
+ if self < 1.0 { Self::NAN } else { (self + ((self * self) - 1.0).sqrt()).ln() }
+ }
+
+ /// Inverse hyperbolic tangent function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let e = std::f64::consts::E;
+ /// let f = e.tanh().atanh();
+ ///
+ /// let abs_difference = (f - e).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn atanh(self) -> f64 {
+ 0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
+ }
+
+ // Solaris/Illumos requires a wrapper around log, log2, and log10 functions
+ // because of their non-standard behavior (e.g., log(-n) returns -Inf instead
+ // of the expected NaN).
+ #[rustc_allow_incoherent_impl]
+ fn log_wrapper<F: Fn(f64) -> f64>(self, log_fn: F) -> f64 {
+ if !cfg!(any(target_os = "solaris", target_os = "illumos")) {
+ log_fn(self)
+ } else if self.is_finite() {
+ if self > 0.0 {
+ log_fn(self)
+ } else if self == 0.0 {
+ Self::NEG_INFINITY // log(0) = -Inf
+ } else {
+ Self::NAN // log(-n) = NaN
+ }
+ } else if self.is_nan() {
+ self // log(NaN) = NaN
+ } else if self > 0.0 {
+ self // log(Inf) = Inf
+ } else {
+ Self::NAN // log(-Inf) = NaN
+ }
+ }
+}
diff --git a/library/std/src/f64/tests.rs b/library/std/src/f64/tests.rs
new file mode 100644
index 000000000..5c163cfe9
--- /dev/null
+++ b/library/std/src/f64/tests.rs
@@ -0,0 +1,755 @@
+use crate::f64::consts;
+use crate::num::FpCategory as Fp;
+use crate::num::*;
+
+#[test]
+fn test_num_f64() {
+ test_num(10f64, 2f64);
+}
+
+#[test]
+fn test_min_nan() {
+ assert_eq!(f64::NAN.min(2.0), 2.0);
+ assert_eq!(2.0f64.min(f64::NAN), 2.0);
+}
+
+#[test]
+fn test_max_nan() {
+ assert_eq!(f64::NAN.max(2.0), 2.0);
+ assert_eq!(2.0f64.max(f64::NAN), 2.0);
+}
+
+#[test]
+fn test_nan() {
+ let nan: f64 = f64::NAN;
+ assert!(nan.is_nan());
+ assert!(!nan.is_infinite());
+ assert!(!nan.is_finite());
+ assert!(!nan.is_normal());
+ assert!(nan.is_sign_positive());
+ assert!(!nan.is_sign_negative());
+ assert_eq!(Fp::Nan, nan.classify());
+}
+
+#[test]
+fn test_infinity() {
+ let inf: f64 = f64::INFINITY;
+ assert!(inf.is_infinite());
+ assert!(!inf.is_finite());
+ assert!(inf.is_sign_positive());
+ assert!(!inf.is_sign_negative());
+ assert!(!inf.is_nan());
+ assert!(!inf.is_normal());
+ assert_eq!(Fp::Infinite, inf.classify());
+}
+
+#[test]
+fn test_neg_infinity() {
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert!(neg_inf.is_infinite());
+ assert!(!neg_inf.is_finite());
+ assert!(!neg_inf.is_sign_positive());
+ assert!(neg_inf.is_sign_negative());
+ assert!(!neg_inf.is_nan());
+ assert!(!neg_inf.is_normal());
+ assert_eq!(Fp::Infinite, neg_inf.classify());
+}
+
+#[test]
+fn test_zero() {
+ let zero: f64 = 0.0f64;
+ assert_eq!(0.0, zero);
+ assert!(!zero.is_infinite());
+ assert!(zero.is_finite());
+ assert!(zero.is_sign_positive());
+ assert!(!zero.is_sign_negative());
+ assert!(!zero.is_nan());
+ assert!(!zero.is_normal());
+ assert_eq!(Fp::Zero, zero.classify());
+}
+
+#[test]
+fn test_neg_zero() {
+ let neg_zero: f64 = -0.0;
+ assert_eq!(0.0, neg_zero);
+ assert!(!neg_zero.is_infinite());
+ assert!(neg_zero.is_finite());
+ assert!(!neg_zero.is_sign_positive());
+ assert!(neg_zero.is_sign_negative());
+ assert!(!neg_zero.is_nan());
+ assert!(!neg_zero.is_normal());
+ assert_eq!(Fp::Zero, neg_zero.classify());
+}
+
+#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
+#[test]
+fn test_one() {
+ let one: f64 = 1.0f64;
+ assert_eq!(1.0, one);
+ assert!(!one.is_infinite());
+ assert!(one.is_finite());
+ assert!(one.is_sign_positive());
+ assert!(!one.is_sign_negative());
+ assert!(!one.is_nan());
+ assert!(one.is_normal());
+ assert_eq!(Fp::Normal, one.classify());
+}
+
+#[test]
+fn test_is_nan() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert!(nan.is_nan());
+ assert!(!0.0f64.is_nan());
+ assert!(!5.3f64.is_nan());
+ assert!(!(-10.732f64).is_nan());
+ assert!(!inf.is_nan());
+ assert!(!neg_inf.is_nan());
+}
+
+#[test]
+fn test_is_infinite() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert!(!nan.is_infinite());
+ assert!(inf.is_infinite());
+ assert!(neg_inf.is_infinite());
+ assert!(!0.0f64.is_infinite());
+ assert!(!42.8f64.is_infinite());
+ assert!(!(-109.2f64).is_infinite());
+}
+
+#[test]
+fn test_is_finite() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert!(!nan.is_finite());
+ assert!(!inf.is_finite());
+ assert!(!neg_inf.is_finite());
+ assert!(0.0f64.is_finite());
+ assert!(42.8f64.is_finite());
+ assert!((-109.2f64).is_finite());
+}
+
+#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
+#[test]
+fn test_is_normal() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ let zero: f64 = 0.0f64;
+ let neg_zero: f64 = -0.0;
+ assert!(!nan.is_normal());
+ assert!(!inf.is_normal());
+ assert!(!neg_inf.is_normal());
+ assert!(!zero.is_normal());
+ assert!(!neg_zero.is_normal());
+ assert!(1f64.is_normal());
+ assert!(1e-307f64.is_normal());
+ assert!(!1e-308f64.is_normal());
+}
+
+#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
+#[test]
+fn test_classify() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ let zero: f64 = 0.0f64;
+ let neg_zero: f64 = -0.0;
+ assert_eq!(nan.classify(), Fp::Nan);
+ assert_eq!(inf.classify(), Fp::Infinite);
+ assert_eq!(neg_inf.classify(), Fp::Infinite);
+ assert_eq!(zero.classify(), Fp::Zero);
+ assert_eq!(neg_zero.classify(), Fp::Zero);
+ assert_eq!(1e-307f64.classify(), Fp::Normal);
+ assert_eq!(1e-308f64.classify(), Fp::Subnormal);
+}
+
+#[test]
+fn test_floor() {
+ assert_approx_eq!(1.0f64.floor(), 1.0f64);
+ assert_approx_eq!(1.3f64.floor(), 1.0f64);
+ assert_approx_eq!(1.5f64.floor(), 1.0f64);
+ assert_approx_eq!(1.7f64.floor(), 1.0f64);
+ assert_approx_eq!(0.0f64.floor(), 0.0f64);
+ assert_approx_eq!((-0.0f64).floor(), -0.0f64);
+ assert_approx_eq!((-1.0f64).floor(), -1.0f64);
+ assert_approx_eq!((-1.3f64).floor(), -2.0f64);
+ assert_approx_eq!((-1.5f64).floor(), -2.0f64);
+ assert_approx_eq!((-1.7f64).floor(), -2.0f64);
+}
+
+#[test]
+fn test_ceil() {
+ assert_approx_eq!(1.0f64.ceil(), 1.0f64);
+ assert_approx_eq!(1.3f64.ceil(), 2.0f64);
+ assert_approx_eq!(1.5f64.ceil(), 2.0f64);
+ assert_approx_eq!(1.7f64.ceil(), 2.0f64);
+ assert_approx_eq!(0.0f64.ceil(), 0.0f64);
+ assert_approx_eq!((-0.0f64).ceil(), -0.0f64);
+ assert_approx_eq!((-1.0f64).ceil(), -1.0f64);
+ assert_approx_eq!((-1.3f64).ceil(), -1.0f64);
+ assert_approx_eq!((-1.5f64).ceil(), -1.0f64);
+ assert_approx_eq!((-1.7f64).ceil(), -1.0f64);
+}
+
+#[test]
+fn test_round() {
+ assert_approx_eq!(1.0f64.round(), 1.0f64);
+ assert_approx_eq!(1.3f64.round(), 1.0f64);
+ assert_approx_eq!(1.5f64.round(), 2.0f64);
+ assert_approx_eq!(1.7f64.round(), 2.0f64);
+ assert_approx_eq!(0.0f64.round(), 0.0f64);
+ assert_approx_eq!((-0.0f64).round(), -0.0f64);
+ assert_approx_eq!((-1.0f64).round(), -1.0f64);
+ assert_approx_eq!((-1.3f64).round(), -1.0f64);
+ assert_approx_eq!((-1.5f64).round(), -2.0f64);
+ assert_approx_eq!((-1.7f64).round(), -2.0f64);
+}
+
+#[test]
+fn test_trunc() {
+ assert_approx_eq!(1.0f64.trunc(), 1.0f64);
+ assert_approx_eq!(1.3f64.trunc(), 1.0f64);
+ assert_approx_eq!(1.5f64.trunc(), 1.0f64);
+ assert_approx_eq!(1.7f64.trunc(), 1.0f64);
+ assert_approx_eq!(0.0f64.trunc(), 0.0f64);
+ assert_approx_eq!((-0.0f64).trunc(), -0.0f64);
+ assert_approx_eq!((-1.0f64).trunc(), -1.0f64);
+ assert_approx_eq!((-1.3f64).trunc(), -1.0f64);
+ assert_approx_eq!((-1.5f64).trunc(), -1.0f64);
+ assert_approx_eq!((-1.7f64).trunc(), -1.0f64);
+}
+
+#[test]
+fn test_fract() {
+ assert_approx_eq!(1.0f64.fract(), 0.0f64);
+ assert_approx_eq!(1.3f64.fract(), 0.3f64);
+ assert_approx_eq!(1.5f64.fract(), 0.5f64);
+ assert_approx_eq!(1.7f64.fract(), 0.7f64);
+ assert_approx_eq!(0.0f64.fract(), 0.0f64);
+ assert_approx_eq!((-0.0f64).fract(), -0.0f64);
+ assert_approx_eq!((-1.0f64).fract(), -0.0f64);
+ assert_approx_eq!((-1.3f64).fract(), -0.3f64);
+ assert_approx_eq!((-1.5f64).fract(), -0.5f64);
+ assert_approx_eq!((-1.7f64).fract(), -0.7f64);
+}
+
+#[test]
+fn test_abs() {
+ assert_eq!(f64::INFINITY.abs(), f64::INFINITY);
+ assert_eq!(1f64.abs(), 1f64);
+ assert_eq!(0f64.abs(), 0f64);
+ assert_eq!((-0f64).abs(), 0f64);
+ assert_eq!((-1f64).abs(), 1f64);
+ assert_eq!(f64::NEG_INFINITY.abs(), f64::INFINITY);
+ assert_eq!((1f64 / f64::NEG_INFINITY).abs(), 0f64);
+ assert!(f64::NAN.abs().is_nan());
+}
+
+#[test]
+fn test_signum() {
+ assert_eq!(f64::INFINITY.signum(), 1f64);
+ assert_eq!(1f64.signum(), 1f64);
+ assert_eq!(0f64.signum(), 1f64);
+ assert_eq!((-0f64).signum(), -1f64);
+ assert_eq!((-1f64).signum(), -1f64);
+ assert_eq!(f64::NEG_INFINITY.signum(), -1f64);
+ assert_eq!((1f64 / f64::NEG_INFINITY).signum(), -1f64);
+ assert!(f64::NAN.signum().is_nan());
+}
+
+#[test]
+fn test_is_sign_positive() {
+ assert!(f64::INFINITY.is_sign_positive());
+ assert!(1f64.is_sign_positive());
+ assert!(0f64.is_sign_positive());
+ assert!(!(-0f64).is_sign_positive());
+ assert!(!(-1f64).is_sign_positive());
+ assert!(!f64::NEG_INFINITY.is_sign_positive());
+ assert!(!(1f64 / f64::NEG_INFINITY).is_sign_positive());
+ assert!(f64::NAN.is_sign_positive());
+ assert!(!(-f64::NAN).is_sign_positive());
+}
+
+#[test]
+fn test_is_sign_negative() {
+ assert!(!f64::INFINITY.is_sign_negative());
+ assert!(!1f64.is_sign_negative());
+ assert!(!0f64.is_sign_negative());
+ assert!((-0f64).is_sign_negative());
+ assert!((-1f64).is_sign_negative());
+ assert!(f64::NEG_INFINITY.is_sign_negative());
+ assert!((1f64 / f64::NEG_INFINITY).is_sign_negative());
+ assert!(!f64::NAN.is_sign_negative());
+ assert!((-f64::NAN).is_sign_negative());
+}
+
+#[test]
+fn test_mul_add() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_approx_eq!(12.3f64.mul_add(4.5, 6.7), 62.05);
+ assert_approx_eq!((-12.3f64).mul_add(-4.5, -6.7), 48.65);
+ assert_approx_eq!(0.0f64.mul_add(8.9, 1.2), 1.2);
+ assert_approx_eq!(3.4f64.mul_add(-0.0, 5.6), 5.6);
+ assert!(nan.mul_add(7.8, 9.0).is_nan());
+ assert_eq!(inf.mul_add(7.8, 9.0), inf);
+ assert_eq!(neg_inf.mul_add(7.8, 9.0), neg_inf);
+ assert_eq!(8.9f64.mul_add(inf, 3.2), inf);
+ assert_eq!((-3.2f64).mul_add(2.4, neg_inf), neg_inf);
+}
+
+#[test]
+fn test_recip() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_eq!(1.0f64.recip(), 1.0);
+ assert_eq!(2.0f64.recip(), 0.5);
+ assert_eq!((-0.4f64).recip(), -2.5);
+ assert_eq!(0.0f64.recip(), inf);
+ assert!(nan.recip().is_nan());
+ assert_eq!(inf.recip(), 0.0);
+ assert_eq!(neg_inf.recip(), 0.0);
+}
+
+#[test]
+fn test_powi() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_eq!(1.0f64.powi(1), 1.0);
+ assert_approx_eq!((-3.1f64).powi(2), 9.61);
+ assert_approx_eq!(5.9f64.powi(-2), 0.028727);
+ assert_eq!(8.3f64.powi(0), 1.0);
+ assert!(nan.powi(2).is_nan());
+ assert_eq!(inf.powi(3), inf);
+ assert_eq!(neg_inf.powi(2), inf);
+}
+
+#[test]
+fn test_powf() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_eq!(1.0f64.powf(1.0), 1.0);
+ assert_approx_eq!(3.4f64.powf(4.5), 246.408183);
+ assert_approx_eq!(2.7f64.powf(-3.2), 0.041652);
+ assert_approx_eq!((-3.1f64).powf(2.0), 9.61);
+ assert_approx_eq!(5.9f64.powf(-2.0), 0.028727);
+ assert_eq!(8.3f64.powf(0.0), 1.0);
+ assert!(nan.powf(2.0).is_nan());
+ assert_eq!(inf.powf(2.0), inf);
+ assert_eq!(neg_inf.powf(3.0), neg_inf);
+}
+
+#[test]
+fn test_sqrt_domain() {
+ assert!(f64::NAN.sqrt().is_nan());
+ assert!(f64::NEG_INFINITY.sqrt().is_nan());
+ assert!((-1.0f64).sqrt().is_nan());
+ assert_eq!((-0.0f64).sqrt(), -0.0);
+ assert_eq!(0.0f64.sqrt(), 0.0);
+ assert_eq!(1.0f64.sqrt(), 1.0);
+ assert_eq!(f64::INFINITY.sqrt(), f64::INFINITY);
+}
+
+#[test]
+fn test_exp() {
+ assert_eq!(1.0, 0.0f64.exp());
+ assert_approx_eq!(2.718282, 1.0f64.exp());
+ assert_approx_eq!(148.413159, 5.0f64.exp());
+
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ let nan: f64 = f64::NAN;
+ assert_eq!(inf, inf.exp());
+ assert_eq!(0.0, neg_inf.exp());
+ assert!(nan.exp().is_nan());
+}
+
+#[test]
+fn test_exp2() {
+ assert_eq!(32.0, 5.0f64.exp2());
+ assert_eq!(1.0, 0.0f64.exp2());
+
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ let nan: f64 = f64::NAN;
+ assert_eq!(inf, inf.exp2());
+ assert_eq!(0.0, neg_inf.exp2());
+ assert!(nan.exp2().is_nan());
+}
+
+#[test]
+fn test_ln() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_approx_eq!(1.0f64.exp().ln(), 1.0);
+ assert!(nan.ln().is_nan());
+ assert_eq!(inf.ln(), inf);
+ assert!(neg_inf.ln().is_nan());
+ assert!((-2.3f64).ln().is_nan());
+ assert_eq!((-0.0f64).ln(), neg_inf);
+ assert_eq!(0.0f64.ln(), neg_inf);
+ assert_approx_eq!(4.0f64.ln(), 1.386294);
+}
+
+#[test]
+fn test_log() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_eq!(10.0f64.log(10.0), 1.0);
+ assert_approx_eq!(2.3f64.log(3.5), 0.664858);
+ assert_eq!(1.0f64.exp().log(1.0f64.exp()), 1.0);
+ assert!(1.0f64.log(1.0).is_nan());
+ assert!(1.0f64.log(-13.9).is_nan());
+ assert!(nan.log(2.3).is_nan());
+ assert_eq!(inf.log(10.0), inf);
+ assert!(neg_inf.log(8.8).is_nan());
+ assert!((-2.3f64).log(0.1).is_nan());
+ assert_eq!((-0.0f64).log(2.0), neg_inf);
+ assert_eq!(0.0f64.log(7.0), neg_inf);
+}
+
+#[test]
+fn test_log2() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_approx_eq!(10.0f64.log2(), 3.321928);
+ assert_approx_eq!(2.3f64.log2(), 1.201634);
+ assert_approx_eq!(1.0f64.exp().log2(), 1.442695);
+ assert!(nan.log2().is_nan());
+ assert_eq!(inf.log2(), inf);
+ assert!(neg_inf.log2().is_nan());
+ assert!((-2.3f64).log2().is_nan());
+ assert_eq!((-0.0f64).log2(), neg_inf);
+ assert_eq!(0.0f64.log2(), neg_inf);
+}
+
+#[test]
+fn test_log10() {
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_eq!(10.0f64.log10(), 1.0);
+ assert_approx_eq!(2.3f64.log10(), 0.361728);
+ assert_approx_eq!(1.0f64.exp().log10(), 0.434294);
+ assert_eq!(1.0f64.log10(), 0.0);
+ assert!(nan.log10().is_nan());
+ assert_eq!(inf.log10(), inf);
+ assert!(neg_inf.log10().is_nan());
+ assert!((-2.3f64).log10().is_nan());
+ assert_eq!((-0.0f64).log10(), neg_inf);
+ assert_eq!(0.0f64.log10(), neg_inf);
+}
+
+#[test]
+fn test_to_degrees() {
+ let pi: f64 = consts::PI;
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_eq!(0.0f64.to_degrees(), 0.0);
+ assert_approx_eq!((-5.8f64).to_degrees(), -332.315521);
+ assert_eq!(pi.to_degrees(), 180.0);
+ assert!(nan.to_degrees().is_nan());
+ assert_eq!(inf.to_degrees(), inf);
+ assert_eq!(neg_inf.to_degrees(), neg_inf);
+}
+
+#[test]
+fn test_to_radians() {
+ let pi: f64 = consts::PI;
+ let nan: f64 = f64::NAN;
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ assert_eq!(0.0f64.to_radians(), 0.0);
+ assert_approx_eq!(154.6f64.to_radians(), 2.698279);
+ assert_approx_eq!((-332.31f64).to_radians(), -5.799903);
+ assert_eq!(180.0f64.to_radians(), pi);
+ assert!(nan.to_radians().is_nan());
+ assert_eq!(inf.to_radians(), inf);
+ assert_eq!(neg_inf.to_radians(), neg_inf);
+}
+
+#[test]
+fn test_asinh() {
+ assert_eq!(0.0f64.asinh(), 0.0f64);
+ assert_eq!((-0.0f64).asinh(), -0.0f64);
+
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ let nan: f64 = f64::NAN;
+ assert_eq!(inf.asinh(), inf);
+ assert_eq!(neg_inf.asinh(), neg_inf);
+ assert!(nan.asinh().is_nan());
+ assert!((-0.0f64).asinh().is_sign_negative());
+ // issue 63271
+ assert_approx_eq!(2.0f64.asinh(), 1.443635475178810342493276740273105f64);
+ assert_approx_eq!((-2.0f64).asinh(), -1.443635475178810342493276740273105f64);
+ // regression test for the catastrophic cancellation fixed in 72486
+ assert_approx_eq!((-67452098.07139316f64).asinh(), -18.72007542627454439398548429400083);
+}
+
+#[test]
+fn test_acosh() {
+ assert_eq!(1.0f64.acosh(), 0.0f64);
+ assert!(0.999f64.acosh().is_nan());
+
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ let nan: f64 = f64::NAN;
+ assert_eq!(inf.acosh(), inf);
+ assert!(neg_inf.acosh().is_nan());
+ assert!(nan.acosh().is_nan());
+ assert_approx_eq!(2.0f64.acosh(), 1.31695789692481670862504634730796844f64);
+ assert_approx_eq!(3.0f64.acosh(), 1.76274717403908605046521864995958461f64);
+}
+
+#[test]
+fn test_atanh() {
+ assert_eq!(0.0f64.atanh(), 0.0f64);
+ assert_eq!((-0.0f64).atanh(), -0.0f64);
+
+ let inf: f64 = f64::INFINITY;
+ let neg_inf: f64 = f64::NEG_INFINITY;
+ let nan: f64 = f64::NAN;
+ assert_eq!(1.0f64.atanh(), inf);
+ assert_eq!((-1.0f64).atanh(), neg_inf);
+ assert!(2f64.atanh().atanh().is_nan());
+ assert!((-2f64).atanh().atanh().is_nan());
+ assert!(inf.atanh().is_nan());
+ assert!(neg_inf.atanh().is_nan());
+ assert!(nan.atanh().is_nan());
+ assert_approx_eq!(0.5f64.atanh(), 0.54930614433405484569762261846126285f64);
+ assert_approx_eq!((-0.5f64).atanh(), -0.54930614433405484569762261846126285f64);
+}
+
+#[test]
+fn test_real_consts() {
+ use super::consts;
+ let pi: f64 = consts::PI;
+ let frac_pi_2: f64 = consts::FRAC_PI_2;
+ let frac_pi_3: f64 = consts::FRAC_PI_3;
+ let frac_pi_4: f64 = consts::FRAC_PI_4;
+ let frac_pi_6: f64 = consts::FRAC_PI_6;
+ let frac_pi_8: f64 = consts::FRAC_PI_8;
+ let frac_1_pi: f64 = consts::FRAC_1_PI;
+ let frac_2_pi: f64 = consts::FRAC_2_PI;
+ let frac_2_sqrtpi: f64 = consts::FRAC_2_SQRT_PI;
+ let sqrt2: f64 = consts::SQRT_2;
+ let frac_1_sqrt2: f64 = consts::FRAC_1_SQRT_2;
+ let e: f64 = consts::E;
+ let log2_e: f64 = consts::LOG2_E;
+ let log10_e: f64 = consts::LOG10_E;
+ let ln_2: f64 = consts::LN_2;
+ let ln_10: f64 = consts::LN_10;
+
+ assert_approx_eq!(frac_pi_2, pi / 2f64);
+ assert_approx_eq!(frac_pi_3, pi / 3f64);
+ assert_approx_eq!(frac_pi_4, pi / 4f64);
+ assert_approx_eq!(frac_pi_6, pi / 6f64);
+ assert_approx_eq!(frac_pi_8, pi / 8f64);
+ assert_approx_eq!(frac_1_pi, 1f64 / pi);
+ assert_approx_eq!(frac_2_pi, 2f64 / pi);
+ assert_approx_eq!(frac_2_sqrtpi, 2f64 / pi.sqrt());
+ assert_approx_eq!(sqrt2, 2f64.sqrt());
+ assert_approx_eq!(frac_1_sqrt2, 1f64 / 2f64.sqrt());
+ assert_approx_eq!(log2_e, e.log2());
+ assert_approx_eq!(log10_e, e.log10());
+ assert_approx_eq!(ln_2, 2f64.ln());
+ assert_approx_eq!(ln_10, 10f64.ln());
+}
+
+#[test]
+fn test_float_bits_conv() {
+ assert_eq!((1f64).to_bits(), 0x3ff0000000000000);
+ assert_eq!((12.5f64).to_bits(), 0x4029000000000000);
+ assert_eq!((1337f64).to_bits(), 0x4094e40000000000);
+ assert_eq!((-14.25f64).to_bits(), 0xc02c800000000000);
+ assert_approx_eq!(f64::from_bits(0x3ff0000000000000), 1.0);
+ assert_approx_eq!(f64::from_bits(0x4029000000000000), 12.5);
+ assert_approx_eq!(f64::from_bits(0x4094e40000000000), 1337.0);
+ assert_approx_eq!(f64::from_bits(0xc02c800000000000), -14.25);
+
+ // Check that NaNs roundtrip their bits regardless of signaling-ness
+ // 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobber all the mantissa bits
+ let masked_nan1 = f64::NAN.to_bits() ^ 0x000A_AAAA_AAAA_AAAA;
+ let masked_nan2 = f64::NAN.to_bits() ^ 0x0005_5555_5555_5555;
+ assert!(f64::from_bits(masked_nan1).is_nan());
+ assert!(f64::from_bits(masked_nan2).is_nan());
+
+ assert_eq!(f64::from_bits(masked_nan1).to_bits(), masked_nan1);
+ assert_eq!(f64::from_bits(masked_nan2).to_bits(), masked_nan2);
+}
+
+#[test]
+#[should_panic]
+fn test_clamp_min_greater_than_max() {
+ let _ = 1.0f64.clamp(3.0, 1.0);
+}
+
+#[test]
+#[should_panic]
+fn test_clamp_min_is_nan() {
+ let _ = 1.0f64.clamp(f64::NAN, 1.0);
+}
+
+#[test]
+#[should_panic]
+fn test_clamp_max_is_nan() {
+ let _ = 1.0f64.clamp(3.0, f64::NAN);
+}
+
+#[test]
+fn test_total_cmp() {
+ use core::cmp::Ordering;
+
+ fn quiet_bit_mask() -> u64 {
+ 1 << (f64::MANTISSA_DIGITS - 2)
+ }
+
+ fn min_subnorm() -> f64 {
+ f64::MIN_POSITIVE / f64::powf(2.0, f64::MANTISSA_DIGITS as f64 - 1.0)
+ }
+
+ fn max_subnorm() -> f64 {
+ f64::MIN_POSITIVE - min_subnorm()
+ }
+
+ fn q_nan() -> f64 {
+ f64::from_bits(f64::NAN.to_bits() | quiet_bit_mask())
+ }
+
+ fn s_nan() -> f64 {
+ f64::from_bits((f64::NAN.to_bits() & !quiet_bit_mask()) + 42)
+ }
+
+ assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
+ assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Equal, (-f64::INFINITY).total_cmp(&-f64::INFINITY));
+ assert_eq!(Ordering::Equal, (-f64::MAX).total_cmp(&-f64::MAX));
+ assert_eq!(Ordering::Equal, (-2.5_f64).total_cmp(&-2.5));
+ assert_eq!(Ordering::Equal, (-1.0_f64).total_cmp(&-1.0));
+ assert_eq!(Ordering::Equal, (-1.5_f64).total_cmp(&-1.5));
+ assert_eq!(Ordering::Equal, (-0.5_f64).total_cmp(&-0.5));
+ assert_eq!(Ordering::Equal, (-f64::MIN_POSITIVE).total_cmp(&-f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Equal, (-0.0_f64).total_cmp(&-0.0));
+ assert_eq!(Ordering::Equal, 0.0_f64.total_cmp(&0.0));
+ assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Equal, f64::MIN_POSITIVE.total_cmp(&f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Equal, 0.5_f64.total_cmp(&0.5));
+ assert_eq!(Ordering::Equal, 1.0_f64.total_cmp(&1.0));
+ assert_eq!(Ordering::Equal, 1.5_f64.total_cmp(&1.5));
+ assert_eq!(Ordering::Equal, 2.5_f64.total_cmp(&2.5));
+ assert_eq!(Ordering::Equal, f64::MAX.total_cmp(&f64::MAX));
+ assert_eq!(Ordering::Equal, f64::INFINITY.total_cmp(&f64::INFINITY));
+ assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
+ assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));
+
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::INFINITY));
+ assert_eq!(Ordering::Less, (-f64::INFINITY).total_cmp(&-f64::MAX));
+ assert_eq!(Ordering::Less, (-f64::MAX).total_cmp(&-2.5));
+ assert_eq!(Ordering::Less, (-2.5_f64).total_cmp(&-1.5));
+ assert_eq!(Ordering::Less, (-1.5_f64).total_cmp(&-1.0));
+ assert_eq!(Ordering::Less, (-1.0_f64).total_cmp(&-0.5));
+ assert_eq!(Ordering::Less, (-0.5_f64).total_cmp(&-f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-f64::MIN_POSITIVE).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
+ assert_eq!(Ordering::Less, (-0.0_f64).total_cmp(&0.0));
+ assert_eq!(Ordering::Less, 0.0_f64.total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, f64::MIN_POSITIVE.total_cmp(&0.5));
+ assert_eq!(Ordering::Less, 0.5_f64.total_cmp(&1.0));
+ assert_eq!(Ordering::Less, 1.0_f64.total_cmp(&1.5));
+ assert_eq!(Ordering::Less, 1.5_f64.total_cmp(&2.5));
+ assert_eq!(Ordering::Less, 2.5_f64.total_cmp(&f64::MAX));
+ assert_eq!(Ordering::Less, f64::MAX.total_cmp(&f64::INFINITY));
+ assert_eq!(Ordering::Less, f64::INFINITY.total_cmp(&s_nan()));
+ assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));
+
+ assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
+ assert_eq!(Ordering::Greater, (-f64::INFINITY).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Greater, (-f64::MAX).total_cmp(&-f64::INFINITY));
+ assert_eq!(Ordering::Greater, (-2.5_f64).total_cmp(&-f64::MAX));
+ assert_eq!(Ordering::Greater, (-1.5_f64).total_cmp(&-2.5));
+ assert_eq!(Ordering::Greater, (-1.0_f64).total_cmp(&-1.5));
+ assert_eq!(Ordering::Greater, (-0.5_f64).total_cmp(&-1.0));
+ assert_eq!(Ordering::Greater, (-f64::MIN_POSITIVE).total_cmp(&-0.5));
+ assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Greater, (-0.0_f64).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Greater, 0.0_f64.total_cmp(&-0.0));
+ assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
+ assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Greater, f64::MIN_POSITIVE.total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Greater, 0.5_f64.total_cmp(&f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Greater, 1.0_f64.total_cmp(&0.5));
+ assert_eq!(Ordering::Greater, 1.5_f64.total_cmp(&1.0));
+ assert_eq!(Ordering::Greater, 2.5_f64.total_cmp(&1.5));
+ assert_eq!(Ordering::Greater, f64::MAX.total_cmp(&2.5));
+ assert_eq!(Ordering::Greater, f64::INFINITY.total_cmp(&f64::MAX));
+ assert_eq!(Ordering::Greater, s_nan().total_cmp(&f64::INFINITY));
+ assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));
+
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f64::INFINITY));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f64::MAX));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f64::MAX));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f64::INFINITY));
+ assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));
+
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::INFINITY));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::MAX));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::MIN_POSITIVE));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::MAX));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::INFINITY));
+ assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
+}
diff --git a/library/std/src/ffi/mod.rs b/library/std/src/ffi/mod.rs
new file mode 100644
index 000000000..d987bf69b
--- /dev/null
+++ b/library/std/src/ffi/mod.rs
@@ -0,0 +1,174 @@
+//! Utilities related to FFI bindings.
+//!
+//! This module provides utilities to handle data across non-Rust
+//! interfaces, like other programming languages and the underlying
+//! operating system. It is mainly of use for FFI (Foreign Function
+//! Interface) bindings and code that needs to exchange C-like strings
+//! with other languages.
+//!
+//! # Overview
+//!
+//! Rust represents owned strings with the [`String`] type, and
+//! borrowed slices of strings with the [`str`] primitive. Both are
+//! always in UTF-8 encoding, and may contain nul bytes in the middle,
+//! i.e., if you look at the bytes that make up the string, there may
+//! be a `\0` among them. Both `String` and `str` store their length
+//! explicitly; there are no nul terminators at the end of strings
+//! like in C.
+//!
+//! C strings are different from Rust strings:
+//!
+//! * **Encodings** - Rust strings are UTF-8, but C strings may use
+//! other encodings. If you are using a string from C, you should
+//! check its encoding explicitly, rather than just assuming that it
+//! is UTF-8 like you can do in Rust.
+//!
+//! * **Character size** - C strings may use `char` or `wchar_t`-sized
+//! characters; please **note** that C's `char` is different from Rust's.
+//! The C standard leaves the actual sizes of those types open to
+//! interpretation, but defines different APIs for strings made up of
+//! each character type. Rust strings are always UTF-8, so different
+//! Unicode characters will be encoded in a variable number of bytes
+//! each. The Rust type [`char`] represents a '[Unicode scalar
+//! value]', which is similar to, but not the same as, a '[Unicode
+//! code point]'.
+//!
+//! * **Nul terminators and implicit string lengths** - Often, C
+//! strings are nul-terminated, i.e., they have a `\0` character at the
+//! end. The length of a string buffer is not stored, but has to be
+//! calculated; to compute the length of a string, C code must
+//! manually call a function like `strlen()` for `char`-based strings,
+//! or `wcslen()` for `wchar_t`-based ones. Those functions return
+//! the number of characters in the string excluding the nul
+//! terminator, so the buffer length is really `len+1` characters.
+//! Rust strings don't have a nul terminator; their length is always
+//! stored and does not need to be calculated. While in Rust
+//! accessing a string's length is an *O*(1) operation (because the
+//! length is stored), in C it is an *O*(*n*) operation because the
+//! length needs to be computed by scanning the string for the nul
+//! terminator.
+//!
+//! * **Internal nul characters** - When C strings have a nul
+//! terminator character, this usually means that they cannot have nul
+//! characters in the middle — a nul character would essentially
+//! truncate the string. Rust strings *can* have nul characters in
+//! the middle, because nul does not have to mark the end of the
+//! string in Rust.
+//!
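+//! For example, a small illustration of the last two points: a Rust string
+//! stores its length explicitly, so an interior nul byte is just another
+//! byte rather than a terminator.
+//!
+//! ```
+//! let rust_str = "foo\0bar";
+//! // The length is stored, not discovered by scanning for `\0`.
+//! assert_eq!(rust_str.len(), 7);
+//! assert!(rust_str.contains('\0'));
+//! ```
+//!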
+//! # Representations of non-Rust strings
+//!
+//! [`CString`] and [`CStr`] are useful when you need to transfer
+//! C-compatible strings to and from languages with a C ABI, like Python.
+//!
+//! * **From Rust to C:** [`CString`] represents an owned, C-friendly
+//! string: it is nul-terminated, and has no internal nul characters.
+//! Rust code can create a [`CString`] out of a normal string (provided
+//! that the string doesn't have nul characters in the middle), and
+//! then use a variety of methods to obtain a raw <code>\*mut [u8]</code> that can
+//! then be passed as an argument to functions which use the C
+//! conventions for strings.
+//!
+//! * **From C to Rust:** [`CStr`] represents a borrowed C string; it
+//! is what you would use to wrap a raw <code>\*const [u8]</code> that you got from
+//! a C function. A [`CStr`] is guaranteed to be a nul-terminated array
+//! of bytes. Once you have a [`CStr`], you can convert it to a Rust
+//! <code>&[str]</code> if it's valid UTF-8, or lossily convert it by adding
+//! replacement characters.
+//!
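+//! For instance, handing a Rust string to a C API might look like this (the
+//! C function itself is left out here):
+//!
+//! ```
+//! use std::ffi::CString;
+//!
+//! let c_string = CString::new("Hello, world!").expect("no interior nul bytes");
+//! // `as_ptr` yields a nul-terminated pointer that C code can read for as
+//! // long as `c_string` stays alive.
+//! let _ptr = c_string.as_ptr();
+//! ```
+//!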
+//! [`OsString`] and [`OsStr`] are useful when you need to transfer
+//! strings to and from the operating system itself, or when capturing
+//! the output of external commands. Conversions between [`OsString`],
+//! [`OsStr`] and Rust strings work similarly to those for [`CString`]
+//! and [`CStr`].
+//!
+//! * [`OsString`] losslessly represents an owned platform string. However, this
+//! representation is not necessarily in a form native to the platform.
+//! In the Rust standard library, various APIs that transfer strings to/from the operating
+//! system use [`OsString`] instead of plain strings. For example,
+//! [`env::var_os()`] is used to query environment variables; it
+//! returns an <code>[Option]<[OsString]></code>. If the environment variable
+//! exists you will get a <code>[Some]\(os_string)</code>, which you can
+//! *then* try to convert to a Rust string. This yields a [`Result`], so that
+//! your code can detect errors in case the environment variable did
+//! not in fact contain valid Unicode data.
+//!
+//! * [`OsStr`] losslessly represents a borrowed reference to a platform string.
+//! However, this representation is not necessarily in a form native to the platform.
+//! It can be converted into a UTF-8 Rust string slice in a similar way to
+//! [`OsString`].
+//!
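+//! As a sketch of the [`env::var_os()`] flow described above:
+//!
+//! ```
+//! use std::env;
+//!
+//! let path = env::var_os("PATH").unwrap_or_default();
+//! // Converting to a Rust `String` is a separate, fallible step.
+//! match path.into_string() {
+//!     Ok(unicode) => println!("PATH is valid Unicode: {unicode}"),
+//!     Err(raw) => println!("PATH is not valid Unicode: {raw:?}"),
+//! }
+//! ```
+//!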
+//! # Conversions
+//!
+//! ## On Unix
+//!
+//! On Unix, [`OsStr`] implements the
+//! <code>std::os::unix::ffi::[OsStrExt][unix.OsStrExt]</code> trait, which
+//! augments it with two methods, [`from_bytes`] and [`as_bytes`].
+//! These do inexpensive conversions from and to byte slices.
+//!
+//! Additionally, on Unix [`OsString`] implements the
+//! <code>std::os::unix::ffi::[OsStringExt][unix.OsStringExt]</code> trait,
+//! which provides [`from_vec`] and [`into_vec`] methods that consume
+//! their arguments, and take or produce vectors of [`u8`].
+//!
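+//! A minimal round trip using these extension traits (Unix only):
+//!
+//! ```
+//! #[cfg(unix)] {
+//!     use std::ffi::OsStr;
+//!     use std::os::unix::ffi::OsStrExt;
+//!
+//!     // On Unix any byte sequence, even non-UTF-8 data, is a valid `OsStr`.
+//!     let os_str = OsStr::from_bytes(b"foo\x80bar");
+//!     assert_eq!(os_str.as_bytes(), b"foo\x80bar");
+//! }
+//! ```
+//!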
+//! ## On Windows
+//!
+//! An [`OsStr`] can be losslessly converted to a native Windows string. And
+//! a native Windows string can be losslessly converted to an [`OsString`].
+//!
+//! On Windows, [`OsStr`] implements the
+//! <code>std::os::windows::ffi::[OsStrExt][windows.OsStrExt]</code> trait,
+//! which provides an [`encode_wide`] method. This yields an
+//! iterator that can be [`collect`]ed into a vector of [`u16`]. After a nul
+//! character is appended, this is the same as a native Windows string.
+//!
+//! Additionally, on Windows [`OsString`] implements the
+//! <code>std::os::windows::ffi::[OsStringExt][windows.OsStringExt]</code>
+//! trait, which provides a [`from_wide`] method to convert a native Windows
+//! string (without the terminating nul character) to an [`OsString`].
+//!
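+//! A minimal round trip through a wide string (Windows only):
+//!
+//! ```
+//! #[cfg(windows)] {
+//!     use std::ffi::OsString;
+//!     use std::os::windows::ffi::{OsStrExt, OsStringExt};
+//!
+//!     let source = OsString::from("foo");
+//!     // `encode_wide` comes from `OsStrExt`; `OsString` derefs to `OsStr`.
+//!     let wide: Vec<u16> = source.encode_wide().collect();
+//!     assert_eq!(OsString::from_wide(&wide), source);
+//! }
+//! ```
+//!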
+//! [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
+//! [Unicode code point]: https://www.unicode.org/glossary/#code_point
+//! [`env::set_var()`]: crate::env::set_var "env::set_var"
+//! [`env::var_os()`]: crate::env::var_os "env::var_os"
+//! [unix.OsStringExt]: crate::os::unix::ffi::OsStringExt "os::unix::ffi::OsStringExt"
+//! [`from_vec`]: crate::os::unix::ffi::OsStringExt::from_vec "os::unix::ffi::OsStringExt::from_vec"
+//! [`into_vec`]: crate::os::unix::ffi::OsStringExt::into_vec "os::unix::ffi::OsStringExt::into_vec"
+//! [unix.OsStrExt]: crate::os::unix::ffi::OsStrExt "os::unix::ffi::OsStrExt"
+//! [`from_bytes`]: crate::os::unix::ffi::OsStrExt::from_bytes "os::unix::ffi::OsStrExt::from_bytes"
+//! [`as_bytes`]: crate::os::unix::ffi::OsStrExt::as_bytes "os::unix::ffi::OsStrExt::as_bytes"
+//! [`OsStrExt`]: crate::os::unix::ffi::OsStrExt "os::unix::ffi::OsStrExt"
+//! [windows.OsStrExt]: crate::os::windows::ffi::OsStrExt "os::windows::ffi::OsStrExt"
+//! [`encode_wide`]: crate::os::windows::ffi::OsStrExt::encode_wide "os::windows::ffi::OsStrExt::encode_wide"
+//! [`collect`]: crate::iter::Iterator::collect "iter::Iterator::collect"
+//! [windows.OsStringExt]: crate::os::windows::ffi::OsStringExt "os::windows::ffi::OsStringExt"
+//! [`from_wide`]: crate::os::windows::ffi::OsStringExt::from_wide "os::windows::ffi::OsStringExt::from_wide"
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "alloc_c_string", since = "1.64.0")]
+pub use alloc::ffi::{CString, FromVecWithNulError, IntoStringError, NulError};
+#[stable(feature = "core_c_str", since = "1.64.0")]
+pub use core::ffi::{CStr, FromBytesWithNulError};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::os_str::{OsStr, OsString};
+
+#[stable(feature = "core_ffi_c", since = "1.64.0")]
+pub use core::ffi::{
+ c_char, c_double, c_float, c_int, c_long, c_longlong, c_schar, c_short, c_uchar, c_uint,
+ c_ulong, c_ulonglong, c_ushort,
+};
+
+#[stable(feature = "core_c_void", since = "1.30.0")]
+pub use core::ffi::c_void;
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+pub use core::ffi::{VaList, VaListImpl};
+
+mod os_str;
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
new file mode 100644
index 000000000..a0a5c003d
--- /dev/null
+++ b/library/std/src/ffi/os_str.rs
@@ -0,0 +1,1447 @@
+#[cfg(test)]
+mod tests;
+
+use crate::borrow::{Borrow, Cow};
+use crate::cmp;
+use crate::collections::TryReserveError;
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+use crate::iter::Extend;
+use crate::ops;
+use crate::rc::Rc;
+use crate::str::FromStr;
+use crate::sync::Arc;
+
+use crate::sys::os_str::{Buf, Slice};
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+/// A type that can represent owned, mutable platform-native strings, but is
+/// cheaply inter-convertible with Rust strings.
+///
+/// The need for this type arises from the fact that:
+///
+/// * On Unix systems, strings are often arbitrary sequences of non-zero
+/// bytes, in many cases interpreted as UTF-8.
+///
+/// * On Windows, strings are often arbitrary sequences of non-zero 16-bit
+/// values, interpreted as UTF-16 when it is valid to do so.
+///
+/// * In Rust, strings are always valid UTF-8, and may contain zeros.
+///
+/// `OsString` and [`OsStr`] bridge this gap by simultaneously representing Rust
+/// and platform-native string values, and in particular allowing a Rust string
+/// to be converted into an "OS" string with no cost if possible. A consequence
+/// of this is that `OsString` instances are *not* `NUL` terminated; in order
+/// to pass one to e.g. a Unix system call, you should create a [`CStr`].
+///
+/// `OsString` is to <code>&[OsStr]</code> as [`String`] is to <code>&[str]</code>: the former
+/// in each pair are owned strings; the latter are borrowed
+/// references.
+///
+/// Note that `OsString` and [`OsStr`] internally do not necessarily hold strings in
+/// the form native to the platform: while on Unix, strings are stored as a
+/// sequence of 8-bit values, on Windows, where strings are 16-bit value based
+/// as just discussed, strings are also actually stored as a sequence of 8-bit
+/// values, encoded in a less-strict variant of UTF-8. This is useful to
+/// understand when handling capacity and length values.
+///
+/// # Capacity of `OsString`
+///
+/// Capacity uses units of UTF-8 bytes for OS strings which were created from valid Unicode, and
+/// uses units of bytes in an unspecified encoding for other contents. On a given target, all
+/// `OsString` and `OsStr` values use the same units for capacity, so the following will work:
+/// ```
+/// use std::ffi::{OsStr, OsString};
+///
+/// fn concat_os_strings(a: &OsStr, b: &OsStr) -> OsString {
+/// let mut ret = OsString::with_capacity(a.len() + b.len()); // This will allocate
+/// ret.push(a); // This will not allocate further
+/// ret.push(b); // This will not allocate further
+/// ret
+/// }
+/// ```
+///
+/// # Creating an `OsString`
+///
+/// **From a Rust string**: `OsString` implements
+/// <code>[From]<[String]></code>, so you can use <code>my_string.[into]\()</code> to
+/// create an `OsString` from a normal Rust string.
+///
+/// **From slices:** Just like you can start with an empty Rust
+/// [`String`] and then [`String::push_str`] some <code>&[str]</code>
+/// sub-string slices into it, you can create an empty `OsString` with
+/// the [`OsString::new`] method and then push string slices into it with the
+/// [`OsString::push`] method.
+///
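+/// Both routes might look like this:
+///
+/// ```
+/// use std::ffi::OsString;
+///
+/// // From a Rust `String`, without copying:
+/// let from_string: OsString = String::from("hello").into();
+///
+/// // Built up piece by piece from string slices:
+/// let mut pushed = OsString::new();
+/// pushed.push("hel");
+/// pushed.push("lo");
+///
+/// assert_eq!(from_string, pushed);
+/// ```
+///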
+/// # Extracting a borrowed reference to the whole OS string
+///
+/// You can use the [`OsString::as_os_str`] method to get an <code>&[OsStr]</code> from
+/// an `OsString`; this is effectively a borrowed reference to the
+/// whole string.
+///
+/// # Conversions
+///
+/// See the [module's toplevel documentation about conversions][conversions] for a discussion on
+/// the traits which `OsString` implements for [conversions] from/to native representations.
+///
+/// [`CStr`]: crate::ffi::CStr
+/// [conversions]: super#conversions
+/// [into]: Into::into
+#[cfg_attr(not(test), rustc_diagnostic_item = "OsString")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct OsString {
+ inner: Buf,
+}
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl crate::sealed::Sealed for OsString {}
+
+/// Borrowed reference to an OS string (see [`OsString`]).
+///
+/// This type represents a borrowed reference to a string in the operating system's preferred
+/// representation.
+///
+/// `&OsStr` is to [`OsString`] as <code>&[str]</code> is to [`String`]: the
+/// former in each pair are borrowed references; the latter are owned strings.
+///
+/// See the [module's toplevel documentation about conversions][conversions] for a discussion on
+/// the traits which `OsStr` implements for [conversions] from/to native representations.
+///
+/// [conversions]: super#conversions
+#[cfg_attr(not(test), rustc_diagnostic_item = "OsStr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+// FIXME:
+// `OsStr::from_inner` current implementation relies
+// on `OsStr` being layout-compatible with `Slice`.
+// When attribute privacy is implemented, `OsStr` should be annotated as `#[repr(transparent)]`.
+// Anyway, `OsStr` representation and layout are considered implementation details, are
+// not documented and must not be relied upon.
+pub struct OsStr {
+ inner: Slice,
+}
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl crate::sealed::Sealed for OsStr {}
+
+impl OsString {
+ /// Constructs a new empty `OsString`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let os_string = OsString::new();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn new() -> OsString {
+ OsString { inner: Buf::from_string(String::new()) }
+ }
+
+ /// Converts to an [`OsStr`] slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{OsString, OsStr};
+ ///
+ /// let os_string = OsString::from("foo");
+ /// let os_str = OsStr::new("foo");
+ /// assert_eq!(os_string.as_os_str(), os_str);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_os_str(&self) -> &OsStr {
+ self
+ }
+
+ /// Converts the `OsString` into a [`String`] if it contains valid Unicode data.
+ ///
+ /// On failure, ownership of the original `OsString` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let os_string = OsString::from("foo");
+ /// let string = os_string.into_string();
+ /// assert_eq!(string, Ok(String::from("foo")));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn into_string(self) -> Result<String, OsString> {
+ self.inner.into_string().map_err(|buf| OsString { inner: buf })
+ }
+
+ /// Extends the string with the given <code>&[OsStr]</code> slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut os_string = OsString::from("foo");
+ /// os_string.push("bar");
+ /// assert_eq!(&os_string, "foobar");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn push<T: AsRef<OsStr>>(&mut self, s: T) {
+ self.inner.push_slice(&s.as_ref().inner)
+ }
+
+ /// Creates a new `OsString` with at least the given capacity.
+ ///
+ /// The string will be able to hold at least `capacity` length units of other
+ /// OS strings without reallocating. This method is allowed to allocate for
+ /// more units than `capacity`. If `capacity` is 0, the string will not
+ /// allocate.
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut os_string = OsString::with_capacity(10);
+ /// let capacity = os_string.capacity();
+ ///
+ /// // This push is done without reallocating
+ /// os_string.push("foo");
+ ///
+ /// assert_eq!(capacity, os_string.capacity());
+ /// ```
+ #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
+ #[must_use]
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> OsString {
+ OsString { inner: Buf::with_capacity(capacity) }
+ }
+
+ /// Truncates the `OsString` to zero length.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut os_string = OsString::from("foo");
+ /// assert_eq!(&os_string, "foo");
+ ///
+ /// os_string.clear();
+ /// assert_eq!(&os_string, "");
+ /// ```
+ #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
+ #[inline]
+ pub fn clear(&mut self) {
+ self.inner.clear()
+ }
+
+ /// Returns the capacity this `OsString` can hold without reallocating.
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let os_string = OsString::with_capacity(10);
+ /// assert!(os_string.capacity() >= 10);
+ /// ```
+ #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
+ #[must_use]
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ /// Reserves capacity for at least `additional` more length units to be inserted
+ /// in the given `OsString`. Does nothing if the capacity is
+ /// already sufficient.
+ ///
+ /// The collection may reserve more space to speculatively avoid frequent reallocations.
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::new();
+ /// s.reserve(10);
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ self.inner.reserve(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more length units
+ /// in the given `OsString`. The string may reserve more space to speculatively avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{OsStr, OsString};
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<OsString, TryReserveError> {
+ /// let mut s = OsString::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// s.try_reserve(OsStr::new(data).len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// s.push(data);
+ ///
+ /// Ok(s)
+ /// }
+ /// # process_data("123").expect("why is the test harness OOMing on 3 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ #[inline]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve(additional)
+ }
+
+ /// Reserves the minimum capacity for at least `additional` more length units to
+ /// be inserted in the given `OsString`. Does nothing if the capacity is
+ /// already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`reserve`] if future insertions are expected.
+ ///
+ /// [`reserve`]: OsString::reserve
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::new();
+ /// s.reserve_exact(10);
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
+ #[inline]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.inner.reserve_exact(additional)
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional`
+ /// more length units in the given `OsString`. After calling
+ /// `try_reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the `OsString` more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: OsString::try_reserve
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{OsStr, OsString};
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<OsString, TryReserveError> {
+ /// let mut s = OsString::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// s.try_reserve_exact(OsStr::new(data).len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// s.push(data);
+ ///
+ /// Ok(s)
+ /// }
+ /// # process_data("123").expect("why is the test harness OOMing on 3 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ #[inline]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve_exact(additional)
+ }
+
+ /// Shrinks the capacity of the `OsString` to match its length.
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to_fit();
+ /// assert_eq!(3, s.capacity());
+ /// ```
+ #[stable(feature = "osstring_shrink_to_fit", since = "1.19.0")]
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.inner.shrink_to_fit()
+ }
+
+ /// Shrinks the capacity of the `OsString` with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to(10);
+ /// assert!(s.capacity() >= 10);
+ /// s.shrink_to(0);
+ /// assert!(s.capacity() >= 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.inner.shrink_to(min_capacity)
+ }
+
+ /// Converts this `OsString` into a boxed [`OsStr`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{OsString, OsStr};
+ ///
+ /// let s = OsString::from("hello");
+ ///
+ /// let b: Box<OsStr> = s.into_boxed_os_str();
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "into_boxed_os_str", since = "1.20.0")]
+ pub fn into_boxed_os_str(self) -> Box<OsStr> {
+ let rw = Box::into_raw(self.inner.into_box()) as *mut OsStr;
+ unsafe { Box::from_raw(rw) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<String> for OsString {
+ /// Converts a [`String`] into an [`OsString`].
+ ///
+ /// This conversion does not allocate or copy memory.
+ #[inline]
+ fn from(s: String) -> OsString {
+ OsString { inner: Buf::from_string(s) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + AsRef<OsStr>> From<&T> for OsString {
+ /// Copies any value implementing <code>[AsRef]&lt;[OsStr]&gt;</code>
+ /// into a newly allocated [`OsString`].
+ fn from(s: &T) -> OsString {
+ s.as_ref().to_os_string()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeFull> for OsString {
+ type Output = OsStr;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &OsStr {
+ OsStr::from_inner(self.inner.as_slice())
+ }
+}
+
+#[stable(feature = "mut_osstr", since = "1.44.0")]
+impl ops::IndexMut<ops::RangeFull> for OsString {
+ #[inline]
+ fn index_mut(&mut self, _index: ops::RangeFull) -> &mut OsStr {
+ OsStr::from_inner_mut(self.inner.as_mut_slice())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Deref for OsString {
+ type Target = OsStr;
+
+ #[inline]
+ fn deref(&self) -> &OsStr {
+ &self[..]
+ }
+}
+
+#[stable(feature = "mut_osstr", since = "1.44.0")]
+impl ops::DerefMut for OsString {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut OsStr {
+ &mut self[..]
+ }
+}
+
+#[stable(feature = "osstring_default", since = "1.9.0")]
+impl Default for OsString {
+ /// Constructs an empty `OsString`.
+ #[inline]
+ fn default() -> OsString {
+ OsString::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Clone for OsString {
+ #[inline]
+ fn clone(&self) -> Self {
+ OsString { inner: self.inner.clone() }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ self.inner.clone_from(&source.inner)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for OsString {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, formatter)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq for OsString {
+ #[inline]
+ fn eq(&self, other: &OsString) -> bool {
+ &**self == &**other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq<str> for OsString {
+ #[inline]
+ fn eq(&self, other: &str) -> bool {
+ &**self == other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq<OsString> for str {
+ #[inline]
+ fn eq(&self, other: &OsString) -> bool {
+ &**other == self
+ }
+}
+
+#[stable(feature = "os_str_str_ref_eq", since = "1.29.0")]
+impl PartialEq<&str> for OsString {
+ #[inline]
+ fn eq(&self, other: &&str) -> bool {
+ **self == **other
+ }
+}
+
+#[stable(feature = "os_str_str_ref_eq", since = "1.29.0")]
+impl<'a> PartialEq<OsString> for &'a str {
+ #[inline]
+ fn eq(&self, other: &OsString) -> bool {
+ **other == **self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Eq for OsString {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for OsString {
+ #[inline]
+ fn partial_cmp(&self, other: &OsString) -> Option<cmp::Ordering> {
+ (&**self).partial_cmp(&**other)
+ }
+ #[inline]
+ fn lt(&self, other: &OsString) -> bool {
+ &**self < &**other
+ }
+ #[inline]
+ fn le(&self, other: &OsString) -> bool {
+ &**self <= &**other
+ }
+ #[inline]
+ fn gt(&self, other: &OsString) -> bool {
+ &**self > &**other
+ }
+ #[inline]
+ fn ge(&self, other: &OsString) -> bool {
+ &**self >= &**other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd<str> for OsString {
+ #[inline]
+ fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+ (&**self).partial_cmp(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for OsString {
+ #[inline]
+ fn cmp(&self, other: &OsString) -> cmp::Ordering {
+ (&**self).cmp(&**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Hash for OsString {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (&**self).hash(state)
+ }
+}
+
+#[stable(feature = "os_string_fmt_write", since = "1.64.0")]
+impl fmt::Write for OsString {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ self.push(s);
+ Ok(())
+ }
+}
+
+impl OsStr {
+ /// Coerces into an `OsStr` slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsStr;
+ ///
+ /// let os_str = OsStr::new("foo");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new<S: AsRef<OsStr> + ?Sized>(s: &S) -> &OsStr {
+ s.as_ref()
+ }
+
+ #[inline]
+ fn from_inner(inner: &Slice) -> &OsStr {
+ // SAFETY: OsStr is just a wrapper of Slice,
+ // therefore converting &Slice to &OsStr is safe.
+ unsafe { &*(inner as *const Slice as *const OsStr) }
+ }
+
+ #[inline]
+ fn from_inner_mut(inner: &mut Slice) -> &mut OsStr {
+ // SAFETY: OsStr is just a wrapper of Slice,
+ // therefore converting &mut Slice to &mut OsStr is safe.
+ // Any method that mutates OsStr must be careful not to
+ // break platform-specific encoding, in particular Wtf8 on Windows.
+ unsafe { &mut *(inner as *mut Slice as *mut OsStr) }
+ }
+
+ /// Yields a <code>&[str]</code> slice if the `OsStr` is valid Unicode.
+ ///
+ /// This conversion may entail doing a check for UTF-8 validity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsStr;
+ ///
+ /// let os_str = OsStr::new("foo");
+ /// assert_eq!(os_str.to_str(), Some("foo"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub fn to_str(&self) -> Option<&str> {
+ self.inner.to_str()
+ }
+
+ /// Converts an `OsStr` to a <code>[Cow]<[str]></code>.
+ ///
+ /// Any non-Unicode sequences are replaced with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD].
+ ///
+ /// [U+FFFD]: crate::char::REPLACEMENT_CHARACTER
+ ///
+ /// # Examples
+ ///
+ /// Calling `to_string_lossy` on an `OsStr` with invalid unicode:
+ ///
+ /// ```
+ /// // Note, due to differences in how Unix and Windows represent strings,
+ /// // we are forced to complicate this example, setting up example `OsStr`s
+ /// // with different source data and via different platform extensions.
+ /// // Understand that in reality you could end up with such invalid
+ /// // sequences simply by collecting user command line arguments, for
+ /// // example.
+ ///
+ /// #[cfg(unix)] {
+ /// use std::ffi::OsStr;
+ /// use std::os::unix::ffi::OsStrExt;
+ ///
+ /// // Here, the values 0x66 and 0x6f correspond to 'f' and 'o'
+ /// // respectively. The value 0x80 is a lone continuation byte, invalid
+ /// // in a UTF-8 sequence.
+ /// let source = [0x66, 0x6f, 0x80, 0x6f];
+ /// let os_str = OsStr::from_bytes(&source[..]);
+ ///
+ /// assert_eq!(os_str.to_string_lossy(), "fo�o");
+ /// }
+ /// #[cfg(windows)] {
+ /// use std::ffi::OsString;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// // Here the values 0x0066 and 0x006f correspond to 'f' and 'o'
+ /// // respectively. The value 0xD800 is a lone surrogate half, invalid
+ /// // in a UTF-16 sequence.
+ /// let source = [0x0066, 0x006f, 0xD800, 0x006f];
+ /// let os_string = OsString::from_wide(&source[..]);
+ /// let os_str = os_string.as_os_str();
+ ///
+ /// assert_eq!(os_str.to_string_lossy(), "fo�o");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub fn to_string_lossy(&self) -> Cow<'_, str> {
+ self.inner.to_string_lossy()
+ }
+
+ /// Copies the slice into an owned [`OsString`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{OsStr, OsString};
+ ///
+ /// let os_str = OsStr::new("foo");
+ /// let os_string = os_str.to_os_string();
+ /// assert_eq!(os_string, OsString::from("foo"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub fn to_os_string(&self) -> OsString {
+ OsString { inner: self.inner.to_owned() }
+ }
+
+ /// Checks whether the `OsStr` is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsStr;
+ ///
+ /// let os_str = OsStr::new("");
+ /// assert!(os_str.is_empty());
+ ///
+ /// let os_str = OsStr::new("foo");
+ /// assert!(!os_str.is_empty());
+ /// ```
+ #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
+ #[must_use]
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.inner.inner.is_empty()
+ }
+
+ /// Returns the length of this `OsStr`.
+ ///
+ /// Note that this does **not** return the number of bytes in the string in
+ /// OS string form.
+ ///
+ /// The length returned is that of the underlying storage used by `OsStr`.
+ /// As discussed in the [`OsString`] introduction, [`OsString`] and `OsStr`
+ /// store strings in a form best suited for cheap inter-conversion between
+ /// native-platform and Rust string forms, which may differ significantly
+ /// from both of them, including in storage size and encoding.
+ ///
+ /// This number is simply useful for passing to other methods, like
+ /// [`OsString::with_capacity`] to avoid reallocations.
+ ///
+    /// See the main `OsString` documentation for information about encoding and capacity units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsStr;
+ ///
+ /// let os_str = OsStr::new("");
+ /// assert_eq!(os_str.len(), 0);
+ ///
+ /// let os_str = OsStr::new("foo");
+ /// assert_eq!(os_str.len(), 3);
+ /// ```
+ #[stable(feature = "osstring_simple_functions", since = "1.9.0")]
+ #[must_use]
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.inner.len()
+ }
+
+ /// Converts a <code>[Box]<[OsStr]></code> into an [`OsString`] without copying or allocating.
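+    ///
+    /// # Examples
+    ///
+    /// A small illustrative round trip through `Box<OsStr>`:
+    ///
+    /// ```
+    /// use std::ffi::{OsStr, OsString};
+    ///
+    /// let boxed: Box<OsStr> = Box::from(OsStr::new("foo"));
+    /// let os_string: OsString = boxed.into_os_string();
+    /// assert_eq!(os_string, OsString::from("foo"));
+    /// ```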
+ #[stable(feature = "into_boxed_os_str", since = "1.20.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ pub fn into_os_string(self: Box<OsStr>) -> OsString {
+ let boxed = unsafe { Box::from_raw(Box::into_raw(self) as *mut Slice) };
+ OsString { inner: Buf::from_box(boxed) }
+ }
+
+ /// Gets the underlying byte representation.
+ ///
+ /// Note: it is *crucial* that this API is not externally public, to avoid
+ /// revealing the internal, platform-specific encodings.
+ #[inline]
+ pub(crate) fn bytes(&self) -> &[u8] {
+ unsafe { &*(&self.inner as *const _ as *const [u8]) }
+ }
+
+ /// Converts this string to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`OsStr::to_ascii_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::from("GRÜßE, JÜRGEN ❤");
+ ///
+ /// s.make_ascii_lowercase();
+ ///
+ /// assert_eq!("grÜße, jÜrgen ❤", s);
+ /// ```
+ #[stable(feature = "osstring_ascii", since = "1.53.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ self.inner.make_ascii_lowercase()
+ }
+
+ /// Converts this string to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`OsStr::to_ascii_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::from("Grüße, Jürgen ❤");
+ ///
+ /// s.make_ascii_uppercase();
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s);
+ /// ```
+ #[stable(feature = "osstring_ascii", since = "1.53.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ self.inner.make_ascii_uppercase()
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`OsStr::make_ascii_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ /// let s = OsString::from("Grüße, Jürgen ❤");
+ ///
+ /// assert_eq!("grüße, jürgen ❤", s.to_ascii_lowercase());
+ /// ```
+ #[must_use = "to lowercase the value in-place, use `make_ascii_lowercase`"]
+ #[stable(feature = "osstring_ascii", since = "1.53.0")]
+ pub fn to_ascii_lowercase(&self) -> OsString {
+ OsString::from_inner(self.inner.to_ascii_lowercase())
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`OsStr::make_ascii_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ /// let s = OsString::from("Grüße, Jürgen ❤");
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s.to_ascii_uppercase());
+ /// ```
+ #[must_use = "to uppercase the value in-place, use `make_ascii_uppercase`"]
+ #[stable(feature = "osstring_ascii", since = "1.53.0")]
+ pub fn to_ascii_uppercase(&self) -> OsString {
+ OsString::from_inner(self.inner.to_ascii_uppercase())
+ }
+
+ /// Checks if all characters in this string are within the ASCII range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let ascii = OsString::from("hello!\n");
+ /// let non_ascii = OsString::from("Grüße, Jürgen ❤");
+ ///
+ /// assert!(ascii.is_ascii());
+ /// assert!(!non_ascii.is_ascii());
+ /// ```
+ #[stable(feature = "osstring_ascii", since = "1.53.0")]
+ #[must_use]
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+ self.inner.is_ascii()
+ }
+
+ /// Checks that two strings are an ASCII case-insensitive match.
+ ///
+ /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
+ /// but without allocating and copying temporaries.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// assert!(OsString::from("Ferris").eq_ignore_ascii_case("FERRIS"));
+ /// assert!(OsString::from("Ferrös").eq_ignore_ascii_case("FERRöS"));
+ /// assert!(!OsString::from("Ferrös").eq_ignore_ascii_case("FERRÖS"));
+ /// ```
+ #[stable(feature = "osstring_ascii", since = "1.53.0")]
+ pub fn eq_ignore_ascii_case<S: AsRef<OsStr>>(&self, other: S) -> bool {
+ self.inner.eq_ignore_ascii_case(&other.as_ref().inner)
+ }
+}
+
+#[stable(feature = "box_from_os_str", since = "1.17.0")]
+impl From<&OsStr> for Box<OsStr> {
+ /// Copies the string into a newly allocated <code>[Box]&lt;[OsStr]&gt;</code>.
+ #[inline]
+ fn from(s: &OsStr) -> Box<OsStr> {
+ let rw = Box::into_raw(s.inner.into_box()) as *mut OsStr;
+ unsafe { Box::from_raw(rw) }
+ }
+}
+
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, OsStr>> for Box<OsStr> {
+ /// Converts a `Cow<'a, OsStr>` into a <code>[Box]&lt;[OsStr]&gt;</code>,
+ /// by copying the contents if they are borrowed.
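+    ///
+    /// # Examples
+    ///
+    /// An illustrative conversion from a borrowed `Cow`:
+    ///
+    /// ```
+    /// use std::borrow::Cow;
+    /// use std::ffi::OsStr;
+    ///
+    /// let cow: Cow<'_, OsStr> = Cow::Borrowed(OsStr::new("foo"));
+    /// let boxed: Box<OsStr> = Box::from(cow);
+    /// assert_eq!(&*boxed, OsStr::new("foo"));
+    /// ```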
+ #[inline]
+ fn from(cow: Cow<'_, OsStr>) -> Box<OsStr> {
+ match cow {
+ Cow::Borrowed(s) => Box::from(s),
+ Cow::Owned(s) => Box::from(s),
+ }
+ }
+}
+
+#[stable(feature = "os_string_from_box", since = "1.18.0")]
+impl From<Box<OsStr>> for OsString {
+ /// Converts a <code>[Box]<[OsStr]></code> into an [`OsString`] without copying or
+ /// allocating.
+ #[inline]
+ fn from(boxed: Box<OsStr>) -> OsString {
+ boxed.into_os_string()
+ }
+}
+
+#[stable(feature = "box_from_os_string", since = "1.20.0")]
+impl From<OsString> for Box<OsStr> {
+ /// Converts an [`OsString`] into a <code>[Box]<[OsStr]></code> without copying or allocating.
+ #[inline]
+ fn from(s: OsString) -> Box<OsStr> {
+ s.into_boxed_os_str()
+ }
+}
+
+#[stable(feature = "more_box_slice_clone", since = "1.29.0")]
+impl Clone for Box<OsStr> {
+ #[inline]
+ fn clone(&self) -> Self {
+ self.to_os_string().into_boxed_os_str()
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<OsString> for Arc<OsStr> {
+ /// Converts an [`OsString`] into an <code>[Arc]<[OsStr]></code> by moving the [`OsString`]
+ /// data into a new [`Arc`] buffer.
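+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of sharing an [`OsString`] as an `Arc<OsStr>`:
+    ///
+    /// ```
+    /// use std::ffi::{OsStr, OsString};
+    /// use std::sync::Arc;
+    ///
+    /// let arc: Arc<OsStr> = Arc::from(OsString::from("foo"));
+    /// assert_eq!(&*arc, OsStr::new("foo"));
+    /// ```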
+ #[inline]
+ fn from(s: OsString) -> Arc<OsStr> {
+ let arc = s.inner.into_arc();
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const OsStr) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&OsStr> for Arc<OsStr> {
+ /// Copies the string into a newly allocated <code>[Arc]&lt;[OsStr]&gt;</code>.
+ #[inline]
+ fn from(s: &OsStr) -> Arc<OsStr> {
+ let arc = s.inner.into_arc();
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const OsStr) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<OsString> for Rc<OsStr> {
+ /// Converts an [`OsString`] into an <code>[Rc]<[OsStr]></code> by moving the [`OsString`]
+ /// data into a new [`Rc`] buffer.
+ #[inline]
+ fn from(s: OsString) -> Rc<OsStr> {
+ let rc = s.inner.into_rc();
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const OsStr) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&OsStr> for Rc<OsStr> {
+ /// Copies the string into a newly allocated <code>[Rc]&lt;[OsStr]&gt;</code>.
+ #[inline]
+ fn from(s: &OsStr) -> Rc<OsStr> {
+ let rc = s.inner.into_rc();
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const OsStr) }
+ }
+}
+
+#[stable(feature = "cow_from_osstr", since = "1.28.0")]
+impl<'a> From<OsString> for Cow<'a, OsStr> {
+ /// Moves the string into a [`Cow::Owned`].
+ #[inline]
+ fn from(s: OsString) -> Cow<'a, OsStr> {
+ Cow::Owned(s)
+ }
+}
+
+#[stable(feature = "cow_from_osstr", since = "1.28.0")]
+impl<'a> From<&'a OsStr> for Cow<'a, OsStr> {
+ /// Converts the string reference into a [`Cow::Borrowed`].
+ #[inline]
+ fn from(s: &'a OsStr) -> Cow<'a, OsStr> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[stable(feature = "cow_from_osstr", since = "1.28.0")]
+impl<'a> From<&'a OsString> for Cow<'a, OsStr> {
+ /// Converts the string reference into a [`Cow::Borrowed`].
+ #[inline]
+ fn from(s: &'a OsString) -> Cow<'a, OsStr> {
+ Cow::Borrowed(s.as_os_str())
+ }
+}
+
+#[stable(feature = "osstring_from_cow_osstr", since = "1.28.0")]
+impl<'a> From<Cow<'a, OsStr>> for OsString {
+ /// Converts a `Cow<'a, OsStr>` into an [`OsString`],
+ /// by copying the contents if they are borrowed.
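+    ///
+    /// # Examples
+    ///
+    /// An illustrative conversion from both `Cow` variants:
+    ///
+    /// ```
+    /// use std::borrow::Cow;
+    /// use std::ffi::{OsStr, OsString};
+    ///
+    /// let borrowed: Cow<'_, OsStr> = Cow::Borrowed(OsStr::new("foo"));
+    /// let owned: Cow<'_, OsStr> = Cow::Owned(OsString::from("bar"));
+    /// assert_eq!(OsString::from(borrowed), OsString::from("foo"));
+    /// assert_eq!(OsString::from(owned), OsString::from("bar"));
+    /// ```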
+ #[inline]
+ fn from(s: Cow<'a, OsStr>) -> Self {
+ s.into_owned()
+ }
+}
+
+#[stable(feature = "box_default_extra", since = "1.17.0")]
+impl Default for Box<OsStr> {
+ #[inline]
+ fn default() -> Box<OsStr> {
+ let rw = Box::into_raw(Slice::empty_box()) as *mut OsStr;
+ unsafe { Box::from_raw(rw) }
+ }
+}
+
+#[stable(feature = "osstring_default", since = "1.9.0")]
+impl Default for &OsStr {
+ /// Creates an empty `OsStr`.
+ #[inline]
+ fn default() -> Self {
+ OsStr::new("")
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq for OsStr {
+ #[inline]
+ fn eq(&self, other: &OsStr) -> bool {
+ self.bytes().eq(other.bytes())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq<str> for OsStr {
+ #[inline]
+ fn eq(&self, other: &str) -> bool {
+ *self == *OsStr::new(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq<OsStr> for str {
+ #[inline]
+ fn eq(&self, other: &OsStr) -> bool {
+ *other == *OsStr::new(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Eq for OsStr {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for OsStr {
+ #[inline]
+ fn partial_cmp(&self, other: &OsStr) -> Option<cmp::Ordering> {
+ self.bytes().partial_cmp(other.bytes())
+ }
+ #[inline]
+ fn lt(&self, other: &OsStr) -> bool {
+ self.bytes().lt(other.bytes())
+ }
+ #[inline]
+ fn le(&self, other: &OsStr) -> bool {
+ self.bytes().le(other.bytes())
+ }
+ #[inline]
+ fn gt(&self, other: &OsStr) -> bool {
+ self.bytes().gt(other.bytes())
+ }
+ #[inline]
+ fn ge(&self, other: &OsStr) -> bool {
+ self.bytes().ge(other.bytes())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd<str> for OsStr {
+ #[inline]
+ fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+ self.partial_cmp(OsStr::new(other))
+ }
+}
+
+// FIXME (#19470): cannot provide PartialOrd<OsStr> for str until we
+// have more flexible coherence rules.
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for OsStr {
+ #[inline]
+ fn cmp(&self, other: &OsStr) -> cmp::Ordering {
+ self.bytes().cmp(other.bytes())
+ }
+}
+
+macro_rules! impl_cmp {
+ ($lhs:ty, $rhs: ty) => {
+ #[stable(feature = "cmp_os_str", since = "1.8.0")]
+ impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool {
+ <OsStr as PartialEq>::eq(self, other)
+ }
+ }
+
+ #[stable(feature = "cmp_os_str", since = "1.8.0")]
+ impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ #[inline]
+ fn eq(&self, other: &$lhs) -> bool {
+ <OsStr as PartialEq>::eq(self, other)
+ }
+ }
+
+ #[stable(feature = "cmp_os_str", since = "1.8.0")]
+ impl<'a, 'b> PartialOrd<$rhs> for $lhs {
+ #[inline]
+ fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
+ <OsStr as PartialOrd>::partial_cmp(self, other)
+ }
+ }
+
+ #[stable(feature = "cmp_os_str", since = "1.8.0")]
+ impl<'a, 'b> PartialOrd<$lhs> for $rhs {
+ #[inline]
+ fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
+ <OsStr as PartialOrd>::partial_cmp(self, other)
+ }
+ }
+ };
+}
+
+impl_cmp!(OsString, OsStr);
+impl_cmp!(OsString, &'a OsStr);
+impl_cmp!(Cow<'a, OsStr>, OsStr);
+impl_cmp!(Cow<'a, OsStr>, &'b OsStr);
+impl_cmp!(Cow<'a, OsStr>, OsString);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Hash for OsStr {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.bytes().hash(state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for OsStr {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, formatter)
+ }
+}
+
+impl OsStr {
+ pub(crate) fn display(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.inner, formatter)
+ }
+}
+
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<OsStr>> alloc::slice::Join<&OsStr> for [S] {
+ type Output = OsString;
+
+ fn join(slice: &Self, sep: &OsStr) -> OsString {
+ let Some((first, suffix)) = slice.split_first() else {
+ return OsString::new();
+ };
+ let first_owned = first.borrow().to_owned();
+ suffix.iter().fold(first_owned, |mut a, b| {
+ a.push(sep);
+ a.push(b.borrow());
+ a
+ })
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Borrow<OsStr> for OsString {
+ #[inline]
+ fn borrow(&self) -> &OsStr {
+ &self[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToOwned for OsStr {
+ type Owned = OsString;
+ #[inline]
+ fn to_owned(&self) -> OsString {
+ self.to_os_string()
+ }
+ #[inline]
+ fn clone_into(&self, target: &mut OsString) {
+ self.inner.clone_into(&mut target.inner)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for OsStr {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for OsString {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for str {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ OsStr::from_inner(Slice::from_str(self))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for String {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ (&**self).as_ref()
+ }
+}
+
+impl FromInner<Buf> for OsString {
+ #[inline]
+ fn from_inner(buf: Buf) -> OsString {
+ OsString { inner: buf }
+ }
+}
+
+impl IntoInner<Buf> for OsString {
+ #[inline]
+ fn into_inner(self) -> Buf {
+ self.inner
+ }
+}
+
+impl AsInner<Slice> for OsStr {
+ #[inline]
+ fn as_inner(&self) -> &Slice {
+ &self.inner
+ }
+}
+
+#[stable(feature = "osstring_from_str", since = "1.45.0")]
+impl FromStr for OsString {
+ type Err = core::convert::Infallible;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ Ok(OsString::from(s))
+ }
+}
+
+#[stable(feature = "osstring_extend", since = "1.52.0")]
+impl Extend<OsString> for OsString {
+ #[inline]
+ fn extend<T: IntoIterator<Item = OsString>>(&mut self, iter: T) {
+ for s in iter {
+ self.push(&s);
+ }
+ }
+}
+
+#[stable(feature = "osstring_extend", since = "1.52.0")]
+impl<'a> Extend<&'a OsStr> for OsString {
+ #[inline]
+ fn extend<T: IntoIterator<Item = &'a OsStr>>(&mut self, iter: T) {
+ for s in iter {
+ self.push(s);
+ }
+ }
+}
+
+#[stable(feature = "osstring_extend", since = "1.52.0")]
+impl<'a> Extend<Cow<'a, OsStr>> for OsString {
+ #[inline]
+ fn extend<T: IntoIterator<Item = Cow<'a, OsStr>>>(&mut self, iter: T) {
+ for s in iter {
+ self.push(&s);
+ }
+ }
+}
+
+#[stable(feature = "osstring_extend", since = "1.52.0")]
+impl FromIterator<OsString> for OsString {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = OsString>>(iter: I) -> Self {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over `OsString`s, we can avoid at least
+ // one allocation by getting the first string from the iterator
+ // and appending to it all the subsequent strings.
+ match iterator.next() {
+ None => OsString::new(),
+ Some(mut buf) => {
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
+
+#[stable(feature = "osstring_extend", since = "1.52.0")]
+impl<'a> FromIterator<&'a OsStr> for OsString {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = &'a OsStr>>(iter: I) -> Self {
+ let mut buf = Self::new();
+ for s in iter {
+ buf.push(s);
+ }
+ buf
+ }
+}
+
+#[stable(feature = "osstring_extend", since = "1.52.0")]
+impl<'a> FromIterator<Cow<'a, OsStr>> for OsString {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = Cow<'a, OsStr>>>(iter: I) -> Self {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over `OsString`s, we can avoid at least
+ // one allocation by getting the first owned string from the iterator
+ // and appending to it all the subsequent strings.
+ match iterator.next() {
+ None => OsString::new(),
+ Some(Cow::Owned(mut buf)) => {
+ buf.extend(iterator);
+ buf
+ }
+ Some(Cow::Borrowed(buf)) => {
+ let mut buf = OsString::from(buf);
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
diff --git a/library/std/src/ffi/os_str/tests.rs b/library/std/src/ffi/os_str/tests.rs
new file mode 100644
index 000000000..d7926749a
--- /dev/null
+++ b/library/std/src/ffi/os_str/tests.rs
@@ -0,0 +1,179 @@
+use super::*;
+use crate::sys_common::{AsInner, IntoInner};
+
+use crate::rc::Rc;
+use crate::sync::Arc;
+
+#[test]
+fn test_os_string_with_capacity() {
+ let os_string = OsString::with_capacity(0);
+ assert_eq!(0, os_string.inner.into_inner().capacity());
+
+ let os_string = OsString::with_capacity(10);
+ assert_eq!(10, os_string.inner.into_inner().capacity());
+
+ let mut os_string = OsString::with_capacity(0);
+ os_string.push("abc");
+ assert!(os_string.inner.into_inner().capacity() >= 3);
+}
+
+#[test]
+fn test_os_string_clear() {
+ let mut os_string = OsString::from("abc");
+ assert_eq!(3, os_string.inner.as_inner().len());
+
+ os_string.clear();
+ assert_eq!(&os_string, "");
+ assert_eq!(0, os_string.inner.as_inner().len());
+}
+
+#[test]
+fn test_os_string_capacity() {
+ let os_string = OsString::with_capacity(0);
+ assert_eq!(0, os_string.capacity());
+
+ let os_string = OsString::with_capacity(10);
+ assert_eq!(10, os_string.capacity());
+
+ let mut os_string = OsString::with_capacity(0);
+ os_string.push("abc");
+ assert!(os_string.capacity() >= 3);
+}
+
+#[test]
+fn test_os_string_reserve() {
+ let mut os_string = OsString::new();
+ assert_eq!(os_string.capacity(), 0);
+
+ os_string.reserve(2);
+ assert!(os_string.capacity() >= 2);
+
+ for _ in 0..16 {
+ os_string.push("a");
+ }
+
+ assert!(os_string.capacity() >= 16);
+ os_string.reserve(16);
+ assert!(os_string.capacity() >= 32);
+
+ os_string.push("a");
+
+ os_string.reserve(16);
+ assert!(os_string.capacity() >= 33)
+}
+
+#[test]
+fn test_os_string_reserve_exact() {
+ let mut os_string = OsString::new();
+ assert_eq!(os_string.capacity(), 0);
+
+ os_string.reserve_exact(2);
+ assert!(os_string.capacity() >= 2);
+
+ for _ in 0..16 {
+ os_string.push("a");
+ }
+
+ assert!(os_string.capacity() >= 16);
+ os_string.reserve_exact(16);
+ assert!(os_string.capacity() >= 32);
+
+ os_string.push("a");
+
+ os_string.reserve_exact(16);
+ assert!(os_string.capacity() >= 33)
+}
+
+#[test]
+fn test_os_string_join() {
+ let strings = [OsStr::new("hello"), OsStr::new("dear"), OsStr::new("world")];
+ assert_eq!("hello", strings[..1].join(OsStr::new(" ")));
+ assert_eq!("hello dear world", strings.join(OsStr::new(" ")));
+ assert_eq!("hellodearworld", strings.join(OsStr::new("")));
+ assert_eq!("hello.\n dear.\n world", strings.join(OsStr::new(".\n ")));
+
+ assert_eq!("dear world", strings[1..].join(&OsString::from(" ")));
+
+ let strings_abc = [OsString::from("a"), OsString::from("b"), OsString::from("c")];
+ assert_eq!("a b c", strings_abc.join(OsStr::new(" ")));
+}
+
+#[test]
+fn test_os_string_default() {
+ let os_string: OsString = Default::default();
+ assert_eq!("", &os_string);
+}
+
+#[test]
+fn test_os_str_is_empty() {
+ let mut os_string = OsString::new();
+ assert!(os_string.is_empty());
+
+ os_string.push("abc");
+ assert!(!os_string.is_empty());
+
+ os_string.clear();
+ assert!(os_string.is_empty());
+}
+
+#[test]
+fn test_os_str_len() {
+ let mut os_string = OsString::new();
+ assert_eq!(0, os_string.len());
+
+ os_string.push("abc");
+ assert_eq!(3, os_string.len());
+
+ os_string.clear();
+ assert_eq!(0, os_string.len());
+}
+
+#[test]
+fn test_os_str_default() {
+ let os_str: &OsStr = Default::default();
+ assert_eq!("", os_str);
+}
+
+#[test]
+fn into_boxed() {
+ let orig = "Hello, world!";
+ let os_str = OsStr::new(orig);
+ let boxed: Box<OsStr> = Box::from(os_str);
+ let os_string = os_str.to_owned().into_boxed_os_str().into_os_string();
+ assert_eq!(os_str, &*boxed);
+ assert_eq!(&*boxed, &*os_string);
+ assert_eq!(&*os_string, os_str);
+}
+
+#[test]
+fn boxed_default() {
+ let boxed = <Box<OsStr>>::default();
+ assert!(boxed.is_empty());
+}
+
+#[test]
+fn test_os_str_clone_into() {
+ let mut os_string = OsString::with_capacity(123);
+ os_string.push("hello");
+ let os_str = OsStr::new("bonjour");
+ os_str.clone_into(&mut os_string);
+ assert_eq!(os_str, os_string);
+ assert!(os_string.capacity() >= 123);
+}
+
+#[test]
+fn into_rc() {
+ let orig = "Hello, world!";
+ let os_str = OsStr::new(orig);
+ let rc: Rc<OsStr> = Rc::from(os_str);
+ let arc: Arc<OsStr> = Arc::from(os_str);
+
+ assert_eq!(&*rc, os_str);
+ assert_eq!(&*arc, os_str);
+
+ let rc2: Rc<OsStr> = Rc::from(os_str.to_owned());
+ let arc2: Arc<OsStr> = Arc::from(os_str.to_owned());
+
+ assert_eq!(&*rc2, os_str);
+ assert_eq!(&*arc2, os_str);
+}
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
new file mode 100644
index 000000000..c8e131b6e
--- /dev/null
+++ b/library/std/src/fs.rs
@@ -0,0 +1,2428 @@
+//! Filesystem manipulation operations.
+//!
+//! This module contains basic methods to manipulate the contents of the local
+//! filesystem. All methods in this module represent cross-platform filesystem
+//! operations. Extra platform-specific functionality can be found in the
+//! extension traits of `std::os::$platform`.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+mod tests;
+
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::io::{self, IoSlice, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, Write};
+use crate::path::{Path, PathBuf};
+use crate::sys::fs as fs_imp;
+use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
+use crate::time::SystemTime;
+
+/// A reference to an open file on the filesystem.
+///
+/// An instance of a `File` can be read and/or written depending on what options
+/// it was opened with. Files also implement [`Seek`] to alter the logical cursor
+/// that the file contains internally.
+///
+/// Files are automatically closed when they go out of scope. Errors detected
+/// on closing are ignored by the implementation of `Drop`. Use the method
+/// [`sync_all`] if these errors must be manually handled.
+///
+/// # Examples
+///
+/// Creates a new file and writes bytes to it (you can also use [`write()`]):
+///
+/// ```no_run
+/// use std::fs::File;
+/// use std::io::prelude::*;
+///
+/// fn main() -> std::io::Result<()> {
+/// let mut file = File::create("foo.txt")?;
+/// file.write_all(b"Hello, world!")?;
+/// Ok(())
+/// }
+/// ```
+///
+/// Reads the contents of a file into a [`String`] (you can also use [`read`]):
+///
+/// ```no_run
+/// use std::fs::File;
+/// use std::io::prelude::*;
+///
+/// fn main() -> std::io::Result<()> {
+/// let mut file = File::open("foo.txt")?;
+/// let mut contents = String::new();
+/// file.read_to_string(&mut contents)?;
+/// assert_eq!(contents, "Hello, world!");
+/// Ok(())
+/// }
+/// ```
+///
+/// It can be more efficient to read the contents of a file with a buffered
+/// [`Read`]er. This can be accomplished with [`BufReader<R>`]:
+///
+/// ```no_run
+/// use std::fs::File;
+/// use std::io::BufReader;
+/// use std::io::prelude::*;
+///
+/// fn main() -> std::io::Result<()> {
+/// let file = File::open("foo.txt")?;
+/// let mut buf_reader = BufReader::new(file);
+/// let mut contents = String::new();
+/// buf_reader.read_to_string(&mut contents)?;
+/// assert_eq!(contents, "Hello, world!");
+/// Ok(())
+/// }
+/// ```
+///
+/// Note that, although read and write methods require a `&mut File`, because
+/// of the interfaces for [`Read`] and [`Write`], the holder of a `&File` can
+/// still modify the file, either through methods that take `&File` or by
+/// retrieving the underlying OS object and modifying the file that way.
+/// Additionally, many operating systems allow concurrent modification of files
+/// by different processes. Avoid assuming that holding a `&File` means that the
+/// file will not change.
+///
+/// # Platform-specific behavior
+///
+/// On Windows, the implementation of [`Read`] and [`Write`] traits for `File`
+/// perform synchronous I/O operations. Therefore the underlying file must not
+/// have been opened for asynchronous I/O (e.g. by using `FILE_FLAG_OVERLAPPED`).
+///
+/// [`BufReader<R>`]: io::BufReader
+/// [`sync_all`]: File::sync_all
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "File")]
+pub struct File {
+ inner: fs_imp::File,
+}
+
+/// Metadata information about a file.
+///
+/// This structure is returned from the [`metadata`] or
+/// [`symlink_metadata`] function or method and represents known
+/// metadata about a file such as its permissions, size, modification
+/// times, etc.
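+///
+/// # Examples
+///
+/// A brief illustrative query (assuming `foo.txt` exists):
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+///     let metadata = fs::metadata("foo.txt")?;
+///     println!("is file: {}", metadata.is_file());
+///     println!("len: {}", metadata.len());
+///     Ok(())
+/// }
+/// ```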
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct Metadata(fs_imp::FileAttr);
+
+/// Iterator over the entries in a directory.
+///
+/// This iterator is returned from the [`read_dir`] function of this module and
+/// will yield instances of <code>[io::Result]<[DirEntry]></code>. Through a [`DirEntry`]
+/// information like the entry's path and possibly other metadata can be
+/// learned.
+///
+/// The order in which this iterator returns entries is platform and filesystem
+/// dependent.
+///
+/// # Errors
+///
+/// This [`io::Result`] will be an [`Err`] if there's some sort of intermittent
+/// IO error during iteration.
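+///
+/// # Examples
+///
+/// A minimal sketch that iterates over the current directory:
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+///     for entry in fs::read_dir(".")? {
+///         let entry = entry?;
+///         println!("{:?}", entry.path());
+///     }
+///     Ok(())
+/// }
+/// ```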
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct ReadDir(fs_imp::ReadDir);
+
+/// Entries returned by the [`ReadDir`] iterator.
+///
+/// An instance of `DirEntry` represents an entry inside of a directory on the
+/// filesystem. Each entry can be inspected via methods to learn about the full
+/// path or possibly other metadata through per-platform extension traits.
+///
+/// # Platform-specific behavior
+///
+/// On Unix, the `DirEntry` struct contains an internal reference to the open
+/// directory. Holding `DirEntry` objects will consume a file handle even
+/// after the `ReadDir` iterator is dropped.
+///
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct DirEntry(fs_imp::DirEntry);
+
+/// Options and flags which can be used to configure how a file is opened.
+///
+/// This builder exposes the ability to configure how a [`File`] is opened and
+/// what operations are permitted on the open file. The [`File::open`] and
+/// [`File::create`] methods are aliases for commonly used options using this
+/// builder.
+///
+/// Generally speaking, when using `OpenOptions`, you'll first call
+/// [`OpenOptions::new`], then chain calls to methods to set each option, then
+/// call [`OpenOptions::open`], passing the path of the file you're trying to
+/// open. This will give you a [`io::Result`] with a [`File`] inside that you
+/// can further operate on.
+///
+/// # Examples
+///
+/// Opening a file to read:
+///
+/// ```no_run
+/// use std::fs::OpenOptions;
+///
+/// let file = OpenOptions::new().read(true).open("foo.txt");
+/// ```
+///
+/// Opening a file for both reading and writing, as well as creating it if it
+/// doesn't exist:
+///
+/// ```no_run
+/// use std::fs::OpenOptions;
+///
+/// let file = OpenOptions::new()
+/// .read(true)
+/// .write(true)
+/// .create(true)
+/// .open("foo.txt");
+/// ```
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct OpenOptions(fs_imp::OpenOptions);
+
+/// Representation of the various timestamps on a file.
+#[derive(Copy, Clone, Debug, Default)]
+#[unstable(feature = "file_set_times", issue = "98245")]
+pub struct FileTimes(fs_imp::FileTimes);
+
+/// Representation of the various permissions on a file.
+///
+/// This struct currently provides only one bit of information,
+/// [`Permissions::readonly`], which is exposed on all currently supported
+/// platforms. Unix-specific functionality, such as mode bits, is available
+/// through the [`PermissionsExt`] trait.
+///
+/// [`PermissionsExt`]: crate::os::unix::fs::PermissionsExt
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Permissions(fs_imp::FilePermissions);
+
+/// A structure representing a type of file with accessors for each file type.
+/// It is returned by the [`Metadata::file_type`] method.
+#[stable(feature = "file_type", since = "1.1.0")]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "FileType")]
+pub struct FileType(fs_imp::FileType);
+
+/// A builder used to create directories in various manners.
+///
+/// This builder also supports platform-specific options.
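+///
+/// # Examples
+///
+/// An illustrative use that creates nested directories (the path is just an example):
+///
+/// ```no_run
+/// use std::fs::DirBuilder;
+///
+/// DirBuilder::new()
+///     .recursive(true)
+///     .create("/tmp/foo/bar/baz")
+///     .unwrap();
+/// ```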
+#[stable(feature = "dir_builder", since = "1.6.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "DirBuilder")]
+#[derive(Debug)]
+pub struct DirBuilder {
+ inner: fs_imp::DirBuilder,
+ recursive: bool,
+}
+
+/// Read the entire contents of a file into a bytes vector.
+///
+/// This is a convenience function for using [`File::open`] and [`read_to_end`]
+/// with fewer imports and without an intermediate variable.
+///
+/// [`read_to_end`]: Read::read_to_end
+///
+/// # Errors
+///
+/// This function will return an error if `path` does not already exist.
+/// Other errors may also be returned according to [`OpenOptions::open`].
+///
+/// It will also return an error if, while reading, it encounters an error of a
+/// kind other than [`io::ErrorKind::Interrupted`].
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+/// use std::net::SocketAddr;
+///
+/// fn main() -> Result<(), Box<dyn std::error::Error + 'static>> {
+/// let foo: SocketAddr = String::from_utf8_lossy(&fs::read("address.txt")?).parse()?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "fs_read_write_bytes", since = "1.26.0")]
+pub fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
+ fn inner(path: &Path) -> io::Result<Vec<u8>> {
+ let mut file = File::open(path)?;
+ let mut bytes = Vec::new();
+ file.read_to_end(&mut bytes)?;
+ Ok(bytes)
+ }
+ inner(path.as_ref())
+}
+
+/// Read the entire contents of a file into a string.
+///
+/// This is a convenience function for using [`File::open`] and [`read_to_string`]
+/// with fewer imports and without an intermediate variable.
+///
+/// [`read_to_string`]: Read::read_to_string
+///
+/// # Errors
+///
+/// This function will return an error if `path` does not already exist.
+/// Other errors may also be returned according to [`OpenOptions::open`].
+///
+/// It will also return an error if, while reading, it encounters an error
+/// of a kind other than [`io::ErrorKind::Interrupted`],
+/// or if the contents of the file are not valid UTF-8.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+/// use std::net::SocketAddr;
+/// use std::error::Error;
+///
+/// fn main() -> Result<(), Box<dyn Error>> {
+/// let foo: SocketAddr = fs::read_to_string("address.txt")?.parse()?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "fs_read_write", since = "1.26.0")]
+pub fn read_to_string<P: AsRef<Path>>(path: P) -> io::Result<String> {
+ fn inner(path: &Path) -> io::Result<String> {
+ let mut file = File::open(path)?;
+ let mut string = String::new();
+ file.read_to_string(&mut string)?;
+ Ok(string)
+ }
+ inner(path.as_ref())
+}
+
+/// Write a slice as the entire contents of a file.
+///
+/// This function will create a file if it does not exist,
+/// and will entirely replace its contents if it does.
+///
+/// Depending on the platform, this function may fail if the
+/// full directory path does not exist.
+///
+/// This is a convenience function for using [`File::create`] and [`write_all`]
+/// with fewer imports.
+///
+/// [`write_all`]: Write::write_all
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::write("foo.txt", b"Lorem ipsum")?;
+/// fs::write("bar.txt", "dolor sit")?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "fs_read_write_bytes", since = "1.26.0")]
+pub fn write<P: AsRef<Path>, C: AsRef<[u8]>>(path: P, contents: C) -> io::Result<()> {
+ fn inner(path: &Path, contents: &[u8]) -> io::Result<()> {
+ File::create(path)?.write_all(contents)
+ }
+ inner(path.as_ref(), contents.as_ref())
+}
+
+impl File {
+ /// Attempts to open a file in read-only mode.
+ ///
+ /// See the [`OpenOptions::open`] method for more details.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if `path` does not already exist.
+ /// Other errors may also be returned according to [`OpenOptions::open`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn open<P: AsRef<Path>>(path: P) -> io::Result<File> {
+ OpenOptions::new().read(true).open(path.as_ref())
+ }
+
+ /// Opens a file in write-only mode.
+ ///
+ /// This function will create a file if it does not exist,
+ /// and will truncate it if it does.
+ ///
+ /// Depending on the platform, this function may fail if the
+ /// full directory path does not exist.
+ ///
+ /// See the [`OpenOptions::open`] function for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::create("foo.txt")?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn create<P: AsRef<Path>>(path: P) -> io::Result<File> {
+ OpenOptions::new().write(true).create(true).truncate(true).open(path.as_ref())
+ }
+
+ /// Returns a new OpenOptions object.
+ ///
+ /// This function returns a new OpenOptions object that you can use to
+ /// open or create a file with specific options if `open()` or `create()`
+ /// are not appropriate.
+ ///
+ /// It is equivalent to `OpenOptions::new()`, but allows you to write more
+ /// readable code. Instead of
+ /// `OpenOptions::new().append(true).open("example.log")`,
+ /// you can write `File::options().append(true).open("example.log")`. This
+ /// also avoids the need to import `OpenOptions`.
+ ///
+ /// See the [`OpenOptions::new`] function for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::options().append(true).open("example.log")?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "with_options", since = "1.58.0")]
+ pub fn options() -> OpenOptions {
+ OpenOptions::new()
+ }
+
+ /// Attempts to sync all OS-internal metadata to disk.
+ ///
+ /// This function will attempt to ensure that all in-memory data reaches the
+ /// filesystem before returning.
+ ///
+ /// This can be used to handle errors that would otherwise only be caught
+ /// when the `File` is closed. Dropping a file will ignore errors in
+ /// synchronizing this in-memory data.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::prelude::*;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::create("foo.txt")?;
+ /// f.write_all(b"Hello, world!")?;
+ ///
+ /// f.sync_all()?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn sync_all(&self) -> io::Result<()> {
+ self.inner.fsync()
+ }
+
+ /// This function is similar to [`sync_all`], except that it might not
+ /// synchronize file metadata to the filesystem.
+ ///
+ /// This is intended for use cases that must synchronize content, but don't
+ /// need the metadata on disk. The goal of this method is to reduce disk
+ /// operations.
+ ///
+ /// Note that some platforms may simply implement this in terms of
+ /// [`sync_all`].
+ ///
+ /// [`sync_all`]: File::sync_all
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::prelude::*;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::create("foo.txt")?;
+ /// f.write_all(b"Hello, world!")?;
+ ///
+ /// f.sync_data()?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn sync_data(&self) -> io::Result<()> {
+ self.inner.datasync()
+ }
+
+ /// Truncates or extends the underlying file, updating the size of
+ /// this file to become `size`.
+ ///
+ /// If the `size` is less than the current file's size, then the file will
+ /// be shrunk. If it is greater than the current file's size, then the file
+ /// will be extended to `size` and have all of the intermediate data filled
+ /// in with 0s.
+ ///
+ /// The file's cursor isn't changed. In particular, if the cursor was at the
+ /// end and the file is shrunk using this operation, the cursor will now be
+ /// past the end.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the file is not opened for writing.
+    /// Also, `std::io::ErrorKind::InvalidInput` will be returned if the desired
+ /// length would cause an overflow due to the implementation specifics.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::create("foo.txt")?;
+ /// f.set_len(10)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// Note that this method alters the content of the underlying file, even
+ /// though it takes `&self` rather than `&mut self`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn set_len(&self, size: u64) -> io::Result<()> {
+ self.inner.truncate(size)
+ }
+
+ /// Queries metadata about the underlying file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// let metadata = f.metadata()?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn metadata(&self) -> io::Result<Metadata> {
+ self.inner.file_attr().map(Metadata)
+ }
+
+ /// Creates a new `File` instance that shares the same underlying file handle
+ /// as the existing `File` instance. Reads, writes, and seeks will affect
+ /// both `File` instances simultaneously.
+ ///
+ /// # Examples
+ ///
+ /// Creates two handles for a file named `foo.txt`:
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ /// let file_copy = file.try_clone()?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// Assuming there’s a file named `foo.txt` with contents `abcdef\n`, create
+ /// two handles, seek one of them, and read the remaining bytes from the
+ /// other handle:
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::SeekFrom;
+ /// use std::io::prelude::*;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ /// let mut file_copy = file.try_clone()?;
+ ///
+ /// file.seek(SeekFrom::Start(3))?;
+ ///
+ /// let mut contents = vec![];
+ /// file_copy.read_to_end(&mut contents)?;
+ /// assert_eq!(contents, b"def\n");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_try_clone", since = "1.9.0")]
+ pub fn try_clone(&self) -> io::Result<File> {
+ Ok(File { inner: self.inner.duplicate()? })
+ }
+
+ /// Changes the permissions on the underlying file.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// This function currently corresponds to the `fchmod` function on Unix and
+    /// the `SetFileInformationByHandle` function on Windows. Note that this
+ /// [may change in the future][changes].
+ ///
+ /// [changes]: io#platform-specific-behavior
+ ///
+ /// # Errors
+ ///
+    /// This function will return an error if the user lacks permission to change
+    /// attributes on the underlying file. It may also return an error in other
+    /// OS-specific unspecified cases.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// fn main() -> std::io::Result<()> {
+ /// use std::fs::File;
+ ///
+ /// let file = File::open("foo.txt")?;
+ /// let mut perms = file.metadata()?.permissions();
+ /// perms.set_readonly(true);
+ /// file.set_permissions(perms)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// Note that this method alters the permissions of the underlying file,
+ /// even though it takes `&self` rather than `&mut self`.
+ #[stable(feature = "set_permissions_atomic", since = "1.16.0")]
+ pub fn set_permissions(&self, perm: Permissions) -> io::Result<()> {
+ self.inner.set_permissions(perm.0)
+ }
+
+ /// Changes the timestamps of the underlying file.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// This function currently corresponds to the `futimens` function on Unix (falling back to
+ /// `futimes` on macOS before 10.13) and the `SetFileTime` function on Windows. Note that this
+ /// [may change in the future][changes].
+ ///
+ /// [changes]: io#platform-specific-behavior
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the user lacks permission to change timestamps on the
+    /// underlying file. It may also return an error in other OS-specific unspecified cases.
+ ///
+ /// This function may return an error if the operating system lacks support to change one or
+ /// more of the timestamps set in the `FileTimes` structure.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(file_set_times)]
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// use std::fs::{self, File, FileTimes};
+ ///
+ /// let src = fs::metadata("src")?;
+ /// let dest = File::options().write(true).open("dest")?;
+ /// let times = FileTimes::new()
+ /// .set_accessed(src.accessed()?)
+ /// .set_modified(src.modified()?);
+ /// dest.set_times(times)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "file_set_times", issue = "98245")]
+ #[doc(alias = "futimens")]
+ #[doc(alias = "futimes")]
+ #[doc(alias = "SetFileTime")]
+ pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ self.inner.set_times(times.0)
+ }
+
+ /// Changes the modification time of the underlying file.
+ ///
+ /// This is an alias for `set_times(FileTimes::new().set_modified(time))`.
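+    ///
+    /// # Examples
+    ///
+    /// A brief sketch (the `"dest"` path is just an example):
+    ///
+    /// ```no_run
+    /// #![feature(file_set_times)]
+    ///
+    /// fn main() -> std::io::Result<()> {
+    ///     use std::fs::File;
+    ///     use std::time::SystemTime;
+    ///
+    ///     let dest = File::options().write(true).open("dest")?;
+    ///     dest.set_modified(SystemTime::now())?;
+    ///     Ok(())
+    /// }
+    /// ```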
+ #[unstable(feature = "file_set_times", issue = "98245")]
+ #[inline]
+ pub fn set_modified(&self, time: SystemTime) -> io::Result<()> {
+ self.set_times(FileTimes::new().set_modified(time))
+ }
+}
+
+// In addition to the `impl`s here, `File` also has `impl`s for
+// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
+// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
+// `AsHandle`/`From<OwnedHandle>`/`Into<OwnedHandle>` and
+// `AsRawHandle`/`IntoRawHandle`/`FromRawHandle` on Windows.
+
+impl AsInner<fs_imp::File> for File {
+ fn as_inner(&self) -> &fs_imp::File {
+ &self.inner
+ }
+}
+impl FromInner<fs_imp::File> for File {
+ fn from_inner(f: fs_imp::File) -> File {
+ File { inner: f }
+ }
+}
+impl IntoInner<fs_imp::File> for File {
+ fn into_inner(self) -> fs_imp::File {
+ self.inner
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for File {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+/// Indicates how much extra capacity is needed to read the rest of the file.
+fn buffer_capacity_required(mut file: &File) -> usize {
+ let size = file.metadata().map(|m| m.len()).unwrap_or(0);
+ let pos = file.stream_position().unwrap_or(0);
+ // Don't worry about `usize` overflow because reading will fail regardless
+ // in that case.
+ size.saturating_sub(pos) as usize
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for File {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.read_vectored(bufs)
+ }
+
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ self.inner.read_buf(buf)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+
+ // Reserves space in the buffer based on the file size when available.
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ buf.reserve(buffer_capacity_required(self));
+ io::default_read_to_end(self, buf)
+ }
+
+ // Reserves space in the buffer based on the file size when available.
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ buf.reserve(buffer_capacity_required(self));
+ io::default_read_to_string(self, buf)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for File {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.write(buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.write_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.flush()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Seek for File {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ self.inner.seek(pos)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for &File {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(buf)
+ }
+
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ self.inner.read_buf(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+
+ // Reserves space in the buffer based on the file size when available.
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ buf.reserve(buffer_capacity_required(self));
+ io::default_read_to_end(self, buf)
+ }
+
+ // Reserves space in the buffer based on the file size when available.
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ buf.reserve(buffer_capacity_required(self));
+ io::default_read_to_string(self, buf)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for &File {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.write(buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.write_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.flush()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Seek for &File {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ self.inner.seek(pos)
+ }
+}
+
+impl OpenOptions {
+ /// Creates a blank new set of options ready for configuration.
+ ///
+ /// All options are initially set to `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let mut options = OpenOptions::new();
+ /// let file = options.read(true).open("foo.txt");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new() -> Self {
+ OpenOptions(fs_imp::OpenOptions::new())
+ }
+
+ /// Sets the option for read access.
+ ///
+ /// This option, when true, will indicate that the file should be
+ /// `read`-able if opened.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let file = OpenOptions::new().read(true).open("foo.txt");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn read(&mut self, read: bool) -> &mut Self {
+ self.0.read(read);
+ self
+ }
+
+ /// Sets the option for write access.
+ ///
+ /// This option, when true, will indicate that the file should be
+ /// `write`-able if opened.
+ ///
+ /// If the file already exists, any write calls on it will overwrite its
+ /// contents, without truncating it.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let file = OpenOptions::new().write(true).open("foo.txt");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn write(&mut self, write: bool) -> &mut Self {
+ self.0.write(write);
+ self
+ }
+
+ /// Sets the option for the append mode.
+ ///
+ /// This option, when true, means that writes will append to a file instead
+ /// of overwriting previous contents.
+ /// Note that setting `.write(true).append(true)` has the same effect as
+ /// setting only `.append(true)`.
+ ///
+ /// For most filesystems, the operating system guarantees that all writes are
+ /// atomic: no writes get mangled because another process writes at the same
+ /// time.
+ ///
+    /// One perhaps obvious note when using append mode: make sure that all data
+ /// that belongs together is written to the file in one operation. This
+ /// can be done by concatenating strings before passing them to [`write()`],
+ /// or using a buffered writer (with a buffer of adequate size),
+ /// and calling [`flush()`] when the message is complete.
+ ///
+ /// If a file is opened with both read and append access, beware that after
+ /// opening, and after every write, the position for reading may be set at the
+ /// end of the file. So, before writing, save the current position (using
+ /// <code>[seek]\([SeekFrom]::[Current]\(0))</code>), and restore it before the next read.
+ ///
+ /// ## Note
+ ///
+ /// This function doesn't create the file if it doesn't exist. Use the
+ /// [`OpenOptions::create`] method to do so.
+ ///
+ /// [`write()`]: Write::write "io::Write::write"
+ /// [`flush()`]: Write::flush "io::Write::flush"
+ /// [seek]: Seek::seek "io::Seek::seek"
+ /// [Current]: SeekFrom::Current "io::SeekFrom::Current"
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let file = OpenOptions::new().append(true).open("foo.txt");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn append(&mut self, append: bool) -> &mut Self {
+ self.0.append(append);
+ self
+ }
+
+ /// Sets the option for truncating a previous file.
+ ///
+ /// If a file is successfully opened with this option set it will truncate
+ /// the file to 0 length if it already exists.
+ ///
+ /// The file must be opened with write access for truncate to work.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let file = OpenOptions::new().write(true).truncate(true).open("foo.txt");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn truncate(&mut self, truncate: bool) -> &mut Self {
+ self.0.truncate(truncate);
+ self
+ }
+
+ /// Sets the option to create a new file, or open it if it already exists.
+ ///
+ /// In order for the file to be created, [`OpenOptions::write`] or
+ /// [`OpenOptions::append`] access must be used.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let file = OpenOptions::new().write(true).create(true).open("foo.txt");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn create(&mut self, create: bool) -> &mut Self {
+ self.0.create(create);
+ self
+ }
+
+ /// Sets the option to create a new file, failing if it already exists.
+ ///
+    /// No file is allowed to exist at the target location, not even a (dangling) symlink. In this
+ /// way, if the call succeeds, the file returned is guaranteed to be new.
+ ///
+ /// This option is useful because it is atomic. Otherwise between checking
+ /// whether a file exists and creating a new one, the file may have been
+ /// created by another process (a TOCTOU race condition / attack).
+ ///
+ /// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are
+ /// ignored.
+ ///
+ /// The file must be opened with write or append access in order to create
+ /// a new file.
+ ///
+ /// [`.create()`]: OpenOptions::create
+ /// [`.truncate()`]: OpenOptions::truncate
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let file = OpenOptions::new().write(true)
+ /// .create_new(true)
+ /// .open("foo.txt");
+ /// ```
+ #[stable(feature = "expand_open_options2", since = "1.9.0")]
+ pub fn create_new(&mut self, create_new: bool) -> &mut Self {
+ self.0.create_new(create_new);
+ self
+ }
+
+ /// Opens a file at `path` with the options specified by `self`.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error under a number of different
+ /// circumstances. Some of these error conditions are listed here, together
+ /// with their [`io::ErrorKind`]. The mapping to [`io::ErrorKind`]s is not
+ /// part of the compatibility contract of the function.
+ ///
+ /// * [`NotFound`]: The specified file does not exist and neither `create`
+    ///   nor `create_new` is set.
+ /// * [`NotFound`]: One of the directory components of the file path does
+ /// not exist.
+ /// * [`PermissionDenied`]: The user lacks permission to get the specified
+ /// access rights for the file.
+ /// * [`PermissionDenied`]: The user lacks permission to open one of the
+ /// directory components of the specified path.
+ /// * [`AlreadyExists`]: `create_new` was specified and the file already
+ /// exists.
+ /// * [`InvalidInput`]: Invalid combinations of open options (truncate
+ /// without write access, no access mode set, etc.).
+ ///
+ /// The following errors don't match any existing [`io::ErrorKind`] at the moment:
+ /// * One of the directory components of the specified file path
+ /// was not, in fact, a directory.
+ /// * Filesystem-level errors: full disk, write permission
+ /// requested on a read-only file system, exceeded disk quota, too many
+ /// open files, too long filename, too many symbolic links in the
+ /// specified path (Unix-like systems only), etc.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ ///
+ /// let file = OpenOptions::new().read(true).open("foo.txt");
+ /// ```
+ ///
+ /// [`AlreadyExists`]: io::ErrorKind::AlreadyExists
+ /// [`InvalidInput`]: io::ErrorKind::InvalidInput
+ /// [`NotFound`]: io::ErrorKind::NotFound
+ /// [`PermissionDenied`]: io::ErrorKind::PermissionDenied
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn open<P: AsRef<Path>>(&self, path: P) -> io::Result<File> {
+ self._open(path.as_ref())
+ }
+
+ fn _open(&self, path: &Path) -> io::Result<File> {
+ fs_imp::File::open(path, &self.0).map(|inner| File { inner })
+ }
+}
+
+impl AsInner<fs_imp::OpenOptions> for OpenOptions {
+ fn as_inner(&self) -> &fs_imp::OpenOptions {
+ &self.0
+ }
+}
+
+impl AsInnerMut<fs_imp::OpenOptions> for OpenOptions {
+ fn as_inner_mut(&mut self) -> &mut fs_imp::OpenOptions {
+ &mut self.0
+ }
+}
+
+impl Metadata {
+ /// Returns the file type for this metadata.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// fn main() -> std::io::Result<()> {
+ /// use std::fs;
+ ///
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// println!("{:?}", metadata.file_type());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "file_type", since = "1.1.0")]
+ pub fn file_type(&self) -> FileType {
+ FileType(self.0.file_type())
+ }
+
+ /// Returns `true` if this metadata is for a directory. The
+ /// result is mutually exclusive to the result of
+ /// [`Metadata::is_file`], and will be false for symlink metadata
+ /// obtained from [`symlink_metadata`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// fn main() -> std::io::Result<()> {
+ /// use std::fs;
+ ///
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// assert!(!metadata.is_dir());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_dir(&self) -> bool {
+ self.file_type().is_dir()
+ }
+
+ /// Returns `true` if this metadata is for a regular file. The
+ /// result is mutually exclusive to the result of
+ /// [`Metadata::is_dir`], and will be false for symlink metadata
+ /// obtained from [`symlink_metadata`].
+ ///
+ /// When the goal is simply to read from (or write to) the source, the most
+ /// reliable way to test whether the source can be read (or written to) is to open
+ /// it. Only using `is_file` can break workflows like `diff <( prog_a )` on
+ /// a Unix-like system for example. See [`File::open`] or
+ /// [`OpenOptions::open`] for more information.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// assert!(metadata.is_file());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_file(&self) -> bool {
+ self.file_type().is_file()
+ }
+
+ /// Returns `true` if this metadata is for a symbolic link.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(unix, doc = "```no_run")]
+ #[cfg_attr(not(unix), doc = "```ignore")]
+ /// use std::fs;
+ /// use std::path::Path;
+ /// use std::os::unix::fs::symlink;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let link_path = Path::new("link");
+ /// symlink("/origin_does_not_exist/", link_path)?;
+ ///
+ /// let metadata = fs::symlink_metadata(link_path)?;
+ ///
+ /// assert!(metadata.is_symlink());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "is_symlink", since = "1.58.0")]
+ pub fn is_symlink(&self) -> bool {
+ self.file_type().is_symlink()
+ }
+
+ /// Returns the size of the file, in bytes, this metadata is for.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// assert_eq!(0, metadata.len());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> u64 {
+ self.0.size()
+ }
+
+ /// Returns the permissions of the file this metadata is for.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// assert!(!metadata.permissions().readonly());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn permissions(&self) -> Permissions {
+ Permissions(self.0.perm())
+ }
+
+ /// Returns the last modification time listed in this metadata.
+ ///
+ /// The returned value corresponds to the `mtime` field of `stat` on Unix
+ /// platforms and the `ftLastWriteTime` field on Windows platforms.
+ ///
+ /// # Errors
+ ///
+ /// This field might not be available on all platforms, and will return an
+ /// `Err` on platforms where it is not available.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// if let Ok(time) = metadata.modified() {
+ /// println!("{time:?}");
+ /// } else {
+ /// println!("Not supported on this platform");
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "fs_time", since = "1.10.0")]
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ self.0.modified().map(FromInner::from_inner)
+ }
+
+ /// Returns the last access time of this metadata.
+ ///
+ /// The returned value corresponds to the `atime` field of `stat` on Unix
+ /// platforms and the `ftLastAccessTime` field on Windows platforms.
+ ///
+ /// Note that not all platforms will keep this field updated in a file's
+ /// metadata. For example, Windows has an option to disable updating this
+ /// time when files are accessed, and Linux similarly has the `noatime` mount option.
+ ///
+ /// # Errors
+ ///
+ /// This field might not be available on all platforms, and will return an
+ /// `Err` on platforms where it is not available.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// if let Ok(time) = metadata.accessed() {
+ /// println!("{time:?}");
+ /// } else {
+ /// println!("Not supported on this platform");
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "fs_time", since = "1.10.0")]
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ self.0.accessed().map(FromInner::from_inner)
+ }
+
+ /// Returns the creation time listed in this metadata.
+ ///
+ /// The returned value corresponds to the `btime` field of `statx` on
+ /// Linux kernels starting from 4.11, the `birthtime` field of `stat` on other
+ /// Unix platforms, and the `ftCreationTime` field on Windows platforms.
+ ///
+ /// # Errors
+ ///
+ /// This field might not be available on all platforms, and will return an
+ /// `Err` on platforms or filesystems where it is not available.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ ///
+ /// if let Ok(time) = metadata.created() {
+ /// println!("{time:?}");
+ /// } else {
+ /// println!("Not supported on this platform or filesystem");
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "fs_time", since = "1.10.0")]
+ pub fn created(&self) -> io::Result<SystemTime> {
+ self.0.created().map(FromInner::from_inner)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Metadata {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Metadata")
+ .field("file_type", &self.file_type())
+ .field("is_dir", &self.is_dir())
+ .field("is_file", &self.is_file())
+ .field("permissions", &self.permissions())
+ .field("modified", &self.modified())
+ .field("accessed", &self.accessed())
+ .field("created", &self.created())
+ .finish_non_exhaustive()
+ }
+}
+
+impl AsInner<fs_imp::FileAttr> for Metadata {
+ fn as_inner(&self) -> &fs_imp::FileAttr {
+ &self.0
+ }
+}
+
+impl FromInner<fs_imp::FileAttr> for Metadata {
+ fn from_inner(attr: fs_imp::FileAttr) -> Metadata {
+ Metadata(attr)
+ }
+}
+
+impl FileTimes {
+ /// Create a new `FileTimes` with no times set.
+ ///
+ /// Using the resulting `FileTimes` in [`File::set_times`] will not modify any timestamps.
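+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the intended builder-style usage (illustrative only;
+ /// assumes a writable `foo.txt` and requires the unstable `file_set_times` feature):
+ ///
+ /// ```no_run
+ /// #![feature(file_set_times)]
+ ///
+ /// use std::fs::{File, FileTimes};
+ /// use std::time::SystemTime;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ ///     let file = File::options().write(true).open("foo.txt")?;
+ ///     let times = FileTimes::new()
+ ///         .set_accessed(SystemTime::now())
+ ///         .set_modified(SystemTime::now());
+ ///     file.set_times(times)?;
+ ///     Ok(())
+ /// }
+ /// ```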
+ #[unstable(feature = "file_set_times", issue = "98245")]
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Set the last access time of a file.
+ #[unstable(feature = "file_set_times", issue = "98245")]
+ pub fn set_accessed(mut self, t: SystemTime) -> Self {
+ self.0.set_accessed(t.into_inner());
+ self
+ }
+
+ /// Set the last modified time of a file.
+ #[unstable(feature = "file_set_times", issue = "98245")]
+ pub fn set_modified(mut self, t: SystemTime) -> Self {
+ self.0.set_modified(t.into_inner());
+ self
+ }
+}
+
+impl Permissions {
+ /// Returns `true` if these permissions describe a readonly (unwritable) file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::create("foo.txt")?;
+ /// let metadata = f.metadata()?;
+ ///
+ /// assert_eq!(false, metadata.permissions().readonly());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use = "call `set_readonly` to modify the readonly flag"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn readonly(&self) -> bool {
+ self.0.readonly()
+ }
+
+ /// Modifies the readonly flag for this set of permissions. If the
+ /// `readonly` argument is `true`, using the resulting `Permissions` will
+ /// update file permissions to forbid writing. Conversely, if it's `false`,
+ /// using the resulting `Permissions` will update file permissions to allow
+ /// writing.
+ ///
+ /// This operation does **not** modify the filesystem. To modify the
+ /// filesystem use the [`set_permissions`] function.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f = File::create("foo.txt")?;
+ /// let metadata = f.metadata()?;
+ /// let mut permissions = metadata.permissions();
+ ///
+ /// permissions.set_readonly(true);
+ ///
+ /// // filesystem doesn't change
+ /// assert_eq!(false, metadata.permissions().readonly());
+ ///
+ /// // just this particular `permissions`.
+ /// assert_eq!(true, permissions.readonly());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn set_readonly(&mut self, readonly: bool) {
+ self.0.set_readonly(readonly)
+ }
+}
+
+impl FileType {
+ /// Tests whether this file type represents a directory. The
+ /// result is mutually exclusive to the results of
+ /// [`is_file`] and [`is_symlink`]; only zero or one of these
+ /// tests may pass.
+ ///
+ /// [`is_file`]: FileType::is_file
+ /// [`is_symlink`]: FileType::is_symlink
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// fn main() -> std::io::Result<()> {
+ /// use std::fs;
+ ///
+ /// let metadata = fs::metadata("foo.txt")?;
+ /// let file_type = metadata.file_type();
+ ///
+ /// assert_eq!(file_type.is_dir(), false);
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "file_type", since = "1.1.0")]
+ pub fn is_dir(&self) -> bool {
+ self.0.is_dir()
+ }
+
+ /// Tests whether this file type represents a regular file.
+ /// The result is mutually exclusive to the results of
+ /// [`is_dir`] and [`is_symlink`]; only zero or one of these
+ /// tests may pass.
+ ///
+ /// When the goal is simply to read from (or write to) the source, the most
+ /// reliable way to test whether the source can be read (or written to) is to open
+ /// it. Only using `is_file` can break workflows like `diff <( prog_a )` on
+ /// a Unix-like system for example. See [`File::open`] or
+ /// [`OpenOptions::open`] for more information.
+ ///
+ /// [`is_dir`]: FileType::is_dir
+ /// [`is_symlink`]: FileType::is_symlink
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// fn main() -> std::io::Result<()> {
+ /// use std::fs;
+ ///
+ /// let metadata = fs::metadata("foo.txt")?;
+ /// let file_type = metadata.file_type();
+ ///
+ /// assert_eq!(file_type.is_file(), true);
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "file_type", since = "1.1.0")]
+ pub fn is_file(&self) -> bool {
+ self.0.is_file()
+ }
+
+ /// Tests whether this file type represents a symbolic link.
+ /// The result is mutually exclusive to the results of
+ /// [`is_dir`] and [`is_file`]; only zero or one of these
+ /// tests may pass.
+ ///
+ /// The underlying [`Metadata`] struct needs to be retrieved
+ /// with the [`fs::symlink_metadata`] function and not the
+ /// [`fs::metadata`] function. The [`fs::metadata`] function
+ /// follows symbolic links, so [`is_symlink`] would always
+ /// return `false` for the target file.
+ ///
+ /// [`fs::metadata`]: metadata
+ /// [`fs::symlink_metadata`]: symlink_metadata
+ /// [`is_dir`]: FileType::is_dir
+ /// [`is_file`]: FileType::is_file
+ /// [`is_symlink`]: FileType::is_symlink
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let metadata = fs::symlink_metadata("foo.txt")?;
+ /// let file_type = metadata.file_type();
+ ///
+ /// assert_eq!(file_type.is_symlink(), false);
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "file_type", since = "1.1.0")]
+ pub fn is_symlink(&self) -> bool {
+ self.0.is_symlink()
+ }
+}
+
+impl AsInner<fs_imp::FileType> for FileType {
+ fn as_inner(&self) -> &fs_imp::FileType {
+ &self.0
+ }
+}
+
+impl FromInner<fs_imp::FilePermissions> for Permissions {
+ fn from_inner(f: fs_imp::FilePermissions) -> Permissions {
+ Permissions(f)
+ }
+}
+
+impl AsInner<fs_imp::FilePermissions> for Permissions {
+ fn as_inner(&self) -> &fs_imp::FilePermissions {
+ &self.0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for ReadDir {
+ type Item = io::Result<DirEntry>;
+
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ self.0.next().map(|entry| entry.map(DirEntry))
+ }
+}
+
+impl DirEntry {
+ /// Returns the full path to the file that this entry represents.
+ ///
+ /// The full path is created by joining the original path to `read_dir`
+ /// with the filename of this entry.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// for entry in fs::read_dir(".")? {
+ /// let dir = entry?;
+ /// println!("{:?}", dir.path());
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// This prints output like:
+ ///
+ /// ```text
+ /// "./whatever.txt"
+ /// "./foo.html"
+ /// "./hello_world.rs"
+ /// ```
+ ///
+ /// The exact text, of course, depends on what files you have in `.`.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn path(&self) -> PathBuf {
+ self.0.path()
+ }
+
+ /// Returns the metadata for the file that this entry points at.
+ ///
+ /// This function will not traverse symlinks if this entry points at a
+ /// symlink. To traverse symlinks use [`fs::metadata`] or [`fs::File::metadata`].
+ ///
+ /// [`fs::metadata`]: metadata
+ /// [`fs::File::metadata`]: File::metadata
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// On Windows this function is cheap to call (no extra system calls
+ /// needed), but on Unix platforms this function is the equivalent of
+ /// calling `symlink_metadata` on the path.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs;
+ ///
+ /// if let Ok(entries) = fs::read_dir(".") {
+ /// for entry in entries {
+ /// if let Ok(entry) = entry {
+ /// // Here, `entry` is a `DirEntry`.
+ /// if let Ok(metadata) = entry.metadata() {
+ /// // Now let's show our entry's permissions!
+ /// println!("{:?}: {:?}", entry.path(), metadata.permissions());
+ /// } else {
+ /// println!("Couldn't get metadata for {:?}", entry.path());
+ /// }
+ /// }
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "dir_entry_ext", since = "1.1.0")]
+ pub fn metadata(&self) -> io::Result<Metadata> {
+ self.0.metadata().map(Metadata)
+ }
+
+ /// Returns the file type for the file that this entry points at.
+ ///
+ /// This function will not traverse symlinks if this entry points at a
+ /// symlink.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// On Windows and most Unix platforms this function is free (no extra
+ /// system calls needed), but some Unix platforms may require the equivalent
+ /// call to `symlink_metadata` to learn about the target file type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs;
+ ///
+ /// if let Ok(entries) = fs::read_dir(".") {
+ /// for entry in entries {
+ /// if let Ok(entry) = entry {
+ /// // Here, `entry` is a `DirEntry`.
+ /// if let Ok(file_type) = entry.file_type() {
+ /// // Now let's show our entry's file type!
+ /// println!("{:?}: {:?}", entry.path(), file_type);
+ /// } else {
+ /// println!("Couldn't get file type for {:?}", entry.path());
+ /// }
+ /// }
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "dir_entry_ext", since = "1.1.0")]
+ pub fn file_type(&self) -> io::Result<FileType> {
+ self.0.file_type().map(FileType)
+ }
+
+ /// Returns the bare file name of this directory entry without any other
+ /// leading path component.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs;
+ ///
+ /// if let Ok(entries) = fs::read_dir(".") {
+ /// for entry in entries {
+ /// if let Ok(entry) = entry {
+ /// // Here, `entry` is a `DirEntry`.
+ /// println!("{:?}", entry.file_name());
+ /// }
+ /// }
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "dir_entry_ext", since = "1.1.0")]
+ pub fn file_name(&self) -> OsString {
+ self.0.file_name()
+ }
+}
+
+#[stable(feature = "dir_entry_debug", since = "1.13.0")]
+impl fmt::Debug for DirEntry {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DirEntry").field(&self.path()).finish()
+ }
+}
+
+impl AsInner<fs_imp::DirEntry> for DirEntry {
+ fn as_inner(&self) -> &fs_imp::DirEntry {
+ &self.0
+ }
+}
+
+/// Removes a file from the filesystem.
+///
+/// Note that there is no
+/// guarantee that the file is immediately deleted (e.g., depending on
+/// platform, other open file descriptors may prevent immediate removal).
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `unlink` function on Unix
+/// and the `DeleteFile` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `path` points to a directory.
+/// * The file doesn't exist.
+/// * The user lacks permissions to remove the file.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::remove_file("a.txt")?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn remove_file<P: AsRef<Path>>(path: P) -> io::Result<()> {
+ fs_imp::unlink(path.as_ref())
+}
+
+/// Given a path, query the file system to get information about a file,
+/// directory, etc.
+///
+/// This function will traverse symbolic links to query information about the
+/// destination file.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `stat` function on Unix
+/// and the `GetFileInformationByHandle` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * The user lacks permissions to perform the `metadata` call on `path`.
+/// * `path` does not exist.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// let attr = fs::metadata("/some/file/path.txt")?;
+/// // inspect attr ...
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> {
+ fs_imp::stat(path.as_ref()).map(Metadata)
+}
+
+/// Query the metadata about a file without following symlinks.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `lstat` function on Unix
+/// and the `GetFileInformationByHandle` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * The user lacks permissions to perform the `metadata` call on `path`.
+/// * `path` does not exist.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// let attr = fs::symlink_metadata("/some/file/path.txt")?;
+/// // inspect attr ...
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "symlink_metadata", since = "1.1.0")]
+pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> {
+ fs_imp::lstat(path.as_ref()).map(Metadata)
+}
+
+/// Rename a file or directory to a new name, replacing the original file if
+/// `to` already exists.
+///
+/// This will not work if the new name is on a different mount point.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `rename` function on Unix
+/// and the `MoveFileEx` function with the `MOVEFILE_REPLACE_EXISTING` flag on Windows.
+///
+/// Because of this, the behavior when both `from` and `to` exist differs. On
+/// Unix, if `from` is a directory, `to` must also be an (empty) directory. If
+/// `from` is not a directory, `to` must also not be a directory. In contrast,
+/// on Windows, `from` can be anything, but `to` must *not* be a directory.
+///
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `from` does not exist.
+/// * The user lacks permissions to view contents.
+/// * `from` and `to` are on separate filesystems.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::rename("a.txt", "b.txt")?; // Rename a.txt to b.txt
+/// Ok(())
+/// }
+/// ```
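+///
+/// A sketch (illustrative only) of falling back to copy-and-remove when the
+/// rename fails, for example because `from` and `to` are on different filesystems:
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+///     if fs::rename("a.txt", "b.txt").is_err() {
+///         fs::copy("a.txt", "b.txt")?;
+///         fs::remove_file("a.txt")?;
+///     }
+///     Ok(())
+/// }
+/// ```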
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
+ fs_imp::rename(from.as_ref(), to.as_ref())
+}
+
+/// Copies the contents of one file to another. This function will also
+/// copy the permission bits of the original file to the destination file.
+///
+/// This function will **overwrite** the contents of `to`.
+///
+/// Note that if `from` and `to` both point to the same file, then the file
+/// will likely get truncated by this operation.
+///
+/// On success, the total number of bytes copied is returned and it is equal to
+/// the length of the `to` file as reported by `metadata`.
+///
+/// If you want to copy the contents of one file to another and you’re already
+/// working with [`File`]s, see the [`io::copy()`] function (a short sketch
+/// follows the example below).
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `open` function in Unix
+/// with `O_RDONLY` for `from` and `O_WRONLY`, `O_CREAT`, and `O_TRUNC` for `to`.
+/// `O_CLOEXEC` is set for returned file descriptors.
+///
+/// On Linux (including Android), this function attempts to use `copy_file_range(2)`,
+/// and falls back to reading and writing if that is not possible.
+///
+/// On Windows, this function currently corresponds to `CopyFileEx`. Alternate
+/// NTFS streams are copied but only the size of the main stream is returned by
+/// this function.
+///
+/// On macOS, this function corresponds to `fclonefileat` and `fcopyfile`.
+///
+/// Note that platform-specific behavior [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `from` is neither a regular file nor a symlink to a regular file.
+/// * `from` does not exist.
+/// * The current process does not have the permission rights to read
+/// `from` or write `to`.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::copy("foo.txt", "bar.txt")?; // Copy foo.txt to bar.txt
+/// Ok(())
+/// }
+/// ```
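+///
+/// A sketch (illustrative only) of the [`io::copy()`] alternative mentioned above,
+/// for when both files are already open; note that, unlike `fs::copy`, it does not
+/// copy the permission bits:
+///
+/// ```no_run
+/// use std::fs::File;
+/// use std::io;
+///
+/// fn main() -> std::io::Result<()> {
+///     let mut from = File::open("foo.txt")?;
+///     let mut to = File::create("bar.txt")?;
+///     io::copy(&mut from, &mut to)?;
+///     Ok(())
+/// }
+/// ```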
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<u64> {
+ fs_imp::copy(from.as_ref(), to.as_ref())
+}
+
+/// Creates a new hard link on the filesystem.
+///
+/// The `link` path will be a link pointing to the `original` path. Note that
+/// systems often require these two paths to both be located on the same
+/// filesystem.
+///
+/// If `original` names a symbolic link, it is platform-specific whether the
+/// symbolic link is followed. On platforms where it's possible to not follow
+/// it, it is not followed, and the created hard link points to the symbolic
+/// link itself.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `CreateHardLink` function on Windows.
+/// On most Unix systems, it corresponds to the `linkat` function with no flags.
+/// On Android, VxWorks, and Redox, it instead corresponds to the `link` function.
+/// On macOS, it uses the `linkat` function if it is available, but on very old
+/// systems where `linkat` is not available, `link` is selected at runtime instead.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * The `original` path is not a file or doesn't exist.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::hard_link("a.txt", "b.txt")?; // Hard link a.txt to b.txt
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn hard_link<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> {
+ fs_imp::link(original.as_ref(), link.as_ref())
+}
+
+/// Creates a new symbolic link on the filesystem.
+///
+/// The `link` path will be a symbolic link pointing to the `original` path.
+/// On Windows, this will be a file symlink, not a directory symlink;
+/// for this reason, the platform-specific [`std::os::unix::fs::symlink`]
+/// and [`std::os::windows::fs::symlink_file`] or [`symlink_dir`] should be
+/// used instead to make the intent explicit.
+///
+/// [`std::os::unix::fs::symlink`]: crate::os::unix::fs::symlink
+/// [`std::os::windows::fs::symlink_file`]: crate::os::windows::fs::symlink_file
+/// [`symlink_dir`]: crate::os::windows::fs::symlink_dir
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::soft_link("a.txt", "b.txt")?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(
+ since = "1.1.0",
+ note = "replaced with std::os::unix::fs::symlink and \
+ std::os::windows::fs::{symlink_file, symlink_dir}"
+)]
+pub fn soft_link<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> {
+ fs_imp::symlink(original.as_ref(), link.as_ref())
+}
+
+/// Reads a symbolic link, returning the file that the link points to.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `readlink` function on Unix
+/// and the `CreateFile` function with `FILE_FLAG_OPEN_REPARSE_POINT` and
+/// `FILE_FLAG_BACKUP_SEMANTICS` flags on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `path` is not a symbolic link.
+/// * `path` does not exist.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// let path = fs::read_link("a.txt")?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn read_link<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
+ fs_imp::readlink(path.as_ref())
+}
+
+/// Returns the canonical, absolute form of a path with all intermediate
+/// components normalized and symbolic links resolved.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `realpath` function on Unix
+/// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// On Windows, this converts the path to use [extended length path][path]
+/// syntax, which allows your program to use longer path names, but means you
+/// can only join backslash-delimited paths to it, and it may be incompatible
+/// with other applications (if passed to the application on the command-line,
+/// or written to a file another application may read).
+///
+/// [changes]: io#platform-specific-behavior
+/// [path]: https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `path` does not exist.
+/// * A non-final component in `path` is not a directory.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// let path = fs::canonicalize("../a/../foo.txt")?;
+/// Ok(())
+/// }
+/// ```
+#[doc(alias = "realpath")]
+#[doc(alias = "GetFinalPathNameByHandle")]
+#[stable(feature = "fs_canonicalize", since = "1.5.0")]
+pub fn canonicalize<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
+ fs_imp::canonicalize(path.as_ref())
+}
+
+/// Creates a new, empty directory at the provided path
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `mkdir` function on Unix
+/// and the `CreateDirectory` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// **NOTE**: If a parent of the given path doesn't exist, this function will
+/// return an error. To create a directory and all its missing parents at the
+/// same time, use the [`create_dir_all`] function.
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * User lacks permissions to create directory at `path`.
+/// * A parent of the given path doesn't exist. (To create a directory and all
+/// its missing parents at the same time, use the [`create_dir_all`]
+/// function.)
+/// * `path` already exists.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::create_dir("/some/dir")?;
+/// Ok(())
+/// }
+/// ```
+#[doc(alias = "mkdir")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn create_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
+ DirBuilder::new().create(path.as_ref())
+}
+
+/// Recursively create a directory and all of its parent components if they
+/// are missing.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `mkdir` function on Unix
+/// and the `CreateDirectory` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * If any directory in the path specified by `path`
+/// does not already exist and could not be created. The specific
+/// error conditions for when a directory is being created (after it is
+/// determined to not exist) are outlined by [`fs::create_dir`].
+///
+/// A notable exception is made for situations where any of the directories
+/// specified in `path` could not be created because it was being created
+/// concurrently. Such cases are considered to be successful. That is, calling
+/// `create_dir_all` concurrently from multiple threads or processes is
+/// guaranteed not to fail due to a race condition with itself (see the sketch
+/// after the example below).
+///
+/// [`fs::create_dir`]: create_dir
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::create_dir_all("/some/dir")?;
+/// Ok(())
+/// }
+/// ```
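+///
+/// A sketch (illustrative only) of the concurrency guarantee described above:
+/// concurrent calls creating the same tree all succeed.
+///
+/// ```no_run
+/// use std::fs;
+/// use std::thread;
+///
+/// fn main() -> std::io::Result<()> {
+///     let handles: Vec<_> = (0..4)
+///         .map(|_| thread::spawn(|| fs::create_dir_all("/some/dir/a/b/c")))
+///         .collect();
+///     for handle in handles {
+///         handle.join().unwrap()?;
+///     }
+///     Ok(())
+/// }
+/// ```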
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn create_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
+ DirBuilder::new().recursive(true).create(path.as_ref())
+}
+
+/// Removes an empty directory.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `rmdir` function on Unix
+/// and the `RemoveDirectory` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `path` doesn't exist.
+/// * `path` isn't a directory.
+/// * The user lacks permissions to remove the directory at the provided `path`.
+/// * The directory isn't empty.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::remove_dir("/some/dir")?;
+/// Ok(())
+/// }
+/// ```
+#[doc(alias = "rmdir")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn remove_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
+ fs_imp::rmdir(path.as_ref())
+}
+
+/// Removes a directory at this path, after removing all its contents. Use
+/// carefully!
+///
+/// This function does **not** follow symbolic links and it will simply remove the
+/// symbolic link itself.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to `openat`, `fdopendir`, `unlinkat` and `lstat` functions
+/// on Unix (except for macOS before version 10.10 and Redox) and the `CreateFileW`,
+/// `GetFileInformationByHandleEx`, `SetFileInformationByHandle`, and `NtCreateFile` functions on
+/// Windows. Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// On macOS before version 10.10 and Redox, as well as when running in Miri for any target, this
+/// function is not protected against time-of-check to time-of-use (TOCTOU) race conditions, and
+/// should not be used in security-sensitive code on those platforms. All other platforms are
+/// protected.
+///
+/// # Errors
+///
+/// See [`fs::remove_file`] and [`fs::remove_dir`].
+///
+/// [`fs::remove_file`]: remove_file
+/// [`fs::remove_dir`]: remove_dir
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::remove_dir_all("/some/dir")?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
+ fs_imp::remove_dir_all(path.as_ref())
+}
+
+/// Returns an iterator over the entries within a directory.
+///
+/// The iterator will yield instances of <code>[io::Result]<[DirEntry]></code>.
+/// New errors may be encountered after an iterator is initially constructed.
+/// Entries for the current and parent directories (typically `.` and `..`) are
+/// skipped.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `opendir` function on Unix
+/// and the `FindFirstFile` function on Windows. Advancing the iterator
+/// currently corresponds to `readdir` on Unix and `FindNextFile` on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// The order in which this iterator returns entries is platform and filesystem
+/// dependent.
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * The provided `path` doesn't exist.
+/// * The process lacks permissions to view the contents.
+/// * The `path` points at a non-directory file.
+///
+/// # Examples
+///
+/// ```
+/// use std::io;
+/// use std::fs::{self, DirEntry};
+/// use std::path::Path;
+///
+/// // one possible implementation of walking a directory only visiting files
+/// fn visit_dirs(dir: &Path, cb: &dyn Fn(&DirEntry)) -> io::Result<()> {
+/// if dir.is_dir() {
+/// for entry in fs::read_dir(dir)? {
+/// let entry = entry?;
+/// let path = entry.path();
+/// if path.is_dir() {
+/// visit_dirs(&path, cb)?;
+/// } else {
+/// cb(&entry);
+/// }
+/// }
+/// }
+/// Ok(())
+/// }
+/// ```
+///
+/// ```rust,no_run
+/// use std::{fs, io};
+///
+/// fn main() -> io::Result<()> {
+/// let mut entries = fs::read_dir(".")?
+/// .map(|res| res.map(|e| e.path()))
+/// .collect::<Result<Vec<_>, io::Error>>()?;
+///
+/// // The order in which `read_dir` returns entries is not guaranteed. If reproducible
+/// // ordering is required, the entries should be explicitly sorted.
+///
+/// entries.sort();
+///
+/// // The entries have now been sorted by their path.
+///
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn read_dir<P: AsRef<Path>>(path: P) -> io::Result<ReadDir> {
+ fs_imp::readdir(path.as_ref()).map(ReadDir)
+}
+
+/// Changes the permissions found on a file or a directory.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `chmod` function on Unix
+/// and the `SetFileAttributes` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: io#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `path` does not exist.
+/// * The user lacks the permission to change attributes of the file.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// let mut perms = fs::metadata("foo.txt")?.permissions();
+/// perms.set_readonly(true);
+/// fs::set_permissions("foo.txt", perms)?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "set_permissions", since = "1.1.0")]
+pub fn set_permissions<P: AsRef<Path>>(path: P, perm: Permissions) -> io::Result<()> {
+ fs_imp::set_perm(path.as_ref(), perm.0)
+}
+
+impl DirBuilder {
+ /// Creates a new set of options with default mode/security settings for all
+ /// platforms and also non-recursive.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::DirBuilder;
+ ///
+ /// let builder = DirBuilder::new();
+ /// ```
+ #[stable(feature = "dir_builder", since = "1.6.0")]
+ #[must_use]
+ pub fn new() -> DirBuilder {
+ DirBuilder { inner: fs_imp::DirBuilder::new(), recursive: false }
+ }
+
+ /// Indicates that directories should be created recursively, creating all
+ /// parent directories. Parents that do not exist are created with the same
+ /// security and permissions settings.
+ ///
+ /// This option defaults to `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::DirBuilder;
+ ///
+ /// let mut builder = DirBuilder::new();
+ /// builder.recursive(true);
+ /// ```
+ #[stable(feature = "dir_builder", since = "1.6.0")]
+ pub fn recursive(&mut self, recursive: bool) -> &mut Self {
+ self.recursive = recursive;
+ self
+ }
+
+ /// Creates the specified directory with the options configured in this
+ /// builder.
+ ///
+ /// It is considered an error if the directory already exists unless
+ /// recursive mode is enabled.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::{self, DirBuilder};
+ ///
+ /// let path = "/tmp/foo/bar/baz";
+ /// DirBuilder::new()
+ /// .recursive(true)
+ /// .create(path).unwrap();
+ ///
+ /// assert!(fs::metadata(path).unwrap().is_dir());
+ /// ```
+ #[stable(feature = "dir_builder", since = "1.6.0")]
+ pub fn create<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+ self._create(path.as_ref())
+ }
+
+ fn _create(&self, path: &Path) -> io::Result<()> {
+ if self.recursive { self.create_dir_all(path) } else { self.inner.mkdir(path) }
+ }
+
+ fn create_dir_all(&self, path: &Path) -> io::Result<()> {
+ if path == Path::new("") {
+ return Ok(());
+ }
+
+ match self.inner.mkdir(path) {
+ Ok(()) => return Ok(()),
+ Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
+ Err(_) if path.is_dir() => return Ok(()),
+ Err(e) => return Err(e),
+ }
+ match path.parent() {
+ Some(p) => self.create_dir_all(p)?,
+ None => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "failed to create whole tree",
+ ));
+ }
+ }
+ match self.inner.mkdir(path) {
+ Ok(()) => Ok(()),
+ Err(_) if path.is_dir() => Ok(()),
+ Err(e) => Err(e),
+ }
+ }
+}
+
+impl AsInnerMut<fs_imp::DirBuilder> for DirBuilder {
+ fn as_inner_mut(&mut self) -> &mut fs_imp::DirBuilder {
+ &mut self.inner
+ }
+}
+
+/// Returns `Ok(true)` if the path points at an existing entity.
+///
+/// This function will traverse symbolic links to query information about the
+/// destination file. In case of broken symbolic links this will return `Ok(false)`.
+///
+/// As opposed to the [`Path::exists`] method, this one doesn't silently ignore errors
+/// unrelated to the path not existing. (E.g. it will return `Err(_)` in case of permission
+/// denied on some of the parent directories.)
+///
+/// Note that while this avoids some pitfalls of the `exists()` method, it still cannot
+/// prevent time-of-check to time-of-use (TOCTOU) bugs. You should only use it in scenarios
+/// where those bugs are not an issue.
+///
+/// # Examples
+///
+/// ```no_run
+/// #![feature(fs_try_exists)]
+/// use std::fs;
+///
+/// assert!(!fs::try_exists("does_not_exist.txt").expect("Can't check existence of file does_not_exist.txt"));
+/// assert!(fs::try_exists("/root/secret_file.txt").is_err());
+/// ```
+///
+/// [`Path::exists`]: crate::path::Path::exists
+// FIXME: stabilization should modify documentation of `exists()` to recommend this method
+// instead.
+#[unstable(feature = "fs_try_exists", issue = "83186")]
+#[inline]
+pub fn try_exists<P: AsRef<Path>>(path: P) -> io::Result<bool> {
+ fs_imp::try_exists(path.as_ref())
+}
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
new file mode 100644
index 000000000..b8959316d
--- /dev/null
+++ b/library/std/src/fs/tests.rs
@@ -0,0 +1,1553 @@
+use crate::io::prelude::*;
+
+use crate::env;
+use crate::fs::{self, File, OpenOptions};
+use crate::io::{ErrorKind, SeekFrom};
+use crate::path::Path;
+use crate::str;
+use crate::sync::Arc;
+use crate::sys_common::io::test::{tmpdir, TempDir};
+use crate::thread;
+use crate::time::{Duration, Instant};
+
+use rand::{rngs::StdRng, RngCore, SeedableRng};
+
+#[cfg(unix)]
+use crate::os::unix::fs::symlink as symlink_dir;
+#[cfg(unix)]
+use crate::os::unix::fs::symlink as symlink_file;
+#[cfg(unix)]
+use crate::os::unix::fs::symlink as symlink_junction;
+#[cfg(windows)]
+use crate::os::windows::fs::{symlink_dir, symlink_file};
+#[cfg(windows)]
+use crate::sys::fs::symlink_junction;
+#[cfg(target_os = "macos")]
+use crate::sys::weak::weak;
+#[cfg(target_os = "macos")]
+use libc::{c_char, c_int};
+
+macro_rules! check {
+ ($e:expr) => {
+ match $e {
+ Ok(t) => t,
+ Err(e) => panic!("{} failed with: {e}", stringify!($e)),
+ }
+ };
+}
+
+#[cfg(windows)]
+macro_rules! error {
+ ($e:expr, $s:expr) => {
+ match $e {
+ Ok(_) => panic!("Unexpected success. Should've been: {:?}", $s),
+ Err(ref err) => {
+ assert!(err.raw_os_error() == Some($s), "`{}` did not have a code of `{}`", err, $s)
+ }
+ }
+ };
+}
+
+#[cfg(unix)]
+macro_rules! error {
+ ($e:expr, $s:expr) => {
+ error_contains!($e, $s)
+ };
+}
+
+macro_rules! error_contains {
+ ($e:expr, $s:expr) => {
+ match $e {
+ Ok(_) => panic!("Unexpected success. Should've been: {:?}", $s),
+ Err(ref err) => {
+ assert!(err.to_string().contains($s), "`{}` did not contain `{}`", err, $s)
+ }
+ }
+ };
+}
+
+// Several tests fail on Windows if the user does not have permission to
+// create symlinks (the `SeCreateSymbolicLinkPrivilege`). Instead of
+// disabling these tests on Windows, use this function to check whether we
+// have permission, and return early from the test otherwise. This way, we
+// still don't run these tests most of the time, but at least we do if the
+// user has the right permissions.
+pub fn got_symlink_permission(tmpdir: &TempDir) -> bool {
+ if cfg!(unix) {
+ return true;
+ }
+ let link = tmpdir.join("some_hopefully_unique_link_name");
+
+ match symlink_file(r"nonexisting_target", link) {
+ // ERROR_PRIVILEGE_NOT_HELD = 1314
+ Err(ref err) if err.raw_os_error() == Some(1314) => false,
+ Ok(_) | Err(_) => true,
+ }
+}
+
+#[cfg(target_os = "macos")]
+fn able_to_not_follow_symlinks_while_hard_linking() -> bool {
+ weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int);
+ linkat.get().is_some()
+}
+
+#[cfg(not(target_os = "macos"))]
+fn able_to_not_follow_symlinks_while_hard_linking() -> bool {
+ return true;
+}
+
+#[test]
+fn file_test_io_smoke_test() {
+ let message = "it's alright. have a good time";
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_rt_io_file_test.txt");
+ {
+ let mut write_stream = check!(File::create(filename));
+ check!(write_stream.write(message.as_bytes()));
+ }
+ {
+ let mut read_stream = check!(File::open(filename));
+ let mut read_buf = [0; 1028];
+ let read_str = match check!(read_stream.read(&mut read_buf)) {
+ 0 => panic!("shouldn't happen"),
+ n => str::from_utf8(&read_buf[..n]).unwrap().to_string(),
+ };
+ assert_eq!(read_str, message);
+ }
+ check!(fs::remove_file(filename));
+}
+
+#[test]
+fn invalid_path_raises() {
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_that_does_not_exist.txt");
+ let result = File::open(filename);
+
+ #[cfg(all(unix, not(target_os = "vxworks")))]
+ error!(result, "No such file or directory");
+ #[cfg(target_os = "vxworks")]
+ error!(result, "no such file or directory");
+ #[cfg(windows)]
+ error!(result, 2); // ERROR_FILE_NOT_FOUND
+}
+
+#[test]
+fn file_test_iounlinking_invalid_path_should_raise_condition() {
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_another_file_that_does_not_exist.txt");
+
+ let result = fs::remove_file(filename);
+
+ #[cfg(all(unix, not(target_os = "vxworks")))]
+ error!(result, "No such file or directory");
+ #[cfg(target_os = "vxworks")]
+ error!(result, "no such file or directory");
+ #[cfg(windows)]
+ error!(result, 2); // ERROR_FILE_NOT_FOUND
+}
+
+#[test]
+fn file_test_io_non_positional_read() {
+ let message: &str = "ten-four";
+ let mut read_mem = [0; 8];
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_rt_io_file_test_positional.txt");
+ {
+ let mut rw_stream = check!(File::create(filename));
+ check!(rw_stream.write(message.as_bytes()));
+ }
+ {
+ let mut read_stream = check!(File::open(filename));
+ {
+ let read_buf = &mut read_mem[0..4];
+ check!(read_stream.read(read_buf));
+ }
+ {
+ let read_buf = &mut read_mem[4..8];
+ check!(read_stream.read(read_buf));
+ }
+ }
+ check!(fs::remove_file(filename));
+ let read_str = str::from_utf8(&read_mem).unwrap();
+ assert_eq!(read_str, message);
+}
+
+#[test]
+fn file_test_io_seek_and_tell_smoke_test() {
+ let message = "ten-four";
+ let mut read_mem = [0; 4];
+ let set_cursor = 4 as u64;
+ let tell_pos_pre_read;
+ let tell_pos_post_read;
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_rt_io_file_test_seeking.txt");
+ {
+ let mut rw_stream = check!(File::create(filename));
+ check!(rw_stream.write(message.as_bytes()));
+ }
+ {
+ let mut read_stream = check!(File::open(filename));
+ check!(read_stream.seek(SeekFrom::Start(set_cursor)));
+ tell_pos_pre_read = check!(read_stream.seek(SeekFrom::Current(0)));
+ check!(read_stream.read(&mut read_mem));
+ tell_pos_post_read = check!(read_stream.seek(SeekFrom::Current(0)));
+ }
+ check!(fs::remove_file(filename));
+ let read_str = str::from_utf8(&read_mem).unwrap();
+ assert_eq!(read_str, &message[4..8]);
+ assert_eq!(tell_pos_pre_read, set_cursor);
+ assert_eq!(tell_pos_post_read, message.len() as u64);
+}
+
+#[test]
+fn file_test_io_seek_and_write() {
+ let initial_msg = "food-is-yummy";
+ let overwrite_msg = "-the-bar!!";
+ let final_msg = "foo-the-bar!!";
+ let seek_idx = 3;
+ let mut read_mem = [0; 13];
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_rt_io_file_test_seek_and_write.txt");
+ {
+ let mut rw_stream = check!(File::create(filename));
+ check!(rw_stream.write(initial_msg.as_bytes()));
+ check!(rw_stream.seek(SeekFrom::Start(seek_idx)));
+ check!(rw_stream.write(overwrite_msg.as_bytes()));
+ }
+ {
+ let mut read_stream = check!(File::open(filename));
+ check!(read_stream.read(&mut read_mem));
+ }
+ check!(fs::remove_file(filename));
+ let read_str = str::from_utf8(&read_mem).unwrap();
+ assert!(read_str == final_msg);
+}
+
+#[test]
+fn file_test_io_seek_shakedown() {
+ // 01234567890123
+ let initial_msg = "qwer-asdf-zxcv";
+ let chunk_one: &str = "qwer";
+ let chunk_two: &str = "asdf";
+ let chunk_three: &str = "zxcv";
+ let mut read_mem = [0; 4];
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_rt_io_file_test_seek_shakedown.txt");
+ {
+ let mut rw_stream = check!(File::create(filename));
+ check!(rw_stream.write(initial_msg.as_bytes()));
+ }
+ {
+ let mut read_stream = check!(File::open(filename));
+
+ check!(read_stream.seek(SeekFrom::End(-4)));
+ check!(read_stream.read(&mut read_mem));
+ assert_eq!(str::from_utf8(&read_mem).unwrap(), chunk_three);
+
+ check!(read_stream.seek(SeekFrom::Current(-9)));
+ check!(read_stream.read(&mut read_mem));
+ assert_eq!(str::from_utf8(&read_mem).unwrap(), chunk_two);
+
+ check!(read_stream.seek(SeekFrom::Start(0)));
+ check!(read_stream.read(&mut read_mem));
+ assert_eq!(str::from_utf8(&read_mem).unwrap(), chunk_one);
+ }
+ check!(fs::remove_file(filename));
+}
+
+#[test]
+fn file_test_io_eof() {
+ let tmpdir = tmpdir();
+ let filename = tmpdir.join("file_rt_io_file_test_eof.txt");
+ let mut buf = [0; 256];
+ {
+ let oo = OpenOptions::new().create_new(true).write(true).read(true).clone();
+ let mut rw = check!(oo.open(&filename));
+ assert_eq!(check!(rw.read(&mut buf)), 0);
+ assert_eq!(check!(rw.read(&mut buf)), 0);
+ }
+ check!(fs::remove_file(&filename));
+}
+
+#[test]
+#[cfg(unix)]
+fn file_test_io_read_write_at() {
+ use crate::os::unix::fs::FileExt;
+
+ let tmpdir = tmpdir();
+ let filename = tmpdir.join("file_rt_io_file_test_read_write_at.txt");
+ let mut buf = [0; 256];
+ let write1 = "asdf";
+ let write2 = "qwer-";
+ let write3 = "-zxcv";
+ let content = "qwer-asdf-zxcv";
+ {
+ let oo = OpenOptions::new().create_new(true).write(true).read(true).clone();
+ let mut rw = check!(oo.open(&filename));
+ assert_eq!(check!(rw.write_at(write1.as_bytes(), 5)), write1.len());
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 0);
+ assert_eq!(check!(rw.read_at(&mut buf, 5)), write1.len());
+ assert_eq!(str::from_utf8(&buf[..write1.len()]), Ok(write1));
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 0);
+ assert_eq!(check!(rw.read_at(&mut buf[..write2.len()], 0)), write2.len());
+ assert_eq!(str::from_utf8(&buf[..write2.len()]), Ok("\0\0\0\0\0"));
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 0);
+ assert_eq!(check!(rw.write(write2.as_bytes())), write2.len());
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 5);
+ assert_eq!(check!(rw.read(&mut buf)), write1.len());
+ assert_eq!(str::from_utf8(&buf[..write1.len()]), Ok(write1));
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9);
+ assert_eq!(check!(rw.read_at(&mut buf[..write2.len()], 0)), write2.len());
+ assert_eq!(str::from_utf8(&buf[..write2.len()]), Ok(write2));
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9);
+ assert_eq!(check!(rw.write_at(write3.as_bytes(), 9)), write3.len());
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9);
+ }
+ {
+ let mut read = check!(File::open(&filename));
+ assert_eq!(check!(read.read_at(&mut buf, 0)), content.len());
+ assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 0);
+ assert_eq!(check!(read.seek(SeekFrom::End(-5))), 9);
+ assert_eq!(check!(read.read_at(&mut buf, 0)), content.len());
+ assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 9);
+ assert_eq!(check!(read.read(&mut buf)), write3.len());
+ assert_eq!(str::from_utf8(&buf[..write3.len()]), Ok(write3));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14);
+ assert_eq!(check!(read.read_at(&mut buf, 0)), content.len());
+ assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14);
+ assert_eq!(check!(read.read_at(&mut buf, 14)), 0);
+ assert_eq!(check!(read.read_at(&mut buf, 15)), 0);
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14);
+ }
+ check!(fs::remove_file(&filename));
+}
+
+#[test]
+#[cfg(unix)]
+fn set_get_unix_permissions() {
+ use crate::os::unix::fs::PermissionsExt;
+
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("set_get_unix_permissions");
+ check!(fs::create_dir(filename));
+ let mask = 0o7777;
+
+ check!(fs::set_permissions(filename, fs::Permissions::from_mode(0)));
+ let metadata0 = check!(fs::metadata(filename));
+ assert_eq!(mask & metadata0.permissions().mode(), 0);
+
+ check!(fs::set_permissions(filename, fs::Permissions::from_mode(0o1777)));
+ let metadata1 = check!(fs::metadata(filename));
+ #[cfg(all(unix, not(target_os = "vxworks")))]
+ assert_eq!(mask & metadata1.permissions().mode(), 0o1777);
+ #[cfg(target_os = "vxworks")]
+ assert_eq!(mask & metadata1.permissions().mode(), 0o0777);
+}
+
+#[test]
+#[cfg(windows)]
+fn file_test_io_seek_read_write() {
+ use crate::os::windows::fs::FileExt;
+
+ let tmpdir = tmpdir();
+ let filename = tmpdir.join("file_rt_io_file_test_seek_read_write.txt");
+ let mut buf = [0; 256];
+ let write1 = "asdf";
+ let write2 = "qwer-";
+ let write3 = "-zxcv";
+ let content = "qwer-asdf-zxcv";
+ {
+ let oo = OpenOptions::new().create_new(true).write(true).read(true).clone();
+ let mut rw = check!(oo.open(&filename));
+ assert_eq!(check!(rw.seek_write(write1.as_bytes(), 5)), write1.len());
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9);
+ assert_eq!(check!(rw.seek_read(&mut buf, 5)), write1.len());
+ assert_eq!(str::from_utf8(&buf[..write1.len()]), Ok(write1));
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9);
+ assert_eq!(check!(rw.seek(SeekFrom::Start(0))), 0);
+ assert_eq!(check!(rw.write(write2.as_bytes())), write2.len());
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 5);
+ assert_eq!(check!(rw.read(&mut buf)), write1.len());
+ assert_eq!(str::from_utf8(&buf[..write1.len()]), Ok(write1));
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9);
+ assert_eq!(check!(rw.seek_read(&mut buf[..write2.len()], 0)), write2.len());
+ assert_eq!(str::from_utf8(&buf[..write2.len()]), Ok(write2));
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 5);
+ assert_eq!(check!(rw.seek_write(write3.as_bytes(), 9)), write3.len());
+ assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 14);
+ }
+ {
+ let mut read = check!(File::open(&filename));
+ assert_eq!(check!(read.seek_read(&mut buf, 0)), content.len());
+ assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14);
+ assert_eq!(check!(read.seek(SeekFrom::End(-5))), 9);
+ assert_eq!(check!(read.seek_read(&mut buf, 0)), content.len());
+ assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14);
+ assert_eq!(check!(read.seek(SeekFrom::End(-5))), 9);
+ assert_eq!(check!(read.read(&mut buf)), write3.len());
+ assert_eq!(str::from_utf8(&buf[..write3.len()]), Ok(write3));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14);
+ assert_eq!(check!(read.seek_read(&mut buf, 0)), content.len());
+ assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content));
+ assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14);
+ assert_eq!(check!(read.seek_read(&mut buf, 14)), 0);
+ assert_eq!(check!(read.seek_read(&mut buf, 15)), 0);
+ }
+ check!(fs::remove_file(&filename));
+}
+
+#[test]
+fn file_test_stat_is_correct_on_is_file() {
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_stat_correct_on_is_file.txt");
+ {
+ let mut opts = OpenOptions::new();
+ let mut fs = check!(opts.read(true).write(true).create(true).open(filename));
+ let msg = "hw";
+ fs.write(msg.as_bytes()).unwrap();
+
+ let fstat_res = check!(fs.metadata());
+ assert!(fstat_res.is_file());
+ }
+ let stat_res_fn = check!(fs::metadata(filename));
+ assert!(stat_res_fn.is_file());
+ let stat_res_meth = check!(filename.metadata());
+ assert!(stat_res_meth.is_file());
+ check!(fs::remove_file(filename));
+}
+
+#[test]
+fn file_test_stat_is_correct_on_is_dir() {
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("file_stat_correct_on_is_dir");
+ check!(fs::create_dir(filename));
+ let stat_res_fn = check!(fs::metadata(filename));
+ assert!(stat_res_fn.is_dir());
+ let stat_res_meth = check!(filename.metadata());
+ assert!(stat_res_meth.is_dir());
+ check!(fs::remove_dir(filename));
+}
+
+#[test]
+fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() {
+ let tmpdir = tmpdir();
+ let dir = &tmpdir.join("fileinfo_false_on_dir");
+ check!(fs::create_dir(dir));
+ assert!(!dir.is_file());
+ check!(fs::remove_dir(dir));
+}
+
+#[test]
+fn file_test_fileinfo_check_exists_before_and_after_file_creation() {
+ let tmpdir = tmpdir();
+ let file = &tmpdir.join("fileinfo_check_exists_b_and_a.txt");
+ check!(check!(File::create(file)).write(b"foo"));
+ assert!(file.exists());
+ check!(fs::remove_file(file));
+ assert!(!file.exists());
+}
+
+#[test]
+fn file_test_directoryinfo_check_exists_before_and_after_mkdir() {
+ let tmpdir = tmpdir();
+ let dir = &tmpdir.join("before_and_after_dir");
+ assert!(!dir.exists());
+ check!(fs::create_dir(dir));
+ assert!(dir.exists());
+ assert!(dir.is_dir());
+ check!(fs::remove_dir(dir));
+ assert!(!dir.exists());
+}
+
+#[test]
+fn file_test_directoryinfo_readdir() {
+ let tmpdir = tmpdir();
+ let dir = &tmpdir.join("di_readdir");
+ check!(fs::create_dir(dir));
+ let prefix = "foo";
+ for n in 0..3 {
+ let f = dir.join(&format!("{n}.txt"));
+ let mut w = check!(File::create(&f));
+ let msg_str = format!("{}{}", prefix, n.to_string());
+ let msg = msg_str.as_bytes();
+ check!(w.write(msg));
+ }
+ let files = check!(fs::read_dir(dir));
+ let mut mem = [0; 4];
+ for f in files {
+ let f = f.unwrap().path();
+ {
+ let n = f.file_stem().unwrap();
+ check!(check!(File::open(&f)).read(&mut mem));
+ let read_str = str::from_utf8(&mem).unwrap();
+ let expected = format!("{}{}", prefix, n.to_str().unwrap());
+ assert_eq!(expected, read_str);
+ }
+ check!(fs::remove_file(&f));
+ }
+ check!(fs::remove_dir(dir));
+}
+
+#[test]
+fn file_create_new_already_exists_error() {
+ let tmpdir = tmpdir();
+ let file = &tmpdir.join("file_create_new_error_exists");
+ check!(fs::File::create(file));
+ let e = fs::OpenOptions::new().write(true).create_new(true).open(file).unwrap_err();
+ assert_eq!(e.kind(), ErrorKind::AlreadyExists);
+}
+
+#[test]
+fn mkdir_path_already_exists_error() {
+ let tmpdir = tmpdir();
+ let dir = &tmpdir.join("mkdir_error_twice");
+ check!(fs::create_dir(dir));
+ let e = fs::create_dir(dir).unwrap_err();
+ assert_eq!(e.kind(), ErrorKind::AlreadyExists);
+}
+
+#[test]
+fn recursive_mkdir() {
+ let tmpdir = tmpdir();
+ let dir = tmpdir.join("d1/d2");
+ check!(fs::create_dir_all(&dir));
+ assert!(dir.is_dir())
+}
+
+#[test]
+fn recursive_mkdir_failure() {
+ let tmpdir = tmpdir();
+ let dir = tmpdir.join("d1");
+ let file = dir.join("f1");
+
+ check!(fs::create_dir_all(&dir));
+ check!(File::create(&file));
+
+ let result = fs::create_dir_all(&file);
+
+ assert!(result.is_err());
+}
+
+#[test]
+fn concurrent_recursive_mkdir() {
+ for _ in 0..100 {
+ let dir = tmpdir();
+ let mut dir = dir.join("a");
+ for _ in 0..40 {
+ dir = dir.join("a");
+ }
+ let mut join = vec![];
+ for _ in 0..8 {
+ let dir = dir.clone();
+ join.push(thread::spawn(move || {
+ check!(fs::create_dir_all(&dir));
+ }))
+ }
+
+ // No `Display` on result of `join()`
+ join.drain(..).map(|join| join.join().unwrap()).count();
+ }
+}
+
+#[test]
+fn recursive_mkdir_slash() {
+ check!(fs::create_dir_all(Path::new("/")));
+}
+
+#[test]
+fn recursive_mkdir_dot() {
+ check!(fs::create_dir_all(Path::new(".")));
+}
+
+#[test]
+fn recursive_mkdir_empty() {
+ check!(fs::create_dir_all(Path::new("")));
+}
+
+#[test]
+fn recursive_rmdir() {
+ let tmpdir = tmpdir();
+ let d1 = tmpdir.join("d1");
+ let dt = d1.join("t");
+ let dtt = dt.join("t");
+ let d2 = tmpdir.join("d2");
+ let canary = d2.join("do_not_delete");
+ check!(fs::create_dir_all(&dtt));
+ check!(fs::create_dir_all(&d2));
+ check!(check!(File::create(&canary)).write(b"foo"));
+ check!(symlink_junction(&d2, &dt.join("d2")));
+ let _ = symlink_file(&canary, &d1.join("canary"));
+ check!(fs::remove_dir_all(&d1));
+
+ assert!(!d1.is_dir());
+ assert!(canary.exists());
+}
+
+#[test]
+fn recursive_rmdir_of_symlink() {
+ // Test that `remove_dir_all` on a symlink removes only the symlink and does not recurse into its target.
+ let tmpdir = tmpdir();
+ let link = tmpdir.join("d1");
+ let dir = tmpdir.join("d2");
+ let canary = dir.join("do_not_delete");
+ check!(fs::create_dir_all(&dir));
+ check!(check!(File::create(&canary)).write(b"foo"));
+ check!(symlink_junction(&dir, &link));
+ check!(fs::remove_dir_all(&link));
+
+ assert!(!link.is_dir());
+ assert!(canary.exists());
+}
+
+#[test]
+fn recursive_rmdir_of_file_fails() {
+ // Test that `remove_dir_all` fails on, and does not delete, a directly specified file.
+ let tmpdir = tmpdir();
+ let canary = tmpdir.join("do_not_delete");
+ check!(check!(File::create(&canary)).write(b"foo"));
+ let result = fs::remove_dir_all(&canary);
+ #[cfg(unix)]
+ error!(result, "Not a directory");
+ #[cfg(windows)]
+ error!(result, 267); // ERROR_DIRECTORY - The directory name is invalid.
+ assert!(result.is_err());
+ assert!(canary.exists());
+}
+
+#[test]
+ // Only Windows makes a distinction between file and directory symlinks.
+#[cfg(windows)]
+fn recursive_rmdir_of_file_symlink() {
+ let tmpdir = tmpdir();
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+
+ let f1 = tmpdir.join("f1");
+ let f2 = tmpdir.join("f2");
+ check!(check!(File::create(&f1)).write(b"foo"));
+ check!(symlink_file(&f1, &f2));
+ match fs::remove_dir_all(&f2) {
+ Ok(..) => panic!("wanted a failure"),
+ Err(..) => {}
+ }
+}
+
+#[test]
+#[ignore] // takes too much time
+fn recursive_rmdir_toctou() {
+ // Test for time-of-check to time-of-use issues.
+ //
+ // Scenario:
+ // The attacker wants to get the contents of a directory deleted, even though they do not
+ // have access to it. They have a way to get a privileged Rust binary to call
+ // `std::fs::remove_dir_all()` on a directory they control, e.g. in their home directory.
+ //
+ // The PoC sets up `attack_dest/attack_file`, which the attacker wants to have deleted.
+ // The attacker repeatedly creates a directory and replaces it with a symlink from
+ // `victim_del` to `attack_dest` while the victim code calls `std::fs::remove_dir_all()`
+ // on `victim_del`. After a few seconds the attack has succeeded and
+ // `attack_dest/attack_file` is deleted.
+ let tmpdir = tmpdir();
+ let victim_del_path = tmpdir.join("victim_del");
+ let victim_del_path_clone = victim_del_path.clone();
+
+ // setup dest
+ let attack_dest_dir = tmpdir.join("attack_dest");
+ let attack_dest_dir = attack_dest_dir.as_path();
+ fs::create_dir(attack_dest_dir).unwrap();
+ let attack_dest_file = tmpdir.join("attack_dest/attack_file");
+ File::create(&attack_dest_file).unwrap();
+
+ let drop_canary_arc = Arc::new(());
+ let drop_canary_weak = Arc::downgrade(&drop_canary_arc);
+
+ eprintln!("x: {:?}", &victim_del_path);
+
+ // victim just continuously removes `victim_del`
+ thread::spawn(move || {
+ while drop_canary_weak.upgrade().is_some() {
+ let _ = fs::remove_dir_all(&victim_del_path_clone);
+ }
+ });
+
+ // attacker (could of course be in a separate process)
+ let start_time = Instant::now();
+ while Instant::now().duration_since(start_time) < Duration::from_secs(1000) {
+ if !attack_dest_file.exists() {
+ panic!(
+ "Victim deleted symlinked file outside of victim_del. Attack succeeded in {:?}.",
+ Instant::now().duration_since(start_time)
+ );
+ }
+ let _ = fs::create_dir(&victim_del_path);
+ let _ = fs::remove_dir(&victim_del_path);
+ let _ = symlink_dir(attack_dest_dir, &victim_del_path);
+ }
+}
+
+#[test]
+fn unicode_path_is_dir() {
+ assert!(Path::new(".").is_dir());
+ assert!(!Path::new("test/stdtest/fs.rs").is_dir());
+
+ let tmpdir = tmpdir();
+
+ let mut dirpath = tmpdir.path().to_path_buf();
+ dirpath.push("test-가一ー你好");
+ check!(fs::create_dir(&dirpath));
+ assert!(dirpath.is_dir());
+
+ let mut filepath = dirpath;
+ filepath.push("unicode-file-\u{ac00}\u{4e00}\u{30fc}\u{4f60}\u{597d}.rs");
+ check!(File::create(&filepath)); // ignore return; touch only
+ assert!(!filepath.is_dir());
+ assert!(filepath.exists());
+}
+
+#[test]
+fn unicode_path_exists() {
+ assert!(Path::new(".").exists());
+ assert!(!Path::new("test/nonexistent-bogus-path").exists());
+
+ let tmpdir = tmpdir();
+ let unicode = tmpdir.path();
+ let unicode = unicode.join("test-각丁ー再见");
+ check!(fs::create_dir(&unicode));
+ assert!(unicode.exists());
+ assert!(!Path::new("test/unicode-bogus-path-각丁ー再见").exists());
+}
+
+#[test]
+fn copy_file_does_not_exist() {
+ let from = Path::new("test/nonexistent-bogus-path");
+ let to = Path::new("test/other-bogus-path");
+
+ match fs::copy(&from, &to) {
+ Ok(..) => panic!(),
+ Err(..) => {
+ assert!(!from.exists());
+ assert!(!to.exists());
+ }
+ }
+}
+
+#[test]
+fn copy_src_does_not_exist() {
+ let tmpdir = tmpdir();
+ let from = Path::new("test/nonexistent-bogus-path");
+ let to = tmpdir.join("out.txt");
+ check!(check!(File::create(&to)).write(b"hello"));
+ assert!(fs::copy(&from, &to).is_err());
+ assert!(!from.exists());
+ let mut v = Vec::new();
+ check!(check!(File::open(&to)).read_to_end(&mut v));
+ assert_eq!(v, b"hello");
+}
+
+#[test]
+fn copy_file_ok() {
+ let tmpdir = tmpdir();
+ let input = tmpdir.join("in.txt");
+ let out = tmpdir.join("out.txt");
+
+ check!(check!(File::create(&input)).write(b"hello"));
+ check!(fs::copy(&input, &out));
+ let mut v = Vec::new();
+ check!(check!(File::open(&out)).read_to_end(&mut v));
+ assert_eq!(v, b"hello");
+
+ assert_eq!(check!(input.metadata()).permissions(), check!(out.metadata()).permissions());
+}
+
+#[test]
+fn copy_file_dst_dir() {
+ let tmpdir = tmpdir();
+ let out = tmpdir.join("out");
+
+ check!(File::create(&out));
+ match fs::copy(&*out, tmpdir.path()) {
+ Ok(..) => panic!(),
+ Err(..) => {}
+ }
+}
+
+#[test]
+fn copy_file_dst_exists() {
+ let tmpdir = tmpdir();
+ let input = tmpdir.join("in");
+ let output = tmpdir.join("out");
+
+ check!(check!(File::create(&input)).write("foo".as_bytes()));
+ check!(check!(File::create(&output)).write("bar".as_bytes()));
+ check!(fs::copy(&input, &output));
+
+ let mut v = Vec::new();
+ check!(check!(File::open(&output)).read_to_end(&mut v));
+ assert_eq!(v, b"foo".to_vec());
+}
+
+#[test]
+fn copy_file_src_dir() {
+ let tmpdir = tmpdir();
+ let out = tmpdir.join("out");
+
+ match fs::copy(tmpdir.path(), &out) {
+ Ok(..) => panic!(),
+ Err(..) => {}
+ }
+ assert!(!out.exists());
+}
+
+#[test]
+fn copy_file_preserves_perm_bits() {
+ let tmpdir = tmpdir();
+ let input = tmpdir.join("in.txt");
+ let out = tmpdir.join("out.txt");
+
+ let attr = check!(check!(File::create(&input)).metadata());
+ let mut p = attr.permissions();
+ p.set_readonly(true);
+ check!(fs::set_permissions(&input, p));
+ check!(fs::copy(&input, &out));
+ assert!(check!(out.metadata()).permissions().readonly());
+ check!(fs::set_permissions(&input, attr.permissions()));
+ check!(fs::set_permissions(&out, attr.permissions()));
+}
+
+#[test]
+#[cfg(windows)]
+fn copy_file_preserves_streams() {
+ let tmp = tmpdir();
+ check!(check!(File::create(tmp.join("in.txt:bunny"))).write("carrot".as_bytes()));
+ assert_eq!(check!(fs::copy(tmp.join("in.txt"), tmp.join("out.txt"))), 0);
+ assert_eq!(check!(tmp.join("out.txt").metadata()).len(), 0);
+ let mut v = Vec::new();
+ check!(check!(File::open(tmp.join("out.txt:bunny"))).read_to_end(&mut v));
+ assert_eq!(v, b"carrot".to_vec());
+}
+
+#[test]
+fn copy_file_returns_metadata_len() {
+ let tmp = tmpdir();
+ let in_path = tmp.join("in.txt");
+ let out_path = tmp.join("out.txt");
+ check!(check!(File::create(&in_path)).write(b"lettuce"));
+ #[cfg(windows)]
+ check!(check!(File::create(tmp.join("in.txt:bunny"))).write(b"carrot"));
+ let copied_len = check!(fs::copy(&in_path, &out_path));
+ assert_eq!(check!(out_path.metadata()).len(), copied_len);
+}
+
+#[test]
+fn copy_file_follows_dst_symlink() {
+ let tmp = tmpdir();
+ if !got_symlink_permission(&tmp) {
+ return;
+ };
+
+ let in_path = tmp.join("in.txt");
+ let out_path = tmp.join("out.txt");
+ let out_path_symlink = tmp.join("out_symlink.txt");
+
+ check!(fs::write(&in_path, "foo"));
+ check!(fs::write(&out_path, "bar"));
+ check!(symlink_file(&out_path, &out_path_symlink));
+
+ check!(fs::copy(&in_path, &out_path_symlink));
+
+ assert!(check!(out_path_symlink.symlink_metadata()).file_type().is_symlink());
+ assert_eq!(check!(fs::read(&out_path_symlink)), b"foo".to_vec());
+ assert_eq!(check!(fs::read(&out_path)), b"foo".to_vec());
+}
+
+#[test]
+fn symlinks_work() {
+ let tmpdir = tmpdir();
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+
+ let input = tmpdir.join("in.txt");
+ let out = tmpdir.join("out.txt");
+
+ check!(check!(File::create(&input)).write("foobar".as_bytes()));
+ check!(symlink_file(&input, &out));
+ assert!(check!(out.symlink_metadata()).file_type().is_symlink());
+ assert_eq!(check!(fs::metadata(&out)).len(), check!(fs::metadata(&input)).len());
+ let mut v = Vec::new();
+ check!(check!(File::open(&out)).read_to_end(&mut v));
+ assert_eq!(v, b"foobar".to_vec());
+}
+
+#[test]
+fn symlink_noexist() {
+ // Symlinks can point to things that don't exist
+ let tmpdir = tmpdir();
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+
+ // Use a relative path for testing. Symlinks get normalized by Windows,
+ // so we might not get the same path back for absolute paths.
+ check!(symlink_file(&"foo", &tmpdir.join("bar")));
+ assert_eq!(check!(fs::read_link(&tmpdir.join("bar"))).to_str().unwrap(), "foo");
+}
+
+#[test]
+fn read_link() {
+ if cfg!(windows) {
+ // directory symlink
+ assert_eq!(check!(fs::read_link(r"C:\Users\All Users")), Path::new(r"C:\ProgramData"));
+ // junction
+ assert_eq!(check!(fs::read_link(r"C:\Users\Default User")), Path::new(r"C:\Users\Default"));
+ // junction with special permissions
+ // Since not all localized Windows versions contain the folder "Documents and Settings" in English,
+ // we briefly check whether it exists and otherwise skip the test. On CI, however, we always execute the test.
+ if Path::new(r"C:\Documents and Settings\").exists() || env::var_os("CI").is_some() {
+ assert_eq!(
+ check!(fs::read_link(r"C:\Documents and Settings\")),
+ Path::new(r"C:\Users")
+ );
+ }
+ }
+ let tmpdir = tmpdir();
+ let link = tmpdir.join("link");
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+ check!(symlink_file(&"foo", &link));
+ assert_eq!(check!(fs::read_link(&link)).to_str().unwrap(), "foo");
+}
+
+#[test]
+fn readlink_not_symlink() {
+ let tmpdir = tmpdir();
+ match fs::read_link(tmpdir.path()) {
+ Ok(..) => panic!("wanted a failure"),
+ Err(..) => {}
+ }
+}
+
+#[test]
+fn links_work() {
+ let tmpdir = tmpdir();
+ let input = tmpdir.join("in.txt");
+ let out = tmpdir.join("out.txt");
+
+ check!(check!(File::create(&input)).write("foobar".as_bytes()));
+ check!(fs::hard_link(&input, &out));
+ assert_eq!(check!(fs::metadata(&out)).len(), check!(fs::metadata(&input)).len());
+ assert_eq!(check!(fs::metadata(&out)).len(), check!(input.metadata()).len());
+ let mut v = Vec::new();
+ check!(check!(File::open(&out)).read_to_end(&mut v));
+ assert_eq!(v, b"foobar".to_vec());
+
+ // can't link to yourself
+ match fs::hard_link(&input, &input) {
+ Ok(..) => panic!("wanted a failure"),
+ Err(..) => {}
+ }
+ // can't link to something that doesn't exist
+ match fs::hard_link(&tmpdir.join("foo"), &tmpdir.join("bar")) {
+ Ok(..) => panic!("wanted a failure"),
+ Err(..) => {}
+ }
+}
+
+#[test]
+fn chmod_works() {
+ let tmpdir = tmpdir();
+ let file = tmpdir.join("in.txt");
+
+ check!(File::create(&file));
+ let attr = check!(fs::metadata(&file));
+ assert!(!attr.permissions().readonly());
+ let mut p = attr.permissions();
+ p.set_readonly(true);
+ check!(fs::set_permissions(&file, p.clone()));
+ let attr = check!(fs::metadata(&file));
+ assert!(attr.permissions().readonly());
+
+ match fs::set_permissions(&tmpdir.join("foo"), p.clone()) {
+ Ok(..) => panic!("wanted an error"),
+ Err(..) => {}
+ }
+
+ p.set_readonly(false);
+ check!(fs::set_permissions(&file, p));
+}
+
+#[test]
+fn fchmod_works() {
+ let tmpdir = tmpdir();
+ let path = tmpdir.join("in.txt");
+
+ let file = check!(File::create(&path));
+ let attr = check!(fs::metadata(&path));
+ assert!(!attr.permissions().readonly());
+ let mut p = attr.permissions();
+ p.set_readonly(true);
+ check!(file.set_permissions(p.clone()));
+ let attr = check!(fs::metadata(&path));
+ assert!(attr.permissions().readonly());
+
+ p.set_readonly(false);
+ check!(file.set_permissions(p));
+}
+
+#[test]
+fn sync_doesnt_kill_anything() {
+ let tmpdir = tmpdir();
+ let path = tmpdir.join("in.txt");
+
+ let mut file = check!(File::create(&path));
+ check!(file.sync_all());
+ check!(file.sync_data());
+ check!(file.write(b"foo"));
+ check!(file.sync_all());
+ check!(file.sync_data());
+}
+
+#[test]
+fn truncate_works() {
+ let tmpdir = tmpdir();
+ let path = tmpdir.join("in.txt");
+
+ let mut file = check!(File::create(&path));
+ check!(file.write(b"foo"));
+ check!(file.sync_all());
+
+ // Do some simple things with truncation
+ assert_eq!(check!(file.metadata()).len(), 3);
+ check!(file.set_len(10));
+ assert_eq!(check!(file.metadata()).len(), 10);
+ check!(file.write(b"bar"));
+ check!(file.sync_all());
+ assert_eq!(check!(file.metadata()).len(), 10);
+
+ let mut v = Vec::new();
+ check!(check!(File::open(&path)).read_to_end(&mut v));
+ assert_eq!(v, b"foobar\0\0\0\0".to_vec());
+
+ // Truncate to a smaller length, don't seek, and then write something.
+ // Ensure that the intermediate zeroes are all filled in (we have `seek`ed
+ // past the end of the file).
+ check!(file.set_len(2));
+ assert_eq!(check!(file.metadata()).len(), 2);
+ check!(file.write(b"wut"));
+ check!(file.sync_all());
+ assert_eq!(check!(file.metadata()).len(), 9);
+ let mut v = Vec::new();
+ check!(check!(File::open(&path)).read_to_end(&mut v));
+ assert_eq!(v, b"fo\0\0\0\0wut".to_vec());
+}
+
+#[test]
+fn open_flavors() {
+ use crate::fs::OpenOptions as OO;
+ fn c<T: Clone>(t: &T) -> T {
+ t.clone()
+ }
+
+ let tmpdir = tmpdir();
+
+ let mut r = OO::new();
+ r.read(true);
+ let mut w = OO::new();
+ w.write(true);
+ let mut rw = OO::new();
+ rw.read(true).write(true);
+ let mut a = OO::new();
+ a.append(true);
+ let mut ra = OO::new();
+ ra.read(true).append(true);
+
+ #[cfg(windows)]
+ let invalid_options = 87; // ERROR_INVALID_PARAMETER
+ #[cfg(all(unix, not(target_os = "vxworks")))]
+ let invalid_options = "Invalid argument";
+ #[cfg(target_os = "vxworks")]
+ let invalid_options = "invalid argument";
+
+ // Test various combinations of creation modes and access modes.
+ //
+ // Allowed:
+ // creation mode | read | write | read-write | append | read-append |
+ // :-----------------------|:-----:|:-----:|:----------:|:------:|:-----------:|
+ // not set (open existing) | X | X | X | X | X |
+ // create | | X | X | X | X |
+ // truncate | | X | X | | |
+ // create and truncate | | X | X | | |
+ // create_new | | X | X | X | X |
+ //
+ // tested in reverse order, so 'create_new' creates the file, and 'open existing' opens it.
+
+ // write-only
+ check!(c(&w).create_new(true).open(&tmpdir.join("a")));
+ check!(c(&w).create(true).truncate(true).open(&tmpdir.join("a")));
+ check!(c(&w).truncate(true).open(&tmpdir.join("a")));
+ check!(c(&w).create(true).open(&tmpdir.join("a")));
+ check!(c(&w).open(&tmpdir.join("a")));
+
+ // read-only
+ error!(c(&r).create_new(true).open(&tmpdir.join("b")), invalid_options);
+ error!(c(&r).create(true).truncate(true).open(&tmpdir.join("b")), invalid_options);
+ error!(c(&r).truncate(true).open(&tmpdir.join("b")), invalid_options);
+ error!(c(&r).create(true).open(&tmpdir.join("b")), invalid_options);
+ check!(c(&r).open(&tmpdir.join("a"))); // try opening the file created with write_only
+
+ // read-write
+ check!(c(&rw).create_new(true).open(&tmpdir.join("c")));
+ check!(c(&rw).create(true).truncate(true).open(&tmpdir.join("c")));
+ check!(c(&rw).truncate(true).open(&tmpdir.join("c")));
+ check!(c(&rw).create(true).open(&tmpdir.join("c")));
+ check!(c(&rw).open(&tmpdir.join("c")));
+
+ // append
+ check!(c(&a).create_new(true).open(&tmpdir.join("d")));
+ error!(c(&a).create(true).truncate(true).open(&tmpdir.join("d")), invalid_options);
+ error!(c(&a).truncate(true).open(&tmpdir.join("d")), invalid_options);
+ check!(c(&a).create(true).open(&tmpdir.join("d")));
+ check!(c(&a).open(&tmpdir.join("d")));
+
+ // read-append
+ check!(c(&ra).create_new(true).open(&tmpdir.join("e")));
+ error!(c(&ra).create(true).truncate(true).open(&tmpdir.join("e")), invalid_options);
+ error!(c(&ra).truncate(true).open(&tmpdir.join("e")), invalid_options);
+ check!(c(&ra).create(true).open(&tmpdir.join("e")));
+ check!(c(&ra).open(&tmpdir.join("e")));
+
+ // Test opening a file without setting an access mode
+ let mut blank = OO::new();
+ error!(blank.create(true).open(&tmpdir.join("f")), invalid_options);
+
+ // Test write works
+ check!(check!(File::create(&tmpdir.join("h"))).write("foobar".as_bytes()));
+
+ // Test write fails for read-only
+ check!(r.open(&tmpdir.join("h")));
+ {
+ let mut f = check!(r.open(&tmpdir.join("h")));
+ assert!(f.write("wut".as_bytes()).is_err());
+ }
+
+ // Test write overwrites
+ {
+ let mut f = check!(c(&w).open(&tmpdir.join("h")));
+ check!(f.write("baz".as_bytes()));
+ }
+ {
+ let mut f = check!(c(&r).open(&tmpdir.join("h")));
+ let mut b = vec![0; 6];
+ check!(f.read(&mut b));
+ assert_eq!(b, "bazbar".as_bytes());
+ }
+
+ // Test truncate works
+ {
+ let mut f = check!(c(&w).truncate(true).open(&tmpdir.join("h")));
+ check!(f.write("foo".as_bytes()));
+ }
+ assert_eq!(check!(fs::metadata(&tmpdir.join("h"))).len(), 3);
+
+ // Test append works
+ assert_eq!(check!(fs::metadata(&tmpdir.join("h"))).len(), 3);
+ {
+ let mut f = check!(c(&a).open(&tmpdir.join("h")));
+ check!(f.write("bar".as_bytes()));
+ }
+ assert_eq!(check!(fs::metadata(&tmpdir.join("h"))).len(), 6);
+
+ // Test .append(true) equals .write(true).append(true)
+ {
+ let mut f = check!(c(&w).append(true).open(&tmpdir.join("h")));
+ check!(f.write("baz".as_bytes()));
+ }
+ assert_eq!(check!(fs::metadata(&tmpdir.join("h"))).len(), 9);
+}
+
+#[test]
+fn _assert_send_sync() {
+ fn _assert_send_sync<T: Send + Sync>() {}
+ _assert_send_sync::<OpenOptions>();
+}
+
+#[test]
+fn binary_file() {
+ let mut bytes = [0; 1024];
+ StdRng::from_entropy().fill_bytes(&mut bytes);
+
+ let tmpdir = tmpdir();
+
+ check!(check!(File::create(&tmpdir.join("test"))).write(&bytes));
+ let mut v = Vec::new();
+ check!(check!(File::open(&tmpdir.join("test"))).read_to_end(&mut v));
+ assert!(v == &bytes[..]);
+}
+
+#[test]
+fn write_then_read() {
+ let mut bytes = [0; 1024];
+ StdRng::from_entropy().fill_bytes(&mut bytes);
+
+ let tmpdir = tmpdir();
+
+ check!(fs::write(&tmpdir.join("test"), &bytes[..]));
+ let v = check!(fs::read(&tmpdir.join("test")));
+ assert!(v == &bytes[..]);
+
+ check!(fs::write(&tmpdir.join("not-utf8"), &[0xFF]));
+ error_contains!(
+ fs::read_to_string(&tmpdir.join("not-utf8")),
+ "stream did not contain valid UTF-8"
+ );
+
+ let s = "𐁁𐀓𐀠𐀴𐀍";
+ check!(fs::write(&tmpdir.join("utf8"), s.as_bytes()));
+ let string = check!(fs::read_to_string(&tmpdir.join("utf8")));
+ assert_eq!(string, s);
+}
+
+#[test]
+fn file_try_clone() {
+ let tmpdir = tmpdir();
+
+ let mut f1 =
+ check!(OpenOptions::new().read(true).write(true).create(true).open(&tmpdir.join("test")));
+ let mut f2 = check!(f1.try_clone());
+
+ check!(f1.write_all(b"hello world"));
+ check!(f1.seek(SeekFrom::Start(2)));
+
+ let mut buf = vec![];
+ check!(f2.read_to_end(&mut buf));
+ assert_eq!(buf, b"llo world");
+ drop(f2);
+
+ check!(f1.write_all(b"!"));
+}
+
+#[test]
+#[cfg(not(windows))]
+fn unlink_readonly() {
+ let tmpdir = tmpdir();
+ let path = tmpdir.join("file");
+ check!(File::create(&path));
+ let mut perm = check!(fs::metadata(&path)).permissions();
+ perm.set_readonly(true);
+ check!(fs::set_permissions(&path, perm));
+ check!(fs::remove_file(&path));
+}
+
+#[test]
+fn mkdir_trailing_slash() {
+ let tmpdir = tmpdir();
+ let path = tmpdir.join("file");
+ check!(fs::create_dir_all(&path.join("a/")));
+}
+
+#[test]
+fn canonicalize_works_simple() {
+ let tmpdir = tmpdir();
+ let tmpdir = fs::canonicalize(tmpdir.path()).unwrap();
+ let file = tmpdir.join("test");
+ File::create(&file).unwrap();
+ assert_eq!(fs::canonicalize(&file).unwrap(), file);
+}
+
+#[test]
+fn realpath_works() {
+ let tmpdir = tmpdir();
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+
+ let tmpdir = fs::canonicalize(tmpdir.path()).unwrap();
+ let file = tmpdir.join("test");
+ let dir = tmpdir.join("test2");
+ let link = dir.join("link");
+ let linkdir = tmpdir.join("test3");
+
+ File::create(&file).unwrap();
+ fs::create_dir(&dir).unwrap();
+ symlink_file(&file, &link).unwrap();
+ symlink_dir(&dir, &linkdir).unwrap();
+
+ assert!(link.symlink_metadata().unwrap().file_type().is_symlink());
+
+ assert_eq!(fs::canonicalize(&tmpdir).unwrap(), tmpdir);
+ assert_eq!(fs::canonicalize(&file).unwrap(), file);
+ assert_eq!(fs::canonicalize(&link).unwrap(), file);
+ assert_eq!(fs::canonicalize(&linkdir).unwrap(), dir);
+ assert_eq!(fs::canonicalize(&linkdir.join("link")).unwrap(), file);
+}
+
+#[test]
+fn realpath_works_tricky() {
+ let tmpdir = tmpdir();
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+
+ let tmpdir = fs::canonicalize(tmpdir.path()).unwrap();
+ let a = tmpdir.join("a");
+ let b = a.join("b");
+ let c = b.join("c");
+ let d = a.join("d");
+ let e = d.join("e");
+ let f = a.join("f");
+
+ fs::create_dir_all(&b).unwrap();
+ fs::create_dir_all(&d).unwrap();
+ File::create(&f).unwrap();
+ if cfg!(not(windows)) {
+ symlink_file("../d/e", &c).unwrap();
+ symlink_file("../f", &e).unwrap();
+ }
+ if cfg!(windows) {
+ symlink_file(r"..\d\e", &c).unwrap();
+ symlink_file(r"..\f", &e).unwrap();
+ }
+
+ assert_eq!(fs::canonicalize(&c).unwrap(), f);
+ assert_eq!(fs::canonicalize(&e).unwrap(), f);
+}
+
+#[test]
+fn dir_entry_methods() {
+ let tmpdir = tmpdir();
+
+ fs::create_dir_all(&tmpdir.join("a")).unwrap();
+ File::create(&tmpdir.join("b")).unwrap();
+
+ for file in tmpdir.path().read_dir().unwrap().map(|f| f.unwrap()) {
+ let fname = file.file_name();
+ match fname.to_str() {
+ Some("a") => {
+ assert!(file.file_type().unwrap().is_dir());
+ assert!(file.metadata().unwrap().is_dir());
+ }
+ Some("b") => {
+ assert!(file.file_type().unwrap().is_file());
+ assert!(file.metadata().unwrap().is_file());
+ }
+ f => panic!("unknown file name: {f:?}"),
+ }
+ }
+}
+
+#[test]
+fn dir_entry_debug() {
+ let tmpdir = tmpdir();
+ File::create(&tmpdir.join("b")).unwrap();
+ let mut read_dir = tmpdir.path().read_dir().unwrap();
+ let dir_entry = read_dir.next().unwrap().unwrap();
+ let actual = format!("{dir_entry:?}");
+ let expected = format!("DirEntry({:?})", dir_entry.0.path());
+ assert_eq!(actual, expected);
+}
+
+#[test]
+fn read_dir_not_found() {
+ let res = fs::read_dir("/path/that/does/not/exist");
+ assert_eq!(res.err().unwrap().kind(), ErrorKind::NotFound);
+}
+
+#[test]
+fn file_open_not_found() {
+ let res = File::open("/path/that/does/not/exist");
+ assert_eq!(res.err().unwrap().kind(), ErrorKind::NotFound);
+}
+
+#[test]
+fn create_dir_all_with_junctions() {
+ let tmpdir = tmpdir();
+ let target = tmpdir.join("target");
+
+ let junction = tmpdir.join("junction");
+ let b = junction.join("a/b");
+
+ let link = tmpdir.join("link");
+ let d = link.join("c/d");
+
+ fs::create_dir(&target).unwrap();
+
+ check!(symlink_junction(&target, &junction));
+ check!(fs::create_dir_all(&b));
+ // the junction itself is not a directory, but `is_dir()` on a Path
+ // follows links
+ assert!(junction.is_dir());
+ assert!(b.exists());
+
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+ check!(symlink_dir(&target, &link));
+ check!(fs::create_dir_all(&d));
+ assert!(link.is_dir());
+ assert!(d.exists());
+}
+
+#[test]
+fn metadata_access_times() {
+ let tmpdir = tmpdir();
+
+ let b = tmpdir.join("b");
+ File::create(&b).unwrap();
+
+ let a = check!(fs::metadata(&tmpdir.path()));
+ let b = check!(fs::metadata(&b));
+
+ assert_eq!(check!(a.accessed()), check!(a.accessed()));
+ assert_eq!(check!(a.modified()), check!(a.modified()));
+ assert_eq!(check!(b.accessed()), check!(b.modified()));
+
+ if cfg!(target_os = "macos") || cfg!(target_os = "windows") {
+ check!(a.created());
+ check!(b.created());
+ }
+
+ if cfg!(target_os = "linux") {
+ // Not always available
+ match (a.created(), b.created()) {
+ (Ok(t1), Ok(t2)) => assert!(t1 <= t2),
+ (Err(e1), Err(e2))
+ if e1.kind() == ErrorKind::Uncategorized
+ && e2.kind() == ErrorKind::Uncategorized
+ || e1.kind() == ErrorKind::Unsupported
+ && e2.kind() == ErrorKind::Unsupported => {}
+ (a, b) => {
+ panic!("creation time must be always supported or not supported: {a:?} {b:?}")
+ }
+ }
+ }
+}
+
+/// Test creating hard links to symlinks.
+#[test]
+fn symlink_hard_link() {
+ let tmpdir = tmpdir();
+ if !got_symlink_permission(&tmpdir) {
+ return;
+ };
+ if !able_to_not_follow_symlinks_while_hard_linking() {
+ return;
+ }
+
+ // Create "file", a file.
+ check!(fs::File::create(tmpdir.join("file")));
+
+ // Create "symlink", a symlink to "file".
+ check!(symlink_file("file", tmpdir.join("symlink")));
+
+ // Create "hard_link", a hard link to "symlink".
+ check!(fs::hard_link(tmpdir.join("symlink"), tmpdir.join("hard_link")));
+
+ // "hard_link" should appear as a symlink.
+ assert!(check!(fs::symlink_metadata(tmpdir.join("hard_link"))).file_type().is_symlink());
+
+ // We should be able to open "file" via any of the above names.
+ let _ = check!(fs::File::open(tmpdir.join("file")));
+ assert!(fs::File::open(tmpdir.join("file.renamed")).is_err());
+ let _ = check!(fs::File::open(tmpdir.join("symlink")));
+ let _ = check!(fs::File::open(tmpdir.join("hard_link")));
+
+ // Rename "file" to "file.renamed".
+ check!(fs::rename(tmpdir.join("file"), tmpdir.join("file.renamed")));
+
+ // Now, the symlink and the hard link should be dangling.
+ assert!(fs::File::open(tmpdir.join("file")).is_err());
+ let _ = check!(fs::File::open(tmpdir.join("file.renamed")));
+ assert!(fs::File::open(tmpdir.join("symlink")).is_err());
+ assert!(fs::File::open(tmpdir.join("hard_link")).is_err());
+
+ // The symlink and the hard link should both still point to "file".
+ assert!(fs::read_link(tmpdir.join("file")).is_err());
+ assert!(fs::read_link(tmpdir.join("file.renamed")).is_err());
+ assert_eq!(check!(fs::read_link(tmpdir.join("symlink"))), Path::new("file"));
+ assert_eq!(check!(fs::read_link(tmpdir.join("hard_link"))), Path::new("file"));
+
+ // Remove "file.renamed".
+ check!(fs::remove_file(tmpdir.join("file.renamed")));
+
+ // Now, we can't open the file by any name.
+ assert!(fs::File::open(tmpdir.join("file")).is_err());
+ assert!(fs::File::open(tmpdir.join("file.renamed")).is_err());
+ assert!(fs::File::open(tmpdir.join("symlink")).is_err());
+ assert!(fs::File::open(tmpdir.join("hard_link")).is_err());
+
+ // "hard_link" should still appear as a symlink.
+ assert!(check!(fs::symlink_metadata(tmpdir.join("hard_link"))).file_type().is_symlink());
+}
+
+/// Ensure `fs::create_dir` works on Windows with longer paths.
+#[test]
+#[cfg(windows)]
+fn create_dir_long_paths() {
+ use crate::{ffi::OsStr, iter, os::windows::ffi::OsStrExt};
+ const PATH_LEN: usize = 247;
+
+ let tmpdir = tmpdir();
+ let mut path = tmpdir.path().to_path_buf();
+ path.push("a");
+ let mut path = path.into_os_string();
+
+ let utf16_len = path.encode_wide().count();
+ if utf16_len >= PATH_LEN {
+ // Skip the test in the unlikely event the local user has a long temp directory path.
+ // This should not affect CI.
+ return;
+ }
+ // Increase the length of the path.
+ path.extend(iter::repeat(OsStr::new("a")).take(PATH_LEN - utf16_len));
+
+ // This should succeed.
+ fs::create_dir(&path).unwrap();
+
+ // This will fail if the path isn't converted to verbatim.
+ path.push("a");
+ fs::create_dir(&path).unwrap();
+
+ // #90940: Ensure an empty path returns the "Not Found" error.
+ let path = Path::new("");
+ assert_eq!(path.canonicalize().unwrap_err().kind(), crate::io::ErrorKind::NotFound);
+}
+
+/// Ensure ReadDir works on large directories.
+/// Regression test for https://github.com/rust-lang/rust/issues/93384.
+#[test]
+fn read_large_dir() {
+ let tmpdir = tmpdir();
+
+ let count = 32 * 1024;
+ for i in 0..count {
+ check!(fs::File::create(tmpdir.join(&i.to_string())));
+ }
+
+ for entry in fs::read_dir(tmpdir.path()).unwrap() {
+ entry.unwrap();
+ }
+}
+
+/// Test the fallback for getting the metadata of files like hiberfil.sys that
+/// Windows holds a special lock on, preventing normal means of querying
+/// metadata. See #96980.
+///
+/// Note this fails in CI because `hiberfil.sys` does not actually exist there.
+/// Therefore it's marked as ignored.
+#[test]
+#[ignore]
+#[cfg(windows)]
+fn hiberfil_sys() {
+ let hiberfil = Path::new(r"C:\hiberfil.sys");
+ assert_eq!(true, hiberfil.try_exists().unwrap());
+ fs::symlink_metadata(hiberfil).unwrap();
+ fs::metadata(hiberfil).unwrap();
+ assert_eq!(true, hiberfil.exists());
+}
diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs
new file mode 100644
index 000000000..f7fbaa9c2
--- /dev/null
+++ b/library/std/src/io/buffered/bufreader.rs
@@ -0,0 +1,496 @@
+mod buffer;
+
+use crate::fmt;
+use crate::io::{
+ self, BufRead, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
+};
+use buffer::Buffer;
+
+/// The `BufReader<R>` struct adds buffering to any reader.
+///
+/// It can be excessively inefficient to work directly with a [`Read`] instance.
+/// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
+/// results in a system call. A `BufReader<R>` performs large, infrequent reads on
+/// the underlying [`Read`] and maintains an in-memory buffer of the results.
+///
+/// `BufReader<R>` can improve the speed of programs that make *small* and
+/// *repeated* read calls to the same file or network socket. It does not
+/// help when reading very large amounts at once, or reading just one or a few
+/// times. It also provides no advantage when reading from a source that is
+/// already in memory, like a <code>[Vec]\<u8></code>.
+///
+/// When the `BufReader<R>` is dropped, the contents of its buffer will be
+/// discarded. Creating multiple instances of a `BufReader<R>` on the same
+/// stream can cause data loss. Reading from the underlying reader after
+/// unwrapping the `BufReader<R>` with [`BufReader::into_inner`] can also cause
+/// data loss.
+///
+// HACK(#78696): can't use `crate` for associated items
+/// [`TcpStream::read`]: super::super::super::net::TcpStream::read
+/// [`TcpStream`]: crate::net::TcpStream
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::io::BufReader;
+/// use std::fs::File;
+///
+/// fn main() -> std::io::Result<()> {
+/// let f = File::open("log.txt")?;
+/// let mut reader = BufReader::new(f);
+///
+/// let mut line = String::new();
+/// let len = reader.read_line(&mut line)?;
+/// println!("First line is {len} bytes long");
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct BufReader<R> {
+ inner: R,
+ buf: Buffer,
+}
+
+impl<R: Read> BufReader<R> {
+ /// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB,
+ /// but may change in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f = File::open("log.txt")?;
+ /// let reader = BufReader::new(f);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(inner: R) -> BufReader<R> {
+ BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
+ }
+
+ /// Creates a new `BufReader<R>` with the specified buffer capacity.
+ ///
+ /// # Examples
+ ///
+ /// Creating a buffer with ten bytes of capacity:
+ ///
+ /// ```no_run
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f = File::open("log.txt")?;
+ /// let reader = BufReader::with_capacity(10, f);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
+ BufReader { inner, buf: Buffer::with_capacity(capacity) }
+ }
+}
+
+impl<R> BufReader<R> {
+ /// Gets a reference to the underlying reader.
+ ///
+ /// It is inadvisable to directly read from the underlying reader.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f1 = File::open("log.txt")?;
+ /// let reader = BufReader::new(f1);
+ ///
+ /// let f2 = reader.get_ref();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_ref(&self) -> &R {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying reader.
+ ///
+ /// It is inadvisable to directly read from the underlying reader.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f1 = File::open("log.txt")?;
+ /// let mut reader = BufReader::new(f1);
+ ///
+ /// let f2 = reader.get_mut();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self) -> &mut R {
+ &mut self.inner
+ }
+
+ /// Returns a reference to the internally buffered data.
+ ///
+ /// Unlike [`fill_buf`], this will not attempt to fill the buffer if it is empty.
+ ///
+ /// [`fill_buf`]: BufRead::fill_buf
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::{BufReader, BufRead};
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f = File::open("log.txt")?;
+ /// let mut reader = BufReader::new(f);
+ /// assert!(reader.buffer().is_empty());
+ ///
+ /// if reader.fill_buf()?.len() > 0 {
+ /// assert!(!reader.buffer().is_empty());
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "bufreader_buffer", since = "1.37.0")]
+ pub fn buffer(&self) -> &[u8] {
+ self.buf.buffer()
+ }
+
+ /// Returns the number of bytes the internal buffer can hold at once.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::{BufReader, BufRead};
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f = File::open("log.txt")?;
+ /// let mut reader = BufReader::new(f);
+ ///
+ /// let capacity = reader.capacity();
+ /// let buffer = reader.fill_buf()?;
+ /// assert!(buffer.len() <= capacity);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
+ pub fn capacity(&self) -> usize {
+ self.buf.capacity()
+ }
+
+ /// Unwraps this `BufReader<R>`, returning the underlying reader.
+ ///
+ /// Note that any leftover data in the internal buffer is lost. Therefore,
+ /// a following read from the underlying reader may lead to data loss.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufReader;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f1 = File::open("log.txt")?;
+ /// let reader = BufReader::new(f1);
+ ///
+ /// let f2 = reader.into_inner();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_inner(self) -> R {
+ self.inner
+ }
+
+ /// Invalidates all data in the internal buffer.
+ #[inline]
+ fn discard_buffer(&mut self) {
+ self.buf.discard_buffer()
+ }
+}
+
+impl<R: Seek> BufReader<R> {
+ /// Seeks relative to the current position. If the new position lies within the buffer,
+ /// the buffer will not be flushed, allowing for more efficient seeks.
+ /// This method does not return the location of the underlying reader, so the caller
+ /// must track this information themselves if it is required.
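+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a local `foo.txt` exists:
+ ///
+ /// ```no_run
+ /// use std::io::{BufRead, BufReader};
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ ///     let mut reader = BufReader::new(File::open("foo.txt")?);
+ ///
+ ///     // Fill the buffer, then skip two bytes without discarding it.
+ ///     reader.fill_buf()?;
+ ///     reader.seek_relative(2)?;
+ ///     Ok(())
+ /// }
+ /// ```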
+ #[stable(feature = "bufreader_seek_relative", since = "1.53.0")]
+ pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
+ let pos = self.buf.pos() as u64;
+ if offset < 0 {
+ if let Some(_) = pos.checked_sub((-offset) as u64) {
+ self.buf.unconsume((-offset) as usize);
+ return Ok(());
+ }
+ } else if let Some(new_pos) = pos.checked_add(offset as u64) {
+ if new_pos <= self.buf.filled() as u64 {
+ self.buf.consume(offset as usize);
+ return Ok(());
+ }
+ }
+
+ self.seek(SeekFrom::Current(offset)).map(drop)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<R: Read> Read for BufReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ // If we don't have any buffered data and we're doing a massive read
+ // (larger than our internal buffer), bypass our internal buffer
+ // entirely.
+ if self.buf.pos() == self.buf.filled() && buf.len() >= self.capacity() {
+ self.discard_buffer();
+ return self.inner.read(buf);
+ }
+ let nread = {
+ let mut rem = self.fill_buf()?;
+ rem.read(buf)?
+ };
+ self.consume(nread);
+ Ok(nread)
+ }
+
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ // If we don't have any buffered data and we're doing a massive read
+ // (larger than our internal buffer), bypass our internal buffer
+ // entirely.
+ if self.buf.pos() == self.buf.filled() && buf.remaining() >= self.capacity() {
+ self.discard_buffer();
+ return self.inner.read_buf(buf);
+ }
+
+ let prev = buf.filled_len();
+
+ let mut rem = self.fill_buf()?;
+ rem.read_buf(buf)?;
+
+ self.consume(buf.filled_len() - prev); // the slice impl of read_buf is known to never unfill buf
+
+ Ok(())
+ }
+
+ // Small read_exacts from a BufReader are extremely common when used with a deserializer.
+ // The default implementation calls read in a loop, which results in surprisingly poor code
+ // generation for the common path where the buffer has enough bytes to fill the passed-in
+ // buffer.
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ if self.buf.consume_with(buf.len(), |claimed| buf.copy_from_slice(claimed)) {
+ return Ok(());
+ }
+
+ crate::io::default_read_exact(self, buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
+ if self.buf.pos() == self.buf.filled() && total_len >= self.capacity() {
+ self.discard_buffer();
+ return self.inner.read_vectored(bufs);
+ }
+ let nread = {
+ let mut rem = self.fill_buf()?;
+ rem.read_vectored(bufs)?
+ };
+ self.consume(nread);
+ Ok(nread)
+ }
+
+ fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+
+ // The inner reader might have an optimized `read_to_end`. Drain our buffer and then
+ // delegate to the inner implementation.
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ let inner_buf = self.buffer();
+ buf.extend_from_slice(inner_buf);
+ let nread = inner_buf.len();
+ self.discard_buffer();
+ Ok(nread + self.inner.read_to_end(buf)?)
+ }
+
+ // The inner reader might have an optimized `read_to_end`. Drain our buffer and then
+ // delegate to the inner implementation.
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ // In the general `else` case below we must read bytes into a side buffer, check
+ // that they are valid UTF-8, and then append them to `buf`. This requires a
+ // potentially large memcpy.
+ //
+ // If `buf` is empty--the most common case--we can leverage `append_to_string`
+ // to read directly into `buf`'s internal byte buffer, saving an allocation and
+ // a memcpy.
+ if buf.is_empty() {
+ // `append_to_string`'s safety relies on the buffer only being appended to since
+ // it only checks the UTF-8 validity of new data. If there were existing content in
+ // `buf` then an untrustworthy reader (i.e. `self.inner`) could not only append
+ // bytes but also modify existing bytes and render them invalid. On the other hand,
+ // if `buf` is empty then by definition any writes must be appends and
+ // `append_to_string` will validate all of the new bytes.
+ unsafe { crate::io::append_to_string(buf, |b| self.read_to_end(b)) }
+ } else {
+ // We cannot append our byte buffer directly onto the `buf` String as there could
+ // be an incomplete UTF-8 sequence that has only been partially read. We must read
+ // everything into a side buffer first and then call `from_utf8` on the complete
+ // buffer.
+ let mut bytes = Vec::new();
+ self.read_to_end(&mut bytes)?;
+ let string = crate::str::from_utf8(&bytes).map_err(|_| {
+ io::const_io_error!(
+ io::ErrorKind::InvalidData,
+ "stream did not contain valid UTF-8",
+ )
+ })?;
+ *buf += string;
+ Ok(string.len())
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<R: Read> BufRead for BufReader<R> {
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ self.buf.fill_buf(&mut self.inner)
+ }
+
+ fn consume(&mut self, amt: usize) {
+ self.buf.consume(amt)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<R> fmt::Debug for BufReader<R>
+where
+ R: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("BufReader")
+ .field("reader", &self.inner)
+ .field(
+ "buffer",
+ &format_args!("{}/{}", self.buf.filled() - self.buf.pos(), self.capacity()),
+ )
+ .finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<R: Seek> Seek for BufReader<R> {
+ /// Seek to an offset, in bytes, in the underlying reader.
+ ///
+ /// The position used for seeking with <code>[SeekFrom::Current]\(_)</code> is the
+ /// position the underlying reader would be at if the `BufReader<R>` had no
+ /// internal buffer.
+ ///
+ /// Seeking always discards the internal buffer, even if the seek position
+ /// would otherwise fall within it. This guarantees that calling
+ /// [`BufReader::into_inner()`] immediately after a seek yields the underlying reader
+ /// at the same position.
+ ///
+ /// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
+ ///
+ /// See [`std::io::Seek`] for more details.
+ ///
+ /// Note: In the edge case where you're seeking with <code>[SeekFrom::Current]\(n)</code>
+ /// where `n` minus the internal buffer length overflows an `i64`, two
+ /// seeks will be performed instead of one. If the second seek returns
+ /// [`Err`], the underlying reader will be left at the same position it would
+ /// have if you called `seek` with <code>[SeekFrom::Current]\(0)</code>.
+ ///
+ /// [`std::io::Seek`]: Seek
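+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, assuming a local `foo.txt` exists:
+ ///
+ /// ```no_run
+ /// use std::io::{BufReader, Seek, SeekFrom};
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ ///     let mut reader = BufReader::new(File::open("foo.txt")?);
+ ///
+ ///     // Jump to byte 10 of the underlying file; the internal buffer is discarded.
+ ///     let pos = reader.seek(SeekFrom::Start(10))?;
+ ///     assert_eq!(pos, 10);
+ ///     Ok(())
+ /// }
+ /// ```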
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ let result: u64;
+ if let SeekFrom::Current(n) = pos {
+ let remainder = (self.buf.filled() - self.buf.pos()) as i64;
+ // it should be safe to assume that remainder fits within an i64 as the alternative
+ // means we managed to allocate 8 exbibytes and that's absurd.
+ // But it's not out of the realm of possibility for some weird underlying reader to
+ // support seeking by i64::MIN so we need to handle underflow when subtracting
+ // remainder.
+ if let Some(offset) = n.checked_sub(remainder) {
+ result = self.inner.seek(SeekFrom::Current(offset))?;
+ } else {
+ // seek backwards by our remainder, and then by the offset
+ self.inner.seek(SeekFrom::Current(-remainder))?;
+ self.discard_buffer();
+ result = self.inner.seek(SeekFrom::Current(n))?;
+ }
+ } else {
+ // Seeking with Start/End doesn't care about our buffer length.
+ result = self.inner.seek(pos)?;
+ }
+ self.discard_buffer();
+ Ok(result)
+ }
+
+ /// Returns the current seek position from the start of the stream.
+ ///
+ /// The value returned is equivalent to `self.seek(SeekFrom::Current(0))`
+ /// but does not flush the internal buffer. Due to this optimization the
+ /// function does not guarantee that calling `.into_inner()` immediately
+ /// afterwards will yield the underlying reader at the same position. Use
+ /// [`BufReader::seek`] instead if you require that guarantee.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the position of the inner reader is smaller
+ /// than the amount of buffered data. That can happen if the inner reader
+ /// has an incorrect implementation of [`Seek::stream_position`], or if the
+ /// position has gone out of sync due to calling [`Seek::seek`] directly on
+ /// the underlying reader.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// use std::{
+ /// io::{self, BufRead, BufReader, Seek},
+ /// fs::File,
+ /// };
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = BufReader::new(File::open("foo.txt")?);
+ ///
+ /// let before = f.stream_position()?;
+ /// f.read_line(&mut String::new())?;
+ /// let after = f.stream_position()?;
+ ///
+ /// println!("The first line was {} bytes long", after - before);
+ /// Ok(())
+ /// }
+ /// ```
+ fn stream_position(&mut self) -> io::Result<u64> {
+ let remainder = (self.buf.filled() - self.buf.pos()) as u64;
+ self.inner.stream_position().map(|pos| {
+ pos.checked_sub(remainder).expect(
+ "overflow when subtracting remaining buffer size from inner stream position",
+ )
+ })
+ }
+}
+
+impl<T> SizeHint for BufReader<T> {
+ #[inline]
+ fn lower_bound(&self) -> usize {
+ SizeHint::lower_bound(self.get_ref()) + self.buffer().len()
+ }
+
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ SizeHint::upper_bound(self.get_ref()).and_then(|up| self.buffer().len().checked_add(up))
+ }
+}
diff --git a/library/std/src/io/buffered/bufreader/buffer.rs b/library/std/src/io/buffered/bufreader/buffer.rs
new file mode 100644
index 000000000..8ae01f3b0
--- /dev/null
+++ b/library/std/src/io/buffered/bufreader/buffer.rs
@@ -0,0 +1,105 @@
+//! An encapsulation of `BufReader`'s buffer management logic.
+//!
+//! This module factors out the basic functionality of `BufReader` in order to protect two core
+//! invariants:
+//! * `filled` bytes of `buf` are always initialized
+//! * `pos` is always <= `filled`
+//!
+//! Since this module encapsulates the buffer management logic, we can ensure that the range
+//! `pos..filled` is always a valid index into the initialized region of the buffer. This means
+//! that user code which wants to do reads from a `BufReader` via `buffer` + `consume` can do so
+//! without encountering any runtime bounds checks.
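+//!
+//! For example, a caller holding a `BufReader` can inspect and consume buffered bytes through
+//! `buffer` + `consume` without any explicit bounds handling (a minimal sketch, assuming a
+//! local `log.txt` exists):
+//!
+//! ```no_run
+//! use std::io::{BufRead, BufReader};
+//! use std::fs::File;
+//!
+//! fn main() -> std::io::Result<()> {
+//!     let mut reader = BufReader::new(File::open("log.txt")?);
+//!     reader.fill_buf()?;                    // fill the internal buffer from the file
+//!     let buffered = reader.buffer().len();  // look at what is buffered; no extra read
+//!     reader.consume(buffered);              // mark those bytes as read
+//!     Ok(())
+//! }
+//! ```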
+use crate::cmp;
+use crate::io::{self, Read, ReadBuf};
+use crate::mem::MaybeUninit;
+
+pub struct Buffer {
+ // The buffer.
+ buf: Box<[MaybeUninit<u8>]>,
+ // The current seek offset into `buf`, must always be <= `filled`.
+ pos: usize,
+ // Each call to `fill_buf` sets `filled` to indicate how many bytes at the start of `buf` are
+ // initialized with bytes from a read.
+ filled: usize,
+}
+
+impl Buffer {
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ let buf = Box::new_uninit_slice(capacity);
+ Self { buf, pos: 0, filled: 0 }
+ }
+
+ #[inline]
+ pub fn buffer(&self) -> &[u8] {
+ // SAFETY: self.pos and self.filled are valid, self.filled >= self.pos, and
+ // that region is initialized because those are all invariants of this type.
+ unsafe { MaybeUninit::slice_assume_init_ref(self.buf.get_unchecked(self.pos..self.filled)) }
+ }
+
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.buf.len()
+ }
+
+ #[inline]
+ pub fn filled(&self) -> usize {
+ self.filled
+ }
+
+ #[inline]
+ pub fn pos(&self) -> usize {
+ self.pos
+ }
+
+ #[inline]
+ pub fn discard_buffer(&mut self) {
+ self.pos = 0;
+ self.filled = 0;
+ }
+
+ #[inline]
+ pub fn consume(&mut self, amt: usize) {
+ self.pos = cmp::min(self.pos + amt, self.filled);
+ }
+
+ /// If there are `amt` bytes available in the buffer, pass a slice containing those bytes to
+ /// `visitor` and return true. If there are not enough bytes available, return false.
+ #[inline]
+ pub fn consume_with<V>(&mut self, amt: usize, mut visitor: V) -> bool
+ where
+ V: FnMut(&[u8]),
+ {
+ if let Some(claimed) = self.buffer().get(..amt) {
+ visitor(claimed);
+ // If the indexing into self.buffer() succeeds, amt must be a valid increment.
+ self.pos += amt;
+ true
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ pub fn unconsume(&mut self, amt: usize) {
+ self.pos = self.pos.saturating_sub(amt);
+ }
+
+ #[inline]
+ pub fn fill_buf(&mut self, mut reader: impl Read) -> io::Result<&[u8]> {
+ // If we've reached the end of our internal buffer then we need to fetch
+ // some more data from the reader.
+ // Branch using `>=` instead of the more correct `==`
+ // to tell the compiler that the pos..filled slice is always valid.
+ if self.pos >= self.filled {
+ debug_assert!(self.pos == self.filled);
+
+ let mut readbuf = ReadBuf::uninit(&mut self.buf);
+
+ reader.read_buf(&mut readbuf)?;
+
+ self.filled = readbuf.filled_len();
+ self.pos = 0;
+ }
+ Ok(self.buffer())
+ }
+}
diff --git a/library/std/src/io/buffered/bufwriter.rs b/library/std/src/io/buffered/bufwriter.rs
new file mode 100644
index 000000000..6acb937e7
--- /dev/null
+++ b/library/std/src/io/buffered/bufwriter.rs
@@ -0,0 +1,674 @@
+use crate::error;
+use crate::fmt;
+use crate::io::{
+ self, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write, DEFAULT_BUF_SIZE,
+};
+use crate::mem;
+use crate::ptr;
+
+/// Wraps a writer and buffers its output.
+///
+/// It can be excessively inefficient to work directly with something that
+/// implements [`Write`]. For example, every call to
+/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
+/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
+/// writer in large, infrequent batches.
+///
+/// `BufWriter<W>` can improve the speed of programs that make *small* and
+/// *repeated* write calls to the same file or network socket. It does not
+/// help when writing very large amounts at once, or writing just one or a few
+/// times. It also provides no advantage when writing to a destination that is
+/// in memory, like a <code>[Vec]\<u8></code>.
+///
+/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
+/// dropping will attempt to flush the contents of the buffer, any errors
+/// that happen in the process of dropping will be ignored. Calling [`flush`]
+/// ensures that the buffer is empty and thus dropping will not even attempt
+/// file operations.
+///
+/// # Examples
+///
+/// Let's write the numbers one through ten to a [`TcpStream`]:
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::net::TcpStream;
+///
+/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
+///
+/// for i in 0..10 {
+/// stream.write(&[i+1]).unwrap();
+/// }
+/// ```
+///
+/// Because we're not buffering, we write each one in turn, incurring the
+/// overhead of a system call per byte written. We can fix this with a
+/// `BufWriter<W>`:
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::io::BufWriter;
+/// use std::net::TcpStream;
+///
+/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+///
+/// for i in 0..10 {
+/// stream.write(&[i+1]).unwrap();
+/// }
+/// stream.flush().unwrap();
+/// ```
+///
+/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
+/// together by the buffer and will all be written out in one system call when
+/// the `stream` is flushed.
+///
+// HACK(#78696): can't use `crate` for associated items
+/// [`TcpStream::write`]: super::super::super::net::TcpStream::write
+/// [`TcpStream`]: crate::net::TcpStream
+/// [`flush`]: BufWriter::flush
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct BufWriter<W: Write> {
+ inner: W,
+ // The buffer. Avoid using this like a normal `Vec` in common code paths.
+ // That is, don't use `buf.push`, `buf.extend_from_slice`, or any other
+ // methods that require bounds checking or the like. This makes an enormous
+ // difference to performance (we may want to stop using a `Vec` entirely).
+ buf: Vec<u8>,
+ // #30888: If the inner writer panics in a call to write, we don't want to
+ // write the buffered data a second time in BufWriter's destructor. This
+ // flag tells the Drop impl if it should skip the flush.
+ panicked: bool,
+}
+
+impl<W: Write> BufWriter<W> {
+ /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
+ /// but may change in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(inner: W) -> BufWriter<W> {
+ BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
+ }
+
+ /// Creates a new `BufWriter<W>` with at least the specified buffer capacity.
+ ///
+ /// # Examples
+ ///
+ /// Creating a `BufWriter<W>` with a buffer of at least a hundred bytes.
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
+ /// let mut buffer = BufWriter::with_capacity(100, stream);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
+ BufWriter { inner, buf: Vec::with_capacity(capacity), panicked: false }
+ }
+
+ /// Send data in our local buffer into the inner writer, looping as
+ /// necessary until either it's all been sent or an error occurs.
+ ///
+ /// Because all the data in the buffer has been reported to our owner as
+ /// "successfully written" (by returning nonzero success values from
+ /// `write`), any 0-length writes from `inner` must be reported as I/O
+ /// errors from this method.
+ pub(in crate::io) fn flush_buf(&mut self) -> io::Result<()> {
+ /// Helper struct to ensure the buffer is updated after all the writes
+ /// are complete. It tracks the number of written bytes and drains them
+ /// all from the front of the buffer when dropped.
+ struct BufGuard<'a> {
+ buffer: &'a mut Vec<u8>,
+ written: usize,
+ }
+
+ impl<'a> BufGuard<'a> {
+ fn new(buffer: &'a mut Vec<u8>) -> Self {
+ Self { buffer, written: 0 }
+ }
+
+ /// The unwritten part of the buffer
+ fn remaining(&self) -> &[u8] {
+ &self.buffer[self.written..]
+ }
+
+ /// Flag some bytes as removed from the front of the buffer
+ fn consume(&mut self, amt: usize) {
+ self.written += amt;
+ }
+
+ /// true if all of the bytes have been written
+ fn done(&self) -> bool {
+ self.written >= self.buffer.len()
+ }
+ }
+
+ impl Drop for BufGuard<'_> {
+ fn drop(&mut self) {
+ if self.written > 0 {
+ self.buffer.drain(..self.written);
+ }
+ }
+ }
+
+ let mut guard = BufGuard::new(&mut self.buf);
+ while !guard.done() {
+ self.panicked = true;
+ let r = self.inner.write(guard.remaining());
+ self.panicked = false;
+
+ match r {
+ Ok(0) => {
+ return Err(io::const_io_error!(
+ ErrorKind::WriteZero,
+ "failed to write the buffered data",
+ ));
+ }
+ Ok(n) => guard.consume(n),
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ Ok(())
+ }
+
+ /// Buffer some data without flushing it, regardless of the size of the
+ /// data. Writes as much as possible without exceeding capacity. Returns
+ /// the number of bytes written.
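+ ///
+ /// Illustrative sketch (marked `ignore`: crate-private method, not run as a
+ /// doctest; assumes the allocated buffer capacity is exactly 4 bytes):
+ ///
+ /// ```ignore (illustrative sketch of a crate-private method)
+ /// let mut writer = BufWriter::with_capacity(4, Vec::new());
+ /// // Only as much as fits in the spare capacity is buffered; nothing is flushed.
+ /// assert_eq!(writer.write_to_buf(b"abcdef"), 4);
+ /// assert_eq!(writer.buffer(), b"abcd");
+ /// assert!(writer.get_ref().is_empty());
+ /// ```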
+ pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize {
+ let available = self.spare_capacity();
+ let amt_to_buffer = available.min(buf.len());
+
+ // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
+ unsafe {
+ self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
+ }
+
+ amt_to_buffer
+ }
+
+ /// Gets a reference to the underlying writer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // we can use the reference just like the buffer
+ /// let reference = buffer.get_ref();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_ref(&self) -> &W {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying writer.
+ ///
+ /// It is inadvisable to directly write to the underlying writer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // we can use the reference just like the buffer
+ /// let reference = buffer.get_mut();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self) -> &mut W {
+ &mut self.inner
+ }
+
+ /// Returns a reference to the internally buffered data.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // See how many bytes are currently buffered
+ /// let bytes_buffered = buf_writer.buffer().len();
+ /// ```
+ #[stable(feature = "bufreader_buffer", since = "1.37.0")]
+ pub fn buffer(&self) -> &[u8] {
+ &self.buf
+ }
+
+ /// Returns a mutable reference to the internal buffer.
+ ///
+ /// This can be used to write data directly into the buffer without triggering writes
+ /// to the underlying writer.
+ ///
+ /// That the buffer is a `Vec` is an implementation detail.
+ /// Callers should not modify the capacity as there currently is no public API to do so
+ /// and thus any capacity changes would be unexpected by the user.
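+ ///
+ /// Illustrative sketch (marked `ignore`: crate-private method, not run as a
+ /// doctest):
+ ///
+ /// ```ignore (illustrative sketch of a crate-private method)
+ /// let mut writer = BufWriter::new(Vec::new());
+ /// // Appending within the existing capacity buffers the bytes without
+ /// // touching the inner writer (and without changing the capacity).
+ /// writer.buffer_mut().extend_from_slice(b"abc");
+ /// assert_eq!(writer.buffer(), b"abc");
+ /// assert!(writer.get_ref().is_empty());
+ /// ```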
+ pub(in crate::io) fn buffer_mut(&mut self) -> &mut Vec<u8> {
+ &mut self.buf
+ }
+
+ /// Returns the number of bytes the internal buffer can hold without flushing.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // Check the capacity of the inner buffer
+ /// let capacity = buf_writer.capacity();
+ /// // Calculate how many bytes can be written without flushing
+ /// let without_flush = capacity - buf_writer.buffer().len();
+ /// ```
+ #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
+ pub fn capacity(&self) -> usize {
+ self.buf.capacity()
+ }
+
+ /// Unwraps this `BufWriter<W>`, returning the underlying writer.
+ ///
+ /// The buffer is written out before returning the writer.
+ ///
+ /// # Errors
+ ///
+ /// An [`Err`] will be returned if an error occurs while flushing the buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // unwrap the TcpStream and flush the buffer
+ /// let stream = buffer.into_inner().unwrap();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
+ match self.flush_buf() {
+ Err(e) => Err(IntoInnerError::new(self, e)),
+ Ok(()) => Ok(self.into_parts().0),
+ }
+ }
+
+ /// Disassembles this `BufWriter<W>`, returning the underlying writer, and any buffered but
+ /// unwritten data.
+ ///
+ /// If the underlying writer panicked, it is not known what portion of the data was written.
+ /// In this case, we return `WriterPanicked` for the buffered data (from which the buffer
+ /// contents can still be recovered).
+ ///
+ /// `into_parts` makes no attempt to flush data and cannot fail.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{BufWriter, Write};
+ ///
+ /// let mut buffer = [0u8; 10];
+ /// let mut stream = BufWriter::new(buffer.as_mut());
+ /// write!(stream, "too much data").unwrap();
+ /// stream.flush().expect_err("it doesn't fit");
+ /// let (recovered_writer, buffered_data) = stream.into_parts();
+ /// assert_eq!(recovered_writer.len(), 0);
+ /// assert_eq!(&buffered_data.unwrap(), b"ata");
+ /// ```
+ #[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+ pub fn into_parts(mut self) -> (W, Result<Vec<u8>, WriterPanicked>) {
+ let buf = mem::take(&mut self.buf);
+ let buf = if !self.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };
+
+ // SAFETY: forget(self) prevents double dropping inner
+ let inner = unsafe { ptr::read(&mut self.inner) };
+ mem::forget(self);
+
+ (inner, buf)
+ }
+
+ // Ensure this function does not get inlined into `write`, so that `write`
+ // itself remains inlineable and its common path remains as short as possible.
+ // If this function ends up being called frequently relative to `write`,
+ // it's likely a sign that the client is using an improperly sized buffer
+ // or their write patterns are somewhat pathological.
+ #[cold]
+ #[inline(never)]
+ fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> {
+ if buf.len() > self.spare_capacity() {
+ self.flush_buf()?;
+ }
+
+ // Why not len > capacity? To avoid a needless trip through the buffer when the input
+ // exactly fills it. We'd just need to flush it to the underlying writer anyway.
+ if buf.len() >= self.buf.capacity() {
+ self.panicked = true;
+ let r = self.get_mut().write(buf);
+ self.panicked = false;
+ r
+ } else {
+ // Write to the buffer. In this case, we write to the buffer even if it fills it
+ // exactly. Doing otherwise would mean flushing the buffer, then writing this
+ // input to the inner writer, which in many cases would be a worse strategy.
+
+ // SAFETY: There was either enough spare capacity already, or there wasn't and we
+ // flushed the buffer to ensure that there is. In the latter case, we know that there
+ // is because flushing ensured that our entire buffer is spare capacity, and we entered
+ // this block because the input buffer length is less than that capacity. In either
+ // case, it's safe to write the input buffer to our buffer.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ Ok(buf.len())
+ }
+ }
+
+ // Ensure this function does not get inlined into `write_all`, so that `write_all`
+ // itself remains inlineable and its common path remains as short as possible.
+ // If this function ends up being called frequently relative to `write_all`,
+ // it's likely a sign that the client is using an improperly sized buffer
+ // or their write patterns are somewhat pathological.
+ #[cold]
+ #[inline(never)]
+ fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> {
+ // Normally, `write_all` just calls `write` in a loop. We can do better
+ // by calling `self.get_mut().write_all()` directly, which avoids
+ // round trips through the buffer in the event of a series of partial
+ // writes in some circumstances.
+
+ if buf.len() > self.spare_capacity() {
+ self.flush_buf()?;
+ }
+
+ // Why not len > capacity? To avoid a needless trip through the buffer when the input
+ // exactly fills it. We'd just need to flush it to the underlying writer anyway.
+ if buf.len() >= self.buf.capacity() {
+ self.panicked = true;
+ let r = self.get_mut().write_all(buf);
+ self.panicked = false;
+ r
+ } else {
+ // Write to the buffer. In this case, we write to the buffer even if it fills it
+ // exactly. Doing otherwise would mean flushing the buffer, then writing this
+ // input to the inner writer, which in many cases would be a worse strategy.
+
+ // SAFETY: There was either enough spare capacity already, or there wasn't and we
+ // flushed the buffer to ensure that there is. In the latter case, we know that there
+ // is because flushing ensured that our entire buffer is spare capacity, and we entered
+ // this block because the input buffer length is less than that capacity. In either
+ // case, it's safe to write the input buffer to our buffer.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ Ok(())
+ }
+ }
+
+ // SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`,
+ // i.e., that input buffer length is less than or equal to spare capacity.
+ #[inline]
+ unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) {
+ debug_assert!(buf.len() <= self.spare_capacity());
+ let old_len = self.buf.len();
+ let buf_len = buf.len();
+ let src = buf.as_ptr();
+ let dst = self.buf.as_mut_ptr().add(old_len);
+ ptr::copy_nonoverlapping(src, dst, buf_len);
+ self.buf.set_len(old_len + buf_len);
+ }
+
+ #[inline]
+ fn spare_capacity(&self) -> usize {
+ self.buf.capacity() - self.buf.len()
+ }
+}
+
+#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+/// Error returned for the buffered data from `BufWriter::into_parts`, when the underlying
+/// writer has previously panicked. Contains the (possibly partly written) buffered data.
+///
+/// # Example
+///
+/// ```
+/// use std::io::{self, BufWriter, Write};
+/// use std::panic::{catch_unwind, AssertUnwindSafe};
+///
+/// struct PanickingWriter;
+/// impl Write for PanickingWriter {
+/// fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() }
+/// fn flush(&mut self) -> io::Result<()> { panic!() }
+/// }
+///
+/// let mut stream = BufWriter::new(PanickingWriter);
+/// write!(stream, "some data").unwrap();
+/// let result = catch_unwind(AssertUnwindSafe(|| {
+/// stream.flush().unwrap()
+/// }));
+/// assert!(result.is_err());
+/// let (recovered_writer, buffered_data) = stream.into_parts();
+/// assert!(matches!(recovered_writer, PanickingWriter));
+/// assert_eq!(buffered_data.unwrap_err().into_inner(), b"some data");
+/// ```
+pub struct WriterPanicked {
+ buf: Vec<u8>,
+}
+
+impl WriterPanicked {
+ /// Returns the perhaps-unwritten data. Some of this data may have been written by the
+ /// panicking call(s) to the underlying writer, so simply writing it again is not a good idea.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+ pub fn into_inner(self) -> Vec<u8> {
+ self.buf
+ }
+
+ const DESCRIPTION: &'static str =
+ "BufWriter inner writer panicked, what data remains unwritten is not known";
+}
+
+#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+impl error::Error for WriterPanicked {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ Self::DESCRIPTION
+ }
+}
+
+#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+impl fmt::Display for WriterPanicked {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", Self::DESCRIPTION)
+ }
+}
+
+#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+impl fmt::Debug for WriterPanicked {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("WriterPanicked")
+ .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
+ .finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write> Write for BufWriter<W> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ // Use < instead of <= to avoid a needless trip through the buffer in some cases.
+ // See `write_cold` for details.
+ if buf.len() < self.spare_capacity() {
+ // SAFETY: safe by above conditional.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ Ok(buf.len())
+ } else {
+ self.write_cold(buf)
+ }
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ // Use < instead of <= to avoid a needless trip through the buffer in some cases.
+ // See `write_all_cold` for details.
+ if buf.len() < self.spare_capacity() {
+ // SAFETY: safe by above conditional.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ Ok(())
+ } else {
+ self.write_all_cold(buf)
+ }
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied
+ // to `write` and `write_all`. The performance benefits can be significant. See #79930.
+ if self.get_ref().is_write_vectored() {
+ // We have to handle the possibility that the total length of the buffers overflows
+ // `usize` (even though this can only happen if multiple `IoSlice`s reference the
+ // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the
+ // computation overflows, then surely the input cannot fit in our buffer, so we forward
+ // to the inner writer's `write_vectored` method to let it handle it appropriately.
+ let saturated_total_len =
+ bufs.iter().fold(0usize, |acc, b| acc.saturating_add(b.len()));
+
+ if saturated_total_len > self.spare_capacity() {
+ // Flush if the total length of the input exceeds our buffer's spare capacity.
+ // If we would have overflowed, this condition also holds, and we need to flush.
+ self.flush_buf()?;
+ }
+
+ if saturated_total_len >= self.buf.capacity() {
+ // Forward to our inner writer if the total length of the input is greater than or
+ // equal to our buffer capacity. If we would have overflowed, this condition also
+ // holds, and we punt to the inner writer.
+ self.panicked = true;
+ let r = self.get_mut().write_vectored(bufs);
+ self.panicked = false;
+ r
+ } else {
+ // `saturated_total_len < self.buf.capacity()` implies that we did not saturate.
+
+ // SAFETY: We checked whether or not the spare capacity was large enough above. If
+ // it was, then we're safe already. If it wasn't, we flushed, making sufficient
+ // room for any input <= the buffer size, which includes this input.
+ unsafe {
+ bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b));
+ };
+
+ Ok(saturated_total_len)
+ }
+ } else {
+ let mut iter = bufs.iter();
+ let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) {
+ // This is the first non-empty slice to write, so if it does
+ // not fit in the buffer, we still get to flush and proceed.
+ if buf.len() > self.spare_capacity() {
+ self.flush_buf()?;
+ }
+ if buf.len() >= self.buf.capacity() {
+ // The slice is at least as large as the buffering capacity,
+ // so it's better to write it directly, bypassing the buffer.
+ self.panicked = true;
+ let r = self.get_mut().write(buf);
+ self.panicked = false;
+ return r;
+ } else {
+ // SAFETY: We checked whether or not the spare capacity was large enough above.
+ // If it was, then we're safe already. If it wasn't, we flushed, making
+ // sufficient room for any input <= the buffer size, which includes this input.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ buf.len()
+ }
+ } else {
+ return Ok(0);
+ };
+ debug_assert!(total_written != 0);
+ for buf in iter {
+ if buf.len() <= self.spare_capacity() {
+ // SAFETY: safe by above conditional.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ // This cannot overflow `usize`. If we are here, we've written all of the bytes
+ // so far to our buffer, and we've ensured that we never exceed the buffer's
+ // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`.
+ total_written += buf.len();
+ } else {
+ break;
+ }
+ }
+ Ok(total_written)
+ }
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.flush_buf().and_then(|()| self.get_mut().flush())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write> fmt::Debug for BufWriter<W>
+where
+ W: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("BufWriter")
+ .field("writer", &self.inner)
+ .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
+ .finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write + Seek> Seek for BufWriter<W> {
+ /// Seek to the offset, in bytes, in the underlying writer.
+ ///
+ /// Seeking always writes out the internal buffer before seeking.
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ self.flush_buf()?;
+ self.get_mut().seek(pos)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write> Drop for BufWriter<W> {
+ fn drop(&mut self) {
+ if !self.panicked {
+ // dtors should not panic, so we ignore a failed flush
+ let _r = self.flush_buf();
+ }
+ }
+}
diff --git a/library/std/src/io/buffered/linewriter.rs b/library/std/src/io/buffered/linewriter.rs
new file mode 100644
index 000000000..a26a4ab33
--- /dev/null
+++ b/library/std/src/io/buffered/linewriter.rs
@@ -0,0 +1,232 @@
+use crate::fmt;
+use crate::io::{self, buffered::LineWriterShim, BufWriter, IntoInnerError, IoSlice, Write};
+
+/// Wraps a writer and buffers output to it, flushing whenever a newline
+/// (`0x0a`, `'\n'`) is detected.
+///
+/// The [`BufWriter`] struct wraps a writer and buffers its output.
+/// But it only does this batched write when it goes out of scope, or when the
+/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
+/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
+/// does exactly that.
+///
+/// Like [`BufWriter`], a `LineWriter`’s buffer will also be flushed when the
+/// `LineWriter` goes out of scope or when its internal buffer is full.
+///
+/// If there's still a partial line in the buffer when the `LineWriter` is
+/// dropped, it will flush those contents.
+///
+/// # Examples
+///
+/// We can use `LineWriter` to write one line at a time, significantly
+/// reducing the number of actual writes to the file.
+///
+/// ```no_run
+/// use std::fs::{self, File};
+/// use std::io::prelude::*;
+/// use std::io::LineWriter;
+///
+/// fn main() -> std::io::Result<()> {
+/// let road_not_taken = b"I shall be telling this with a sigh
+/// Somewhere ages and ages hence:
+/// Two roads diverged in a wood, and I -
+/// I took the one less traveled by,
+/// And that has made all the difference.";
+///
+/// let file = File::create("poem.txt")?;
+/// let mut file = LineWriter::new(file);
+///
+/// file.write_all(b"I shall be telling this with a sigh")?;
+///
+/// // No bytes are written until a newline is encountered (or
+/// // the internal buffer is filled).
+/// assert_eq!(fs::read_to_string("poem.txt")?, "");
+/// file.write_all(b"\n")?;
+/// assert_eq!(
+/// fs::read_to_string("poem.txt")?,
+/// "I shall be telling this with a sigh\n",
+/// );
+///
+/// // Write the rest of the poem.
+/// file.write_all(b"Somewhere ages and ages hence:
+/// Two roads diverged in a wood, and I -
+/// I took the one less traveled by,
+/// And that has made all the difference.")?;
+///
+/// // The last line of the poem doesn't end in a newline, so
+/// // we have to flush or drop the `LineWriter` to finish
+/// // writing.
+/// file.flush()?;
+///
+/// // Confirm the whole poem was written.
+/// assert_eq!(fs::read("poem.txt")?, &road_not_taken[..]);
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct LineWriter<W: Write> {
+ inner: BufWriter<W>,
+}
+
+impl<W: Write> LineWriter<W> {
+ /// Creates a new `LineWriter`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let file = File::create("poem.txt")?;
+ /// let file = LineWriter::new(file);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(inner: W) -> LineWriter<W> {
+ // Lines typically aren't that long; don't use a giant buffer
+ LineWriter::with_capacity(1024, inner)
+ }
+
+ /// Creates a new `LineWriter` with at least the specified capacity for the
+ /// internal buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let file = File::create("poem.txt")?;
+ /// let file = LineWriter::with_capacity(100, file);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
+ LineWriter { inner: BufWriter::with_capacity(capacity, inner) }
+ }
+
+ /// Gets a reference to the underlying writer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let file = File::create("poem.txt")?;
+ /// let file = LineWriter::new(file);
+ ///
+ /// let reference = file.get_ref();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_ref(&self) -> &W {
+ self.inner.get_ref()
+ }
+
+ /// Gets a mutable reference to the underlying writer.
+ ///
+ /// Caution must be taken when calling methods on the mutable reference
+ /// returned, as extra writes could corrupt the output stream.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let file = File::create("poem.txt")?;
+ /// let mut file = LineWriter::new(file);
+ ///
+ /// // we can use the reference just like the file
+ /// let reference = file.get_mut();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self) -> &mut W {
+ self.inner.get_mut()
+ }
+
+ /// Unwraps this `LineWriter`, returning the underlying writer.
+ ///
+ /// The internal buffer is written out before returning the writer.
+ ///
+ /// # Errors
+ ///
+ /// An [`Err`] will be returned if an error occurs while flushing the buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::LineWriter;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let file = File::create("poem.txt")?;
+ ///
+ /// let writer: LineWriter<File> = LineWriter::new(file);
+ ///
+ /// let file: File = writer.into_inner()?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
+ self.inner.into_inner().map_err(|err| err.new_wrapped(|inner| LineWriter { inner }))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write> Write for LineWriter<W> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ LineWriterShim::new(&mut self.inner).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.flush()
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ LineWriterShim::new(&mut self.inner).write_vectored(bufs)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ LineWriterShim::new(&mut self.inner).write_all(buf)
+ }
+
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ LineWriterShim::new(&mut self.inner).write_all_vectored(bufs)
+ }
+
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
+ LineWriterShim::new(&mut self.inner).write_fmt(fmt)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write> fmt::Debug for LineWriter<W>
+where
+ W: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("LineWriter")
+ .field("writer", &self.get_ref())
+ .field(
+ "buffer",
+ &format_args!("{}/{}", self.inner.buffer().len(), self.inner.capacity()),
+ )
+ .finish_non_exhaustive()
+ }
+}
diff --git a/library/std/src/io/buffered/linewritershim.rs b/library/std/src/io/buffered/linewritershim.rs
new file mode 100644
index 000000000..0175d2693
--- /dev/null
+++ b/library/std/src/io/buffered/linewritershim.rs
@@ -0,0 +1,276 @@
+use crate::io::{self, BufWriter, IoSlice, Write};
+use crate::sys_common::memchr;
+
+/// Private helper struct for implementing the line-buffered writing logic.
+/// This shim temporarily wraps a BufWriter, and uses its internals to
+/// implement a line-buffered writer (specifically by using the internal
+/// methods like write_to_buf and flush_buf). In this way, a more
+/// efficient abstraction can be created than one that only has access to
+/// `write` and `flush`, without needlessly duplicating a lot of the
+/// implementation details of BufWriter. This also allows existing
+/// `BufWriter`s to be temporarily given line-buffering logic; this is what
+/// enables Stdout to be alternately in line-buffered or block-buffered mode.
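+///
+/// A minimal sketch of the intended usage pattern (marked `ignore`: the type is
+/// crate-private; `write_line_buffered` is a hypothetical caller, mirroring how
+/// `LineWriter` forwards its `Write` methods):
+///
+/// ```ignore (illustrative sketch of a crate-private type)
+/// fn write_line_buffered<W: Write>(
+///     buf_writer: &mut BufWriter<W>,
+///     data: &[u8],
+/// ) -> io::Result<usize> {
+///     // Temporarily view the BufWriter through the line-buffering shim.
+///     LineWriterShim::new(buf_writer).write(data)
+/// }
+/// ```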
+#[derive(Debug)]
+pub struct LineWriterShim<'a, W: Write> {
+ buffer: &'a mut BufWriter<W>,
+}
+
+impl<'a, W: Write> LineWriterShim<'a, W> {
+ pub fn new(buffer: &'a mut BufWriter<W>) -> Self {
+ Self { buffer }
+ }
+
+ /// Get a reference to the inner writer (that is, the writer
+ /// wrapped by the BufWriter).
+ fn inner(&self) -> &W {
+ self.buffer.get_ref()
+ }
+
+ /// Get a mutable reference to the inner writer (that is, the writer
+ /// wrapped by the BufWriter). Be careful with this writer, as writes to
+ /// it will bypass the buffer.
+ fn inner_mut(&mut self) -> &mut W {
+ self.buffer.get_mut()
+ }
+
+ /// Get the content currently buffered in self.buffer
+ fn buffered(&self) -> &[u8] {
+ self.buffer.buffer()
+ }
+
+ /// Flush the buffer iff the last byte is a newline (indicating that an
+ /// earlier write only succeeded partially, and we want to retry flushing
+ /// the buffered line before continuing with a subsequent write)
+ fn flush_if_completed_line(&mut self) -> io::Result<()> {
+ match self.buffered().last().copied() {
+ Some(b'\n') => self.buffer.flush_buf(),
+ _ => Ok(()),
+ }
+ }
+}
+
+impl<'a, W: Write> Write for LineWriterShim<'a, W> {
+ /// Write some data into this BufWriter with line buffering. This means
+ /// that, if any newlines are present in the data, the data up to and including
+ /// the last newline is sent directly to the underlying writer, and data after it
+ /// is buffered. Returns the number of bytes written.
+ ///
+ /// This function operates on a "best effort basis"; in keeping with the
+ /// convention of `Write::write`, it makes at most one attempt to write
+ /// new data to the underlying writer. If that write only reports a partial
+ /// success, the remaining data will be buffered.
+ ///
+ /// Because this function attempts to send completed lines to the underlying
+ /// writer, it will also flush the existing buffer if it ends with a
+ /// newline, even if the incoming data does not contain any newlines.
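+ ///
+ /// Illustrative sketch of the resulting behavior (marked `ignore`: the type
+ /// is crate-private, so this is not run as a doctest):
+ ///
+ /// ```ignore (illustrative sketch of a crate-private type)
+ /// let mut buffered = BufWriter::new(Vec::new());
+ /// let n = LineWriterShim::new(&mut buffered).write(b"hello\nworld").unwrap();
+ /// assert_eq!(n, 11);
+ /// // Everything up to and including the last newline went straight to the
+ /// // inner writer; the trailing partial line stays in the buffer.
+ /// assert_eq!(buffered.get_ref().as_slice(), b"hello\n");
+ /// assert_eq!(buffered.buffer(), b"world");
+ /// ```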
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let newline_idx = match memchr::memrchr(b'\n', buf) {
+ // If there are no newlines in the incoming data (that is, if this write is less than
+ // one line), just do a regular buffered write (which may flush if
+ // we exceed the inner buffer's size)
+ None => {
+ self.flush_if_completed_line()?;
+ return self.buffer.write(buf);
+ }
+ // Otherwise, arrange for the lines to be written directly to the
+ // inner writer.
+ Some(newline_idx) => newline_idx + 1,
+ };
+
+ // Flush existing content to prepare for our write. We have to do this
+ // before attempting to write `buf` in order to maintain consistency;
+ // if we add `buf` to the buffer then try to flush it all at once,
+ // we're obligated to return Ok(), which would mean suppressing any
+ // errors that occur during flush.
+ self.buffer.flush_buf()?;
+
+ // This is what we're going to try to write directly to the inner
+ // writer. The rest will be buffered, if nothing goes wrong.
+ let lines = &buf[..newline_idx];
+
+ // Write `lines` directly to the inner writer. In keeping with the
+ // `write` convention, make at most one attempt to add new (unbuffered)
+ // data. Because this write doesn't touch the BufWriter state directly,
+ // and the buffer is known to be empty, we don't need to worry about
+ // self.buffer.panicked here.
+ let flushed = self.inner_mut().write(lines)?;
+
+ // If the inner writer returns Ok(0), propagate that to the caller without
+ // doing additional buffering; otherwise we're just guaranteeing
+ // an "ErrorKind::WriteZero" later.
+ if flushed == 0 {
+ return Ok(0);
+ }
+
+ // Now that the write has succeeded, buffer the rest (or as much of
+ // the rest as possible). If there were any unwritten newlines, we
+ // only buffer out to the last unwritten newline that fits in the
+ // buffer; this helps prevent flushing partial lines on subsequent
+ // calls to LineWriterShim::write.
+
+ // Handle the cases in order of most-common to least-common, under
+ // the presumption that most writes succeed in totality, and that most
+ // writes are smaller than the buffer.
+ /// - Is this a partial line (i.e., no newlines left in the unwritten tail)?
+ // - If not, does the data out to the last unwritten newline fit in
+ // the buffer?
+ // - If not, scan for the last newline that *does* fit in the buffer
+ let tail = if flushed >= newline_idx {
+ &buf[flushed..]
+ } else if newline_idx - flushed <= self.buffer.capacity() {
+ &buf[flushed..newline_idx]
+ } else {
+ let scan_area = &buf[flushed..];
+ let scan_area = &scan_area[..self.buffer.capacity()];
+ match memchr::memrchr(b'\n', scan_area) {
+ Some(newline_idx) => &scan_area[..newline_idx + 1],
+ None => scan_area,
+ }
+ };
+
+ let buffered = self.buffer.write_to_buf(tail);
+ Ok(flushed + buffered)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.buffer.flush()
+ }
+
+ /// Write some vectored data into this BufWriter with line buffering. This
+ /// means that, if any newlines are present in the data, the data up to
+ /// and including the buffer containing the last newline is sent directly
+ /// to the inner writer, and the data after it is buffered. Returns the
+ /// number of bytes written.
+ ///
+ /// This function operates on a "best effort basis"; in keeping with the
+ /// convention of `Write::write`, it makes at most one attempt to write
+ /// new data to the underlying writer.
+ ///
+ /// Because this function attempts to send completed lines to the underlying
+ /// writer, it will also flush the existing buffer if it ends with a newline,
+ /// even if the incoming data does not contain any newlines.
+ ///
+ /// Because sorting through an array of `IoSlice` can be a bit convoluted,
+ /// this method differs from `write` in the following ways:
+ ///
+ /// - It attempts to write the full content of all the buffers up to and
+ /// including the one containing the last newline. This means that it
+ ///   may attempt to write a partial line, if that buffer has data past the
+ /// newline.
+ /// - If the write only reports partial success, it does not attempt to
+ /// find the precise location of the written bytes and buffer the rest.
+ ///
+ /// If the underlying writer doesn't support vectored writing, we instead
+ /// simply write the first non-empty buffer with `write`. This way, we
+ /// get the benefits of more granular partial-line handling without losing
+ /// anything in efficiency.
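+ ///
+ /// Illustrative sketch of the resulting behavior (marked `ignore`: the type
+ /// is crate-private, so this is not run as a doctest; assumes the inner
+ /// writer reports `is_write_vectored() == true`):
+ ///
+ /// ```ignore (illustrative sketch of a crate-private type)
+ /// let mut buffered = BufWriter::new(Vec::new());
+ /// let bufs = [IoSlice::new(b"one\ntwo\n"), IoSlice::new(b"three")];
+ /// let n = LineWriterShim::new(&mut buffered).write_vectored(&bufs).unwrap();
+ /// assert_eq!(n, 13);
+ /// // The slice containing the last newline (and everything before it) went
+ /// // to the inner writer; the trailing slice was buffered.
+ /// assert_eq!(buffered.get_ref().as_slice(), b"one\ntwo\n");
+ /// assert_eq!(buffered.buffer(), b"three");
+ /// ```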
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ // If there's no specialized behavior for write_vectored, just use
+ // write. This has the benefit of more granular partial-line handling.
+ if !self.is_write_vectored() {
+ return match bufs.iter().find(|buf| !buf.is_empty()) {
+ Some(buf) => self.write(buf),
+ None => Ok(0),
+ };
+ }
+
+ // Find the buffer containing the last newline
+ let last_newline_buf_idx = bufs
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(i, buf)| memchr::memchr(b'\n', buf).map(|_| i));
+
+ // If there are no newlines in the incoming data (that is, if this write is less than
+ // one line), just do a regular buffered write
+ let last_newline_buf_idx = match last_newline_buf_idx {
+ // No newlines; just do a normal buffered write
+ None => {
+ self.flush_if_completed_line()?;
+ return self.buffer.write_vectored(bufs);
+ }
+ Some(i) => i,
+ };
+
+ // Flush existing content to prepare for our write
+ self.buffer.flush_buf()?;
+
+ // This is what we're going to try to write directly to the inner
+ // writer. The rest will be buffered, if nothing goes wrong.
+ let (lines, tail) = bufs.split_at(last_newline_buf_idx + 1);
+
+ // Write `lines` directly to the inner writer. In keeping with the
+ // `write` convention, make at most one attempt to add new (unbuffered)
+ // data. Because this write doesn't touch the BufWriter state directly,
+ // and the buffer is known to be empty, we don't need to worry about
+ // self.panicked here.
+ let flushed = self.inner_mut().write_vectored(lines)?;
+
+ // If inner returns Ok(0), propagate that to the caller without
+ // doing additional buffering; otherwise we're just guaranteeing
+ // an "ErrorKind::WriteZero" later.
+ if flushed == 0 {
+ return Ok(0);
+ }
+
+ // Don't try to reconstruct the exact amount written; just bail
+ // in the event of a partial write
+ let lines_len = lines.iter().map(|buf| buf.len()).sum();
+ if flushed < lines_len {
+ return Ok(flushed);
+ }
+
+ // Now that the write has succeeded, buffer the rest (or as much of the
+ // rest as possible)
+ let buffered: usize = tail
+ .iter()
+ .filter(|buf| !buf.is_empty())
+ .map(|buf| self.buffer.write_to_buf(buf))
+ .take_while(|&n| n > 0)
+ .sum();
+
+ Ok(flushed + buffered)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ self.inner().is_write_vectored()
+ }
+
+ /// Write some data into this BufWriter with line buffering. This means
+ /// that, if any newlines are present in the data, the data up to and including
+ /// the last newline is sent directly to the underlying writer, and data after it
+ /// is buffered.
+ ///
+ /// Because this function attempts to send completed lines to the underlying
+ /// writer, it will also flush the existing buffer if it ends with a
+ /// newline, even if the incoming data does not contain any newlines.
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ match memchr::memrchr(b'\n', buf) {
+ // If there are no newlines in the incoming data (that is, if this write is less than
+ // one line), just do a regular buffered write (which may flush if
+ // we exceed the inner buffer's size)
+ None => {
+ self.flush_if_completed_line()?;
+ self.buffer.write_all(buf)
+ }
+ Some(newline_idx) => {
+ let (lines, tail) = buf.split_at(newline_idx + 1);
+
+ if self.buffered().is_empty() {
+ self.inner_mut().write_all(lines)?;
+ } else {
+ // If there is any buffered data, we add the incoming lines
+ // to that buffer before flushing, which saves us at least
+ // one write call. We can't really do this with `write`,
+ // since we can't do this *and* not suppress errors *and*
+ // report a consistent state to the caller in a return
+ // value, but here in write_all it's fine.
+ self.buffer.write_all(lines)?;
+ self.buffer.flush_buf()?;
+ }
+
+ self.buffer.write_all(tail)
+ }
+ }
+ }
+}
diff --git a/library/std/src/io/buffered/mod.rs b/library/std/src/io/buffered/mod.rs
new file mode 100644
index 000000000..100dab1e2
--- /dev/null
+++ b/library/std/src/io/buffered/mod.rs
@@ -0,0 +1,196 @@
+//! Buffering wrappers for I/O traits
+
+mod bufreader;
+mod bufwriter;
+mod linewriter;
+mod linewritershim;
+
+#[cfg(test)]
+mod tests;
+
+use crate::error;
+use crate::fmt;
+use crate::io::Error;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::{bufreader::BufReader, bufwriter::BufWriter, linewriter::LineWriter};
+use linewritershim::LineWriterShim;
+
+#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+pub use bufwriter::WriterPanicked;
+
+/// An error returned by [`BufWriter::into_inner`] which combines an error that
+/// happened while writing out the buffer, and the buffered writer object
+/// which may be used to recover from the condition.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::io::BufWriter;
+/// use std::net::TcpStream;
+///
+/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+///
+/// // do stuff with the stream
+///
+/// // we want to get our `TcpStream` back, so let's try:
+///
+/// let stream = match stream.into_inner() {
+/// Ok(s) => s,
+/// Err(e) => {
+/// // Here, e is an IntoInnerError
+/// panic!("An error occurred");
+/// }
+/// };
+/// ```
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoInnerError<W>(W, Error);
+
+impl<W> IntoInnerError<W> {
+ /// Construct a new IntoInnerError
+ fn new(writer: W, error: Error) -> Self {
+ Self(writer, error)
+ }
+
+ /// Helper to construct a new IntoInnerError; intended to help with
+ /// adapters that wrap other adapters.
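+ ///
+ /// Illustrative sketch (marked `ignore`: crate-private method; mirrors how
+ /// `LineWriter::into_inner` rewraps the error produced by the inner
+ /// `BufWriter`):
+ ///
+ /// ```ignore (illustrative sketch of a crate-private method)
+ /// // err: IntoInnerError<BufWriter<W>>
+ /// let rewrapped: IntoInnerError<LineWriter<W>> =
+ ///     err.new_wrapped(|inner| LineWriter { inner });
+ /// ```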
+ fn new_wrapped<W2>(self, f: impl FnOnce(W) -> W2) -> IntoInnerError<W2> {
+ let Self(writer, error) = self;
+ IntoInnerError::new(f(writer), error)
+ }
+
+ /// Returns the error which caused the call to [`BufWriter::into_inner()`]
+ /// to fail.
+ ///
+ /// This error was returned when attempting to write the internal buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // do stuff with the stream
+ ///
+ /// // we want to get our `TcpStream` back, so let's try:
+ ///
+ /// let stream = match stream.into_inner() {
+ /// Ok(s) => s,
+ /// Err(e) => {
+ /// // Here, e is an IntoInnerError, let's log the inner error.
+ /// //
+ /// // We'll just 'log' to stdout for this example.
+ /// println!("{}", e.error());
+ ///
+ /// panic!("An unexpected error occurred.");
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn error(&self) -> &Error {
+ &self.1
+ }
+
+ /// Returns the buffered writer instance which generated the error.
+ ///
+ /// The returned object can be used for error recovery, such as
+ /// re-inspecting the buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::BufWriter;
+ /// use std::net::TcpStream;
+ ///
+ /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
+ ///
+ /// // do stuff with the stream
+ ///
+ /// // we want to get our `TcpStream` back, so let's try:
+ ///
+ /// let stream = match stream.into_inner() {
+ /// Ok(s) => s,
+ /// Err(e) => {
+ /// // Here, e is an IntoInnerError, let's re-examine the buffer:
+ /// let buffer = e.into_inner();
+ ///
+ /// // do stuff to try to recover
+ ///
+ /// // afterwards, let's just return the stream
+ /// buffer.into_inner().unwrap()
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_inner(self) -> W {
+ self.0
+ }
+
+ /// Consumes the [`IntoInnerError`] and returns the error which caused the call to
+ /// [`BufWriter::into_inner()`] to fail. Unlike `error`, this can be used to
+ /// obtain ownership of the underlying error.
+ ///
+ /// # Example
+ /// ```
+ /// use std::io::{BufWriter, ErrorKind, Write};
+ ///
+ /// let mut not_enough_space = [0u8; 10];
+ /// let mut stream = BufWriter::new(not_enough_space.as_mut());
+ /// write!(stream, "this cannot be actually written").unwrap();
+ /// let into_inner_err = stream.into_inner().expect_err("now we discover it's too small");
+ /// let err = into_inner_err.into_error();
+ /// assert_eq!(err.kind(), ErrorKind::WriteZero);
+ /// ```
+ #[stable(feature = "io_into_inner_error_parts", since = "1.55.0")]
+ pub fn into_error(self) -> Error {
+ self.1
+ }
+
+ /// Consumes the [`IntoInnerError`] and returns the error which caused the call to
+ /// [`BufWriter::into_inner()`] to fail, and the underlying writer.
+ ///
+ /// This can be used to simply obtain ownership of the underlying error; it can also be used for
+ /// advanced error recovery.
+ ///
+ /// # Example
+ /// ```
+ /// use std::io::{BufWriter, ErrorKind, Write};
+ ///
+ /// let mut not_enough_space = [0u8; 10];
+ /// let mut stream = BufWriter::new(not_enough_space.as_mut());
+ /// write!(stream, "this cannot be actually written").unwrap();
+ /// let into_inner_err = stream.into_inner().expect_err("now we discover it's too small");
+ /// let (err, recovered_writer) = into_inner_err.into_parts();
+ /// assert_eq!(err.kind(), ErrorKind::WriteZero);
+ /// assert_eq!(recovered_writer.buffer(), b"t be actually written");
+ /// ```
+ #[stable(feature = "io_into_inner_error_parts", since = "1.55.0")]
+ pub fn into_parts(self) -> (Error, W) {
+ (self.1, self.0)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W> From<IntoInnerError<W>> for Error {
+ fn from(iie: IntoInnerError<W>) -> Error {
+ iie.1
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ error::Error::description(self.error())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W> fmt::Display for IntoInnerError<W> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.error().fmt(f)
+ }
+}
diff --git a/library/std/src/io/buffered/tests.rs b/library/std/src/io/buffered/tests.rs
new file mode 100644
index 000000000..fe45b1326
--- /dev/null
+++ b/library/std/src/io/buffered/tests.rs
@@ -0,0 +1,1039 @@
+use crate::io::prelude::*;
+use crate::io::{self, BufReader, BufWriter, ErrorKind, IoSlice, LineWriter, ReadBuf, SeekFrom};
+use crate::mem::MaybeUninit;
+use crate::panic;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::thread;
+
+/// A dummy reader intended for testing short-read propagation.
+pub struct ShortReader {
+ lengths: Vec<usize>,
+}
+
+// FIXME: rustfmt and tidy disagree about the correct formatting of this
+// function. This leads to issues for users with editors configured to
+// rustfmt-on-save.
+impl Read for ShortReader {
+ fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
+ if self.lengths.is_empty() { Ok(0) } else { Ok(self.lengths.remove(0)) }
+ }
+}
+
+#[test]
+fn test_buffered_reader() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, inner);
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf);
+ assert_eq!(nread.unwrap(), 3);
+ assert_eq!(buf, [5, 6, 7]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0, 0];
+ let nread = reader.read(&mut buf);
+ assert_eq!(nread.unwrap(), 2);
+ assert_eq!(buf, [0, 1]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0];
+ let nread = reader.read(&mut buf);
+ assert_eq!(nread.unwrap(), 1);
+ assert_eq!(buf, [2]);
+ assert_eq!(reader.buffer(), [3]);
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf);
+ assert_eq!(nread.unwrap(), 1);
+ assert_eq!(buf, [3, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ let nread = reader.read(&mut buf);
+ assert_eq!(nread.unwrap(), 1);
+ assert_eq!(buf, [4, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn test_buffered_reader_read_buf() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, inner);
+
+ let mut buf = [MaybeUninit::uninit(); 3];
+ let mut buf = ReadBuf::uninit(&mut buf);
+
+ reader.read_buf(&mut buf).unwrap();
+
+ assert_eq!(buf.filled(), [5, 6, 7]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [MaybeUninit::uninit(); 2];
+ let mut buf = ReadBuf::uninit(&mut buf);
+
+ reader.read_buf(&mut buf).unwrap();
+
+ assert_eq!(buf.filled(), [0, 1]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [MaybeUninit::uninit(); 1];
+ let mut buf = ReadBuf::uninit(&mut buf);
+
+ reader.read_buf(&mut buf).unwrap();
+
+ assert_eq!(buf.filled(), [2]);
+ assert_eq!(reader.buffer(), [3]);
+
+ let mut buf = [MaybeUninit::uninit(); 3];
+ let mut buf = ReadBuf::uninit(&mut buf);
+
+ reader.read_buf(&mut buf).unwrap();
+
+ assert_eq!(buf.filled(), [3]);
+ assert_eq!(reader.buffer(), []);
+
+ reader.read_buf(&mut buf).unwrap();
+
+ assert_eq!(buf.filled(), [3, 4]);
+ assert_eq!(reader.buffer(), []);
+
+ buf.clear();
+
+ reader.read_buf(&mut buf).unwrap();
+
+ assert_eq!(buf.filled_len(), 0);
+}
+
+#[test]
+fn test_buffered_reader_seek() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
+
+ assert_eq!(reader.seek(SeekFrom::Start(3)).ok(), Some(3));
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
+ assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(3));
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
+ assert_eq!(reader.seek(SeekFrom::Current(1)).ok(), Some(4));
+ assert_eq!(reader.fill_buf().ok(), Some(&[1, 2][..]));
+ reader.consume(1);
+ assert_eq!(reader.seek(SeekFrom::Current(-2)).ok(), Some(3));
+}
+
+#[test]
+fn test_buffered_reader_seek_relative() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
+
+ assert!(reader.seek_relative(3).is_ok());
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
+ assert!(reader.seek_relative(0).is_ok());
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
+ assert!(reader.seek_relative(1).is_ok());
+ assert_eq!(reader.fill_buf().ok(), Some(&[1][..]));
+ assert!(reader.seek_relative(-1).is_ok());
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
+ assert!(reader.seek_relative(2).is_ok());
+ assert_eq!(reader.fill_buf().ok(), Some(&[2, 3][..]));
+}
+
+#[test]
+fn test_buffered_reader_stream_position() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
+
+ assert_eq!(reader.stream_position().ok(), Some(0));
+ assert_eq!(reader.seek(SeekFrom::Start(3)).ok(), Some(3));
+ assert_eq!(reader.stream_position().ok(), Some(3));
+ // relative seeking within the buffer and reading the position should keep the buffer
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
+ assert!(reader.seek_relative(0).is_ok());
+ assert_eq!(reader.stream_position().ok(), Some(3));
+ assert_eq!(reader.buffer(), &[0, 1][..]);
+ assert!(reader.seek_relative(1).is_ok());
+ assert_eq!(reader.stream_position().ok(), Some(4));
+ assert_eq!(reader.buffer(), &[1][..]);
+ assert!(reader.seek_relative(-1).is_ok());
+ assert_eq!(reader.stream_position().ok(), Some(3));
+ assert_eq!(reader.buffer(), &[0, 1][..]);
+ // relative seeking outside the buffer will discard it
+ assert!(reader.seek_relative(2).is_ok());
+ assert_eq!(reader.stream_position().ok(), Some(5));
+ assert_eq!(reader.buffer(), &[][..]);
+}
+
+#[test]
+fn test_buffered_reader_stream_position_panic() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(4, io::Cursor::new(inner));
+
+ // cause the internal buffer to be filled but only partially read
+ let mut buffer = [0, 0];
+ assert!(reader.read_exact(&mut buffer).is_ok());
+ // rewinding the internal reader will cause the buffer to lose sync
+ let inner = reader.get_mut();
+ assert!(inner.seek(SeekFrom::Start(0)).is_ok());
+ // overflow when subtracting the remaining buffer size from current position
+ let result = panic::catch_unwind(panic::AssertUnwindSafe(|| reader.stream_position().ok()));
+ assert!(result.is_err());
+}
+
+#[test]
+fn test_buffered_reader_invalidated_after_read() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(3, io::Cursor::new(inner));
+
+ assert_eq!(reader.fill_buf().ok(), Some(&[5, 6, 7][..]));
+ reader.consume(3);
+
+ let mut buffer = [0, 0, 0, 0, 0];
+ assert_eq!(reader.read(&mut buffer).ok(), Some(5));
+ assert_eq!(buffer, [0, 1, 2, 3, 4]);
+
+ assert!(reader.seek_relative(-2).is_ok());
+ let mut buffer = [0, 0];
+ assert_eq!(reader.read(&mut buffer).ok(), Some(2));
+ assert_eq!(buffer, [3, 4]);
+}
+
+#[test]
+fn test_buffered_reader_invalidated_after_seek() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(3, io::Cursor::new(inner));
+
+ assert_eq!(reader.fill_buf().ok(), Some(&[5, 6, 7][..]));
+ reader.consume(3);
+
+ assert!(reader.seek(SeekFrom::Current(5)).is_ok());
+
+ assert!(reader.seek_relative(-2).is_ok());
+ let mut buffer = [0, 0];
+ assert_eq!(reader.read(&mut buffer).ok(), Some(2));
+ assert_eq!(buffer, [3, 4]);
+}
+
+#[test]
+fn test_buffered_reader_seek_underflow() {
+ // gimmick reader that yields its position modulo 256 for each byte
+ struct PositionReader {
+ pos: u64,
+ }
+ impl Read for PositionReader {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let len = buf.len();
+ for x in buf {
+ *x = self.pos as u8;
+ self.pos = self.pos.wrapping_add(1);
+ }
+ Ok(len)
+ }
+ }
+ impl Seek for PositionReader {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ match pos {
+ SeekFrom::Start(n) => {
+ self.pos = n;
+ }
+ SeekFrom::Current(n) => {
+ self.pos = self.pos.wrapping_add(n as u64);
+ }
+ SeekFrom::End(n) => {
+ self.pos = u64::MAX.wrapping_add(n as u64);
+ }
+ }
+ Ok(self.pos)
+ }
+ }
+
+ let mut reader = BufReader::with_capacity(5, PositionReader { pos: 0 });
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..]));
+ assert_eq!(reader.seek(SeekFrom::End(-5)).ok(), Some(u64::MAX - 5));
+ assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));
+ // the following seek will require two underlying seeks
+ let expected = 9223372036854775802;
+ assert_eq!(reader.seek(SeekFrom::Current(i64::MIN)).ok(), Some(expected));
+ assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));
+ // seeking to 0 should empty the buffer.
+ assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(expected));
+ assert_eq!(reader.get_ref().pos, expected);
+}
+
+#[test]
+fn test_buffered_reader_seek_underflow_discard_buffer_between_seeks() {
+ // gimmick reader that returns Err after first seek
+ struct ErrAfterFirstSeekReader {
+ first_seek: bool,
+ }
+ impl Read for ErrAfterFirstSeekReader {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ for x in &mut *buf {
+ *x = 0;
+ }
+ Ok(buf.len())
+ }
+ }
+ impl Seek for ErrAfterFirstSeekReader {
+ fn seek(&mut self, _: SeekFrom) -> io::Result<u64> {
+ if self.first_seek {
+ self.first_seek = false;
+ Ok(0)
+ } else {
+ Err(io::Error::new(io::ErrorKind::Other, "oh no!"))
+ }
+ }
+ }
+
+ let mut reader = BufReader::with_capacity(5, ErrAfterFirstSeekReader { first_seek: true });
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 0, 0, 0, 0][..]));
+
+ // The following seek will require two underlying seeks. The first will
+ // succeed but the second will fail. This should still invalidate the
+ // buffer.
+ assert!(reader.seek(SeekFrom::Current(i64::MIN)).is_err());
+ assert_eq!(reader.buffer().len(), 0);
+}
+
+#[test]
+fn test_buffered_reader_read_to_end_consumes_buffer() {
+ let data: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7];
+ let mut reader = BufReader::with_capacity(3, data);
+ let mut buf = Vec::new();
+ assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2][..]));
+ assert_eq!(reader.read_to_end(&mut buf).ok(), Some(8));
+ assert_eq!(&buf, &[0, 1, 2, 3, 4, 5, 6, 7]);
+ assert!(reader.buffer().is_empty());
+}
+
+#[test]
+fn test_buffered_reader_read_to_string_consumes_buffer() {
+ let data: &[u8] = "deadbeef".as_bytes();
+ let mut reader = BufReader::with_capacity(3, data);
+ let mut buf = String::new();
+ assert_eq!(reader.fill_buf().ok(), Some("dea".as_bytes()));
+ assert_eq!(reader.read_to_string(&mut buf).ok(), Some(8));
+ assert_eq!(&buf, "deadbeef");
+ assert!(reader.buffer().is_empty());
+}
+
+#[test]
+fn test_buffered_writer() {
+ let inner = Vec::new();
+ let mut writer = BufWriter::with_capacity(2, inner);
+
+ writer.write(&[0, 1]).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ writer.write(&[2]).unwrap();
+ assert_eq!(writer.buffer(), [2]);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ writer.write(&[3]).unwrap();
+ assert_eq!(writer.buffer(), [2, 3]);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ writer.flush().unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
+
+ writer.write(&[4]).unwrap();
+ writer.write(&[5]).unwrap();
+ assert_eq!(writer.buffer(), [4, 5]);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
+
+ writer.write(&[6]).unwrap();
+ assert_eq!(writer.buffer(), [6]);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]);
+
+ writer.write(&[7, 8]).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ writer.write(&[9, 10, 11]).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+
+ writer.flush().unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+}
+
+#[test]
+fn test_buffered_writer_inner_flushes() {
+ let mut w = BufWriter::with_capacity(3, Vec::new());
+ w.write(&[0, 1]).unwrap();
+ assert_eq!(*w.get_ref(), []);
+ let w = w.into_inner().unwrap();
+ assert_eq!(w, [0, 1]);
+}
+
+#[test]
+fn test_buffered_writer_seek() {
+ let mut w = BufWriter::with_capacity(3, io::Cursor::new(Vec::new()));
+ w.write_all(&[0, 1, 2, 3, 4, 5]).unwrap();
+ w.write_all(&[6, 7]).unwrap();
+ assert_eq!(w.seek(SeekFrom::Current(0)).ok(), Some(8));
+ assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]);
+ assert_eq!(w.seek(SeekFrom::Start(2)).ok(), Some(2));
+ w.write_all(&[8, 9]).unwrap();
+ assert_eq!(&w.into_inner().unwrap().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);
+}
+
+#[test]
+fn test_read_until() {
+ let inner: &[u8] = &[0, 1, 2, 1, 0];
+ let mut reader = BufReader::with_capacity(2, inner);
+ let mut v = Vec::new();
+ reader.read_until(0, &mut v).unwrap();
+ assert_eq!(v, [0]);
+ v.truncate(0);
+ reader.read_until(2, &mut v).unwrap();
+ assert_eq!(v, [1, 2]);
+ v.truncate(0);
+ reader.read_until(1, &mut v).unwrap();
+ assert_eq!(v, [1]);
+ v.truncate(0);
+ reader.read_until(8, &mut v).unwrap();
+ assert_eq!(v, [0]);
+ v.truncate(0);
+ reader.read_until(9, &mut v).unwrap();
+ assert_eq!(v, []);
+}
+
+#[test]
+fn test_line_buffer() {
+ let mut writer = LineWriter::new(Vec::new());
+ writer.write(&[0]).unwrap();
+ assert_eq!(*writer.get_ref(), []);
+ writer.write(&[1]).unwrap();
+ assert_eq!(*writer.get_ref(), []);
+ writer.flush().unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1]);
+ writer.write(&[0, b'\n', 1, b'\n', 2]).unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n']);
+ writer.flush().unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2]);
+ writer.write(&[3, b'\n']).unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2, 3, b'\n']);
+}
+
+#[test]
+fn test_read_line() {
+ let in_buf: &[u8] = b"a\nb\nc";
+ let mut reader = BufReader::with_capacity(2, in_buf);
+ let mut s = String::new();
+ reader.read_line(&mut s).unwrap();
+ assert_eq!(s, "a\n");
+ s.truncate(0);
+ reader.read_line(&mut s).unwrap();
+ assert_eq!(s, "b\n");
+ s.truncate(0);
+ reader.read_line(&mut s).unwrap();
+ assert_eq!(s, "c");
+ s.truncate(0);
+ reader.read_line(&mut s).unwrap();
+ assert_eq!(s, "");
+}
+
+#[test]
+fn test_lines() {
+ let in_buf: &[u8] = b"a\nb\nc";
+ let reader = BufReader::with_capacity(2, in_buf);
+ let mut it = reader.lines();
+ assert_eq!(it.next().unwrap().unwrap(), "a".to_string());
+ assert_eq!(it.next().unwrap().unwrap(), "b".to_string());
+ assert_eq!(it.next().unwrap().unwrap(), "c".to_string());
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_short_reads() {
+ let inner = ShortReader { lengths: vec![0, 1, 2, 0, 1, 0] };
+ let mut reader = BufReader::new(inner);
+ let mut buf = [0, 0];
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).unwrap(), 2);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+#[should_panic]
+fn dont_panic_in_drop_on_panicked_flush() {
+ struct FailFlushWriter;
+
+ impl Write for FailFlushWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ Ok(buf.len())
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ Err(io::Error::last_os_error())
+ }
+ }
+
+ let writer = FailFlushWriter;
+ let _writer = BufWriter::new(writer);
+
+ // If writer panics *again* due to the flush error then the process will
+ // abort.
+ panic!();
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn panic_in_write_doesnt_flush_in_drop() {
+ static WRITES: AtomicUsize = AtomicUsize::new(0);
+
+ struct PanicWriter;
+
+ impl Write for PanicWriter {
+ fn write(&mut self, _: &[u8]) -> io::Result<usize> {
+ WRITES.fetch_add(1, Ordering::SeqCst);
+ panic!();
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+ }
+
+ thread::spawn(|| {
+ let mut writer = BufWriter::new(PanicWriter);
+ let _ = writer.write(b"hello world");
+ let _ = writer.flush();
+ })
+ .join()
+ .unwrap_err();
+
+ assert_eq!(WRITES.load(Ordering::SeqCst), 1);
+}
+
+#[bench]
+fn bench_buffered_reader(b: &mut test::Bencher) {
+ b.iter(|| BufReader::new(io::empty()));
+}
+
+#[bench]
+fn bench_buffered_reader_small_reads(b: &mut test::Bencher) {
+ let data = (0..u8::MAX).cycle().take(1024 * 4).collect::<Vec<_>>();
+ b.iter(|| {
+ let mut reader = BufReader::new(&data[..]);
+ let mut buf = [0u8; 4];
+ for _ in 0..1024 {
+ reader.read_exact(&mut buf).unwrap();
+ core::hint::black_box(&buf);
+ }
+ });
+}
+
+#[bench]
+fn bench_buffered_writer(b: &mut test::Bencher) {
+ b.iter(|| BufWriter::new(io::sink()));
+}
+
+/// A simple `Write` target, designed to be wrapped by `LineWriter` /
+/// `BufWriter` / etc, that can have its `write` & `flush` behavior
+/// configured
+#[derive(Default, Clone)]
+struct ProgrammableSink {
+ // Writes append to this buffer
+ pub buffer: Vec<u8>,
+
+ // If true, writes will always be an error
+ pub always_write_error: bool,
+
+ // If true, flushes will always be an error
+ pub always_flush_error: bool,
+
+ // If set, only up to this number of bytes will be written in a single
+ // call to `write`
+ pub accept_prefix: Option<usize>,
+
+ // If set, counts down with each write, and writes return an error
+ // when it hits 0
+ pub max_writes: Option<usize>,
+
+ // If set, attempting to write when max_writes == Some(0) will be an
+ // error; otherwise, it will return Ok(0).
+ pub error_after_max_writes: bool,
+}
+
+impl Write for ProgrammableSink {
+ fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+ if self.always_write_error {
+ return Err(io::Error::new(io::ErrorKind::Other, "test - always_write_error"));
+ }
+
+ match self.max_writes {
+ Some(0) if self.error_after_max_writes => {
+ return Err(io::Error::new(io::ErrorKind::Other, "test - max_writes"));
+ }
+ Some(0) => return Ok(0),
+ Some(ref mut count) => *count -= 1,
+ None => {}
+ }
+
+ let len = match self.accept_prefix {
+ None => data.len(),
+ Some(prefix) => data.len().min(prefix),
+ };
+
+ let data = &data[..len];
+ self.buffer.extend_from_slice(data);
+
+ Ok(len)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ if self.always_flush_error {
+ Err(io::Error::new(io::ErrorKind::Other, "test - always_flush_error"))
+ } else {
+ Ok(())
+ }
+ }
+}
+
+/// Previously the `LineWriter` could successfully write some bytes but
+/// then fail to report that it has done so. Additionally, an erroneous
+/// flush after a successful write was permanently ignored.
+///
+/// Test that a line writer correctly reports the number of written bytes,
+/// and that it attempts to flush buffered lines from previous writes
+/// before processing new data
+///
+/// Regression test for #37807
+#[test]
+fn erroneous_flush_retried() {
+ let writer = ProgrammableSink {
+ // Only write up to 4 bytes at a time
+ accept_prefix: Some(4),
+
+ // Accept the first two writes, then error the others
+ max_writes: Some(2),
+ error_after_max_writes: true,
+
+ ..Default::default()
+ };
+
+ // This should write the first 4 bytes. The rest will be buffered, out
+ // to the last newline.
+ let mut writer = LineWriter::new(writer);
+ assert_eq!(writer.write(b"a\nb\nc\nd\ne").unwrap(), 8);
+
+ // This write should attempt to flush "c\nd\n", then buffer "e". No
+ // errors should happen here because no further writes should be
+ // attempted against `writer`.
+ assert_eq!(writer.write(b"e").unwrap(), 1);
+ assert_eq!(&writer.get_ref().buffer, b"a\nb\nc\nd\n");
+}
+
+#[test]
+fn line_vectored() {
+ let mut a = LineWriter::new(Vec::new());
+ assert_eq!(
+ a.write_vectored(&[
+ IoSlice::new(&[]),
+ IoSlice::new(b"\n"),
+ IoSlice::new(&[]),
+ IoSlice::new(b"a"),
+ ])
+ .unwrap(),
+ 2,
+ );
+ assert_eq!(a.get_ref(), b"\n");
+
+ assert_eq!(
+ a.write_vectored(&[
+ IoSlice::new(&[]),
+ IoSlice::new(b"b"),
+ IoSlice::new(&[]),
+ IoSlice::new(b"a"),
+ IoSlice::new(&[]),
+ IoSlice::new(b"c"),
+ ])
+ .unwrap(),
+ 3,
+ );
+ assert_eq!(a.get_ref(), b"\n");
+ a.flush().unwrap();
+ assert_eq!(a.get_ref(), b"\nabac");
+ assert_eq!(a.write_vectored(&[]).unwrap(), 0);
+ assert_eq!(
+ a.write_vectored(&[
+ IoSlice::new(&[]),
+ IoSlice::new(&[]),
+ IoSlice::new(&[]),
+ IoSlice::new(&[]),
+ ])
+ .unwrap(),
+ 0,
+ );
+ assert_eq!(a.write_vectored(&[IoSlice::new(b"a\nb"),]).unwrap(), 3);
+ assert_eq!(a.get_ref(), b"\nabaca\nb");
+}
+
+#[test]
+fn line_vectored_partial_and_errors() {
+ use crate::collections::VecDeque;
+
+ enum Call {
+ Write { inputs: Vec<&'static [u8]>, output: io::Result<usize> },
+ Flush { output: io::Result<()> },
+ }
+
+ #[derive(Default)]
+ struct Writer {
+ calls: VecDeque<Call>,
+ }
+
+ impl Write for Writer {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.write_vectored(&[IoSlice::new(buf)])
+ }
+
+ fn write_vectored(&mut self, buf: &[IoSlice<'_>]) -> io::Result<usize> {
+ match self.calls.pop_front().expect("unexpected call to write") {
+ Call::Write { inputs, output } => {
+ assert_eq!(inputs, buf.iter().map(|b| &**b).collect::<Vec<_>>());
+ output
+ }
+ Call::Flush { .. } => panic!("unexpected call to write; expected a flush"),
+ }
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ match self.calls.pop_front().expect("Unexpected call to flush") {
+ Call::Flush { output } => output,
+ Call::Write { .. } => panic!("unexpected call to flush; expected a write"),
+ }
+ }
+ }
+
+ impl Drop for Writer {
+ fn drop(&mut self) {
+ if !thread::panicking() {
+ assert_eq!(self.calls.len(), 0);
+ }
+ }
+ }
+
+ // partial writes keep going
+ let mut a = LineWriter::new(Writer::default());
+ a.write_vectored(&[IoSlice::new(&[]), IoSlice::new(b"abc")]).unwrap();
+
+ a.get_mut().calls.push_back(Call::Write { inputs: vec![b"abc"], output: Ok(1) });
+ a.get_mut().calls.push_back(Call::Write { inputs: vec![b"bc"], output: Ok(2) });
+ a.get_mut().calls.push_back(Call::Write { inputs: vec![b"x", b"\n"], output: Ok(2) });
+
+ a.write_vectored(&[IoSlice::new(b"x"), IoSlice::new(b"\n")]).unwrap();
+
+ a.get_mut().calls.push_back(Call::Flush { output: Ok(()) });
+ a.flush().unwrap();
+
+ // erroneous writes stop and don't write more
+ a.get_mut().calls.push_back(Call::Write { inputs: vec![b"x", b"\na"], output: Err(err()) });
+ a.get_mut().calls.push_back(Call::Flush { output: Ok(()) });
+ assert!(a.write_vectored(&[IoSlice::new(b"x"), IoSlice::new(b"\na")]).is_err());
+ a.flush().unwrap();
+
+ fn err() -> io::Error {
+ io::Error::new(io::ErrorKind::Other, "x")
+ }
+}
+
+/// Test that, in cases where vectored writing is not enabled, the
+/// LineWriter uses the normal `write` call, which handles partial lines
+/// more correctly
+#[test]
+fn line_vectored_ignored() {
+ let writer = ProgrammableSink::default();
+ let mut writer = LineWriter::new(writer);
+
+ let content = [
+ IoSlice::new(&[]),
+ IoSlice::new(b"Line 1\nLine"),
+ IoSlice::new(b" 2\nLine 3\nL"),
+ IoSlice::new(&[]),
+ IoSlice::new(&[]),
+ IoSlice::new(b"ine 4"),
+ IoSlice::new(b"\nLine 5\n"),
+ ];
+
+ let count = writer.write_vectored(&content).unwrap();
+ assert_eq!(count, 11);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
+
+ let count = writer.write_vectored(&content[2..]).unwrap();
+ assert_eq!(count, 11);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
+
+ let count = writer.write_vectored(&content[5..]).unwrap();
+ assert_eq!(count, 5);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
+
+ let count = writer.write_vectored(&content[6..]).unwrap();
+ assert_eq!(count, 8);
+ assert_eq!(
+ writer.get_ref().buffer.as_slice(),
+ b"Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n".as_ref()
+ );
+}
+
+/// Test that, given this input:
+///
+/// Line 1\n
+/// Line 2\n
+/// Line 3\n
+/// Line 4
+///
+/// And given a result that only writes to midway through Line 2
+///
+/// That only up to the end of Line 3 is buffered
+///
+/// This behavior is desirable because it prevents flushing partial lines
+#[test]
+fn partial_write_buffers_line() {
+ let writer = ProgrammableSink { accept_prefix: Some(13), ..Default::default() };
+ let mut writer = LineWriter::new(writer);
+
+ assert_eq!(writer.write(b"Line 1\nLine 2\nLine 3\nLine4").unwrap(), 21);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2");
+
+ assert_eq!(writer.write(b"Line 4").unwrap(), 6);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
+}
+
+/// Test that, given this input:
+///
+/// Line 1\n
+/// Line 2\n
+/// Line 3
+///
+/// And given that the full write of lines 1 and 2 was successful
+/// That data up to Line 3 is buffered
+#[test]
+fn partial_line_buffered_after_line_write() {
+ let writer = ProgrammableSink::default();
+ let mut writer = LineWriter::new(writer);
+
+ assert_eq!(writer.write(b"Line 1\nLine 2\nLine 3").unwrap(), 20);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\n");
+
+ assert!(writer.flush().is_ok());
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3");
+}
+
+/// Test that a partial line that exceeds the capacity of the LineWriter's
+/// internal buffer (that is, a long line without a trailing newline) is
+/// written through to the inner writer
+#[test]
+fn long_line_flushed() {
+ let writer = ProgrammableSink::default();
+ let mut writer = LineWriter::with_capacity(5, writer);
+
+ assert_eq!(writer.write(b"0123456789").unwrap(), 10);
+ assert_eq!(&writer.get_ref().buffer, b"0123456789");
+}
+
+/// Test that a very long partial line written *after* a complete line has
+/// been successfully flushed is buffered unconditionally, and that no
+/// additional writes take place. This preserves the property that `write`
+/// makes at-most-one attempt to write new data.
+#[test]
+fn line_long_tail_not_flushed() {
+ let writer = ProgrammableSink::default();
+ let mut writer = LineWriter::with_capacity(5, writer);
+
+ // Assert that Line 1\n is flushed, and 01234 is buffered
+ assert_eq!(writer.write(b"Line 1\n0123456789").unwrap(), 12);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
+
+ // Because the buffer is full, this subsequent write will flush it
+ assert_eq!(writer.write(b"5").unwrap(), 1);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\n01234");
+}
+
+/// Test that, if an attempt to pre-flush buffered data returns Ok(0),
+/// this is propagated as an error.
+#[test]
+fn line_buffer_write0_error() {
+ let writer = ProgrammableSink {
+ // Accept one write, then return Ok(0) on subsequent ones
+ max_writes: Some(1),
+
+ ..Default::default()
+ };
+ let mut writer = LineWriter::new(writer);
+
+ // This should write "Line 1\n" and buffer "Partial"
+ assert_eq!(writer.write(b"Line 1\nPartial").unwrap(), 14);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
+
+ // This will attempt to flush "partial", which will return Ok(0), which
+ // needs to be an error, because we've already informed the client
+ // that we accepted the write.
+ let err = writer.write(b" Line End\n").unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::WriteZero);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
+}
+
+/// Test that, if a write returns Ok(0) after a successful pre-flush, this
+/// is propagated as Ok(0)
+#[test]
+fn line_buffer_write0_normal() {
+ let writer = ProgrammableSink {
+ // Accept two writes, then return Ok(0) on subsequent ones
+ max_writes: Some(2),
+
+ ..Default::default()
+ };
+ let mut writer = LineWriter::new(writer);
+
+ // This should write "Line 1\n" and buffer "Partial"
+ assert_eq!(writer.write(b"Line 1\nPartial").unwrap(), 14);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
+
+ // This will flush partial, which will succeed, but then return Ok(0)
+ // when flushing " Line End\n"
+ assert_eq!(writer.write(b" Line End\n").unwrap(), 0);
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nPartial");
+}
+
+/// LineWriter has a custom `write_all`; make sure it works correctly
+#[test]
+fn line_write_all() {
+ let writer = ProgrammableSink {
+ // Only write 5 bytes at a time
+ accept_prefix: Some(5),
+ ..Default::default()
+ };
+ let mut writer = LineWriter::new(writer);
+
+ writer.write_all(b"Line 1\nLine 2\nLine 3\nLine 4\nPartial").unwrap();
+ assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\nLine 4\n");
+ writer.write_all(b" Line 5\n").unwrap();
+ assert_eq!(
+ writer.get_ref().buffer.as_slice(),
+ b"Line 1\nLine 2\nLine 3\nLine 4\nPartial Line 5\n".as_ref(),
+ );
+}
+
+#[test]
+fn line_write_all_error() {
+ let writer = ProgrammableSink {
+ // Only accept up to 3 writes of up to 5 bytes each
+ accept_prefix: Some(5),
+ max_writes: Some(3),
+ ..Default::default()
+ };
+
+ let mut writer = LineWriter::new(writer);
+ let res = writer.write_all(b"Line 1\nLine 2\nLine 3\nLine 4\nPartial");
+ assert!(res.is_err());
+ // An error from write_all leaves everything in an indeterminate state,
+ // so there's nothing else to test here
+}
+
+/// Under certain circumstances, the old implementation of LineWriter
+/// would try to buffer "to the last newline" but be forced to buffer
+/// less than that, leading to inappropriate partial line writes.
+/// Regression test for that issue.
+#[test]
+fn partial_multiline_buffering() {
+ let writer = ProgrammableSink {
+ // Write only up to 5 bytes at a time
+ accept_prefix: Some(5),
+ ..Default::default()
+ };
+
+ let mut writer = LineWriter::with_capacity(10, writer);
+
+ let content = b"AAAAABBBBB\nCCCCDDDDDD\nEEE";
+
+ // When content is written, LineWriter will try to write blocks A, B,
+ // C, and D. Only block A will succeed. Under the old behavior, LineWriter
+ // would then try to buffer B, C and D, but because its capacity is 10,
+ // it will only be able to buffer B and C. We don't want to buffer
+ // partial lines alongside whole lines, so the correct behavior
+ // is to buffer only block B (out to the newline)
+ assert_eq!(writer.write(content).unwrap(), 11);
+ assert_eq!(writer.get_ref().buffer, *b"AAAAA");
+
+ writer.flush().unwrap();
+ assert_eq!(writer.get_ref().buffer, *b"AAAAABBBBB\n");
+}
+
+/// Same as `partial_multiline_buffering`, but covering the case where NO
+/// full line fits in the buffer: just buffer as much as possible
+#[test]
+fn partial_multiline_buffering_without_full_line() {
+ let writer = ProgrammableSink {
+ // Write only up to 5 bytes at a time
+ accept_prefix: Some(5),
+ ..Default::default()
+ };
+
+ let mut writer = LineWriter::with_capacity(5, writer);
+
+ let content = b"AAAAABBBBBBBBBB\nCCCCC\nDDDDD";
+
+ // When content is written, LineWriter will try to write blocks A, B,
+ // and C. Only block A will succeed. Under the old behavior, LineWriter
+ // would then try to buffer B and C, but because its capacity is 5,
+ // it will only be able to buffer part of B. Because it's not possible
+ // for it to buffer any complete lines, it should buffer as much of B as
+ // possible
+ assert_eq!(writer.write(content).unwrap(), 10);
+ assert_eq!(writer.get_ref().buffer, *b"AAAAA");
+
+ writer.flush().unwrap();
+ assert_eq!(writer.get_ref().buffer, *b"AAAAABBBBB");
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum RecordedEvent {
+ Write(String),
+ Flush,
+}
+
+#[derive(Debug, Clone, Default)]
+struct WriteRecorder {
+ pub events: Vec<RecordedEvent>,
+}
+
+impl Write for WriteRecorder {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ use crate::str::from_utf8;
+
+ self.events.push(RecordedEvent::Write(from_utf8(buf).unwrap().to_string()));
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.events.push(RecordedEvent::Flush);
+ Ok(())
+ }
+}
+
+/// Test that a normal, formatted writeln only results in a single write
+/// call to the underlying writer. A naive implementation of
+/// LineWriter::write_all results in two writes: one of the buffered data,
+/// and another of the final substring in the formatted set
+#[test]
+fn single_formatted_write() {
+ let writer = WriteRecorder::default();
+ let mut writer = LineWriter::new(writer);
+
+ // Under a naive implementation of LineWriter, this will result in two
+ // writes: "hello, world" and "!\n", because write() has to flush the
+ // buffer before attempting to write the last "!\n". write_all shouldn't
+ // have this limitation.
+ writeln!(&mut writer, "{}, {}!", "hello", "world").unwrap();
+ assert_eq!(writer.get_ref().events, [RecordedEvent::Write("hello, world!\n".to_string())]);
+}
diff --git a/library/std/src/io/copy.rs b/library/std/src/io/copy.rs
new file mode 100644
index 000000000..1a10245e4
--- /dev/null
+++ b/library/std/src/io/copy.rs
@@ -0,0 +1,161 @@
+use super::{BufWriter, ErrorKind, Read, ReadBuf, Result, Write, DEFAULT_BUF_SIZE};
+use crate::mem::MaybeUninit;
+
+/// Copies the entire contents of a reader into a writer.
+///
+/// This function will continuously read data from `reader` and then
+/// write it into `writer` in a streaming fashion until `reader`
+/// returns EOF.
+///
+/// On success, the total number of bytes that were copied from
+/// `reader` to `writer` is returned.
+///
+/// If you want to copy the contents of one file to another and you’re
+/// working with filesystem paths, see the [`fs::copy`] function.
+///
+/// [`fs::copy`]: crate::fs::copy
+///
+/// # Errors
+///
+/// This function will return an error immediately if any call to [`read`] or
+/// [`write`] returns an error. All instances of [`ErrorKind::Interrupted`] are
+/// handled by this function and the underlying operation is retried.
+///
+/// [`read`]: Read::read
+/// [`write`]: Write::write
+///
+/// # Examples
+///
+/// ```
+/// use std::io;
+///
+/// fn main() -> io::Result<()> {
+/// let mut reader: &[u8] = b"hello";
+/// let mut writer: Vec<u8> = vec![];
+///
+/// io::copy(&mut reader, &mut writer)?;
+///
+/// assert_eq!(&b"hello"[..], &writer[..]);
+/// Ok(())
+/// }
+/// ```
+///
+/// # Platform-specific behavior
+///
+/// On Linux (including Android), this function uses `copy_file_range(2)`,
+/// `sendfile(2)` or `splice(2)` syscalls to move data directly between file
+/// descriptors if possible.
+///
+/// Note that platform-specific behavior [may change in the future][changes].
+///
+/// [changes]: crate::io#platform-specific-behavior
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
+where
+ R: Read,
+ W: Write,
+{
+ cfg_if::cfg_if! {
+ if #[cfg(any(target_os = "linux", target_os = "android"))] {
+ crate::sys::kernel_copy::copy_spec(reader, writer)
+ } else {
+ generic_copy(reader, writer)
+ }
+ }
+}
+
+/// The userspace read-write-loop implementation of `io::copy` that is used when
+/// OS-specific specializations for copy offloading are not available or not applicable.
+pub(crate) fn generic_copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
+where
+ R: Read,
+ W: Write,
+{
+ BufferedCopySpec::copy_to(reader, writer)
+}
+
+/// Specialization of the read-write loop that either uses a stack buffer
+/// or reuses the internal buffer of a BufWriter
+trait BufferedCopySpec: Write {
+ fn copy_to<R: Read + ?Sized>(reader: &mut R, writer: &mut Self) -> Result<u64>;
+}
+
+impl<W: Write + ?Sized> BufferedCopySpec for W {
+ default fn copy_to<R: Read + ?Sized>(reader: &mut R, writer: &mut Self) -> Result<u64> {
+ stack_buffer_copy(reader, writer)
+ }
+}
+
+impl<I: Write> BufferedCopySpec for BufWriter<I> {
+ fn copy_to<R: Read + ?Sized>(reader: &mut R, writer: &mut Self) -> Result<u64> {
+ if writer.capacity() < DEFAULT_BUF_SIZE {
+ return stack_buffer_copy(reader, writer);
+ }
+
+ let mut len = 0;
+ let mut init = 0;
+
+ loop {
+ let buf = writer.buffer_mut();
+ let mut read_buf = ReadBuf::uninit(buf.spare_capacity_mut());
+
+ // SAFETY: init is either 0 or the initialized_len of the previous iteration
+ unsafe {
+ read_buf.assume_init(init);
+ }
+
+ if read_buf.capacity() >= DEFAULT_BUF_SIZE {
+ match reader.read_buf(&mut read_buf) {
+ Ok(()) => {
+ let bytes_read = read_buf.filled_len();
+
+ if bytes_read == 0 {
+ return Ok(len);
+ }
+
+ init = read_buf.initialized_len() - bytes_read;
+
+ // SAFETY: ReadBuf guarantees all of its filled bytes are init
+ unsafe { buf.set_len(buf.len() + bytes_read) };
+ len += bytes_read as u64;
+ // Read again if the buffer still has enough capacity, as BufWriter itself would do
+ // This will occur if the reader returns short reads
+ continue;
+ }
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ }
+ }
+
+ writer.flush_buf()?;
+ }
+ }
+}
+
+fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
+ reader: &mut R,
+ writer: &mut W,
+) -> Result<u64> {
+ let mut buf = [MaybeUninit::uninit(); DEFAULT_BUF_SIZE];
+ let mut buf = ReadBuf::uninit(&mut buf);
+
+ let mut len = 0;
+
+ loop {
+ match reader.read_buf(&mut buf) {
+ Ok(()) => {}
+ Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ };
+
+ if buf.filled().is_empty() {
+ break;
+ }
+
+ len += buf.filled().len() as u64;
+ writer.write_all(buf.filled())?;
+ buf.clear();
+ }
+
+ Ok(len)
+}
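+
+// A minimal usage sketch (the module and test names below are illustrative
+// additions, not part of the surrounding API): copying into a `BufWriter`
+// exercises the `BufferedCopySpec` specialization above, which fills the
+// writer's internal buffer directly instead of the intermediate stack buffer.
+#[cfg(test)]
+mod buffered_copy_sketch {
+ use crate::io::{self, BufWriter, Write};
+
+ #[test]
+ fn copy_fills_bufwriter_buffer() {
+ let mut src: &[u8] = b"hello world";
+ let mut dst = BufWriter::new(Vec::new());
+ // `io::copy` reports the total number of bytes copied.
+ assert_eq!(io::copy(&mut src, &mut dst).unwrap(), 11);
+ // Flush so the buffered bytes reach the inner `Vec`.
+ dst.flush().unwrap();
+ assert_eq!(dst.get_ref().as_slice(), b"hello world");
+ }
+}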
diff --git a/library/std/src/io/cursor.rs b/library/std/src/io/cursor.rs
new file mode 100644
index 000000000..f3fbfc447
--- /dev/null
+++ b/library/std/src/io/cursor.rs
@@ -0,0 +1,640 @@
+#[cfg(test)]
+mod tests;
+
+use crate::io::prelude::*;
+
+use crate::alloc::Allocator;
+use crate::cmp;
+use crate::io::{self, ErrorKind, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+
+/// A `Cursor` wraps an in-memory buffer and provides it with a
+/// [`Seek`] implementation.
+///
+/// `Cursor`s are used with in-memory buffers, anything implementing
+/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
+/// allowing these buffers to be used anywhere you might use a reader or writer
+/// that does actual I/O.
+///
+/// The standard library implements some I/O traits on various types which
+/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
+/// <code>Cursor<[&\[u8\]][bytes]></code>.
+///
+/// # Examples
+///
+/// We may want to write bytes to a [`File`] in our production
+/// code, but use an in-memory buffer in our tests. We can do this with
+/// `Cursor`:
+///
+/// [bytes]: crate::slice "slice"
+/// [`File`]: crate::fs::File
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::io::{self, SeekFrom};
+/// use std::fs::File;
+///
+/// // a library function we've written
+/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
+/// writer.seek(SeekFrom::End(-10))?;
+///
+/// for i in 0..10 {
+/// writer.write(&[i])?;
+/// }
+///
+/// // all went well
+/// Ok(())
+/// }
+///
+/// # fn foo() -> io::Result<()> {
+/// // Here's some code that uses this library function.
+/// //
+/// // We might want to use a BufReader here for efficiency, but let's
+/// // keep this example focused.
+/// let mut file = File::create("foo.txt")?;
+///
+/// write_ten_bytes_at_end(&mut file)?;
+/// # Ok(())
+/// # }
+///
+/// // now let's write a test
+/// #[test]
+/// fn test_writes_bytes() {
+/// // setting up a real File is much slower than an in-memory buffer,
+/// // let's use a cursor instead
+/// use std::io::Cursor;
+/// let mut buff = Cursor::new(vec![0; 15]);
+///
+/// write_ten_bytes_at_end(&mut buff).unwrap();
+///
+/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug, Default, Eq, PartialEq)]
+pub struct Cursor<T> {
+ inner: T,
+ pos: u64,
+}
+
+impl<T> Cursor<T> {
+ /// Creates a new cursor wrapping the provided underlying in-memory buffer.
+ ///
+ /// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
+ /// is not empty. So writing to cursor starts with overwriting [`Vec`]
+ /// content, not with appending to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ /// ```
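+ ///
+ /// A minimal sketch of the overwrite behavior described above: the cursor
+ /// starts at position `0`, so the first write replaces existing bytes
+ /// rather than appending to them.
+ ///
+ /// ```
+ /// use std::io::{Cursor, Write};
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ /// // The first write lands at position 0 and overwrites the existing bytes.
+ /// buff.write_all(&[9, 9]).unwrap();
+ /// assert_eq!(buff.into_inner(), vec![9, 9, 3, 4, 5]);
+ /// ```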
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_io_structs", issue = "78812")]
+ pub const fn new(inner: T) -> Cursor<T> {
+ Cursor { pos: 0, inner }
+ }
+
+ /// Consumes this cursor, returning the underlying value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let vec = buff.into_inner();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+
+ /// Gets a reference to the underlying value in this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let reference = buff.get_ref();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_io_structs", issue = "78812")]
+ pub const fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying value in this cursor.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying value as it may corrupt this cursor's position.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let reference = buff.get_mut();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ /// Returns the current position of this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ /// use std::io::prelude::*;
+ /// use std::io::SeekFrom;
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(buff.position(), 0);
+ ///
+ /// buff.seek(SeekFrom::Current(2)).unwrap();
+ /// assert_eq!(buff.position(), 2);
+ ///
+ /// buff.seek(SeekFrom::Current(-1)).unwrap();
+ /// assert_eq!(buff.position(), 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_io_structs", issue = "78812")]
+ pub const fn position(&self) -> u64 {
+ self.pos
+ }
+
+ /// Sets the position of this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(buff.position(), 0);
+ ///
+ /// buff.set_position(2);
+ /// assert_eq!(buff.position(), 2);
+ ///
+ /// buff.set_position(4);
+ /// assert_eq!(buff.position(), 4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn set_position(&mut self, pos: u64) {
+ self.pos = pos;
+ }
+}
+
+impl<T> Cursor<T>
+where
+ T: AsRef<[u8]>,
+{
+ /// Returns the remaining slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cursor_remaining)]
+ /// use std::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
+ ///
+ /// buff.set_position(2);
+ /// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
+ ///
+ /// buff.set_position(4);
+ /// assert_eq!(buff.remaining_slice(), &[5]);
+ ///
+ /// buff.set_position(6);
+ /// assert_eq!(buff.remaining_slice(), &[]);
+ /// ```
+ #[unstable(feature = "cursor_remaining", issue = "86369")]
+ pub fn remaining_slice(&self) -> &[u8] {
+ let len = self.pos.min(self.inner.as_ref().len() as u64);
+ &self.inner.as_ref()[(len as usize)..]
+ }
+
+ /// Returns `true` if the remaining slice is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cursor_remaining)]
+ /// use std::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// buff.set_position(2);
+ /// assert!(!buff.is_empty());
+ ///
+ /// buff.set_position(5);
+ /// assert!(buff.is_empty());
+ ///
+ /// buff.set_position(10);
+ /// assert!(buff.is_empty());
+ /// ```
+ #[unstable(feature = "cursor_remaining", issue = "86369")]
+ pub fn is_empty(&self) -> bool {
+ self.pos >= self.inner.as_ref().len() as u64
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Cursor<T>
+where
+ T: Clone,
+{
+ #[inline]
+ fn clone(&self) -> Self {
+ Cursor { inner: self.inner.clone(), pos: self.pos }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.inner.clone_from(&other.inner);
+ self.pos = other.pos;
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> io::Seek for Cursor<T>
+where
+ T: AsRef<[u8]>,
+{
+ fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
+ let (base_pos, offset) = match style {
+ SeekFrom::Start(n) => {
+ self.pos = n;
+ return Ok(n);
+ }
+ SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
+ SeekFrom::Current(n) => (self.pos, n),
+ };
+ match base_pos.checked_add_signed(offset) {
+ Some(n) => {
+ self.pos = n;
+ Ok(self.pos)
+ }
+ None => Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "invalid seek to a negative or overflowing position",
+ )),
+ }
+ }
+
+ fn stream_len(&mut self) -> io::Result<u64> {
+ Ok(self.inner.as_ref().len() as u64)
+ }
+
+ fn stream_position(&mut self) -> io::Result<u64> {
+ Ok(self.pos)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Read for Cursor<T>
+where
+ T: AsRef<[u8]>,
+{
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let n = Read::read(&mut self.remaining_slice(), buf)?;
+ self.pos += n as u64;
+ Ok(n)
+ }
+
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ let prev_filled = buf.filled_len();
+
+ Read::read_buf(&mut self.fill_buf()?, buf)?;
+
+ self.pos += (buf.filled_len() - prev_filled) as u64;
+
+ Ok(())
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ let mut nread = 0;
+ for buf in bufs {
+ let n = self.read(buf)?;
+ nread += n;
+ if n < buf.len() {
+ break;
+ }
+ }
+ Ok(nread)
+ }
+
+ fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ let n = buf.len();
+ Read::read_exact(&mut self.remaining_slice(), buf)?;
+ self.pos += n as u64;
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> BufRead for Cursor<T>
+where
+ T: AsRef<[u8]>,
+{
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ Ok(self.remaining_slice())
+ }
+ fn consume(&mut self, amt: usize) {
+ self.pos += amt as u64;
+ }
+}
+
+// Non-resizing write implementation
+#[inline]
+fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
+ let pos = cmp::min(*pos_mut, slice.len() as u64);
+ let amt = (&mut slice[(pos as usize)..]).write(buf)?;
+ *pos_mut += amt as u64;
+ Ok(amt)
+}
+
+#[inline]
+fn slice_write_vectored(
+ pos_mut: &mut u64,
+ slice: &mut [u8],
+ bufs: &[IoSlice<'_>],
+) -> io::Result<usize> {
+ let mut nwritten = 0;
+ for buf in bufs {
+ let n = slice_write(pos_mut, slice, buf)?;
+ nwritten += n;
+ if n < buf.len() {
+ break;
+ }
+ }
+ Ok(nwritten)
+}
+
+/// Reserves the required space, and pads the vec with 0s if necessary.
+fn reserve_and_pad<A: Allocator>(
+ pos_mut: &mut u64,
+ vec: &mut Vec<u8, A>,
+ buf_len: usize,
+) -> io::Result<usize> {
+ let pos: usize = (*pos_mut).try_into().map_err(|_| {
+ io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "cursor position exceeds maximum possible vector length",
+ )
+ })?;
+
+ // For safety reasons, we don't want these numbers to overflow;
+ // otherwise our allocation won't be large enough.
+ let desired_cap = pos.saturating_add(buf_len);
+ if desired_cap > vec.capacity() {
+ // We want our vec's total capacity
+ // to have room for (pos+buf_len) bytes. Reserve allocates
+ // based on additional elements from the length, so we need to
+ // reserve the difference
+ vec.reserve(desired_cap - vec.len());
+ }
+ // Pad if pos is above the current len.
+ if pos > vec.len() {
+ let diff = pos - vec.len();
+ // Unfortunately, `resize()` would suffice, but the optimiser does not
+ // realise that the `reserve` it performs can be eliminated. So we do the
+ // fill manually to eliminate that extra branch.
+ let spare = vec.spare_capacity_mut();
+ debug_assert!(spare.len() >= diff);
+ // Safety: we have allocated enough capacity for this.
+ // And we are only writing, not reading
+ unsafe {
+ spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
+ vec.set_len(pos);
+ }
+ }
+
+ Ok(pos)
+}
+
+/// Writes the slice to the vec without allocating.
+///
+/// # Safety
+///
+/// `vec` must have at least `buf.len()` spare capacity.
+unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
+where
+ A: Allocator,
+{
+ debug_assert!(vec.capacity() >= pos + buf.len());
+ vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
+ pos + buf.len()
+}
+
+/// Resizing write implementation for [`Cursor`]
+///
+/// Cursor is allowed to have a pre-allocated and initialised
+/// vector body, but with a position of 0. This means the [`Write`]
+/// will overwrite the contents of the vec.
+///
+/// This also allows for the vec body to be empty, but with a position of N.
+/// This means that [`Write`] will pad the vec with 0 initially,
+/// before writing anything from that point
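+///
+/// A minimal sketch of that padding behavior, using only the public
+/// `Cursor` and `Write` APIs:
+///
+/// ```
+/// use std::io::{Cursor, Write};
+///
+/// let mut c = Cursor::new(Vec::new());
+/// c.set_position(4);
+/// c.write_all(b"ab").unwrap();
+/// // Positions 0..4 are zero-padded before the write lands at offset 4.
+/// assert_eq!(c.into_inner(), vec![0, 0, 0, 0, b'a', b'b']);
+/// ```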
+fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
+where
+ A: Allocator,
+{
+ let buf_len = buf.len();
+ let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
+
+ // Write the buf then progress the vec forward if necessary
+ // Safety: we have ensured that the capacity is available
+ // and that all bytes get written up to pos
+ unsafe {
+ pos = vec_write_unchecked(pos, vec, buf);
+ if pos > vec.len() {
+ vec.set_len(pos);
+ }
+ };
+
+ // Bump us forward
+ *pos_mut += buf_len as u64;
+ Ok(buf_len)
+}
+
+/// Resizing write_vectored implementation for [`Cursor`]
+///
+/// Cursor is allowed to have a pre-allocated and initialised
+/// vector body, but with a position of 0. This means the [`Write`]
+/// will overwrite the contents of the vec.
+///
+/// This also allows for the vec body to be empty, but with a position of N.
+/// This means that [`Write`] will pad the vec with 0 initially,
+/// before writing anything from that point
+fn vec_write_vectored<A>(
+ pos_mut: &mut u64,
+ vec: &mut Vec<u8, A>,
+ bufs: &[IoSlice<'_>],
+) -> io::Result<usize>
+where
+ A: Allocator,
+{
+ // For safety reasons, we don't want this sum to overflow ever.
+ // If this saturates, the reserve should panic to avoid any unsound writing.
+ let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
+ let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
+
+ // Write the buf then progress the vec forward if necessary
+ // Safety: we have ensured that the capacity is available
+ // and that all bytes get written up to the last pos
+ unsafe {
+ for buf in bufs {
+ pos = vec_write_unchecked(pos, vec, buf);
+ }
+ if pos > vec.len() {
+ vec.set_len(pos);
+ }
+ }
+
+ // Bump us forward
+ *pos_mut += buf_len as u64;
+ Ok(buf_len)
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for Cursor<&mut [u8]> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ slice_write(&mut self.pos, self.inner, buf)
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ slice_write_vectored(&mut self.pos, self.inner, bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "cursor_mut_vec", since = "1.25.0")]
+impl<A> Write for Cursor<&mut Vec<u8, A>>
+where
+ A: Allocator,
+{
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ vec_write(&mut self.pos, self.inner, buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ vec_write_vectored(&mut self.pos, self.inner, bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> Write for Cursor<Vec<u8, A>>
+where
+ A: Allocator,
+{
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ vec_write(&mut self.pos, &mut self.inner, buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "cursor_box_slice", since = "1.5.0")]
+impl<A> Write for Cursor<Box<[u8], A>>
+where
+ A: Allocator,
+{
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ slice_write(&mut self.pos, &mut self.inner, buf)
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "cursor_array", since = "1.61.0")]
+impl<const N: usize> Write for Cursor<[u8; N]> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ slice_write(&mut self.pos, &mut self.inner, buf)
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
diff --git a/library/std/src/io/cursor/tests.rs b/library/std/src/io/cursor/tests.rs
new file mode 100644
index 000000000..d7c203c29
--- /dev/null
+++ b/library/std/src/io/cursor/tests.rs
@@ -0,0 +1,567 @@
+use crate::io::prelude::*;
+use crate::io::{Cursor, IoSlice, IoSliceMut, SeekFrom};
+
+#[test]
+fn test_vec_writer() {
+ let mut writer = Vec::new();
+ assert_eq!(writer.write(&[0]).unwrap(), 1);
+ assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
+ assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
+ assert_eq!(
+ writer
+ .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
+ .unwrap(),
+ 3
+ );
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(writer, b);
+}
+
+#[test]
+fn test_mem_writer() {
+ let mut writer = Cursor::new(Vec::new());
+ writer.set_position(10);
+ assert_eq!(writer.write(&[0]).unwrap(), 1);
+ assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
+ assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
+ assert_eq!(
+ writer
+ .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
+ .unwrap(),
+ 3
+ );
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(&writer.get_ref()[..10], &[0; 10]);
+ assert_eq!(&writer.get_ref()[10..], b);
+}
+
+#[test]
+fn test_mem_writer_preallocated() {
+ let mut writer = Cursor::new(vec![0, 0, 0, 0, 0, 0, 0, 0, 8, 9, 10]);
+ assert_eq!(writer.write(&[0]).unwrap(), 1);
+ assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
+ assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(&writer.get_ref()[..], b);
+}
+
+#[test]
+fn test_mem_mut_writer() {
+ let mut vec = Vec::new();
+ let mut writer = Cursor::new(&mut vec);
+ assert_eq!(writer.write(&[0]).unwrap(), 1);
+ assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
+ assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
+ assert_eq!(
+ writer
+ .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
+ .unwrap(),
+ 3
+ );
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ assert_eq!(&writer.get_ref()[..], b);
+}
+
+fn test_slice_writer<T>(writer: &mut Cursor<T>)
+where
+ T: AsRef<[u8]>,
+ Cursor<T>: Write,
+{
+ assert_eq!(writer.position(), 0);
+ assert_eq!(writer.write(&[0]).unwrap(), 1);
+ assert_eq!(writer.position(), 1);
+ assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
+ assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
+ assert_eq!(writer.position(), 8);
+ assert_eq!(writer.write(&[]).unwrap(), 0);
+ assert_eq!(writer.position(), 8);
+
+ assert_eq!(writer.write(&[8, 9]).unwrap(), 1);
+ assert_eq!(writer.write(&[10]).unwrap(), 0);
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
+ assert_eq!(writer.get_ref().as_ref(), b);
+}
+
+fn test_slice_writer_vectored<T>(writer: &mut Cursor<T>)
+where
+ T: AsRef<[u8]>,
+ Cursor<T>: Write,
+{
+ assert_eq!(writer.position(), 0);
+ assert_eq!(writer.write_vectored(&[IoSlice::new(&[0])]).unwrap(), 1);
+ assert_eq!(writer.position(), 1);
+ assert_eq!(
+ writer.write_vectored(&[IoSlice::new(&[1, 2, 3]), IoSlice::new(&[4, 5, 6, 7]),]).unwrap(),
+ 7,
+ );
+ assert_eq!(writer.position(), 8);
+ assert_eq!(writer.write_vectored(&[]).unwrap(), 0);
+ assert_eq!(writer.position(), 8);
+
+ assert_eq!(writer.write_vectored(&[IoSlice::new(&[8, 9])]).unwrap(), 1);
+ assert_eq!(writer.write_vectored(&[IoSlice::new(&[10])]).unwrap(), 0);
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
+ assert_eq!(writer.get_ref().as_ref(), b);
+}
+
+#[test]
+fn test_box_slice_writer() {
+ let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
+ test_slice_writer(&mut writer);
+}
+
+#[test]
+fn test_box_slice_writer_vectored() {
+ let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
+ test_slice_writer_vectored(&mut writer);
+}
+
+#[test]
+fn test_array_writer() {
+ let mut writer = Cursor::new([0u8; 9]);
+ test_slice_writer(&mut writer);
+}
+
+#[test]
+fn test_array_writer_vectored() {
+ let mut writer = Cursor::new([0u8; 9]);
+ test_slice_writer_vectored(&mut writer);
+}
+
+#[test]
+fn test_buf_writer() {
+ let mut buf = [0 as u8; 9];
+ let mut writer = Cursor::new(&mut buf[..]);
+ test_slice_writer(&mut writer);
+}
+
+#[test]
+fn test_buf_writer_vectored() {
+ let mut buf = [0 as u8; 9];
+ let mut writer = Cursor::new(&mut buf[..]);
+ test_slice_writer_vectored(&mut writer);
+}
+
+#[test]
+fn test_buf_writer_seek() {
+ let mut buf = [0 as u8; 8];
+ {
+ let mut writer = Cursor::new(&mut buf[..]);
+ assert_eq!(writer.position(), 0);
+ assert_eq!(writer.write(&[1]).unwrap(), 1);
+ assert_eq!(writer.position(), 1);
+
+ assert_eq!(writer.seek(SeekFrom::Start(2)).unwrap(), 2);
+ assert_eq!(writer.position(), 2);
+ assert_eq!(writer.write(&[2]).unwrap(), 1);
+ assert_eq!(writer.position(), 3);
+
+ assert_eq!(writer.seek(SeekFrom::Current(-2)).unwrap(), 1);
+ assert_eq!(writer.position(), 1);
+ assert_eq!(writer.write(&[3]).unwrap(), 1);
+ assert_eq!(writer.position(), 2);
+
+ assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7);
+ assert_eq!(writer.position(), 7);
+ assert_eq!(writer.write(&[4]).unwrap(), 1);
+ assert_eq!(writer.position(), 8);
+ }
+ let b: &[_] = &[1, 3, 2, 0, 0, 0, 0, 4];
+ assert_eq!(buf, b);
+}
+
+#[test]
+fn test_buf_writer_error() {
+ let mut buf = [0 as u8; 2];
+ let mut writer = Cursor::new(&mut buf[..]);
+ assert_eq!(writer.write(&[0]).unwrap(), 1);
+ assert_eq!(writer.write(&[0, 0]).unwrap(), 1);
+ assert_eq!(writer.write(&[0, 0]).unwrap(), 0);
+}
+
+#[test]
+fn test_mem_reader() {
+ let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
+ let mut buf = [];
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ assert_eq!(reader.position(), 0);
+ let mut buf = [0];
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.position(), 1);
+ let b: &[_] = &[0];
+ assert_eq!(buf, b);
+ let mut buf = [0; 4];
+ assert_eq!(reader.read(&mut buf).unwrap(), 4);
+ assert_eq!(reader.position(), 5);
+ let b: &[_] = &[1, 2, 3, 4];
+ assert_eq!(buf, b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 3);
+ let b: &[_] = &[5, 6, 7];
+ assert_eq!(&buf[..3], b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn test_mem_reader_vectored() {
+ let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
+ let mut buf = [];
+ assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
+ assert_eq!(reader.position(), 0);
+ let mut buf = [0];
+ assert_eq!(
+ reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
+ 1,
+ );
+ assert_eq!(reader.position(), 1);
+ let b: &[_] = &[0];
+ assert_eq!(buf, b);
+ let mut buf1 = [0; 4];
+ let mut buf2 = [0; 4];
+ assert_eq!(
+ reader
+ .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2),])
+ .unwrap(),
+ 7,
+ );
+ let b1: &[_] = &[1, 2, 3, 4];
+ let b2: &[_] = &[5, 6, 7];
+ assert_eq!(buf1, b1);
+ assert_eq!(&buf2[..3], b2);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn test_boxed_slice_reader() {
+ let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7].into_boxed_slice());
+ let mut buf = [];
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ assert_eq!(reader.position(), 0);
+ let mut buf = [0];
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.position(), 1);
+ let b: &[_] = &[0];
+ assert_eq!(buf, b);
+ let mut buf = [0; 4];
+ assert_eq!(reader.read(&mut buf).unwrap(), 4);
+ assert_eq!(reader.position(), 5);
+ let b: &[_] = &[1, 2, 3, 4];
+ assert_eq!(buf, b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 3);
+ let b: &[_] = &[5, 6, 7];
+ assert_eq!(&buf[..3], b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn test_boxed_slice_reader_vectored() {
+ let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7].into_boxed_slice());
+ let mut buf = [];
+ assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
+ assert_eq!(reader.position(), 0);
+ let mut buf = [0];
+ assert_eq!(
+ reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
+ 1,
+ );
+ assert_eq!(reader.position(), 1);
+ let b: &[_] = &[0];
+ assert_eq!(buf, b);
+ let mut buf1 = [0; 4];
+ let mut buf2 = [0; 4];
+ assert_eq!(
+ reader
+ .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],)
+ .unwrap(),
+ 7,
+ );
+ let b1: &[_] = &[1, 2, 3, 4];
+ let b2: &[_] = &[5, 6, 7];
+ assert_eq!(buf1, b1);
+ assert_eq!(&buf2[..3], b2);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn read_to_end() {
+ let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
+ let mut v = Vec::new();
+ reader.read_to_end(&mut v).unwrap();
+ assert_eq!(v, [0, 1, 2, 3, 4, 5, 6, 7]);
+}
+
+#[test]
+fn test_slice_reader() {
+ let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
+ let reader = &mut &in_buf[..];
+ let mut buf = [];
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ let mut buf = [0];
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.len(), 7);
+ let b: &[_] = &[0];
+ assert_eq!(&buf[..], b);
+ let mut buf = [0; 4];
+ assert_eq!(reader.read(&mut buf).unwrap(), 4);
+ assert_eq!(reader.len(), 3);
+ let b: &[_] = &[1, 2, 3, 4];
+ assert_eq!(&buf[..], b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 3);
+ let b: &[_] = &[5, 6, 7];
+ assert_eq!(&buf[..3], b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn test_slice_reader_vectored() {
+ let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
+ let reader = &mut &in_buf[..];
+ let mut buf = [];
+ assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
+ let mut buf = [0];
+ assert_eq!(
+ reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
+ 1,
+ );
+ assert_eq!(reader.len(), 7);
+ let b: &[_] = &[0];
+ assert_eq!(buf, b);
+ let mut buf1 = [0; 4];
+ let mut buf2 = [0; 4];
+ assert_eq!(
+ reader
+ .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],)
+ .unwrap(),
+ 7,
+ );
+ let b1: &[_] = &[1, 2, 3, 4];
+ let b2: &[_] = &[5, 6, 7];
+ assert_eq!(buf1, b1);
+ assert_eq!(&buf2[..3], b2);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn test_read_exact() {
+ let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
+ let reader = &mut &in_buf[..];
+ let mut buf = [];
+ assert!(reader.read_exact(&mut buf).is_ok());
+ let mut buf = [8];
+ assert!(reader.read_exact(&mut buf).is_ok());
+ assert_eq!(buf[0], 0);
+ assert_eq!(reader.len(), 7);
+ let mut buf = [0, 0, 0, 0, 0, 0, 0];
+ assert!(reader.read_exact(&mut buf).is_ok());
+ assert_eq!(buf, [1, 2, 3, 4, 5, 6, 7]);
+ assert_eq!(reader.len(), 0);
+ let mut buf = [0];
+ assert!(reader.read_exact(&mut buf).is_err());
+}
+
+#[test]
+fn test_buf_reader() {
+ let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
+ let mut reader = Cursor::new(&in_buf[..]);
+ let mut buf = [];
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ assert_eq!(reader.position(), 0);
+ let mut buf = [0];
+ assert_eq!(reader.read(&mut buf).unwrap(), 1);
+ assert_eq!(reader.position(), 1);
+ let b: &[_] = &[0];
+ assert_eq!(buf, b);
+ let mut buf = [0; 4];
+ assert_eq!(reader.read(&mut buf).unwrap(), 4);
+ assert_eq!(reader.position(), 5);
+ let b: &[_] = &[1, 2, 3, 4];
+ assert_eq!(buf, b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 3);
+ let b: &[_] = &[5, 6, 7];
+ assert_eq!(&buf[..3], b);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+}
+
+#[test]
+fn seek_past_end() {
+ let buf = [0xff];
+ let mut r = Cursor::new(&buf[..]);
+ assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
+ assert_eq!(r.read(&mut [0]).unwrap(), 0);
+
+ let mut r = Cursor::new(vec![10]);
+ assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
+ assert_eq!(r.read(&mut [0]).unwrap(), 0);
+
+ let mut buf = [0];
+ let mut r = Cursor::new(&mut buf[..]);
+ assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
+ assert_eq!(r.write(&[3]).unwrap(), 0);
+
+ let mut r = Cursor::new(vec![10].into_boxed_slice());
+ assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
+ assert_eq!(r.write(&[3]).unwrap(), 0);
+}
+
+#[test]
+fn seek_past_i64() {
+ let buf = [0xff];
+ let mut r = Cursor::new(&buf[..]);
+ assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
+ assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
+ assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
+ assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
+ assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
+ assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
+
+ let mut r = Cursor::new(vec![10]);
+ assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
+ assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
+ assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
+ assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
+ assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
+ assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
+
+ let mut buf = [0];
+ let mut r = Cursor::new(&mut buf[..]);
+ assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
+ assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
+ assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
+ assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
+ assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
+ assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
+
+ let mut r = Cursor::new(vec![10].into_boxed_slice());
+ assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
+ assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
+ assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
+ assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
+ assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
+ assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
+}
+
+#[test]
+fn seek_before_0() {
+ let buf = [0xff];
+ let mut r = Cursor::new(&buf[..]);
+ assert!(r.seek(SeekFrom::End(-2)).is_err());
+
+ let mut r = Cursor::new(vec![10]);
+ assert!(r.seek(SeekFrom::End(-2)).is_err());
+
+ let mut buf = [0];
+ let mut r = Cursor::new(&mut buf[..]);
+ assert!(r.seek(SeekFrom::End(-2)).is_err());
+
+ let mut r = Cursor::new(vec![10].into_boxed_slice());
+ assert!(r.seek(SeekFrom::End(-2)).is_err());
+}
+
+#[test]
+fn test_seekable_mem_writer() {
+ let mut writer = Cursor::new(Vec::<u8>::new());
+ assert_eq!(writer.position(), 0);
+ assert_eq!(writer.write(&[0]).unwrap(), 1);
+ assert_eq!(writer.position(), 1);
+ assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
+ assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
+ assert_eq!(writer.position(), 8);
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7];
+ assert_eq!(&writer.get_ref()[..], b);
+
+ assert_eq!(writer.seek(SeekFrom::Start(0)).unwrap(), 0);
+ assert_eq!(writer.position(), 0);
+ assert_eq!(writer.write(&[3, 4]).unwrap(), 2);
+ let b: &[_] = &[3, 4, 2, 3, 4, 5, 6, 7];
+ assert_eq!(&writer.get_ref()[..], b);
+
+ assert_eq!(writer.seek(SeekFrom::Current(1)).unwrap(), 3);
+ assert_eq!(writer.write(&[0, 1]).unwrap(), 2);
+ let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 7];
+ assert_eq!(&writer.get_ref()[..], b);
+
+ assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7);
+ assert_eq!(writer.write(&[1, 2]).unwrap(), 2);
+ let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2];
+ assert_eq!(&writer.get_ref()[..], b);
+
+ assert_eq!(writer.seek(SeekFrom::End(1)).unwrap(), 10);
+ assert_eq!(writer.write(&[1]).unwrap(), 1);
+ let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1];
+ assert_eq!(&writer.get_ref()[..], b);
+}
+
+#[test]
+fn vec_seek_past_end() {
+ let mut r = Cursor::new(Vec::new());
+ assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
+ assert_eq!(r.write(&[3]).unwrap(), 1);
+}
+
+#[test]
+fn vec_seek_before_0() {
+ let mut r = Cursor::new(Vec::new());
+ assert!(r.seek(SeekFrom::End(-2)).is_err());
+}
+
+#[test]
+#[cfg(target_pointer_width = "32")]
+fn vec_seek_and_write_past_usize_max() {
+ let mut c = Cursor::new(Vec::new());
+ c.set_position(usize::MAX as u64 + 1);
+ assert!(c.write_all(&[1, 2, 3]).is_err());
+}
+
+#[test]
+fn test_partial_eq() {
+ assert_eq!(Cursor::new(Vec::<u8>::new()), Cursor::new(Vec::<u8>::new()));
+}
+
+#[test]
+fn test_eq() {
+ struct AssertEq<T: Eq>(pub T);
+
+ let _: AssertEq<Cursor<Vec<u8>>> = AssertEq(Cursor::new(Vec::new()));
+}
+
+#[allow(dead_code)]
+fn const_cursor() {
+ const CURSOR: Cursor<&[u8]> = Cursor::new(&[0]);
+ const _: &&[u8] = CURSOR.get_ref();
+ const _: u64 = CURSOR.position();
+}
+
+#[bench]
+fn bench_write_vec(b: &mut test::Bencher) {
+ let slice = &[1; 128];
+
+ b.iter(|| {
+ let mut buf = b"some random data to overwrite".to_vec();
+ let mut cursor = Cursor::new(&mut buf);
+
+ let _ = cursor.write_all(slice);
+ test::black_box(&cursor);
+ })
+}
+
+#[bench]
+fn bench_write_vec_vectored(b: &mut test::Bencher) {
+ let slices = [
+ IoSlice::new(&[1; 128]),
+ IoSlice::new(&[2; 256]),
+ IoSlice::new(&[3; 512]),
+ IoSlice::new(&[4; 1024]),
+ IoSlice::new(&[5; 2048]),
+ IoSlice::new(&[6; 4096]),
+ IoSlice::new(&[7; 8192]),
+ IoSlice::new(&[8; 8192 * 2]),
+ ];
+
+ b.iter(|| {
+ let mut buf = b"some random data to overwrite".to_vec();
+ let mut cursor = Cursor::new(&mut buf);
+
+ let mut slices = slices;
+ let _ = cursor.write_all_vectored(&mut slices);
+ test::black_box(&cursor);
+ })
+}
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
new file mode 100644
index 000000000..ff7fdcae1
--- /dev/null
+++ b/library/std/src/io/error.rs
@@ -0,0 +1,960 @@
+#[cfg(test)]
+mod tests;
+
+#[cfg(target_pointer_width = "64")]
+mod repr_bitpacked;
+#[cfg(target_pointer_width = "64")]
+use repr_bitpacked::Repr;
+
+#[cfg(not(target_pointer_width = "64"))]
+mod repr_unpacked;
+#[cfg(not(target_pointer_width = "64"))]
+use repr_unpacked::Repr;
+
+use crate::convert::From;
+use crate::error;
+use crate::fmt;
+use crate::result;
+use crate::sys;
+
+/// A specialized [`Result`] type for I/O operations.
+///
+/// This type is broadly used across [`std::io`] for any operation which may
+/// produce an error.
+///
+/// This typedef is generally used to avoid writing out [`io::Error`] directly and
+/// is otherwise a direct mapping to [`Result`].
+///
+/// While usual Rust style is to import types directly, aliases of [`Result`]
+/// often are not, to make it easier to distinguish between them. [`Result`] is
+/// generally assumed to be [`std::result::Result`][`Result`], and so users of this alias
+/// will generally use `io::Result` instead of shadowing the [prelude]'s import
+/// of [`std::result::Result`][`Result`].
+///
+/// [`std::io`]: crate::io
+/// [`io::Error`]: Error
+/// [`Result`]: crate::result::Result
+/// [prelude]: crate::prelude
+///
+/// # Examples
+///
+/// A convenience function that bubbles an `io::Result` to its caller:
+///
+/// ```
+/// use std::io;
+///
+/// fn get_string() -> io::Result<String> {
+/// let mut buffer = String::new();
+///
+/// io::stdin().read_line(&mut buffer)?;
+///
+/// Ok(buffer)
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type Result<T> = result::Result<T, Error>;
+
+/// The error type for I/O operations of the [`Read`], [`Write`], [`Seek`], and
+/// associated traits.
+///
+/// Errors mostly originate from the underlying OS, but custom instances of
+/// `Error` can be created with crafted error messages and a particular value of
+/// [`ErrorKind`].
+///
+/// [`Read`]: crate::io::Read
+/// [`Write`]: crate::io::Write
+/// [`Seek`]: crate::io::Seek
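+///
+/// # Examples
+///
+/// A minimal sketch of the custom-construction path described above; the
+/// message string here is arbitrary:
+///
+/// ```
+/// use std::io::{Error, ErrorKind};
+///
+/// // A custom error with a chosen kind and message; no OS error is involved.
+/// let err = Error::new(ErrorKind::Other, "resource exhausted");
+/// assert_eq!(err.kind(), ErrorKind::Other);
+/// assert!(err.raw_os_error().is_none());
+/// ```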
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Error {
+ repr: Repr,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.repr, f)
+ }
+}
+
+// Only derive debug in tests, to make sure it
+// doesn't accidentally get printed.
+#[cfg_attr(test, derive(Debug))]
+enum ErrorData<C> {
+ Os(i32),
+ Simple(ErrorKind),
+ SimpleMessage(&'static SimpleMessage),
+ Custom(C),
+}
+
+// `#[repr(align(4))]` is probably redundant, it should have that value or
+// higher already. We include it just because repr_bitpacked.rs's encoding
+// requires an alignment >= 4 (note that `#[repr(align)]` will not reduce the
+// alignment required by the struct, only increase it).
+//
+// If we add more variants to ErrorData, this can be increased to 8, but it
+// should probably be behind `#[cfg_attr(target_pointer_width = "64", ...)]` or
+// whatever cfg we're using to enable the `repr_bitpacked` code, since only the
+// that version needs the alignment, and 8 is higher than the alignment we'll
+// have on 32 bit platforms.
+//
+// (For the sake of being explicit: the alignment requirement here only matters
+// if `error/repr_bitpacked.rs` is in use — for the unpacked repr it doesn't
+// matter at all)
+#[repr(align(4))]
+#[derive(Debug)]
+pub(crate) struct SimpleMessage {
+ kind: ErrorKind,
+ message: &'static str,
+}
+
+impl SimpleMessage {
+ pub(crate) const fn new(kind: ErrorKind, message: &'static str) -> Self {
+ Self { kind, message }
+ }
+}
+
+/// Create and return an `io::Error` for a given `ErrorKind` and constant
+/// message. This doesn't allocate.
+pub(crate) macro const_io_error($kind:expr, $message:expr $(,)?) {
+ $crate::io::error::Error::from_static_message({
+ const MESSAGE_DATA: $crate::io::error::SimpleMessage =
+ $crate::io::error::SimpleMessage::new($kind, $message);
+ &MESSAGE_DATA
+ })
+}
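+
+// Usage sketch (hedged: this mirrors how the `Read` impl for `&[u8]` in
+// `io/impls.rs` invokes the macro; any kind/message pair works the same way):
+//
+//     return Err(const_io_error!(ErrorKind::UnexpectedEof, "failed to fill whole buffer"));
+//
+// Because the message is stored in a `const SimpleMessage`, constructing this
+// error performs no heap allocation.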
+
+// As with `SimpleMessage`: `#[repr(align(4))]` here is just because
+// repr_bitpacked's encoding requires it. In practice it will almost certainly
+// already be this high or higher.
+#[derive(Debug)]
+#[repr(align(4))]
+struct Custom {
+ kind: ErrorKind,
+ error: Box<dyn error::Error + Send + Sync>,
+}
+
+/// A list specifying general categories of I/O error.
+///
+/// This list is intended to grow over time and it is not recommended to
+/// exhaustively match against it.
+///
+/// It is used with the [`io::Error`] type.
+///
+/// [`io::Error`]: Error
+///
+/// # Handling errors and matching on `ErrorKind`
+///
+/// In application code, use `match` for the `ErrorKind` values you are
+/// expecting; use `_` to match "all other errors".
+///
+/// In comprehensive and thorough tests that want to verify that the code under test doesn't
+/// return any known incorrect error kind, you may want to cut-and-paste the
+/// current full list of errors from here into your test code, and then match
+/// `_` as the correct case. This seems counterintuitive, but it will make your
+/// tests more robust. In particular, if you want to verify that your code does
+/// produce an unrecognized error kind, the robust solution is to check for all
+/// the recognized error kinds and fail in those cases.
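+///
+/// For example, a minimal sketch of the match-the-kinds-you-expect pattern
+/// described above (the function name and file paths here are hypothetical):
+///
+/// ```no_run
+/// use std::io::{self, ErrorKind};
+///
+/// fn back_up_config() -> io::Result<u64> {
+///     match std::fs::copy("config.toml", "config.bak") {
+///         Ok(bytes_copied) => Ok(bytes_copied),
+///         // Handle the kinds you expect explicitly...
+///         Err(e) if e.kind() == ErrorKind::NotFound => Ok(0),
+///         // ...and pass all other errors through unchanged.
+///         Err(e) => Err(e),
+///     }
+/// }
+/// ```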
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+#[non_exhaustive]
+pub enum ErrorKind {
+ /// An entity was not found, often a file.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ NotFound,
+ /// The operation lacked the necessary privileges to complete.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ PermissionDenied,
+ /// The connection was refused by the remote server.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ ConnectionRefused,
+ /// The connection was reset by the remote server.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ ConnectionReset,
+ /// The remote host is not reachable.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ HostUnreachable,
+ /// The network containing the remote host is not reachable.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ NetworkUnreachable,
+ /// The connection was aborted (terminated) by the remote server.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ ConnectionAborted,
+ /// The network operation failed because it was not connected yet.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ NotConnected,
+ /// A socket address could not be bound because the address is already in
+ /// use elsewhere.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ AddrInUse,
+ /// A nonexistent interface was requested or the requested address was not
+ /// local.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ AddrNotAvailable,
+ /// The system's networking is down.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ NetworkDown,
+ /// The operation failed because a pipe was closed.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ BrokenPipe,
+ /// An entity already exists, often a file.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ AlreadyExists,
+ /// The operation needs to block to complete, but the blocking operation was
+ /// requested to not occur.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ WouldBlock,
+ /// A filesystem object is, unexpectedly, not a directory.
+ ///
+ /// For example, a filesystem path was specified where one of the intermediate directory
+ /// components was, in fact, a plain file.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ NotADirectory,
+ /// The filesystem object is, unexpectedly, a directory.
+ ///
+ /// A directory was specified when a non-directory was expected.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ IsADirectory,
+ /// A non-empty directory was specified where an empty directory was expected.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ DirectoryNotEmpty,
+ /// The filesystem or storage medium is read-only, but a write operation was attempted.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ ReadOnlyFilesystem,
+ /// Loop in the filesystem or IO subsystem; often, too many levels of symbolic links.
+ ///
+ /// There was a loop (or excessively long chain) resolving a filesystem object
+ /// or file IO object.
+ ///
+ /// On Unix this is usually the result of a symbolic link loop; or, of exceeding the
+ /// system-specific limit on the depth of symlink traversal.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ FilesystemLoop,
+ /// Stale network file handle.
+ ///
+ /// With some network filesystems, notably NFS, an open file (or directory) can be invalidated
+ /// by problems with the network or server.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ StaleNetworkFileHandle,
+ /// A parameter was incorrect.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ InvalidInput,
+ /// Data not valid for the operation were encountered.
+ ///
+ /// Unlike [`InvalidInput`], this typically means that the operation
+ /// parameters were valid, however the error was caused by malformed
+ /// input data.
+ ///
+ /// For example, a function that reads a file into a string will error with
+ /// `InvalidData` if the file's contents are not valid UTF-8.
+ ///
+ /// [`InvalidInput`]: ErrorKind::InvalidInput
+ #[stable(feature = "io_invalid_data", since = "1.2.0")]
+ InvalidData,
+ /// The I/O operation's timeout expired, causing it to be canceled.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ TimedOut,
+ /// An error returned when an operation could not be completed because a
+ /// call to [`write`] returned [`Ok(0)`].
+ ///
+ /// This typically means that an operation could only succeed if it wrote a
+ /// particular number of bytes but only a smaller number of bytes could be
+ /// written.
+ ///
+ /// [`write`]: crate::io::Write::write
+ /// [`Ok(0)`]: Ok
+ #[stable(feature = "rust1", since = "1.0.0")]
+ WriteZero,
+ /// The underlying storage (typically, a filesystem) is full.
+ ///
+ /// This does not include out of quota errors.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ StorageFull,
+ /// Seek on unseekable file.
+ ///
+ /// Seeking was attempted on an open file handle which is not suitable for seeking - for
+ /// example, on Unix, a named pipe opened with `File::open`.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ NotSeekable,
+ /// Filesystem quota was exceeded.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ FilesystemQuotaExceeded,
+ /// File larger than allowed or supported.
+ ///
+ /// This might arise from a hard limit of the underlying filesystem or file access API, or from
+ /// an administratively imposed resource limitation. Simple disk-full and out-of-quota
+ /// conditions have their own errors.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ FileTooLarge,
+ /// Resource is busy.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ ResourceBusy,
+ /// Executable file is busy.
+ ///
+ /// An attempt was made to write to a file which is also in use as a running program. (Not all
+ /// operating systems detect this situation.)
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ ExecutableFileBusy,
+ /// Deadlock (avoided).
+ ///
+ /// A file locking operation would result in deadlock. This situation is typically detected, if
+ /// at all, on a best-effort basis.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ Deadlock,
+ /// Cross-device or cross-filesystem (hard) link or rename.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ CrossesDevices,
+ /// Too many (hard) links to the same filesystem object.
+ ///
+ /// The filesystem does not support making so many hardlinks to the same file.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ TooManyLinks,
+ /// A filename was invalid.
+ ///
+ /// This error can also occur if the filename exceeds the length limit.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ InvalidFilename,
+ /// Program argument list too long.
+ ///
+ /// When trying to run an external program, a system or process limit on the size of the
+ /// arguments would have been exceeded.
+ #[unstable(feature = "io_error_more", issue = "86442")]
+ ArgumentListTooLong,
+ /// This operation was interrupted.
+ ///
+ /// Interrupted operations can typically be retried.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Interrupted,
+
+ /// This operation is unsupported on this platform.
+ ///
+ /// This means that the operation can never succeed.
+ #[stable(feature = "unsupported_error", since = "1.53.0")]
+ Unsupported,
+
+ // ErrorKinds which are primarily categorisations for OS error
+ // codes should be added above.
+ //
+ /// An error returned when an operation could not be completed because an
+ /// "end of file" was reached prematurely.
+ ///
+ /// This typically means that an operation could only succeed if it read a
+ /// particular number of bytes but only a smaller number of bytes could be
+ /// read.
+ #[stable(feature = "read_exact", since = "1.6.0")]
+ UnexpectedEof,
+
+ /// An operation could not be completed, because it failed
+ /// to allocate enough memory.
+ #[stable(feature = "out_of_memory_error", since = "1.54.0")]
+ OutOfMemory,
+
+ // "Unusual" error kinds which do not correspond simply to (sets
+ // of) OS error codes, should be added just above this comment.
+ // `Other` and `Uncategorized` should remain at the end:
+ //
+ /// A custom error that does not fall under any other I/O error kind.
+ ///
+ /// This can be used to construct your own [`Error`]s that do not match any
+ /// [`ErrorKind`].
+ ///
+ /// This [`ErrorKind`] is not used by the standard library.
+ ///
+ /// Errors from the standard library that do not fall under any of the I/O
+ /// error kinds cannot be `match`ed on, and will only match a wildcard (`_`) pattern.
+ /// New [`ErrorKind`]s might be added in the future for some of those.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Other,
+
+ /// Any I/O error from the standard library that's not part of this list.
+ ///
+ /// Errors that are `Uncategorized` now may move to a different or a new
+ /// [`ErrorKind`] variant in the future. It is not recommended to match
+ /// an error against `Uncategorized`; use a wildcard match (`_`) instead.
+ #[unstable(feature = "io_error_uncategorized", issue = "none")]
+ #[doc(hidden)]
+ Uncategorized,
+}
+
+impl ErrorKind {
+ pub(crate) fn as_str(&self) -> &'static str {
+ use ErrorKind::*;
+ // Strictly alphabetical, please. (Sadly rustfmt cannot do this yet.)
+ match *self {
+ AddrInUse => "address in use",
+ AddrNotAvailable => "address not available",
+ AlreadyExists => "entity already exists",
+ ArgumentListTooLong => "argument list too long",
+ BrokenPipe => "broken pipe",
+ ConnectionAborted => "connection aborted",
+ ConnectionRefused => "connection refused",
+ ConnectionReset => "connection reset",
+ CrossesDevices => "cross-device link or rename",
+ Deadlock => "deadlock",
+ DirectoryNotEmpty => "directory not empty",
+ ExecutableFileBusy => "executable file busy",
+ FileTooLarge => "file too large",
+ FilesystemLoop => "filesystem loop or indirection limit (e.g. symlink loop)",
+ FilesystemQuotaExceeded => "filesystem quota exceeded",
+ HostUnreachable => "host unreachable",
+ Interrupted => "operation interrupted",
+ InvalidData => "invalid data",
+ InvalidFilename => "invalid filename",
+ InvalidInput => "invalid input parameter",
+ IsADirectory => "is a directory",
+ NetworkDown => "network down",
+ NetworkUnreachable => "network unreachable",
+ NotADirectory => "not a directory",
+ NotConnected => "not connected",
+ NotFound => "entity not found",
+ NotSeekable => "seek on unseekable file",
+ Other => "other error",
+ OutOfMemory => "out of memory",
+ PermissionDenied => "permission denied",
+ ReadOnlyFilesystem => "read-only filesystem or storage medium",
+ ResourceBusy => "resource busy",
+ StaleNetworkFileHandle => "stale network file handle",
+ StorageFull => "no storage space",
+ TimedOut => "timed out",
+ TooManyLinks => "too many links",
+ Uncategorized => "uncategorized error",
+ UnexpectedEof => "unexpected end of file",
+ Unsupported => "unsupported",
+ WouldBlock => "operation would block",
+ WriteZero => "write zero",
+ }
+ }
+}
+
+#[stable(feature = "io_errorkind_display", since = "1.60.0")]
+impl fmt::Display for ErrorKind {
+ /// Shows a human-readable description of the `ErrorKind`.
+ ///
+ /// This is similar to `impl Display for Error`, but doesn't require first converting to Error.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::io::ErrorKind;
+ /// assert_eq!("entity not found", ErrorKind::NotFound.to_string());
+ /// ```
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.write_str(self.as_str())
+ }
+}
+
+/// Intended for errors not exposed to the user, where allocating onto
+/// the heap (as normal construction via `Error::new` does) is too costly.
+#[stable(feature = "io_error_from_errorkind", since = "1.14.0")]
+impl From<ErrorKind> for Error {
+ /// Converts an [`ErrorKind`] into an [`Error`].
+ ///
+ /// This conversion creates a new error with a simple representation of error kind.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{Error, ErrorKind};
+ ///
+ /// let not_found = ErrorKind::NotFound;
+ /// let error = Error::from(not_found);
+ /// assert_eq!("entity not found", format!("{error}"));
+ /// ```
+ #[inline]
+ fn from(kind: ErrorKind) -> Error {
+ Error { repr: Repr::new_simple(kind) }
+ }
+}
+
+impl Error {
+ /// Creates a new I/O error from a known kind of error as well as an
+ /// arbitrary error payload.
+ ///
+ /// This function is used to generically create I/O errors which do not
+ /// originate from the OS itself. The `error` argument is an arbitrary
+ /// payload which will be contained in this [`Error`].
+ ///
+ /// If no extra payload is required, use the `From` conversion from
+ /// `ErrorKind`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{Error, ErrorKind};
+ ///
+ /// // errors can be created from strings
+ /// let custom_error = Error::new(ErrorKind::Other, "oh no!");
+ ///
+ /// // errors can also be created from other errors
+ /// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error);
+ ///
+ /// // creating an error without payload
+ /// let eof_error = Error::from(ErrorKind::UnexpectedEof);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new<E>(kind: ErrorKind, error: E) -> Error
+ where
+ E: Into<Box<dyn error::Error + Send + Sync>>,
+ {
+ Self::_new(kind, error.into())
+ }
+
+ /// Creates a new I/O error from an arbitrary error payload.
+ ///
+ /// This function is used to generically create I/O errors which do not
+ /// originate from the OS itself. It is a shortcut for [`Error::new`]
+ /// with [`ErrorKind::Other`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(io_error_other)]
+ ///
+ /// use std::io::Error;
+ ///
+ /// // errors can be created from strings
+ /// let custom_error = Error::other("oh no!");
+ ///
+ /// // errors can also be created from other errors
+ /// let custom_error2 = Error::other(custom_error);
+ /// ```
+ #[unstable(feature = "io_error_other", issue = "91946")]
+ pub fn other<E>(error: E) -> Error
+ where
+ E: Into<Box<dyn error::Error + Send + Sync>>,
+ {
+ Self::_new(ErrorKind::Other, error.into())
+ }
+
+ fn _new(kind: ErrorKind, error: Box<dyn error::Error + Send + Sync>) -> Error {
+ Error { repr: Repr::new_custom(Box::new(Custom { kind, error })) }
+ }
+
+ /// Creates a new I/O error from a known kind of error as well as a constant
+ /// message.
+ ///
+ /// This function does not allocate.
+ ///
+ /// You should not use this directly, and instead use the `const_io_error!`
+ /// macro: `io::const_io_error!(ErrorKind::Something, "some_message")`.
+ ///
+ /// This function should maybe change to `from_static_message<const MSG: &'static
+ /// str>(kind: ErrorKind)` in the future, when const generics allow that.
+ #[inline]
+ pub(crate) const fn from_static_message(msg: &'static SimpleMessage) -> Error {
+ Self { repr: Repr::new_simple_message(msg) }
+ }
+
+ /// Returns an error representing the last OS error which occurred.
+ ///
+ /// This function reads the value of `errno` for the target platform (e.g.
+ /// `GetLastError` on Windows) and will return a corresponding instance of
+ /// [`Error`] for the error code.
+ ///
+ /// This should be called immediately after a call to a platform function,
+ /// otherwise the state of the error value is indeterminate. In particular,
+ /// other standard library functions may call platform functions that may
+ /// (or may not) reset the error value even if they succeed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::Error;
+ ///
+ /// let os_error = Error::last_os_error();
+ /// println!("last OS error: {os_error:?}");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn last_os_error() -> Error {
+ Error::from_raw_os_error(sys::os::errno() as i32)
+ }
+
+ /// Creates a new instance of an [`Error`] from a particular OS error code.
+ ///
+ /// # Examples
+ ///
+ /// On Linux:
+ ///
+ /// ```
+ /// # if cfg!(target_os = "linux") {
+ /// use std::io;
+ ///
+ /// let error = io::Error::from_raw_os_error(22);
+ /// assert_eq!(error.kind(), io::ErrorKind::InvalidInput);
+ /// # }
+ /// ```
+ ///
+ /// On Windows:
+ ///
+ /// ```
+ /// # if cfg!(windows) {
+ /// use std::io;
+ ///
+ /// let error = io::Error::from_raw_os_error(10022);
+ /// assert_eq!(error.kind(), io::ErrorKind::InvalidInput);
+ /// # }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn from_raw_os_error(code: i32) -> Error {
+ Error { repr: Repr::new_os(code) }
+ }
+
+ /// Returns the OS error that this error represents (if any).
+ ///
+ /// If this [`Error`] was constructed via [`last_os_error`] or
+ /// [`from_raw_os_error`], then this function will return [`Some`], otherwise
+ /// it will return [`None`].
+ ///
+ /// [`last_os_error`]: Error::last_os_error
+ /// [`from_raw_os_error`]: Error::from_raw_os_error
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{Error, ErrorKind};
+ ///
+ /// fn print_os_error(err: &Error) {
+ /// if let Some(raw_os_err) = err.raw_os_error() {
+ /// println!("raw OS error: {raw_os_err:?}");
+ /// } else {
+ /// println!("Not an OS error");
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// // Will print "raw OS error: ...".
+ /// print_os_error(&Error::last_os_error());
+ /// // Will print "Not an OS error".
+ /// print_os_error(&Error::new(ErrorKind::Other, "oh no!"));
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn raw_os_error(&self) -> Option<i32> {
+ match self.repr.data() {
+ ErrorData::Os(i) => Some(i),
+ ErrorData::Custom(..) => None,
+ ErrorData::Simple(..) => None,
+ ErrorData::SimpleMessage(..) => None,
+ }
+ }
+
+ /// Returns a reference to the inner error wrapped by this error (if any).
+ ///
+ /// If this [`Error`] was constructed via [`new`] then this function will
+ /// return [`Some`], otherwise it will return [`None`].
+ ///
+ /// [`new`]: Error::new
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{Error, ErrorKind};
+ ///
+ /// fn print_error(err: &Error) {
+ /// if let Some(inner_err) = err.get_ref() {
+ /// println!("Inner error: {inner_err:?}");
+ /// } else {
+ /// println!("No inner error");
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// // Will print "No inner error".
+ /// print_error(&Error::last_os_error());
+ /// // Will print "Inner error: ...".
+ /// print_error(&Error::new(ErrorKind::Other, "oh no!"));
+ /// }
+ /// ```
+ #[stable(feature = "io_error_inner", since = "1.3.0")]
+ #[must_use]
+ #[inline]
+ pub fn get_ref(&self) -> Option<&(dyn error::Error + Send + Sync + 'static)> {
+ match self.repr.data() {
+ ErrorData::Os(..) => None,
+ ErrorData::Simple(..) => None,
+ ErrorData::SimpleMessage(..) => None,
+ ErrorData::Custom(c) => Some(&*c.error),
+ }
+ }
+
+ /// Returns a mutable reference to the inner error wrapped by this error
+ /// (if any).
+ ///
+ /// If this [`Error`] was constructed via [`new`] then this function will
+ /// return [`Some`], otherwise it will return [`None`].
+ ///
+ /// [`new`]: Error::new
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{Error, ErrorKind};
+ /// use std::{error, fmt};
+ /// use std::fmt::Display;
+ ///
+ /// #[derive(Debug)]
+ /// struct MyError {
+ /// v: String,
+ /// }
+ ///
+ /// impl MyError {
+ /// fn new() -> MyError {
+ /// MyError {
+ /// v: "oh no!".to_string()
+ /// }
+ /// }
+ ///
+ /// fn change_message(&mut self, new_message: &str) {
+ /// self.v = new_message.to_string();
+ /// }
+ /// }
+ ///
+ /// impl error::Error for MyError {}
+ ///
+ /// impl Display for MyError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "MyError: {}", &self.v)
+ /// }
+ /// }
+ ///
+ /// fn change_error(mut err: Error) -> Error {
+ /// if let Some(inner_err) = err.get_mut() {
+ /// inner_err.downcast_mut::<MyError>().unwrap().change_message("I've been changed!");
+ /// }
+ /// err
+ /// }
+ ///
+ /// fn print_error(err: &Error) {
+ /// if let Some(inner_err) = err.get_ref() {
+ /// println!("Inner error: {inner_err}");
+ /// } else {
+ /// println!("No inner error");
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// // Will print "No inner error".
+ /// print_error(&change_error(Error::last_os_error()));
+ /// // Will print "Inner error: ...".
+ /// print_error(&change_error(Error::new(ErrorKind::Other, MyError::new())));
+ /// }
+ /// ```
+ #[stable(feature = "io_error_inner", since = "1.3.0")]
+ #[must_use]
+ #[inline]
+ pub fn get_mut(&mut self) -> Option<&mut (dyn error::Error + Send + Sync + 'static)> {
+ match self.repr.data_mut() {
+ ErrorData::Os(..) => None,
+ ErrorData::Simple(..) => None,
+ ErrorData::SimpleMessage(..) => None,
+ ErrorData::Custom(c) => Some(&mut *c.error),
+ }
+ }
+
+ /// Consumes the `Error`, returning its inner error (if any).
+ ///
+ /// If this [`Error`] was constructed via [`new`] then this function will
+ /// return [`Some`], otherwise it will return [`None`].
+ ///
+ /// [`new`]: Error::new
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{Error, ErrorKind};
+ ///
+ /// fn print_error(err: Error) {
+ /// if let Some(inner_err) = err.into_inner() {
+ /// println!("Inner error: {inner_err}");
+ /// } else {
+ /// println!("No inner error");
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// // Will print "No inner error".
+ /// print_error(Error::last_os_error());
+ /// // Will print "Inner error: ...".
+ /// print_error(Error::new(ErrorKind::Other, "oh no!"));
+ /// }
+ /// ```
+ #[stable(feature = "io_error_inner", since = "1.3.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_inner(self) -> Option<Box<dyn error::Error + Send + Sync>> {
+ match self.repr.into_data() {
+ ErrorData::Os(..) => None,
+ ErrorData::Simple(..) => None,
+ ErrorData::SimpleMessage(..) => None,
+ ErrorData::Custom(c) => Some(c.error),
+ }
+ }
+
+ /// Attempts to downcast the inner error to `E` if any.
+ ///
+ /// If this [`Error`] was constructed via [`new`] then this function will
+ /// attempt to perform the downcast on it, otherwise it will return [`Err`].
+ ///
+ /// If the downcast succeeds, it will return [`Ok`], otherwise it will also
+ /// return [`Err`].
+ ///
+ /// [`new`]: Error::new
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(io_error_downcast)]
+ ///
+ /// use std::fmt;
+ /// use std::io;
+ /// use std::error::Error;
+ ///
+ /// #[derive(Debug)]
+ /// enum E {
+ /// Io(io::Error),
+ /// SomeOtherVariant,
+ /// }
+ ///
+ /// impl fmt::Display for E {
+ /// // ...
+ /// # fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// # todo!()
+ /// # }
+ /// }
+ /// impl Error for E {}
+ ///
+ /// impl From<io::Error> for E {
+ /// fn from(err: io::Error) -> E {
+ /// err.downcast::<E>()
+ /// .map(|b| *b)
+ /// .unwrap_or_else(E::Io)
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "io_error_downcast", issue = "99262")]
+ pub fn downcast<E>(self) -> result::Result<Box<E>, Self>
+ where
+ E: error::Error + Send + Sync + 'static,
+ {
+ match self.repr.into_data() {
+ ErrorData::Custom(b) if b.error.is::<E>() => {
+ let res = (*b).error.downcast::<E>();
+
+ // downcast is really trivial and is marked as inline, so
+ // it's likely to be inlined here.
+ //
+ // And the compiler should be able to eliminate the branch
+ // that produces `Err` here since b.error.is::<E>()
+ // returns true.
+ Ok(res.unwrap())
+ }
+ repr_data => Err(Self { repr: Repr::new(repr_data) }),
+ }
+ }
+
+ /// Returns the corresponding [`ErrorKind`] for this error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{Error, ErrorKind};
+ ///
+ /// fn print_error(err: Error) {
+ /// println!("{:?}", err.kind());
+ /// }
+ ///
+ /// fn main() {
+ /// // Will print "Uncategorized".
+ /// print_error(Error::last_os_error());
+ /// // Will print "AddrInUse".
+ /// print_error(Error::new(ErrorKind::AddrInUse, "oh no!"));
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn kind(&self) -> ErrorKind {
+ match self.repr.data() {
+ ErrorData::Os(code) => sys::decode_error_kind(code),
+ ErrorData::Custom(c) => c.kind,
+ ErrorData::Simple(kind) => kind,
+ ErrorData::SimpleMessage(m) => m.kind,
+ }
+ }
+}
+
+impl fmt::Debug for Repr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.data() {
+ ErrorData::Os(code) => fmt
+ .debug_struct("Os")
+ .field("code", &code)
+ .field("kind", &sys::decode_error_kind(code))
+ .field("message", &sys::os::error_string(code))
+ .finish(),
+ ErrorData::Custom(c) => fmt::Debug::fmt(&c, fmt),
+ ErrorData::Simple(kind) => fmt.debug_tuple("Kind").field(&kind).finish(),
+ ErrorData::SimpleMessage(msg) => fmt
+ .debug_struct("Error")
+ .field("kind", &msg.kind)
+ .field("message", &msg.message)
+ .finish(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Error {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.repr.data() {
+ ErrorData::Os(code) => {
+ let detail = sys::os::error_string(code);
+ write!(fmt, "{detail} (os error {code})")
+ }
+ ErrorData::Custom(ref c) => c.error.fmt(fmt),
+ ErrorData::Simple(kind) => write!(fmt, "{}", kind.as_str()),
+ ErrorData::SimpleMessage(msg) => msg.message.fmt(fmt),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl error::Error for Error {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ match self.repr.data() {
+ ErrorData::Os(..) | ErrorData::Simple(..) => self.kind().as_str(),
+ ErrorData::SimpleMessage(msg) => msg.message,
+ ErrorData::Custom(c) => c.error.description(),
+ }
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn error::Error> {
+ match self.repr.data() {
+ ErrorData::Os(..) => None,
+ ErrorData::Simple(..) => None,
+ ErrorData::SimpleMessage(..) => None,
+ ErrorData::Custom(c) => c.error.cause(),
+ }
+ }
+
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self.repr.data() {
+ ErrorData::Os(..) => None,
+ ErrorData::Simple(..) => None,
+ ErrorData::SimpleMessage(..) => None,
+ ErrorData::Custom(c) => c.error.source(),
+ }
+ }
+}
+
+fn _assert_error_is_sync_send() {
+ fn _is_sync_send<T: Sync + Send>() {}
+ _is_sync_send::<Error>();
+}
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
new file mode 100644
index 000000000..292bf4826
--- /dev/null
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -0,0 +1,409 @@
+//! This is a densely packed error representation which is used on targets with
+//! 64-bit pointers.
+//!
+//! (Note that `bitpacked` vs `unpacked` here has no relationship to
+//! `#[repr(packed)]`, it just refers to attempting to use any available bits in
+//! a more clever manner than `rustc`'s default layout algorithm would).
+//!
+//! Conceptually, it stores the same data as the "unpacked" equivalent we use on
+//! other targets. Specifically, you can imagine it as an optimized version of
+//! the following enum (which is roughly equivalent to what's stored by
+//! `repr_unpacked::Repr`, e.g. `super::ErrorData<Box<Custom>>`):
+//!
+//! ```ignore (exposition-only)
+//! enum ErrorData {
+//! Os(i32),
+//! Simple(ErrorKind),
+//! SimpleMessage(&'static SimpleMessage),
+//! Custom(Box<Custom>),
+//! }
+//! ```
+//!
+//! However, it packs this data into a 64-bit non-zero value.
+//!
+//! This optimization not only allows `io::Error` to occupy a single pointer,
+//! but improves `io::Result` as well, especially for situations like
+//! `io::Result<()>` (which is now 64 bits) or `io::Result<u64>` (which is now
+//! 128 bits), which are quite common.
+//!
+//! # Layout
+//! Tagged values are 64 bits, with the 2 least significant bits used for the
+//! tag. This means there are 4 "variants":
+//!
+//! - **Tag 0b00**: The first variant is equivalent to
+//! `ErrorData::SimpleMessage`, and holds a `&'static SimpleMessage` directly.
+//!
+//! `SimpleMessage` has an alignment >= 4 (which is requested with
+//! `#[repr(align)]` and checked statically at the bottom of this file), which
+//! means every `&'static SimpleMessage` should have both tag bits as 0,
+//! meaning its tagged and untagged representations are equivalent.
+//!
+//! This means we can skip tagging it, which is necessary as this variant can
+//! be constructed from a `const fn`, which probably cannot tag pointers (or
+//! at least it would be difficult).
+//!
+//! - **Tag 0b01**: The other pointer variant holds the data for
+//! `ErrorData::Custom` and the remaining 62 bits are used to store a
+//! `Box<Custom>`. `Custom` also has alignment >= 4, so the bottom two bits
+//! are free to use for the tag.
+//!
+//! The only important thing to note is that `ptr::wrapping_add` and
+//! `ptr::wrapping_sub` are used to tag the pointer, rather than bitwise
+//! operations. This should preserve the pointer's provenance, which would
+//! otherwise be lost.
+//!
+//! - **Tag 0b10**: Holds the data for `ErrorData::Os(i32)`. We store the `i32`
+//! in the pointer's most significant 32 bits, and don't use the bits `2..32`
+//! for anything. Using the top 32 bits is just to let us easily recover the
+//! `i32` code with the correct sign.
+//!
+//! - **Tag 0b11**: Holds the data for `ErrorData::Simple(ErrorKind)`. This
+//! stores the `ErrorKind` in the top 32 bits as well, although it doesn't
+//! occupy nearly that many. Most of the bits are unused here, but it's not
+//! like we need them for anything else yet.
+//!
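+//! As a quick sketch (exposition-only, mirroring `new_os` and `decode_repr`
+//! below), an OS error code round-trips through the tag `0b10` encoding like
+//! this:
+//!
+//! ```ignore (exposition-only)
+//! let code: i32 = -1;
+//! let tagged = ((code as usize) << 32) | TAG_OS;  // encode: code in the top 32 bits
+//! let decoded = ((tagged as i64) >> 32) as i32;   // decode: arithmetic shift keeps the sign
+//! assert_eq!(decoded, code);
+//! ```
+//!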
+//! # Use of `NonNull<()>`
+//!
+//! Everything is stored in a `NonNull<()>`, which is odd, but actually serves a
+//! purpose.
+//!
+//! Conceptually you might think of this more like:
+//!
+//! ```ignore (exposition-only)
+//! union Repr {
+//! // holds integer (Simple/Os) variants, and
+//! // provides access to the tag bits.
+//! bits: NonZeroU64,
+//! // Tag is 0, so this is stored untagged.
+//! msg: &'static SimpleMessage,
+//! // Tagged (offset) `Box<Custom>` pointer.
+//! tagged_custom: NonNull<()>,
+//! }
+//! ```
+//!
+//! But there are a few problems with this:
+//!
+//! 1. Union access is equivalent to a transmute, so this representation would
+//! require we transmute between integers and pointers in at least one
+//! direction, which may be UB (and even if not, it is likely harder for a
+//! compiler to reason about than explicit ptr->int operations).
+//!
+//! 2. Even if all fields of a union have a niche, the union itself doesn't,
+//! although this may change in the future. This would make things like
+//! `io::Result<()>` and `io::Result<usize>` larger, which defeats part of
+//! the motivation of this bitpacking.
+//!
+//! Storing everything in a `NonZeroUsize` (or some other integer) would be a
+//! bit more traditional for pointer tagging, but it would lose provenance
+//! information, couldn't be constructed from a `const fn`, and would probably
+//! run into other issues as well.
+//!
+//! The `NonNull<()>` seems like the only alternative, even if it's fairly odd
+//! to use a pointer type to store something that may hold an integer, some of
+//! the time.
+
+use super::{Custom, ErrorData, ErrorKind, SimpleMessage};
+use alloc::boxed::Box;
+use core::marker::PhantomData;
+use core::mem::{align_of, size_of};
+use core::ptr::{self, NonNull};
+
+// The 2 least-significant bits are used as tag.
+const TAG_MASK: usize = 0b11;
+const TAG_SIMPLE_MESSAGE: usize = 0b00;
+const TAG_CUSTOM: usize = 0b01;
+const TAG_OS: usize = 0b10;
+const TAG_SIMPLE: usize = 0b11;
+
+/// The internal representation.
+///
+/// See the module docs for more; this is just a way to hack in a check that we
+/// are indeed not unwind-safe.
+///
+/// ```compile_fail,E0277
+/// fn is_unwind_safe<T: core::panic::UnwindSafe>() {}
+/// is_unwind_safe::<std::io::Error>();
+/// ```
+#[repr(transparent)]
+pub(super) struct Repr(NonNull<()>, PhantomData<ErrorData<Box<Custom>>>);
+
+// All the types `Repr` stores internally are Send + Sync, and so is it.
+unsafe impl Send for Repr {}
+unsafe impl Sync for Repr {}
+
+impl Repr {
+ pub(super) fn new(dat: ErrorData<Box<Custom>>) -> Self {
+ match dat {
+ ErrorData::Os(code) => Self::new_os(code),
+ ErrorData::Simple(kind) => Self::new_simple(kind),
+ ErrorData::SimpleMessage(simple_message) => Self::new_simple_message(simple_message),
+ ErrorData::Custom(b) => Self::new_custom(b),
+ }
+ }
+
+ pub(super) fn new_custom(b: Box<Custom>) -> Self {
+ let p = Box::into_raw(b).cast::<u8>();
+ // Should only be possible if an allocator handed out a pointer with
+ // wrong alignment.
+ debug_assert_eq!(p.addr() & TAG_MASK, 0);
+ // Note: We know `TAG_CUSTOM <= size_of::<Custom>()` (static_assert at
+ // end of file), and both the start and end of the expression must be
+ // valid without address space wraparound due to `Box`'s semantics.
+ //
+ // This means it would be correct to implement this using `ptr::add`
+ // (rather than `ptr::wrapping_add`), but it's unclear this would give
+ // any benefit, so we just use `wrapping_add` instead.
+ let tagged = p.wrapping_add(TAG_CUSTOM).cast::<()>();
+ // Safety: `TAG_CUSTOM + p` is the same as `TAG_CUSTOM | p`,
+ // because `p`'s alignment means it isn't allowed to have any of the
+ // bits in `TAG_MASK` set (you can verify that addition and bitwise-or are the
+ // same when the operands have no bits in common using a truth table).
+ //
+ // Then, `TAG_CUSTOM | p` is not zero, as that would require
+ // `TAG_CUSTOM` and `p` both be zero, and neither is (as `p` came from a
+ // box, and `TAG_CUSTOM` just... isn't zero -- it's `0b01`). Therefore,
+ // `TAG_CUSTOM + p` isn't zero and so `tagged` can't be, and the
+ // `new_unchecked` is safe.
+ let res = Self(unsafe { NonNull::new_unchecked(tagged) }, PhantomData);
+ // quickly smoke-check we encoded the right thing (This generally will
+ // only run in libstd's tests, unless the user uses -Zbuild-std)
+ debug_assert!(matches!(res.data(), ErrorData::Custom(_)), "repr(custom) encoding failed");
+ res
+ }
+
+ #[inline]
+ pub(super) fn new_os(code: i32) -> Self {
+ let utagged = ((code as usize) << 32) | TAG_OS;
+ // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0.
+ let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
+ // quickly smoke-check we encoded the right thing (This generally will
+ // only run in libstd's tests, unless the user uses -Zbuild-std)
+ debug_assert!(
+ matches!(res.data(), ErrorData::Os(c) if c == code),
+ "repr(os) encoding failed for {code}"
+ );
+ res
+ }
+
+ #[inline]
+ pub(super) fn new_simple(kind: ErrorKind) -> Self {
+ let utagged = ((kind as usize) << 32) | TAG_SIMPLE;
+ // Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0.
+ let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
+ // quickly smoke-check we encoded the right thing (This generally will
+ // only run in libstd's tests, unless the user uses -Zbuild-std)
+ debug_assert!(
+ matches!(res.data(), ErrorData::Simple(k) if k == kind),
+ "repr(simple) encoding failed {:?}",
+ kind,
+ );
+ res
+ }
+
+ #[inline]
+ pub(super) const fn new_simple_message(m: &'static SimpleMessage) -> Self {
+ // Safety: References are never null.
+ Self(unsafe { NonNull::new_unchecked(m as *const _ as *mut ()) }, PhantomData)
+ }
+
+ #[inline]
+ pub(super) fn data(&self) -> ErrorData<&Custom> {
+ // Safety: We're a Repr, decode_repr is fine.
+ unsafe { decode_repr(self.0, |c| &*c) }
+ }
+
+ #[inline]
+ pub(super) fn data_mut(&mut self) -> ErrorData<&mut Custom> {
+ // Safety: We're a Repr, decode_repr is fine.
+ unsafe { decode_repr(self.0, |c| &mut *c) }
+ }
+
+ #[inline]
+ pub(super) fn into_data(self) -> ErrorData<Box<Custom>> {
+ let this = core::mem::ManuallyDrop::new(self);
+ // Safety: We're a Repr, decode_repr is fine. The `Box::from_raw` is
+ // safe because we prevent double-drop using `ManuallyDrop`.
+ unsafe { decode_repr(this.0, |p| Box::from_raw(p)) }
+ }
+}
+
+impl Drop for Repr {
+ #[inline]
+ fn drop(&mut self) {
+ // Safety: We're a Repr, decode_repr is fine. The `Box::from_raw` is
+ // safe because we're being dropped.
+ unsafe {
+ let _ = decode_repr(self.0, |p| Box::<Custom>::from_raw(p));
+ }
+ }
+}
+
+// Shared helper to decode a `Repr`'s internal pointer into an ErrorData.
+//
+// Safety: `ptr`'s bits should be encoded as described in the module docs at the
+// top (i.e. it should be `some_repr.0`).
+#[inline]
+unsafe fn decode_repr<C, F>(ptr: NonNull<()>, make_custom: F) -> ErrorData<C>
+where
+ F: FnOnce(*mut Custom) -> C,
+{
+ let bits = ptr.as_ptr().addr();
+ match bits & TAG_MASK {
+ TAG_OS => {
+ let code = ((bits as i64) >> 32) as i32;
+ ErrorData::Os(code)
+ }
+ TAG_SIMPLE => {
+ let kind_bits = (bits >> 32) as u32;
+ let kind = kind_from_prim(kind_bits).unwrap_or_else(|| {
+ debug_assert!(false, "Invalid io::error::Repr bits: `Repr({:#018x})`", bits);
+ // This means the `ptr` passed in was not valid, which violates
+ // the unsafe contract of `decode_repr`.
+ //
+ // Using this rather than unwrap meaningfully improves the code
+ // for callers which only care about one variant (usually
+ // `Custom`)
+ core::hint::unreachable_unchecked();
+ });
+ ErrorData::Simple(kind)
+ }
+ TAG_SIMPLE_MESSAGE => ErrorData::SimpleMessage(&*ptr.cast::<SimpleMessage>().as_ptr()),
+ TAG_CUSTOM => {
+ // It would be correct for us to use `ptr::sub` here (see the
+ // comment above the `wrapping_add` call in `new_custom` for why),
+ // but it isn't clear that it makes a difference, so we don't.
+ let custom = ptr.as_ptr().cast::<u8>().wrapping_sub(TAG_CUSTOM).cast::<Custom>();
+ ErrorData::Custom(make_custom(custom))
+ }
+ _ => {
+ // Can't happen, and compiler can tell
+ unreachable!();
+ }
+ }
+}
+
+// This compiles to the same code as the check+transmute, but doesn't require
+// unsafe, or to hard-code max ErrorKind or its size in a way the compiler
+// couldn't verify.
+#[inline]
+fn kind_from_prim(ek: u32) -> Option<ErrorKind> {
+ macro_rules! from_prim {
+ ($prim:expr => $Enum:ident { $($Variant:ident),* $(,)? }) => {{
+ // Force a compile error if the list gets out of date.
+ const _: fn(e: $Enum) = |e: $Enum| match e {
+ $($Enum::$Variant => ()),*
+ };
+ match $prim {
+ $(v if v == ($Enum::$Variant as _) => Some($Enum::$Variant),)*
+ _ => None,
+ }
+ }}
+ }
+ from_prim!(ek => ErrorKind {
+ NotFound,
+ PermissionDenied,
+ ConnectionRefused,
+ ConnectionReset,
+ HostUnreachable,
+ NetworkUnreachable,
+ ConnectionAborted,
+ NotConnected,
+ AddrInUse,
+ AddrNotAvailable,
+ NetworkDown,
+ BrokenPipe,
+ AlreadyExists,
+ WouldBlock,
+ NotADirectory,
+ IsADirectory,
+ DirectoryNotEmpty,
+ ReadOnlyFilesystem,
+ FilesystemLoop,
+ StaleNetworkFileHandle,
+ InvalidInput,
+ InvalidData,
+ TimedOut,
+ WriteZero,
+ StorageFull,
+ NotSeekable,
+ FilesystemQuotaExceeded,
+ FileTooLarge,
+ ResourceBusy,
+ ExecutableFileBusy,
+ Deadlock,
+ CrossesDevices,
+ TooManyLinks,
+ InvalidFilename,
+ ArgumentListTooLong,
+ Interrupted,
+ Other,
+ UnexpectedEof,
+ Unsupported,
+ OutOfMemory,
+ Uncategorized,
+ })
+}
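+
+// A quick usage sketch (hedged: this mirrors how `decode_repr` above uses the
+// function): a value matching a variant's discriminant maps back to that
+// variant, anything else maps to `None`.
+//
+//     assert_eq!(kind_from_prim(ErrorKind::NotFound as u32), Some(ErrorKind::NotFound));
+//     assert_eq!(kind_from_prim(u32::MAX), None);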
+
+// Some static checking to alert us if a change breaks any of the assumptions
+// that our encoding relies on for correctness and soundness. (Some of these are
+// a bit overly thorough/cautious, admittedly)
+//
+// If any of these are hit on a platform that libstd supports, we should likely
+// just use `repr_unpacked.rs` there instead (unless the fix is easy).
+macro_rules! static_assert {
+ ($condition:expr) => {
+ const _: () = assert!($condition);
+ };
+ (@usize_eq: $lhs:expr, $rhs:expr) => {
+ const _: [(); $lhs] = [(); $rhs];
+ };
+}
+
+// The bitpacking we use requires pointers be exactly 64 bits.
+static_assert!(@usize_eq: size_of::<NonNull<()>>(), 8);
+
+// We also require pointers and usize be the same size.
+static_assert!(@usize_eq: size_of::<NonNull<()>>(), size_of::<usize>());
+
+// `Custom` and `SimpleMessage` need to be thin pointers.
+static_assert!(@usize_eq: size_of::<&'static SimpleMessage>(), 8);
+static_assert!(@usize_eq: size_of::<Box<Custom>>(), 8);
+
+static_assert!((TAG_MASK + 1).is_power_of_two());
+// And they must have sufficient alignment.
+static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1);
+static_assert!(align_of::<Custom>() >= TAG_MASK + 1);
+
+static_assert!(@usize_eq: (TAG_MASK & TAG_SIMPLE_MESSAGE), TAG_SIMPLE_MESSAGE);
+static_assert!(@usize_eq: (TAG_MASK & TAG_CUSTOM), TAG_CUSTOM);
+static_assert!(@usize_eq: (TAG_MASK & TAG_OS), TAG_OS);
+static_assert!(@usize_eq: (TAG_MASK & TAG_SIMPLE), TAG_SIMPLE);
+
+// This is obviously true (`TAG_CUSTOM` is `0b01`), but in `Repr::new_custom` we
+// offset a pointer by this value, and expect it to both be within the same
+// object, and to not wrap around the address space. See the comment in that
+// function for further details.
+//
+// Actually, at the moment we use `ptr::wrapping_add`, not `ptr::add`, so this
+// check isn't needed for that one, although the assertion that we don't
+// actually wrap around in that wrapping_add does simplify the safety reasoning
+// elsewhere considerably.
+static_assert!(size_of::<Custom>() >= TAG_CUSTOM);
+
+// These two store a payload which is allowed to be zero, so they must be
+// non-zero to preserve the `NonNull`'s range invariant.
+static_assert!(TAG_OS != 0);
+static_assert!(TAG_SIMPLE != 0);
+// We can't tag `SimpleMessage`s, the tag must be 0.
+static_assert!(@usize_eq: TAG_SIMPLE_MESSAGE, 0);
+
+// Check that the point of all of this still holds.
+//
+// We'd check against `io::Error`, but *technically* it's allowed to vary,
+// as it's not `#[repr(transparent)]`/`#[repr(C)]`. We could add that, but
+// the `#[repr()]` would show up in rustdoc, which might be seen as a stable
+// commitment.
+static_assert!(@usize_eq: size_of::<Repr>(), 8);
+static_assert!(@usize_eq: size_of::<Option<Repr>>(), 8);
+static_assert!(@usize_eq: size_of::<Result<(), Repr>>(), 8);
+static_assert!(@usize_eq: size_of::<Result<usize, Repr>>(), 16);
diff --git a/library/std/src/io/error/repr_unpacked.rs b/library/std/src/io/error/repr_unpacked.rs
new file mode 100644
index 000000000..d6ad55b99
--- /dev/null
+++ b/library/std/src/io/error/repr_unpacked.rs
@@ -0,0 +1,54 @@
+//! This is a fairly simple unpacked error representation that's used on
+//! non-64-bit targets, where the packed 64-bit representation wouldn't work, and
+//! would have no benefit.
+
+use super::{Custom, ErrorData, ErrorKind, SimpleMessage};
+use alloc::boxed::Box;
+
+type Inner = ErrorData<Box<Custom>>;
+
+pub(super) struct Repr(Inner);
+
+impl Repr {
+ #[inline]
+ pub(super) fn new(dat: ErrorData<Box<Custom>>) -> Self {
+ Self(dat)
+ }
+ pub(super) fn new_custom(b: Box<Custom>) -> Self {
+ Self(Inner::Custom(b))
+ }
+ #[inline]
+ pub(super) fn new_os(code: i32) -> Self {
+ Self(Inner::Os(code))
+ }
+ #[inline]
+ pub(super) fn new_simple(kind: ErrorKind) -> Self {
+ Self(Inner::Simple(kind))
+ }
+ #[inline]
+ pub(super) const fn new_simple_message(m: &'static SimpleMessage) -> Self {
+ Self(Inner::SimpleMessage(m))
+ }
+ #[inline]
+ pub(super) fn into_data(self) -> ErrorData<Box<Custom>> {
+ self.0
+ }
+ #[inline]
+ pub(super) fn data(&self) -> ErrorData<&Custom> {
+ match &self.0 {
+ Inner::Os(c) => ErrorData::Os(*c),
+ Inner::Simple(k) => ErrorData::Simple(*k),
+ Inner::SimpleMessage(m) => ErrorData::SimpleMessage(*m),
+ Inner::Custom(m) => ErrorData::Custom(&*m),
+ }
+ }
+ #[inline]
+ pub(super) fn data_mut(&mut self) -> ErrorData<&mut Custom> {
+ match &mut self.0 {
+ Inner::Os(c) => ErrorData::Os(*c),
+ Inner::Simple(k) => ErrorData::Simple(*k),
+ Inner::SimpleMessage(m) => ErrorData::SimpleMessage(*m),
+ Inner::Custom(m) => ErrorData::Custom(&mut *m),
+ }
+ }
+}
diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs
new file mode 100644
index 000000000..c897a5e87
--- /dev/null
+++ b/library/std/src/io/error/tests.rs
@@ -0,0 +1,194 @@
+use super::{const_io_error, Custom, Error, ErrorData, ErrorKind, Repr, SimpleMessage};
+use crate::assert_matches::assert_matches;
+use crate::error;
+use crate::fmt;
+use crate::mem::size_of;
+use crate::sys::decode_error_kind;
+use crate::sys::os::error_string;
+
+#[test]
+fn test_size() {
+ assert!(size_of::<Error>() <= size_of::<[usize; 2]>());
+}
+
+#[test]
+fn test_debug_error() {
+ let code = 6;
+ let msg = error_string(code);
+ let kind = decode_error_kind(code);
+ let err = Error {
+ repr: Repr::new_custom(Box::new(Custom {
+ kind: ErrorKind::InvalidInput,
+ error: Box::new(Error { repr: super::Repr::new_os(code) }),
+ })),
+ };
+ let expected = format!(
+ "Custom {{ \
+ kind: InvalidInput, \
+ error: Os {{ \
+ code: {:?}, \
+ kind: {:?}, \
+ message: {:?} \
+ }} \
+ }}",
+ code, kind, msg
+ );
+ assert_eq!(format!("{err:?}"), expected);
+}
+
+#[test]
+fn test_downcasting() {
+ #[derive(Debug)]
+ struct TestError;
+
+ impl fmt::Display for TestError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("asdf")
+ }
+ }
+
+ impl error::Error for TestError {}
+
+ // we have to call all of these UFCS style right now since method
+ // resolution won't implicitly drop the Send+Sync bounds
+ let mut err = Error::new(ErrorKind::Other, TestError);
+ assert!(err.get_ref().unwrap().is::<TestError>());
+ assert_eq!("asdf", err.get_ref().unwrap().to_string());
+ assert!(err.get_mut().unwrap().is::<TestError>());
+ let extracted = err.into_inner().unwrap();
+ extracted.downcast::<TestError>().unwrap();
+}
+
+#[test]
+fn test_const() {
+ const E: Error = const_io_error!(ErrorKind::NotFound, "hello");
+
+ assert_eq!(E.kind(), ErrorKind::NotFound);
+ assert_eq!(E.to_string(), "hello");
+ assert!(format!("{E:?}").contains("\"hello\""));
+ assert!(format!("{E:?}").contains("NotFound"));
+}
+
+#[test]
+fn test_os_packing() {
+ for code in -20i32..20i32 {
+ let e = Error::from_raw_os_error(code);
+ assert_eq!(e.raw_os_error(), Some(code));
+ assert_matches!(
+ e.repr.data(),
+ ErrorData::Os(c) if c == code,
+ );
+ }
+}
+
+#[test]
+fn test_errorkind_packing() {
+ assert_eq!(Error::from(ErrorKind::NotFound).kind(), ErrorKind::NotFound);
+ assert_eq!(Error::from(ErrorKind::PermissionDenied).kind(), ErrorKind::PermissionDenied);
+ assert_eq!(Error::from(ErrorKind::Uncategorized).kind(), ErrorKind::Uncategorized);
+ // Check that the innards look like what we want.
+ assert_matches!(
+ Error::from(ErrorKind::OutOfMemory).repr.data(),
+ ErrorData::Simple(ErrorKind::OutOfMemory),
+ );
+}
+
+#[test]
+fn test_simple_message_packing() {
+ use super::{ErrorKind::*, SimpleMessage};
+ macro_rules! check_simple_msg {
+ ($err:expr, $kind:ident, $msg:literal) => {{
+ let e = &$err;
+ // Check that the public api is right.
+ assert_eq!(e.kind(), $kind);
+ assert!(format!("{e:?}").contains($msg));
+ // and we got what we expected
+ assert_matches!(
+ e.repr.data(),
+ ErrorData::SimpleMessage(SimpleMessage { kind: $kind, message: $msg })
+ );
+ }};
+ }
+
+ let not_static = const_io_error!(Uncategorized, "not a constant!");
+ check_simple_msg!(not_static, Uncategorized, "not a constant!");
+
+ const CONST: Error = const_io_error!(NotFound, "definitely a constant!");
+ check_simple_msg!(CONST, NotFound, "definitely a constant!");
+
+ static STATIC: Error = const_io_error!(BrokenPipe, "a constant, sort of!");
+ check_simple_msg!(STATIC, BrokenPipe, "a constant, sort of!");
+}
+
+#[derive(Debug, PartialEq)]
+struct Bojji(bool);
+impl error::Error for Bojji {}
+impl fmt::Display for Bojji {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ah! {:?}", self)
+ }
+}
+
+#[test]
+fn test_custom_error_packing() {
+ use super::Custom;
+ let test = Error::new(ErrorKind::Uncategorized, Bojji(true));
+ assert_matches!(
+ test.repr.data(),
+ ErrorData::Custom(Custom {
+ kind: ErrorKind::Uncategorized,
+ error,
+ }) if error.downcast_ref::<Bojji>().as_deref() == Some(&Bojji(true)),
+ );
+}
+
+#[derive(Debug)]
+struct E;
+
+impl fmt::Display for E {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Ok(())
+ }
+}
+
+impl error::Error for E {}
+
+#[test]
+fn test_std_io_error_downcast() {
+ // Case 1: custom error, downcast succeeds
+ let io_error = Error::new(ErrorKind::Other, Bojji(true));
+ let e: Box<Bojji> = io_error.downcast().unwrap();
+ assert!(e.0);
+
+ // Case 2: custom error, downcast fails
+ let io_error = Error::new(ErrorKind::Other, Bojji(true));
+ let io_error = io_error.downcast::<E>().unwrap_err();
+
+ // ensures that the custom error is intact
+ assert_eq!(ErrorKind::Other, io_error.kind());
+ let e: Box<Bojji> = io_error.downcast().unwrap();
+ assert!(e.0);
+
+ // Case 3: os error
+ let errno = 20;
+ let io_error = Error::from_raw_os_error(errno);
+ let io_error = io_error.downcast::<E>().unwrap_err();
+
+ assert_eq!(errno, io_error.raw_os_error().unwrap());
+
+ // Case 4: simple
+ let kind = ErrorKind::OutOfMemory;
+ let io_error: Error = kind.into();
+ let io_error = io_error.downcast::<E>().unwrap_err();
+
+ assert_eq!(kind, io_error.kind());
+
+ // Case 5: simple message
+ const SIMPLE_MESSAGE: SimpleMessage =
+ SimpleMessage { kind: ErrorKind::Other, message: "simple message error test" };
+ let io_error = Error::from_static_message(&SIMPLE_MESSAGE);
+ let io_error = io_error.downcast::<E>().unwrap_err();
+
+ assert_eq!(SIMPLE_MESSAGE.kind, io_error.kind());
+ assert_eq!(SIMPLE_MESSAGE.message, &*format!("{io_error}"));
+}
diff --git a/library/std/src/io/impls.rs b/library/std/src/io/impls.rs
new file mode 100644
index 000000000..950725473
--- /dev/null
+++ b/library/std/src/io/impls.rs
@@ -0,0 +1,458 @@
+#[cfg(test)]
+mod tests;
+
+use crate::alloc::Allocator;
+use crate::cmp;
+use crate::collections::VecDeque;
+use crate::fmt;
+use crate::io::{
+ self, BufRead, ErrorKind, IoSlice, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, Write,
+};
+use crate::mem;
+
+// =============================================================================
+// Forwarding implementations
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<R: Read + ?Sized> Read for &mut R {
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (**self).read(buf)
+ }
+
+ #[inline]
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ (**self).read_buf(buf)
+ }
+
+ #[inline]
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ (**self).read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ (**self).is_read_vectored()
+ }
+
+ #[inline]
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ (**self).read_to_end(buf)
+ }
+
+ #[inline]
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ (**self).read_to_string(buf)
+ }
+
+ #[inline]
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ (**self).read_exact(buf)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write + ?Sized> Write for &mut W {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (**self).write(buf)
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ (**self).write_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ (**self).is_write_vectored()
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ (**self).flush()
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ (**self).write_all(buf)
+ }
+
+ #[inline]
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
+ (**self).write_fmt(fmt)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<S: Seek + ?Sized> Seek for &mut S {
+ #[inline]
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ (**self).seek(pos)
+ }
+
+ #[inline]
+ fn stream_position(&mut self) -> io::Result<u64> {
+ (**self).stream_position()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: BufRead + ?Sized> BufRead for &mut B {
+ #[inline]
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ (**self).fill_buf()
+ }
+
+ #[inline]
+ fn consume(&mut self, amt: usize) {
+ (**self).consume(amt)
+ }
+
+ #[inline]
+ fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize> {
+ (**self).read_until(byte, buf)
+ }
+
+ #[inline]
+ fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
+ (**self).read_line(buf)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<R: Read + ?Sized> Read for Box<R> {
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (**self).read(buf)
+ }
+
+ #[inline]
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ (**self).read_buf(buf)
+ }
+
+ #[inline]
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ (**self).read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ (**self).is_read_vectored()
+ }
+
+ #[inline]
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ (**self).read_to_end(buf)
+ }
+
+ #[inline]
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ (**self).read_to_string(buf)
+ }
+
+ #[inline]
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ (**self).read_exact(buf)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<W: Write + ?Sized> Write for Box<W> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (**self).write(buf)
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ (**self).write_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ (**self).is_write_vectored()
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ (**self).flush()
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ (**self).write_all(buf)
+ }
+
+ #[inline]
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
+ (**self).write_fmt(fmt)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<S: Seek + ?Sized> Seek for Box<S> {
+ #[inline]
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ (**self).seek(pos)
+ }
+
+ #[inline]
+ fn stream_position(&mut self) -> io::Result<u64> {
+ (**self).stream_position()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: BufRead + ?Sized> BufRead for Box<B> {
+ #[inline]
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ (**self).fill_buf()
+ }
+
+ #[inline]
+ fn consume(&mut self, amt: usize) {
+ (**self).consume(amt)
+ }
+
+ #[inline]
+ fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize> {
+ (**self).read_until(byte, buf)
+ }
+
+ #[inline]
+ fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
+ (**self).read_line(buf)
+ }
+}
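+
+// Illustrative usage sketch (editorial addition, not part of the upstream patch):
+// because of the forwarding impls above, a `&mut R` or a `Box<R>` can be handed
+// to any API that expects `impl Read` (the same holds for `Write`, `Seek`, and
+// `BufRead`):
+//
+//     use std::io::{self, Read};
+//
+//     fn count_bytes(mut r: impl Read) -> io::Result<usize> {
+//         let mut sink = Vec::new();
+//         r.read_to_end(&mut sink)
+//     }
+//
+//     fn main() -> io::Result<()> {
+//         let mut data: &[u8] = b"hello";
+//         // `&mut data` works via `impl<R: Read + ?Sized> Read for &mut R`.
+//         assert_eq!(count_bytes(&mut data)?, 5);
+//         // A boxed trait object works via `impl<R: Read + ?Sized> Read for Box<R>`.
+//         let boxed: Box<dyn Read> = Box::new(&b"world"[..]);
+//         assert_eq!(count_bytes(boxed)?, 5);
+//         Ok(())
+//     }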
+
+// =============================================================================
+// In-memory buffer implementations
+
+/// Read is implemented for `&[u8]` by copying from the slice.
+///
+/// Note that reading updates the slice to point to the yet unread part.
+/// The slice will be empty when EOF is reached.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for &[u8] {
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let amt = cmp::min(buf.len(), self.len());
+ let (a, b) = self.split_at(amt);
+
+ // First check if the amount of bytes we want to read is small:
+ // `copy_from_slice` will generally expand to a call to `memcpy`, and
+ // for a single byte the overhead is significant.
+ if amt == 1 {
+ buf[0] = a[0];
+ } else {
+ buf[..amt].copy_from_slice(a);
+ }
+
+ *self = b;
+ Ok(amt)
+ }
+
+ #[inline]
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ let amt = cmp::min(buf.remaining(), self.len());
+ let (a, b) = self.split_at(amt);
+
+ buf.append(a);
+
+ *self = b;
+ Ok(())
+ }
+
+ #[inline]
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ let mut nread = 0;
+ for buf in bufs {
+ nread += self.read(buf)?;
+ if self.is_empty() {
+ break;
+ }
+ }
+
+ Ok(nread)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ if buf.len() > self.len() {
+ return Err(io::const_io_error!(
+ ErrorKind::UnexpectedEof,
+ "failed to fill whole buffer"
+ ));
+ }
+ let (a, b) = self.split_at(buf.len());
+
+ // First check if the amount of bytes we want to read is small:
+ // `copy_from_slice` will generally expand to a call to `memcpy`, and
+ // for a single byte the overhead is significant.
+ if buf.len() == 1 {
+ buf[0] = a[0];
+ } else {
+ buf.copy_from_slice(a);
+ }
+
+ *self = b;
+ Ok(())
+ }
+
+ #[inline]
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ buf.extend_from_slice(*self);
+ let len = self.len();
+ *self = &self[len..];
+ Ok(len)
+ }
+}
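+
+// Illustrative usage sketch (editorial addition, not part of the upstream patch):
+// each successful `read` shrinks the slice itself, so the reader "remembers"
+// how far it has gotten:
+//
+//     use std::io::Read;
+//
+//     let mut data: &[u8] = b"hello world";
+//     let mut buf = [0u8; 5];
+//     let n = data.read(&mut buf).unwrap();
+//     assert_eq!(&buf[..n], b"hello");
+//     assert_eq!(data, b" world"); // the slice now points at the unread tail
+//
+//     let mut rest = Vec::new();
+//     data.read_to_end(&mut rest).unwrap();
+//     assert_eq!(rest, b" world");
+//     assert!(data.is_empty()); // EOF: the slice has been fully consumed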
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl BufRead for &[u8] {
+ #[inline]
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ Ok(*self)
+ }
+
+ #[inline]
+ fn consume(&mut self, amt: usize) {
+ *self = &self[amt..];
+ }
+}
+
+/// Write is implemented for `&mut [u8]` by copying into the slice, overwriting
+/// its data.
+///
+/// Note that writing updates the slice to point to the yet unwritten part.
+/// The slice will be empty when it has been completely overwritten.
+///
+/// If the number of bytes to be written exceeds the size of the slice, write operations will
+/// return short writes: ultimately, `Ok(0)`; in this situation, `write_all` returns an error of
+/// kind `ErrorKind::WriteZero`.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for &mut [u8] {
+ #[inline]
+ fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+ let amt = cmp::min(data.len(), self.len());
+ let (a, b) = mem::replace(self, &mut []).split_at_mut(amt);
+ a.copy_from_slice(&data[..amt]);
+ *self = b;
+ Ok(amt)
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let mut nwritten = 0;
+ for buf in bufs {
+ nwritten += self.write(buf)?;
+ if self.is_empty() {
+ break;
+ }
+ }
+
+ Ok(nwritten)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn write_all(&mut self, data: &[u8]) -> io::Result<()> {
+ if self.write(data)? == data.len() {
+ Ok(())
+ } else {
+ Err(io::const_io_error!(ErrorKind::WriteZero, "failed to write whole buffer"))
+ }
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
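+
+// Illustrative usage sketch (editorial addition, not part of the upstream patch):
+// writes stop once the slice is exhausted, and `write_all` then reports
+// `WriteZero`:
+//
+//     use std::io::Write;
+//
+//     let mut backing = [0u8; 4];
+//     {
+//         let mut buf: &mut [u8] = &mut backing;
+//         assert_eq!(buf.write(b"abcdef").unwrap(), 4); // short write: only 4 bytes fit
+//         assert!(buf.write_all(b"x").is_err());        // slice is full -> ErrorKind::WriteZero
+//     }
+//     assert_eq!(&backing, b"abcd");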
+
+/// Write is implemented for `Vec<u8>` by appending to the vector.
+/// The vector will grow as needed.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Allocator> Write for Vec<u8, A> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.extend_from_slice(buf);
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let len = bufs.iter().map(|b| b.len()).sum();
+ self.reserve(len);
+ for buf in bufs {
+ self.extend_from_slice(buf);
+ }
+ Ok(len)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.extend_from_slice(buf);
+ Ok(())
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
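+
+// Illustrative usage sketch (editorial addition, not part of the upstream patch):
+// a `Vec<u8>` is the simplest growable writer; `write!` also works because the
+// trait supplies `write_fmt`:
+//
+//     use std::io::Write;
+//
+//     let mut out = Vec::new();
+//     out.write_all(b"answer: ").unwrap();
+//     write!(out, "{}", 42).unwrap();
+//     assert_eq!(&out[..], &b"answer: 42"[..]);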
+
+/// Read is implemented for `VecDeque<u8>` by consuming bytes from the front of the `VecDeque`.
+#[stable(feature = "vecdeque_read_write", since = "1.63.0")]
+impl<A: Allocator> Read for VecDeque<u8, A> {
+ /// Fill `buf` with the contents of the "front" slice as returned by
+ /// [`as_slices`][`VecDeque::as_slices`]. If the contained byte slices of the `VecDeque` are
+ /// discontiguous, multiple calls to `read` will be needed to read the entire content.
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let (ref mut front, _) = self.as_slices();
+ let n = Read::read(front, buf)?;
+ self.drain(..n);
+ Ok(n)
+ }
+
+ #[inline]
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ let (ref mut front, _) = self.as_slices();
+ let n = cmp::min(buf.remaining(), front.len());
+ Read::read_buf(front, buf)?;
+ self.drain(..n);
+ Ok(())
+ }
+}
+
+/// Write is implemented for `VecDeque<u8>` by appending to the `VecDeque`, growing it as needed.
+#[stable(feature = "vecdeque_read_write", since = "1.63.0")]
+impl<A: Allocator> Write for VecDeque<u8, A> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.extend(buf);
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.extend(buf);
+ Ok(())
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
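+
+// Illustrative usage sketch (editorial addition, not part of the upstream patch):
+// together, the two impls let a `VecDeque<u8>` act as a simple in-memory FIFO
+// byte buffer, written at the back and read from the front:
+//
+//     use std::collections::VecDeque;
+//     use std::io::{Read, Write};
+//
+//     let mut fifo: VecDeque<u8> = VecDeque::new();
+//     fifo.write_all(b"hello").unwrap(); // appended at the back
+//
+//     let mut buf = [0u8; 3];
+//     let n = fifo.read(&mut buf).unwrap(); // consumed from the front
+//     assert_eq!(&buf[..n], b"hel");
+//     assert_eq!(fifo, [b'l', b'o']);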
diff --git a/library/std/src/io/impls/tests.rs b/library/std/src/io/impls/tests.rs
new file mode 100644
index 000000000..d1cd84a67
--- /dev/null
+++ b/library/std/src/io/impls/tests.rs
@@ -0,0 +1,57 @@
+use crate::io::prelude::*;
+
+#[bench]
+fn bench_read_slice(b: &mut test::Bencher) {
+ let buf = [5; 1024];
+ let mut dst = [0; 128];
+
+ b.iter(|| {
+ let mut rd = &buf[..];
+ for _ in 0..8 {
+ let _ = rd.read(&mut dst);
+ test::black_box(&dst);
+ }
+ })
+}
+
+#[bench]
+fn bench_write_slice(b: &mut test::Bencher) {
+ let mut buf = [0; 1024];
+ let src = [5; 128];
+
+ b.iter(|| {
+ let mut wr = &mut buf[..];
+ for _ in 0..8 {
+ let _ = wr.write_all(&src);
+ test::black_box(&wr);
+ }
+ })
+}
+
+#[bench]
+fn bench_read_vec(b: &mut test::Bencher) {
+ let buf = vec![5; 1024];
+ let mut dst = [0; 128];
+
+ b.iter(|| {
+ let mut rd = &buf[..];
+ for _ in 0..8 {
+ let _ = rd.read(&mut dst);
+ test::black_box(&dst);
+ }
+ })
+}
+
+#[bench]
+fn bench_write_vec(b: &mut test::Bencher) {
+ let mut buf = Vec::with_capacity(1024);
+ let src = [5; 128];
+
+ b.iter(|| {
+ let mut wr = &mut buf[..];
+ for _ in 0..8 {
+ let _ = wr.write_all(&src);
+ test::black_box(&wr);
+ }
+ })
+}
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
new file mode 100644
index 000000000..96addbd1a
--- /dev/null
+++ b/library/std/src/io/mod.rs
@@ -0,0 +1,2827 @@
+//! Traits, helpers, and type definitions for core I/O functionality.
+//!
+//! The `std::io` module contains a number of common things you'll need
+//! when doing input and output. The most core part of this module is
+//! the [`Read`] and [`Write`] traits, which provide the
+//! most general interface for reading and writing input and output.
+//!
+//! # Read and Write
+//!
+//! Because they are traits, [`Read`] and [`Write`] are implemented by a number
+//! of other types, and you can implement them for your types too. As such,
+//! you'll see a few different types of I/O throughout the documentation in
+//! this module: [`File`]s, [`TcpStream`]s, and sometimes even [`Vec<T>`]s. For
+//! example, [`Read`] adds a [`read`][`Read::read`] method, which we can use on
+//! [`File`]s:
+//!
+//! ```no_run
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::fs::File;
+//!
+//! fn main() -> io::Result<()> {
+//! let mut f = File::open("foo.txt")?;
+//! let mut buffer = [0; 10];
+//!
+//! // read up to 10 bytes
+//! let n = f.read(&mut buffer)?;
+//!
+//! println!("The bytes: {:?}", &buffer[..n]);
+//! Ok(())
+//! }
+//! ```
+//!
+//! [`Read`] and [`Write`] are so important that implementors of the two traits have a
+//! nickname: readers and writers. So you'll sometimes see 'a reader' instead
+//! of 'a type that implements the [`Read`] trait'. Much easier!
+//!
+//! ## Seek and BufRead
+//!
+//! Beyond that, there are two important traits that are provided: [`Seek`]
+//! and [`BufRead`]. Both of these build on top of a reader to control
+//! how the reading happens. [`Seek`] lets you control where the next byte is
+//! coming from:
+//!
+//! ```no_run
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::SeekFrom;
+//! use std::fs::File;
+//!
+//! fn main() -> io::Result<()> {
+//! let mut f = File::open("foo.txt")?;
+//! let mut buffer = [0; 10];
+//!
+//! // skip to the last 10 bytes of the file
+//! f.seek(SeekFrom::End(-10))?;
+//!
+//! // read up to 10 bytes
+//! let n = f.read(&mut buffer)?;
+//!
+//! println!("The bytes: {:?}", &buffer[..n]);
+//! Ok(())
+//! }
+//! ```
+//!
+//! [`BufRead`] uses an internal buffer to provide a number of other ways to read, but
+//! to show it off, we'll need to talk about buffers in general. Keep reading!
+//!
+//! ## BufReader and BufWriter
+//!
+//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be
+//! making near-constant calls to the operating system. To help with this,
+//! `std::io` comes with two structs, [`BufReader`] and [`BufWriter`], which wrap
+//! readers and writers. The wrapper uses a buffer, reducing the number of
+//! calls and providing nicer methods for accessing exactly what you want.
+//!
+//! For example, [`BufReader`] works with the [`BufRead`] trait to add extra
+//! methods to any reader:
+//!
+//! ```no_run
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::BufReader;
+//! use std::fs::File;
+//!
+//! fn main() -> io::Result<()> {
+//! let f = File::open("foo.txt")?;
+//! let mut reader = BufReader::new(f);
+//! let mut buffer = String::new();
+//!
+//! // read a line into buffer
+//! reader.read_line(&mut buffer)?;
+//!
+//! println!("{buffer}");
+//! Ok(())
+//! }
+//! ```
+//!
+//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call
+//! to [`write`][`Write::write`]:
+//!
+//! ```no_run
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::BufWriter;
+//! use std::fs::File;
+//!
+//! fn main() -> io::Result<()> {
+//! let f = File::create("foo.txt")?;
+//! {
+//! let mut writer = BufWriter::new(f);
+//!
+//! // write a byte to the buffer
+//! writer.write(&[42])?;
+//!
+//! } // the buffer is flushed once writer goes out of scope
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! ## Standard input and output
+//!
+//! A very common source of input is standard input:
+//!
+//! ```no_run
+//! use std::io;
+//!
+//! fn main() -> io::Result<()> {
+//! let mut input = String::new();
+//!
+//! io::stdin().read_line(&mut input)?;
+//!
+//! println!("You typed: {}", input.trim());
+//! Ok(())
+//! }
+//! ```
+//!
+//! Note that you cannot use the [`?` operator] in functions that do not return
+//! a [`Result<T, E>`][`Result`]. Instead, you can call [`.unwrap()`]
+//! or `match` on the return value to catch any possible errors:
+//!
+//! ```no_run
+//! use std::io;
+//!
+//! let mut input = String::new();
+//!
+//! io::stdin().read_line(&mut input).unwrap();
+//! ```
+//!
+//! And a very common source of output is standard output:
+//!
+//! ```no_run
+//! use std::io;
+//! use std::io::prelude::*;
+//!
+//! fn main() -> io::Result<()> {
+//! io::stdout().write(&[42])?;
+//! Ok(())
+//! }
+//! ```
+//!
+//! Of course, using [`io::stdout`] directly is less common than something like
+//! [`println!`].
+//!
+//! ## Iterator types
+//!
+//! A large number of the structures provided by `std::io` are for various
+//! ways of iterating over I/O. For example, [`Lines`] is used to split over
+//! lines:
+//!
+//! ```no_run
+//! use std::io;
+//! use std::io::prelude::*;
+//! use std::io::BufReader;
+//! use std::fs::File;
+//!
+//! fn main() -> io::Result<()> {
+//! let f = File::open("foo.txt")?;
+//! let reader = BufReader::new(f);
+//!
+//! for line in reader.lines() {
+//! println!("{}", line?);
+//! }
+//! Ok(())
+//! }
+//! ```
+//!
+//! ## Functions
+//!
+//! There are a number of [functions][functions-list] that offer access to various
+//! features. For example, we can use three of these functions to copy everything
+//! from standard input to standard output:
+//!
+//! ```no_run
+//! use std::io;
+//!
+//! fn main() -> io::Result<()> {
+//! io::copy(&mut io::stdin(), &mut io::stdout())?;
+//! Ok(())
+//! }
+//! ```
+//!
+//! [functions-list]: #functions-1
+//!
+//! ## io::Result
+//!
+//! Last, but certainly not least, is [`io::Result`]. This type is used
+//! as the return type of many `std::io` functions that can cause an error, and
+//! can be returned from your own functions as well. Many of the examples in this
+//! module use the [`?` operator]:
+//!
+//! ```
+//! use std::io;
+//!
+//! fn read_input() -> io::Result<()> {
+//! let mut input = String::new();
+//!
+//! io::stdin().read_line(&mut input)?;
+//!
+//! println!("You typed: {}", input.trim());
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! The return type of `read_input()`, [`io::Result<()>`][`io::Result`], is a very
+//! common type for functions which don't have a 'real' return value, but do want to
+//! return errors if they happen. In this case, the only purpose of this function is
+//! to read the line and print it, so we use `()`.
+//!
+//! ## Platform-specific behavior
+//!
+//! Many I/O functions throughout the standard library are documented to indicate
+//! which library calls or syscalls they delegate to. This is done to help
+//! applications both understand what's happening under the hood and investigate
+//! any possibly unclear semantics. Note, however, that this is informative, not a binding
+//! contract. The implementations of many of these functions are subject to change over
+//! time and may call fewer or more syscalls/library functions.
+//!
+//! [`File`]: crate::fs::File
+//! [`TcpStream`]: crate::net::TcpStream
+//! [`io::stdout`]: stdout
+//! [`io::Result`]: self::Result
+//! [`?` operator]: ../../book/appendix-02-operators.html
+//! [`Result`]: crate::result::Result
+//! [`.unwrap()`]: crate::result::Result::unwrap
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::cmp;
+use crate::fmt;
+use crate::mem::replace;
+use crate::ops::{Deref, DerefMut};
+use crate::slice;
+use crate::str;
+use crate::sys;
+use crate::sys_common::memchr;
+
+#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
+pub use self::buffered::WriterPanicked;
+#[unstable(feature = "internal_output_capture", issue = "none")]
+#[doc(no_inline, hidden)]
+pub use self::stdio::set_output_capture;
+#[unstable(feature = "print_internals", issue = "none")]
+pub use self::stdio::{_eprint, _print};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::{
+ buffered::{BufReader, BufWriter, IntoInnerError, LineWriter},
+ copy::copy,
+ cursor::Cursor,
+ error::{Error, ErrorKind, Result},
+ stdio::{stderr, stdin, stdout, Stderr, StderrLock, Stdin, StdinLock, Stdout, StdoutLock},
+ util::{empty, repeat, sink, Empty, Repeat, Sink},
+};
+
+#[unstable(feature = "read_buf", issue = "78485")]
+pub use self::readbuf::ReadBuf;
+pub(crate) use error::const_io_error;
+
+mod buffered;
+pub(crate) mod copy;
+mod cursor;
+mod error;
+mod impls;
+pub mod prelude;
+mod readbuf;
+mod stdio;
+mod util;
+
+const DEFAULT_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
+
+pub(crate) use stdio::cleanup;
+
+struct Guard<'a> {
+ buf: &'a mut Vec<u8>,
+ len: usize,
+}
+
+impl Drop for Guard<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ self.buf.set_len(self.len);
+ }
+ }
+}
+
+// Several `read_to_string` and `read_line` methods in the standard library will
+// append data into a `String` buffer, but we need to be pretty careful when
+// doing this. The implementation will just call `.as_mut_vec()` and then
+// delegate to a byte-oriented reading method, but we must ensure that when
+// returning we never leave `buf` in a state such that it contains invalid UTF-8
+// in its bounds.
+//
+// To this end, we use an RAII guard (to protect against panics) which updates
+// the length of the string when it is dropped. This guard initially truncates
+// the string to the prior length and only after we've validated that the
+// new contents are valid UTF-8 do we allow it to set a longer length.
+//
+// The unsafety in this function is twofold:
+//
+// 1. We're looking at the raw bytes of `buf`, so we take on the burden of UTF-8
+// checks.
+// 2. We're passing a raw buffer to the function `f`, and it is expected that
+// the function only *appends* bytes to the buffer. We'll get undefined
+// behavior if existing bytes are overwritten to have non-UTF-8 data.
+pub(crate) unsafe fn append_to_string<F>(buf: &mut String, f: F) -> Result<usize>
+where
+ F: FnOnce(&mut Vec<u8>) -> Result<usize>,
+{
+ let mut g = Guard { len: buf.len(), buf: buf.as_mut_vec() };
+ let ret = f(g.buf);
+ if str::from_utf8(&g.buf[g.len..]).is_err() {
+ ret.and_then(|_| {
+ Err(error::const_io_error!(
+ ErrorKind::InvalidData,
+ "stream did not contain valid UTF-8"
+ ))
+ })
+ } else {
+ g.len = g.buf.len();
+ ret
+ }
+}
+
+// This uses an adaptive system to extend the vector when it fills. We want to
+// avoid paying to allocate and zero a huge chunk of memory if the reader only
+// has 4 bytes while still making large reads if the reader does have a ton
+// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
+// time is 4,500 times (!) slower than a default reservation size of 32 if the
+// reader has a very small amount of data to return.
+pub(crate) fn default_read_to_end<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>) -> Result<usize> {
+ let start_len = buf.len();
+ let start_cap = buf.capacity();
+
+ let mut initialized = 0; // Extra initialized bytes from previous loop iteration
+ loop {
+ if buf.len() == buf.capacity() {
+ buf.reserve(32); // buf is full, need more space
+ }
+
+ let mut read_buf = ReadBuf::uninit(buf.spare_capacity_mut());
+
+ // SAFETY: These bytes were initialized but not filled in the previous loop
+ unsafe {
+ read_buf.assume_init(initialized);
+ }
+
+ match r.read_buf(&mut read_buf) {
+ Ok(()) => {}
+ Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ }
+
+ if read_buf.filled_len() == 0 {
+ return Ok(buf.len() - start_len);
+ }
+
+ // store how much was initialized but not filled
+ initialized = read_buf.initialized_len() - read_buf.filled_len();
+ let new_len = read_buf.filled_len() + buf.len();
+
+ // SAFETY: ReadBuf's invariants mean this much memory is init
+ unsafe {
+ buf.set_len(new_len);
+ }
+
+ if buf.len() == buf.capacity() && buf.capacity() == start_cap {
+ // The buffer might be an exact fit. Let's read into a probe buffer
+ // and see if it returns `Ok(0)`. If so, we've avoided an
+ // unnecessary doubling of the capacity. But if not, append the
+ // probe buffer to the primary buffer and let its capacity grow.
+ let mut probe = [0u8; 32];
+
+ loop {
+ match r.read(&mut probe) {
+ Ok(0) => return Ok(buf.len() - start_len),
+ Ok(n) => {
+ buf.extend_from_slice(&probe[..n]);
+ break;
+ }
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ }
+ }
+ }
+ }
+}
+
+pub(crate) fn default_read_to_string<R: Read + ?Sized>(
+ r: &mut R,
+ buf: &mut String,
+) -> Result<usize> {
+ // Note that we do *not* call `r.read_to_end()` here. We are passing
+ // `&mut Vec<u8>` (the raw contents of `buf`) into the `read_to_end`
+ // method to fill it up. An arbitrary implementation could overwrite the
+ // entire contents of the vector, not just append to it (which is what
+ // we are expecting).
+ //
+ // To prevent extraneously checking the UTF-8-ness of the entire buffer
+ // we pass it to our hardcoded `default_read_to_end` implementation which
+ // we know is guaranteed to only read data into the end of the buffer.
+ unsafe { append_to_string(buf, |b| default_read_to_end(r, b)) }
+}
+
+pub(crate) fn default_read_vectored<F>(read: F, bufs: &mut [IoSliceMut<'_>]) -> Result<usize>
+where
+ F: FnOnce(&mut [u8]) -> Result<usize>,
+{
+ let buf = bufs.iter_mut().find(|b| !b.is_empty()).map_or(&mut [][..], |b| &mut **b);
+ read(buf)
+}
+
+pub(crate) fn default_write_vectored<F>(write: F, bufs: &[IoSlice<'_>]) -> Result<usize>
+where
+ F: FnOnce(&[u8]) -> Result<usize>,
+{
+ let buf = bufs.iter().find(|b| !b.is_empty()).map_or(&[][..], |b| &**b);
+ write(buf)
+}
+
+pub(crate) fn default_read_exact<R: Read + ?Sized>(this: &mut R, mut buf: &mut [u8]) -> Result<()> {
+ while !buf.is_empty() {
+ match this.read(buf) {
+ Ok(0) => break,
+ Ok(n) => {
+ let tmp = buf;
+ buf = &mut tmp[n..];
+ }
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ if !buf.is_empty() {
+ Err(error::const_io_error!(ErrorKind::UnexpectedEof, "failed to fill whole buffer"))
+ } else {
+ Ok(())
+ }
+}
+
+pub(crate) fn default_read_buf<F>(read: F, buf: &mut ReadBuf<'_>) -> Result<()>
+where
+ F: FnOnce(&mut [u8]) -> Result<usize>,
+{
+ let n = read(buf.initialize_unfilled())?;
+ buf.add_filled(n);
+ Ok(())
+}
+
+/// The `Read` trait allows for reading bytes from a source.
+///
+/// Implementors of the `Read` trait are called 'readers'.
+///
+/// Readers are defined by one required method, [`read()`]. Each call to [`read()`]
+/// will attempt to pull bytes from this source into a provided buffer. A
+/// number of other methods are implemented in terms of [`read()`], giving
+/// implementors a number of ways to read bytes while only needing to implement
+/// a single method.
+///
+/// Readers are intended to be composable with one another. Many implementors
+/// throughout [`std::io`] take and provide types which implement the `Read`
+/// trait.
+///
+/// Please note that each call to [`read()`] may involve a system call, and
+/// therefore, using something that implements [`BufRead`], such as
+/// [`BufReader`], will be more efficient.
+///
+/// # Examples
+///
+/// [`File`]s implement `Read`:
+///
+/// ```no_run
+/// use std::io;
+/// use std::io::prelude::*;
+/// use std::fs::File;
+///
+/// fn main() -> io::Result<()> {
+/// let mut f = File::open("foo.txt")?;
+/// let mut buffer = [0; 10];
+///
+/// // read up to 10 bytes
+/// f.read(&mut buffer)?;
+///
+/// let mut buffer = Vec::new();
+/// // read the whole file
+/// f.read_to_end(&mut buffer)?;
+///
+/// // read into a String, so that you don't need to do the conversion.
+/// let mut buffer = String::new();
+/// f.read_to_string(&mut buffer)?;
+///
+/// // and more! See the other methods for more details.
+/// Ok(())
+/// }
+/// ```
+///
+/// Read from [`&str`] because [`&[u8]`][prim@slice] implements `Read`:
+///
+/// ```no_run
+/// # use std::io;
+/// use std::io::prelude::*;
+///
+/// fn main() -> io::Result<()> {
+/// let mut b = "This string will be read".as_bytes();
+/// let mut buffer = [0; 10];
+///
+/// // read up to 10 bytes
+/// b.read(&mut buffer)?;
+///
+/// // etc... it works exactly as a File does!
+/// Ok(())
+/// }
+/// ```
+///
+/// [`read()`]: Read::read
+/// [`&str`]: prim@str
+/// [`std::io`]: self
+/// [`File`]: crate::fs::File
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(notable_trait)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "IoRead")]
+pub trait Read {
+ /// Pull some bytes from this source into the specified buffer, returning
+ /// how many bytes were read.
+ ///
+ /// This function does not provide any guarantees about whether it blocks
+ /// waiting for data, but if an object needs to block for a read and cannot,
+ /// it will typically signal this via an [`Err`] return value.
+ ///
+ /// If the return value of this method is [`Ok(n)`], then implementations must
+ /// guarantee that `0 <= n <= buf.len()`. A nonzero `n` value indicates
+ /// that the buffer `buf` has been filled in with `n` bytes of data from this
+ /// source. If `n` is `0`, then it can indicate one of two scenarios:
+ ///
+ /// 1. This reader has reached its "end of file" and will likely no longer
+ /// be able to produce bytes. Note that this does not mean that the
+ /// reader will *always* no longer be able to produce bytes. As an example,
+ /// on Linux, this method will call the `recv` syscall for a [`TcpStream`],
+ /// where returning zero indicates the connection was shut down correctly. While
+ /// for [`File`], it is possible to reach the end of file and get zero as result,
+ /// but if more data is appended to the file, future calls to `read` will return
+ /// more data.
+ /// 2. The buffer specified was 0 bytes in length.
+ ///
+ /// It is not an error if the returned value `n` is smaller than the buffer size,
+ /// even when the reader is not at the end of the stream yet.
+ /// This may happen for example because fewer bytes are actually available right now
+ /// (e.g. being close to end-of-file) or because `read()` was interrupted by a signal.
+ ///
+ /// As this trait is safe to implement, callers cannot rely on `n <= buf.len()` for safety.
+ /// Extra care needs to be taken when `unsafe` functions are used to access the read bytes.
+ /// Callers have to ensure that no unchecked out-of-bounds accesses are possible even if
+ /// `n > buf.len()`.
+ ///
+ /// No guarantees are provided about the contents of `buf` when this
+ /// function is called, so implementations cannot rely on any property of the
+ /// contents of `buf` being true. It is recommended that *implementations*
+ /// only write data to `buf` instead of reading its contents.
+ ///
+ /// Correspondingly, however, *callers* of this method must not assume any guarantees
+ /// about how the implementation uses `buf`. The trait is safe to implement,
+ /// so it is possible that the code that's supposed to write to the buffer might also read
+ /// from it. It is your responsibility to make sure that `buf` is initialized
+ /// before calling `read`. Calling `read` with an uninitialized `buf` (of the kind one
+ /// obtains via [`MaybeUninit<T>`]) is not safe, and can lead to undefined behavior.
+ ///
+ /// [`MaybeUninit<T>`]: crate::mem::MaybeUninit
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters any form of I/O or other error, an error
+ /// variant will be returned. If an error is returned then it must be
+ /// guaranteed that no bytes were read.
+ ///
+ /// An error of the [`ErrorKind::Interrupted`] kind is non-fatal and the read
+ /// operation should be retried if there is nothing else to do.
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`Ok(n)`]: Ok
+ /// [`File`]: crate::fs::File
+ /// [`TcpStream`]: crate::net::TcpStream
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// let mut buffer = [0; 10];
+ ///
+ /// // read up to 10 bytes
+ /// let n = f.read(&mut buffer[..])?;
+ ///
+ /// println!("The bytes: {:?}", &buffer[..n]);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn read(&mut self, buf: &mut [u8]) -> Result<usize>;
+
+ /// Like `read`, except that it reads into a slice of buffers.
+ ///
+ /// Data is copied to fill each buffer in order, with the final buffer
+ /// written to possibly being only partially filled. This method must
+ /// behave equivalently to a single call to `read` with concatenated
+ /// buffers.
+ ///
+ /// The default implementation calls `read` with either the first nonempty
+ /// buffer provided, or an empty one if none exists.
+ #[stable(feature = "iovec", since = "1.36.0")]
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> Result<usize> {
+ default_read_vectored(|b| self.read(b), bufs)
+ }
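+
+ // Illustrative sketch (editorial addition, not part of the upstream patch):
+ // with a reader that does fill buffers in order, such as `&[u8]`:
+ //
+ //     use std::io::{IoSliceMut, Read};
+ //
+ //     let mut src: &[u8] = b"abcdef";
+ //     let (mut a, mut b) = ([0u8; 4], [0u8; 4]);
+ //     let mut bufs = [IoSliceMut::new(&mut a), IoSliceMut::new(&mut b)];
+ //     let n = src.read_vectored(&mut bufs).unwrap();
+ //     assert_eq!(n, 6); // "abcd" landed in the first buffer, "ef" in the second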
+
+ /// Determines if this `Read`er has an efficient `read_vectored`
+ /// implementation.
+ ///
+ /// If a `Read`er does not override the default `read_vectored`
+ /// implementation, code using it may want to avoid the method altogether
+ /// and coalesce reads into a single buffer for higher performance.
+ ///
+ /// The default implementation returns `false`.
+ #[unstable(feature = "can_vector", issue = "69941")]
+ fn is_read_vectored(&self) -> bool {
+ false
+ }
+
+ /// Read all bytes until EOF in this source, placing them into `buf`.
+ ///
+ /// All bytes read from this source will be appended to the specified buffer
+ /// `buf`. This function will continuously call [`read()`] to append more data to
+ /// `buf` until [`read()`] returns either [`Ok(0)`] or an error of
+ /// non-[`ErrorKind::Interrupted`] kind.
+ ///
+ /// If successful, this function will return the total number of bytes read.
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters an error of the kind
+ /// [`ErrorKind::Interrupted`] then the error is ignored and the operation
+ /// will continue.
+ ///
+ /// If any other read error is encountered then this function immediately
+ /// returns. Any bytes which have already been read will be appended to
+ /// `buf`.
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`read()`]: Read::read
+ /// [`Ok(0)`]: Ok
+ /// [`File`]: crate::fs::File
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// let mut buffer = Vec::new();
+ ///
+ /// // read the whole file
+ /// f.read_to_end(&mut buffer)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// (See also the [`std::fs::read`] convenience function for reading from a
+ /// file.)
+ ///
+ /// [`std::fs::read`]: crate::fs::read
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> {
+ default_read_to_end(self, buf)
+ }
+
+ /// Read all bytes until EOF in this source, appending them to `buf`.
+ ///
+ /// If successful, this function returns the number of bytes which were read
+ /// and appended to `buf`.
+ ///
+ /// # Errors
+ ///
+ /// If the data in this stream is *not* valid UTF-8 then an error is
+ /// returned and `buf` is unchanged.
+ ///
+ /// See [`read_to_end`] for other error semantics.
+ ///
+ /// [`read_to_end`]: Read::read_to_end
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`File`]: crate::fs::File
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// let mut buffer = String::new();
+ ///
+ /// f.read_to_string(&mut buffer)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// (See also the [`std::fs::read_to_string`] convenience function for
+ /// reading from a file.)
+ ///
+ /// [`std::fs::read_to_string`]: crate::fs::read_to_string
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn read_to_string(&mut self, buf: &mut String) -> Result<usize> {
+ default_read_to_string(self, buf)
+ }
+
+ /// Read the exact number of bytes required to fill `buf`.
+ ///
+ /// This function reads as many bytes as necessary to completely fill the
+ /// specified buffer `buf`.
+ ///
+ /// No guarantees are provided about the contents of `buf` when this
+ /// function is called, so implementations cannot rely on any property of the
+ /// contents of `buf` being true. It is recommended that implementations
+ /// only write data to `buf` instead of reading its contents. The
+ /// documentation on [`read`] has a more detailed explanation on this
+ /// subject.
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters an error of the kind
+ /// [`ErrorKind::Interrupted`] then the error is ignored and the operation
+ /// will continue.
+ ///
+ /// If this function encounters an "end of file" before completely filling
+ /// the buffer, it returns an error of the kind [`ErrorKind::UnexpectedEof`].
+ /// The contents of `buf` are unspecified in this case.
+ ///
+ /// If any other read error is encountered then this function immediately
+ /// returns. The contents of `buf` are unspecified in this case.
+ ///
+ /// If this function returns an error, it is unspecified how many bytes it
+ /// has read, but it will never read more than would be necessary to
+ /// completely fill the buffer.
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`read`]: Read::read
+ /// [`File`]: crate::fs::File
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// let mut buffer = [0; 10];
+ ///
+ /// // read exactly 10 bytes
+ /// f.read_exact(&mut buffer)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "read_exact", since = "1.6.0")]
+ fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {
+ default_read_exact(self, buf)
+ }
+
+ /// Pull some bytes from this source into the specified buffer.
+ ///
+ /// This is equivalent to the [`read`](Read::read) method, except that it is passed a [`ReadBuf`] rather than `[u8]` to allow use
+ /// with uninitialized buffers. The new data will be appended to any existing contents of `buf`.
+ ///
+ /// The default implementation delegates to `read`.
+ #[unstable(feature = "read_buf", issue = "78485")]
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> Result<()> {
+ default_read_buf(|b| self.read(b), buf)
+ }
+
+ /// Read the exact number of bytes required to fill `buf`.
+ ///
+ /// This is equivalent to the [`read_exact`](Read::read_exact) method, except that it is passed a [`ReadBuf`] rather than `[u8]` to
+ /// allow use with uninitialized buffers.
+ #[unstable(feature = "read_buf", issue = "78485")]
+ fn read_buf_exact(&mut self, buf: &mut ReadBuf<'_>) -> Result<()> {
+ while buf.remaining() > 0 {
+ let prev_filled = buf.filled().len();
+ match self.read_buf(buf) {
+ Ok(()) => {}
+ Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ }
+
+ if buf.filled().len() == prev_filled {
+ return Err(Error::new(ErrorKind::UnexpectedEof, "failed to fill buffer"));
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Creates a "by reference" adaptor for this instance of `Read`.
+ ///
+ /// The returned adapter also implements `Read` and will simply borrow this
+ /// current reader.
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`File`]: crate::fs::File
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::Read;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ /// let mut buffer = Vec::new();
+ /// let mut other_buffer = Vec::new();
+ ///
+ /// {
+ /// let reference = f.by_ref();
+ ///
+ /// // read at most 5 bytes
+ /// reference.take(5).read_to_end(&mut buffer)?;
+ ///
+ /// } // drop our &mut reference so we can use f again
+ ///
+ /// // original file still usable, read the rest
+ /// f.read_to_end(&mut other_buffer)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn by_ref(&mut self) -> &mut Self
+ where
+ Self: Sized,
+ {
+ self
+ }
+
+ /// Transforms this `Read` instance to an [`Iterator`] over its bytes.
+ ///
+ /// The returned type implements [`Iterator`] where the [`Item`] is
+ /// <code>[Result]<[u8], [io::Error]></code>.
+ /// The yielded item is [`Ok`] if a byte was successfully read and [`Err`]
+ /// otherwise. EOF is mapped to returning [`None`] from this iterator.
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`Item`]: Iterator::Item
+ /// [`File`]: crate::fs::File "fs::File"
+ /// [Result]: crate::result::Result "Result"
+ /// [io::Error]: self::Error "io::Error"
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let f = File::open("foo.txt")?;
+ ///
+ /// for byte in f.bytes() {
+ /// println!("{}", byte.unwrap());
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn bytes(self) -> Bytes<Self>
+ where
+ Self: Sized,
+ {
+ Bytes { inner: self }
+ }
+
+ /// Creates an adapter which will chain this stream with another.
+ ///
+ /// The returned `Read` instance will first read all bytes from this object
+ /// until EOF is encountered. Afterwards the output is equivalent to the
+ /// output of `next`.
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`File`]: crate::fs::File
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let f1 = File::open("foo.txt")?;
+ /// let f2 = File::open("bar.txt")?;
+ ///
+ /// let mut handle = f1.chain(f2);
+ /// let mut buffer = String::new();
+ ///
+ /// // read the value into a String. We could use any Read method here,
+ /// // this is just one example.
+ /// handle.read_to_string(&mut buffer)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn chain<R: Read>(self, next: R) -> Chain<Self, R>
+ where
+ Self: Sized,
+ {
+ Chain { first: self, second: next, done_first: false }
+ }
+
+ /// Creates an adapter which will read at most `limit` bytes from it.
+ ///
+ /// This function returns a new instance of `Read` which will read at most
+ /// `limit` bytes, after which it will always return EOF ([`Ok(0)`]). Any
+ /// read errors will not count towards the number of bytes read and future
+ /// calls to [`read()`] may succeed.
+ ///
+ /// # Examples
+ ///
+ /// [`File`]s implement `Read`:
+ ///
+ /// [`File`]: crate::fs::File
+ /// [`Ok(0)`]: Ok
+ /// [`read()`]: Read::read
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let f = File::open("foo.txt")?;
+ /// let mut buffer = [0; 5];
+ ///
+ /// // read at most five bytes
+ /// let mut handle = f.take(5);
+ ///
+ /// handle.read(&mut buffer)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn take(self, limit: u64) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ Take { inner: self, limit }
+ }
+}
+
+/// Read all bytes from a [reader][Read] into a new [`String`].
+///
+/// This is a convenience function for [`Read::read_to_string`]. Using this
+/// function avoids having to create a variable first and provides more type
+/// safety since you can only get the buffer out if there were no errors. (If you
+/// use [`Read::read_to_string`] you have to remember to check whether the read
+/// succeeded because otherwise your buffer will be empty or only partially full.)
+///
+/// # Performance
+///
+/// The downside of this function's increased ease of use and type safety is
+/// that it gives you less control over performance. For example, you can't
+/// pre-allocate memory like you can using [`String::with_capacity`] and
+/// [`Read::read_to_string`]. Also, you can't re-use the buffer if an error
+/// occurs while reading.
+///
+/// In many cases, this function's performance will be adequate and the ease of use
+/// and type safety tradeoffs will be worth it. However, there are cases where you
+/// need more control over performance, and in those cases you should definitely use
+/// [`Read::read_to_string`] directly.
+///
+/// Note that in some special cases, such as when reading files, this function will
+/// pre-allocate memory based on the size of the input it is reading. In those
+/// cases, the performance should be as good as if you had used
+/// [`Read::read_to_string`] with a manually pre-allocated buffer.
+///
+/// # Errors
+///
+/// This function forces you to handle errors because the output (the `String`)
+/// is wrapped in a [`Result`]. See [`Read::read_to_string`] for the errors
+/// that can occur. If any error occurs, you will get an [`Err`], so you
+/// don't have to worry about your buffer being empty or partially full.
+///
+/// # Examples
+///
+/// ```no_run
+/// #![feature(io_read_to_string)]
+///
+/// # use std::io;
+/// fn main() -> io::Result<()> {
+/// let stdin = io::read_to_string(io::stdin())?;
+/// println!("Stdin was:");
+/// println!("{stdin}");
+/// Ok(())
+/// }
+/// ```
+#[unstable(feature = "io_read_to_string", issue = "80218")]
+pub fn read_to_string<R: Read>(mut reader: R) -> Result<String> {
+ let mut buf = String::new();
+ reader.read_to_string(&mut buf)?;
+ Ok(buf)
+}
+
+/// A buffer type used with `Read::read_vectored`.
+///
+/// It is semantically a wrapper around an `&mut [u8]`, but is guaranteed to be
+/// ABI compatible with the `iovec` type on Unix platforms and `WSABUF` on
+/// Windows.
+#[stable(feature = "iovec", since = "1.36.0")]
+#[repr(transparent)]
+pub struct IoSliceMut<'a>(sys::io::IoSliceMut<'a>);
+
+#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+unsafe impl<'a> Send for IoSliceMut<'a> {}
+
+#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+unsafe impl<'a> Sync for IoSliceMut<'a> {}
+
+#[stable(feature = "iovec", since = "1.36.0")]
+impl<'a> fmt::Debug for IoSliceMut<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.0.as_slice(), fmt)
+ }
+}
+
+impl<'a> IoSliceMut<'a> {
+ /// Creates a new `IoSliceMut` wrapping a byte slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics on Windows if the slice is larger than 4GB.
+ #[stable(feature = "iovec", since = "1.36.0")]
+ #[inline]
+ pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+ IoSliceMut(sys::io::IoSliceMut::new(buf))
+ }
+
+ /// Advance the internal cursor of the slice.
+ ///
+ /// Also see [`IoSliceMut::advance_slices`] to advance the cursors of
+ /// multiple buffers.
+ ///
+ /// # Panics
+ ///
+ /// Panics when trying to advance beyond the end of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(io_slice_advance)]
+ ///
+ /// use std::io::IoSliceMut;
+ /// use std::ops::Deref;
+ ///
+ /// let mut data = [1; 8];
+ /// let mut buf = IoSliceMut::new(&mut data);
+ ///
+ /// // Mark 3 bytes as read.
+ /// buf.advance(3);
+ /// assert_eq!(buf.deref(), [1; 5].as_ref());
+ /// ```
+ #[unstable(feature = "io_slice_advance", issue = "62726")]
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ self.0.advance(n)
+ }
+
+ /// Advance a slice of slices.
+ ///
+ /// Shrinks the slice to remove any `IoSliceMut`s that are fully advanced over.
+ /// If the cursor ends up in the middle of an `IoSliceMut`, it is modified
+ /// to start at that cursor.
+ ///
+ /// For example, if we have a slice of two 8-byte `IoSliceMut`s, and we advance by 10 bytes,
+ /// the result will only include the second `IoSliceMut`, advanced by 2 bytes.
+ ///
+ /// # Panics
+ ///
+ /// Panics when trying to advance beyond the end of the slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(io_slice_advance)]
+ ///
+ /// use std::io::IoSliceMut;
+ /// use std::ops::Deref;
+ ///
+ /// let mut buf1 = [1; 8];
+ /// let mut buf2 = [2; 16];
+ /// let mut buf3 = [3; 8];
+ /// let mut bufs = &mut [
+ /// IoSliceMut::new(&mut buf1),
+ /// IoSliceMut::new(&mut buf2),
+ /// IoSliceMut::new(&mut buf3),
+ /// ][..];
+ ///
+ /// // Mark 10 bytes as read.
+ /// IoSliceMut::advance_slices(&mut bufs, 10);
+ /// assert_eq!(bufs[0].deref(), [2; 14].as_ref());
+ /// assert_eq!(bufs[1].deref(), [3; 8].as_ref());
+ /// ```
+ #[unstable(feature = "io_slice_advance", issue = "62726")]
+ #[inline]
+ pub fn advance_slices(bufs: &mut &mut [IoSliceMut<'a>], n: usize) {
+ // Number of buffers to remove.
+ let mut remove = 0;
+ // Total length of all the to be removed buffers.
+ let mut accumulated_len = 0;
+ for buf in bufs.iter() {
+ if accumulated_len + buf.len() > n {
+ break;
+ } else {
+ accumulated_len += buf.len();
+ remove += 1;
+ }
+ }
+
+ *bufs = &mut replace(bufs, &mut [])[remove..];
+ if bufs.is_empty() {
+ assert!(n == accumulated_len, "advancing io slices beyond their length");
+ } else {
+ bufs[0].advance(n - accumulated_len)
+ }
+ }
+}
+
+#[stable(feature = "iovec", since = "1.36.0")]
+impl<'a> Deref for IoSliceMut<'a> {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ self.0.as_slice()
+ }
+}
+
+#[stable(feature = "iovec", since = "1.36.0")]
+impl<'a> DerefMut for IoSliceMut<'a> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [u8] {
+ self.0.as_mut_slice()
+ }
+}
+
+/// A buffer type used with `Write::write_vectored`.
+///
+/// It is semantically a wrapper around a `&[u8]`, but is guaranteed to be
+/// ABI compatible with the `iovec` type on Unix platforms and `WSABUF` on
+/// Windows.
+#[stable(feature = "iovec", since = "1.36.0")]
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct IoSlice<'a>(sys::io::IoSlice<'a>);
+
+#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+unsafe impl<'a> Send for IoSlice<'a> {}
+
+#[stable(feature = "iovec-send-sync", since = "1.44.0")]
+unsafe impl<'a> Sync for IoSlice<'a> {}
+
+#[stable(feature = "iovec", since = "1.36.0")]
+impl<'a> fmt::Debug for IoSlice<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.0.as_slice(), fmt)
+ }
+}
+
+impl<'a> IoSlice<'a> {
+ /// Creates a new `IoSlice` wrapping a byte slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics on Windows if the slice is larger than 4GB.
+ #[stable(feature = "iovec", since = "1.36.0")]
+ #[must_use]
+ #[inline]
+ pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
+ IoSlice(sys::io::IoSlice::new(buf))
+ }
+
+ /// Advance the internal cursor of the slice.
+ ///
+ /// Also see [`IoSlice::advance_slices`] to advance the cursors of multiple
+ /// buffers.
+ ///
+ /// # Panics
+ ///
+ /// Panics when trying to advance beyond the end of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(io_slice_advance)]
+ ///
+ /// use std::io::IoSlice;
+ /// use std::ops::Deref;
+ ///
+ /// let data = [1; 8];
+ /// let mut buf = IoSlice::new(&data);
+ ///
+ /// // Mark 3 bytes as read.
+ /// buf.advance(3);
+ /// assert_eq!(buf.deref(), [1; 5].as_ref());
+ /// ```
+ #[unstable(feature = "io_slice_advance", issue = "62726")]
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ self.0.advance(n)
+ }
+
+ /// Advance a slice of slices.
+ ///
+ /// Shrinks the slice to remove any `IoSlice`s that are fully advanced over.
+ /// If the cursor ends up in the middle of an `IoSlice`, it is modified
+ /// to start at that cursor.
+ ///
+ /// For example, if we have a slice of two 8-byte `IoSlice`s, and we advance by 10 bytes,
+ /// the result will only include the second `IoSlice`, advanced by 2 bytes.
+ ///
+ /// # Panics
+ ///
+ /// Panics when trying to advance beyond the end of the slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(io_slice_advance)]
+ ///
+ /// use std::io::IoSlice;
+ /// use std::ops::Deref;
+ ///
+ /// let buf1 = [1; 8];
+ /// let buf2 = [2; 16];
+ /// let buf3 = [3; 8];
+ /// let mut bufs = &mut [
+ /// IoSlice::new(&buf1),
+ /// IoSlice::new(&buf2),
+ /// IoSlice::new(&buf3),
+ /// ][..];
+ ///
+ /// // Mark 10 bytes as written.
+ /// IoSlice::advance_slices(&mut bufs, 10);
+ /// assert_eq!(bufs[0].deref(), [2; 14].as_ref());
+ /// assert_eq!(bufs[1].deref(), [3; 8].as_ref());
+ /// ```
+ #[unstable(feature = "io_slice_advance", issue = "62726")]
+ #[inline]
+ pub fn advance_slices(bufs: &mut &mut [IoSlice<'a>], n: usize) {
+ // Number of buffers to remove.
+ let mut remove = 0;
+ // Total length of all the to be removed buffers.
+ let mut accumulated_len = 0;
+ for buf in bufs.iter() {
+ if accumulated_len + buf.len() > n {
+ break;
+ } else {
+ accumulated_len += buf.len();
+ remove += 1;
+ }
+ }
+
+ *bufs = &mut replace(bufs, &mut [])[remove..];
+ if bufs.is_empty() {
+ assert!(n == accumulated_len, "advancing io slices beyond their length");
+ } else {
+ bufs[0].advance(n - accumulated_len)
+ }
+ }
+}
+
+#[stable(feature = "iovec", since = "1.36.0")]
+impl<'a> Deref for IoSlice<'a> {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ self.0.as_slice()
+ }
+}
+
+/// A trait for objects which are byte-oriented sinks.
+///
+/// Implementors of the `Write` trait are sometimes called 'writers'.
+///
+/// Writers are defined by two required methods, [`write`] and [`flush`]:
+///
+/// * The [`write`] method will attempt to write some data into the object,
+/// returning how many bytes were successfully written.
+///
+/// * The [`flush`] method is useful for adapters and explicit buffers
+/// themselves for ensuring that all buffered data has been pushed out to the
+/// 'true sink'.
+///
+/// Writers are intended to be composable with one another. Many implementors
+/// throughout [`std::io`] take and provide types which implement the `Write`
+/// trait.
+///
+/// [`write`]: Write::write
+/// [`flush`]: Write::flush
+/// [`std::io`]: self
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::fs::File;
+///
+/// fn main() -> std::io::Result<()> {
+/// let data = b"some bytes";
+///
+/// let mut pos = 0;
+/// let mut buffer = File::create("foo.txt")?;
+///
+/// while pos < data.len() {
+/// let bytes_written = buffer.write(&data[pos..])?;
+/// pos += bytes_written;
+/// }
+/// Ok(())
+/// }
+/// ```
+///
+/// The trait also provides convenience methods like [`write_all`], which calls
+/// `write` in a loop until its entire input has been written.
+///
+/// [`write_all`]: Write::write_all
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(notable_trait)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "IoWrite")]
+pub trait Write {
+ /// Write a buffer into this writer, returning how many bytes were written.
+ ///
+ /// This function will attempt to write the entire contents of `buf`, but
+ /// the entire write might not succeed, or the write may also generate an
+ /// error. A call to `write` represents *at most one* attempt to write to
+ /// any wrapped object.
+ ///
+ /// Calls to `write` are not guaranteed to block waiting for data to be
+ /// written, and a write which would otherwise block can be indicated through
+ /// an [`Err`] variant.
+ ///
+ /// If the return value is [`Ok(n)`] then it must be guaranteed that
+ /// `n <= buf.len()`. A return value of `0` typically means that the
+ /// underlying object is no longer able to accept bytes and will likely not
+ /// be able to in the future either, or that the buffer provided is empty.
+ ///
+ /// # Errors
+ ///
+ /// Each call to `write` may generate an I/O error indicating that the
+ /// operation could not be completed. If an error is returned then no bytes
+ /// in the buffer were written to this writer.
+ ///
+ /// It is **not** considered an error if the entire buffer could not be
+ /// written to this writer.
+ ///
+ /// An error of the [`ErrorKind::Interrupted`] kind is non-fatal and the
+ /// write operation should be retried if there is nothing else to do.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut buffer = File::create("foo.txt")?;
+ ///
+ /// // Writes some prefix of the byte string, not necessarily all of it.
+ /// buffer.write(b"some bytes")?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`Ok(n)`]: Ok
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn write(&mut self, buf: &[u8]) -> Result<usize>;
+
+ /// Like [`write`], except that it writes from a slice of buffers.
+ ///
+ /// Data is copied from each buffer in order, with the final buffer
+ /// read from possibly being only partially consumed. This method must
+ /// behave as a call to [`write`] with the buffers concatenated would.
+ ///
+ /// The default implementation calls [`write`] with either the first nonempty
+ /// buffer provided, or an empty one if none exists.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::IoSlice;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let data1 = [1; 8];
+ /// let data2 = [15; 8];
+ /// let io_slice1 = IoSlice::new(&data1);
+ /// let io_slice2 = IoSlice::new(&data2);
+ ///
+ /// let mut buffer = File::create("foo.txt")?;
+ ///
+ /// // Writes some prefix of the byte string, not necessarily all of it.
+ /// buffer.write_vectored(&[io_slice1, io_slice2])?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`write`]: Write::write
+ #[stable(feature = "iovec", since = "1.36.0")]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> Result<usize> {
+ default_write_vectored(|b| self.write(b), bufs)
+ }
+
+ /// Determines if this `Write`r has an efficient [`write_vectored`]
+ /// implementation.
+ ///
+ /// If a `Write`r does not override the default [`write_vectored`]
+ /// implementation, code using it may want to avoid the method altogether
+ /// and coalesce writes into a single buffer for higher performance.
+ ///
+ /// The default implementation returns `false`.
+ ///
+ /// [`write_vectored`]: Write::write_vectored
+ #[unstable(feature = "can_vector", issue = "69941")]
+ fn is_write_vectored(&self) -> bool {
+ false
+ }
+
+ /// Flush this output stream, ensuring that all intermediately buffered
+ /// contents reach their destination.
+ ///
+ /// # Errors
+ ///
+ /// It is considered an error if not all bytes could be written due to
+ /// I/O errors or EOF being reached.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::prelude::*;
+ /// use std::io::BufWriter;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut buffer = BufWriter::new(File::create("foo.txt")?);
+ ///
+ /// buffer.write_all(b"some bytes")?;
+ /// buffer.flush()?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn flush(&mut self) -> Result<()>;
+
+ /// Attempts to write an entire buffer into this writer.
+ ///
+ /// This method will continuously call [`write`] until there is no more data
+ /// to be written or an error of non-[`ErrorKind::Interrupted`] kind is
+ /// returned. This method will not return until the entire buffer has been
+ /// successfully written or such an error occurs. The first error that is
+ /// not of [`ErrorKind::Interrupted`] kind generated from this method will be
+ /// returned.
+ ///
+ /// If the buffer contains no data, this will never call [`write`].
+ ///
+ /// # Errors
+ ///
+ /// This function will return the first error of
+ /// non-[`ErrorKind::Interrupted`] kind that [`write`] returns.
+ ///
+ /// [`write`]: Write::write
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut buffer = File::create("foo.txt")?;
+ ///
+ /// buffer.write_all(b"some bytes")?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn write_all(&mut self, mut buf: &[u8]) -> Result<()> {
+ while !buf.is_empty() {
+ match self.write(buf) {
+ Ok(0) => {
+ return Err(error::const_io_error!(
+ ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ ));
+ }
+ Ok(n) => buf = &buf[n..],
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ Ok(())
+ }
+
+ /// Attempts to write multiple buffers into this writer.
+ ///
+ /// This method will continuously call [`write_vectored`] until there is no
+ /// more data to be written or an error of non-[`ErrorKind::Interrupted`]
+ /// kind is returned. This method will not return until all buffers have
+ /// been successfully written or such an error occurs. The first error that
+ /// is not of [`ErrorKind::Interrupted`] kind generated from this method
+ /// will be returned.
+ ///
+ /// If the buffers contain no data, this will never call [`write_vectored`].
+ ///
+ /// # Notes
+ ///
+ /// Unlike [`write_vectored`], this takes a *mutable* reference to
+ /// a slice of [`IoSlice`]s, not an immutable one. That's because we need to
+ /// modify the slice to keep track of the bytes already written.
+ ///
+ /// Once this function returns, the contents of `bufs` are unspecified, as
+ /// this depends on how many calls to [`write_vectored`] were necessary. It is
+ /// best to understand this function as taking ownership of `bufs` and to
+ /// not use `bufs` afterwards. The underlying buffers, to which the
+ /// [`IoSlice`]s point (but not the [`IoSlice`]s themselves), are unchanged and
+ /// can be reused.
+ ///
+ /// [`write_vectored`]: Write::write_vectored
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(write_all_vectored)]
+ /// # fn main() -> std::io::Result<()> {
+ ///
+ /// use std::io::{Write, IoSlice};
+ ///
+ /// let mut writer = Vec::new();
+ /// let bufs = &mut [
+ /// IoSlice::new(&[1]),
+ /// IoSlice::new(&[2, 3]),
+ /// IoSlice::new(&[4, 5, 6]),
+ /// ];
+ ///
+ /// writer.write_all_vectored(bufs)?;
+ /// // Note: the contents of `bufs` is now undefined, see the Notes section.
+ ///
+ /// assert_eq!(writer, &[1, 2, 3, 4, 5, 6]);
+ /// # Ok(()) }
+ /// ```
+ #[unstable(feature = "write_all_vectored", issue = "70436")]
+ fn write_all_vectored(&mut self, mut bufs: &mut [IoSlice<'_>]) -> Result<()> {
+ // Guarantee that bufs is empty if it contains no data,
+ // to avoid calling write_vectored if there is no data to be written.
+ IoSlice::advance_slices(&mut bufs, 0);
+ while !bufs.is_empty() {
+ match self.write_vectored(bufs) {
+ Ok(0) => {
+ return Err(error::const_io_error!(
+ ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ ));
+ }
+ Ok(n) => IoSlice::advance_slices(&mut bufs, n),
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ Ok(())
+ }
+
+ /// Writes a formatted string into this writer, returning any error
+ /// encountered.
+ ///
+ /// This method is primarily used to interface with the
+ /// [`format_args!()`] macro, and it is rare that this should
+ /// explicitly be called. The [`write!()`] macro should be favored to
+ /// invoke this method instead.
+ ///
+ /// This function internally uses the [`write_all`] method on
+ /// this trait and hence will continuously write data so long as no errors
+ /// are received. This also means that partial writes are not indicated in
+ /// this signature.
+ ///
+ /// [`write_all`]: Write::write_all
+ ///
+ /// # Errors
+ ///
+ /// This function will return any I/O error reported while formatting.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut buffer = File::create("foo.txt")?;
+ ///
+ /// // this call
+ /// write!(buffer, "{:.*}", 2, 1.234567)?;
+ /// // turns into this:
+ /// buffer.write_fmt(format_args!("{:.*}", 2, 1.234567))?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> Result<()> {
+ // Create a shim which translates a Write to a fmt::Write and saves
+ // off I/O errors instead of discarding them.
+ struct Adapter<'a, T: ?Sized + 'a> {
+ inner: &'a mut T,
+ error: Result<()>,
+ }
+
+ impl<T: Write + ?Sized> fmt::Write for Adapter<'_, T> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ match self.inner.write_all(s.as_bytes()) {
+ Ok(()) => Ok(()),
+ Err(e) => {
+ self.error = Err(e);
+ Err(fmt::Error)
+ }
+ }
+ }
+ }
+
+ let mut output = Adapter { inner: self, error: Ok(()) };
+ match fmt::write(&mut output, fmt) {
+ Ok(()) => Ok(()),
+ Err(..) => {
+ // check if the error came from the underlying `Write` or not
+ if output.error.is_err() {
+ output.error
+ } else {
+ Err(error::const_io_error!(ErrorKind::Uncategorized, "formatter error"))
+ }
+ }
+ }
+ }
+
+ /// Creates a "by reference" adapter for this instance of `Write`.
+ ///
+ /// The returned adapter also implements `Write` and will simply borrow this
+ /// current writer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::Write;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut buffer = File::create("foo.txt")?;
+ ///
+ /// let reference = buffer.by_ref();
+ ///
+ /// // we can use reference just like our original buffer
+ /// reference.write_all(b"some bytes")?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn by_ref(&mut self) -> &mut Self
+ where
+ Self: Sized,
+ {
+ self
+ }
+}
+
+/// The `Seek` trait provides a cursor which can be moved within a stream of
+/// bytes.
+///
+/// The stream typically has a fixed size, allowing seeking relative to either
+/// end or the current offset.
+///
+/// # Examples
+///
+/// [`File`]s implement `Seek`:
+///
+/// [`File`]: crate::fs::File
+///
+/// ```no_run
+/// use std::io;
+/// use std::io::prelude::*;
+/// use std::fs::File;
+/// use std::io::SeekFrom;
+///
+/// fn main() -> io::Result<()> {
+/// let mut f = File::open("foo.txt")?;
+///
+/// // move the cursor 42 bytes from the start of the file
+/// f.seek(SeekFrom::Start(42))?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Seek {
+ /// Seek to an offset, in bytes, in a stream.
+ ///
+ /// A seek beyond the end of a stream is allowed, but behavior is defined
+ /// by the implementation.
+ ///
+ /// If the seek operation completed successfully,
+ /// this method returns the new position from the start of the stream.
+ /// That position can be used later with [`SeekFrom::Start`].
+ ///
+ /// # Errors
+ ///
+ /// Seeking can fail, for example because it might involve flushing a buffer.
+ ///
+ /// Seeking to a negative offset is considered an error.
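+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming a file `foo.txt` exists:
+ ///
+ /// ```no_run
+ /// use std::io::{Seek, SeekFrom};
+ /// use std::fs::File;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ ///
+ /// // Skip a 10-byte header and remember where the payload starts.
+ /// let payload_start = f.seek(SeekFrom::Start(10))?;
+ /// assert_eq!(payload_start, 10);
+ /// Ok(())
+ /// }
+ /// ```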
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn seek(&mut self, pos: SeekFrom) -> Result<u64>;
+
+ /// Rewind to the beginning of a stream.
+ ///
+ /// This is a convenience method, equivalent to `seek(SeekFrom::Start(0))`.
+ ///
+ /// # Errors
+ ///
+ /// Rewinding can fail, for example because it might involve flushing a buffer.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// use std::io::{Read, Seek, Write};
+ /// use std::fs::OpenOptions;
+ ///
+ /// let mut f = OpenOptions::new()
+ /// .write(true)
+ /// .read(true)
+ /// .create(true)
+ /// .open("foo.txt").unwrap();
+ ///
+ /// let hello = "Hello!\n";
+ /// write!(f, "{hello}").unwrap();
+ /// f.rewind().unwrap();
+ ///
+ /// let mut buf = String::new();
+ /// f.read_to_string(&mut buf).unwrap();
+ /// assert_eq!(&buf, hello);
+ /// ```
+ #[stable(feature = "seek_rewind", since = "1.55.0")]
+ fn rewind(&mut self) -> Result<()> {
+ self.seek(SeekFrom::Start(0))?;
+ Ok(())
+ }
+
+ /// Returns the length of this stream (in bytes).
+ ///
+ /// This method is implemented using up to three seek operations. If this
+ /// method returns successfully, the seek position is unchanged (i.e. the
+ /// position before calling this method is the same as afterwards).
+ /// However, if this method returns an error, the seek position is
+ /// unspecified.
+ ///
+ /// If you need to obtain the length of *many* streams and you don't care
+ /// about the seek position afterwards, you can reduce the number of seek
+ /// operations by simply calling `seek(SeekFrom::End(0))` and using its
+ /// return value (it is also the stream length).
+ ///
+ /// Note that the length of a stream can change over time (for example, when
+ /// data is appended to a file). So calling this method multiple times does
+ /// not necessarily return the same length each time.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// #![feature(seek_stream_len)]
+ /// use std::{
+ /// io::{self, Seek},
+ /// fs::File,
+ /// };
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt")?;
+ ///
+ /// let len = f.stream_len()?;
+ /// println!("The file is currently {len} bytes long");
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "seek_stream_len", issue = "59359")]
+ fn stream_len(&mut self) -> Result<u64> {
+ let old_pos = self.stream_position()?;
+ let len = self.seek(SeekFrom::End(0))?;
+
+ // Avoid seeking a third time when we were already at the end of the
+ // stream. The branch is usually way cheaper than a seek operation.
+ if old_pos != len {
+ self.seek(SeekFrom::Start(old_pos))?;
+ }
+
+ Ok(len)
+ }
+
+ /// Returns the current seek position from the start of the stream.
+ ///
+ /// This is equivalent to `self.seek(SeekFrom::Current(0))`.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// use std::{
+ /// io::{self, BufRead, BufReader, Seek},
+ /// fs::File,
+ /// };
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut f = BufReader::new(File::open("foo.txt")?);
+ ///
+ /// let before = f.stream_position()?;
+ /// f.read_line(&mut String::new())?;
+ /// let after = f.stream_position()?;
+ ///
+ /// println!("The first line was {} bytes long", after - before);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "seek_convenience", since = "1.51.0")]
+ fn stream_position(&mut self) -> Result<u64> {
+ self.seek(SeekFrom::Current(0))
+ }
+}
+
+/// Enumeration of possible methods to seek within an I/O object.
+///
+/// It is used by the [`Seek`] trait.
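+///
+/// # Examples
+///
+/// A minimal sketch using an in-memory [`Cursor`], which implements [`Seek`]:
+///
+/// ```
+/// use std::io::{Cursor, Seek, SeekFrom};
+///
+/// let mut cursor = Cursor::new(vec![0u8; 10]);
+/// assert_eq!(cursor.seek(SeekFrom::Start(3)).unwrap(), 3);
+/// assert_eq!(cursor.seek(SeekFrom::Current(2)).unwrap(), 5);
+/// assert_eq!(cursor.seek(SeekFrom::End(-1)).unwrap(), 9);
+/// ```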
+#[derive(Copy, PartialEq, Eq, Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum SeekFrom {
+ /// Sets the offset to the provided number of bytes.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Start(#[stable(feature = "rust1", since = "1.0.0")] u64),
+
+ /// Sets the offset to the size of this object plus the specified number of
+ /// bytes.
+ ///
+ /// It is possible to seek beyond the end of an object, but it's an error to
+ /// seek before byte 0.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ End(#[stable(feature = "rust1", since = "1.0.0")] i64),
+
+ /// Sets the offset to the current position plus the specified number of
+ /// bytes.
+ ///
+ /// It is possible to seek beyond the end of an object, but it's an error to
+ /// seek before byte 0.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Current(#[stable(feature = "rust1", since = "1.0.0")] i64),
+}
+
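+// Shared implementation of `BufRead::read_until`: repeatedly pull the
+// reader's internal buffer, copy bytes up to and including the delimiter into
+// `buf`, and stop once the delimiter is found or `fill_buf` reports EOF.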
+fn read_until<R: BufRead + ?Sized>(r: &mut R, delim: u8, buf: &mut Vec<u8>) -> Result<usize> {
+ let mut read = 0;
+ loop {
+ let (done, used) = {
+ let available = match r.fill_buf() {
+ Ok(n) => n,
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ };
+ match memchr::memchr(delim, available) {
+ Some(i) => {
+ buf.extend_from_slice(&available[..=i]);
+ (true, i + 1)
+ }
+ None => {
+ buf.extend_from_slice(available);
+ (false, available.len())
+ }
+ }
+ };
+ r.consume(used);
+ read += used;
+ if done || used == 0 {
+ return Ok(read);
+ }
+ }
+}
+
+/// A `BufRead` is a type of `Read`er which has an internal buffer, allowing it
+/// to offer additional, buffer-based ways of reading.
+///
+/// For example, reading line-by-line is inefficient without using a buffer, so
+/// if you want to read by line, you'll need `BufRead`, which includes a
+/// [`read_line`] method as well as a [`lines`] iterator.
+///
+/// # Examples
+///
+/// A locked standard input implements `BufRead`:
+///
+/// ```no_run
+/// use std::io;
+/// use std::io::prelude::*;
+///
+/// let stdin = io::stdin();
+/// for line in stdin.lock().lines() {
+/// println!("{}", line.unwrap());
+/// }
+/// ```
+///
+/// If you have something that implements [`Read`], you can use the [`BufReader`
+/// type][`BufReader`] to turn it into a `BufRead`.
+///
+/// For example, [`File`] implements [`Read`], but not `BufRead`.
+/// [`BufReader`] to the rescue!
+///
+/// [`File`]: crate::fs::File
+/// [`read_line`]: BufRead::read_line
+/// [`lines`]: BufRead::lines
+///
+/// ```no_run
+/// use std::io::{self, BufReader};
+/// use std::io::prelude::*;
+/// use std::fs::File;
+///
+/// fn main() -> io::Result<()> {
+/// let f = File::open("foo.txt")?;
+/// let f = BufReader::new(f);
+///
+/// for line in f.lines() {
+/// println!("{}", line.unwrap());
+/// }
+///
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait BufRead: Read {
+ /// Returns the contents of the internal buffer, filling it with more data
+ /// from the inner reader if it is empty.
+ ///
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`consume`] method to function properly. When calling this
+ /// method, none of the contents will be "read" in the sense that a later
+ /// call to `read` may return the same contents. As such, [`consume`] must
+ /// be called with the number of bytes that are consumed from this buffer to
+ /// ensure that the bytes are never returned twice.
+ ///
+ /// [`consume`]: BufRead::consume
+ ///
+ /// An empty buffer returned indicates that the stream has reached EOF.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an I/O error if the underlying reader was
+ /// read, but returned an error.
+ ///
+ /// # Examples
+ ///
+ /// A locked standard input implements `BufRead`:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ ///
+ /// let stdin = io::stdin();
+ /// let mut stdin = stdin.lock();
+ ///
+ /// let buffer = stdin.fill_buf().unwrap();
+ ///
+ /// // work with buffer
+ /// println!("{buffer:?}");
+ ///
+ /// // ensure the bytes we worked with aren't returned again later
+ /// let length = buffer.len();
+ /// stdin.consume(length);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fill_buf(&mut self) -> Result<&[u8]>;
+
+ /// Tells this buffer that `amt` bytes have been consumed from the buffer,
+ /// so they should no longer be returned in calls to `read`.
+ ///
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`fill_buf`] method to function properly. This function does
+ /// not perform any I/O, it simply informs this object that some amount of
+ /// its buffer, returned from [`fill_buf`], has been consumed and should
+ /// no longer be returned. As such, this function may do odd things if
+ /// [`fill_buf`] isn't called before calling it.
+ ///
+ /// The `amt` must be `<=` the number of bytes in the buffer returned by
+ /// [`fill_buf`].
+ ///
+ /// # Examples
+ ///
+ /// Since `consume()` is meant to be used with [`fill_buf`],
+ /// that method's example includes an example of `consume()`.
+ ///
+ /// [`fill_buf`]: BufRead::fill_buf
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn consume(&mut self, amt: usize);
+
+ /// Check if the underlying `Read` has any data left to be read.
+ ///
+ /// This function may fill the buffer to check for data,
+ /// so this function returns `Result<bool>`, not `bool`.
+ ///
+ /// The default implementation calls `fill_buf` and checks that the
+ /// returned slice is empty (which means that there is no data left,
+ /// since EOF has been reached).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(buf_read_has_data_left)]
+ /// use std::io;
+ /// use std::io::prelude::*;
+ ///
+ /// let stdin = io::stdin();
+ /// let mut stdin = stdin.lock();
+ ///
+ /// while stdin.has_data_left().unwrap() {
+ /// let mut line = String::new();
+ /// stdin.read_line(&mut line).unwrap();
+ /// // work with line
+ /// println!("{line:?}");
+ /// }
+ /// ```
+ #[unstable(feature = "buf_read_has_data_left", reason = "recently added", issue = "86423")]
+ fn has_data_left(&mut self) -> Result<bool> {
+ self.fill_buf().map(|b| !b.is_empty())
+ }
+
+ /// Read all bytes into `buf` until the delimiter `byte` or EOF is reached.
+ ///
+ /// This function will read bytes from the underlying stream until the
+ /// delimiter or EOF is found. Once found, all bytes up to, and including,
+ /// the delimiter (if found) will be appended to `buf`.
+ ///
+ /// If successful, this function will return the total number of bytes read.
+ ///
+ /// This function is blocking and should be used carefully: it is possible for
+ /// an attacker to continuously send bytes without ever sending the delimiter
+ /// or EOF.
+ ///
+ /// # Errors
+ ///
+ /// This function will ignore all instances of [`ErrorKind::Interrupted`] and
+ /// will otherwise return any errors returned by [`fill_buf`].
+ ///
+ /// If an I/O error is encountered then all bytes read so far will be
+ /// present in `buf` and its length will have been adjusted appropriately.
+ ///
+ /// [`fill_buf`]: BufRead::fill_buf
+ ///
+ /// # Examples
+ ///
+ /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
+ /// this example, we use [`Cursor`] to read all the bytes in a byte slice
+ /// in hyphen delimited segments:
+ ///
+ /// ```
+ /// use std::io::{self, BufRead};
+ ///
+ /// let mut cursor = io::Cursor::new(b"lorem-ipsum");
+ /// let mut buf = vec![];
+ ///
+ /// // cursor is at 'l'
+ /// let num_bytes = cursor.read_until(b'-', &mut buf)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 6);
+ /// assert_eq!(buf, b"lorem-");
+ /// buf.clear();
+ ///
+ /// // cursor is at 'i'
+ /// let num_bytes = cursor.read_until(b'-', &mut buf)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 5);
+ /// assert_eq!(buf, b"ipsum");
+ /// buf.clear();
+ ///
+ /// // cursor is at EOF
+ /// let num_bytes = cursor.read_until(b'-', &mut buf)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 0);
+ /// assert_eq!(buf, b"");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> Result<usize> {
+ read_until(self, byte, buf)
+ }
+
+ /// Read all bytes until a newline (the `0xA` byte) is reached, and append
+ /// them to the provided buffer. You do not need to clear the buffer before
+ /// appending.
+ ///
+ /// This function will read bytes from the underlying stream until the
+ /// newline delimiter (the `0xA` byte) or EOF is found. Once found, all bytes
+ /// up to, and including, the delimiter (if found) will be appended to
+ /// `buf`.
+ ///
+ /// If successful, this function will return the total number of bytes read.
+ ///
+ /// If this function returns [`Ok(0)`], the stream has reached EOF.
+ ///
+ /// This function is blocking and should be used carefully: it is possible for
+ /// an attacker to continuously send bytes without ever sending a newline
+ /// or EOF.
+ ///
+ /// [`Ok(0)`]: Ok
+ ///
+ /// # Errors
+ ///
+ /// This function has the same error semantics as [`read_until`] and will
+ /// also return an error if the read bytes are not valid UTF-8. If an I/O
+ /// error is encountered then `buf` may contain some bytes already read in
+ /// the event that all data read so far was valid UTF-8.
+ ///
+ /// [`read_until`]: BufRead::read_until
+ ///
+ /// # Examples
+ ///
+ /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
+ /// this example, we use [`Cursor`] to read all the lines in a byte slice:
+ ///
+ /// ```
+ /// use std::io::{self, BufRead};
+ ///
+ /// let mut cursor = io::Cursor::new(b"foo\nbar");
+ /// let mut buf = String::new();
+ ///
+ /// // cursor is at 'f'
+ /// let num_bytes = cursor.read_line(&mut buf)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 4);
+ /// assert_eq!(buf, "foo\n");
+ /// buf.clear();
+ ///
+ /// // cursor is at 'b'
+ /// let num_bytes = cursor.read_line(&mut buf)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 3);
+ /// assert_eq!(buf, "bar");
+ /// buf.clear();
+ ///
+ /// // cursor is at EOF
+ /// let num_bytes = cursor.read_line(&mut buf)
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 0);
+ /// assert_eq!(buf, "");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn read_line(&mut self, buf: &mut String) -> Result<usize> {
+ // Note that we are not calling the `.read_until` method here, but
+ // rather our hardcoded implementation. For more details as to why, see
+ // the comments in `read_to_end`.
+ unsafe { append_to_string(buf, |b| read_until(self, b'\n', b)) }
+ }
+
+ /// Returns an iterator over the contents of this reader split on the byte
+ /// `byte`.
+ ///
+ /// The iterator returned from this function will return instances of
+ /// <code>[io::Result]<[Vec]\<u8>></code>. Each vector returned will *not* have
+ /// the delimiter byte at the end.
+ ///
+ /// This function will yield errors whenever [`read_until`] would have
+ /// also yielded an error.
+ ///
+ /// [io::Result]: self::Result "io::Result"
+ /// [`read_until`]: BufRead::read_until
+ ///
+ /// # Examples
+ ///
+ /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
+ /// this example, we use [`Cursor`] to iterate over all hyphen delimited
+ /// segments in a byte slice
+ ///
+ /// ```
+ /// use std::io::{self, BufRead};
+ ///
+ /// let cursor = io::Cursor::new(b"lorem-ipsum-dolor");
+ ///
+ /// let mut split_iter = cursor.split(b'-').map(|l| l.unwrap());
+ /// assert_eq!(split_iter.next(), Some(b"lorem".to_vec()));
+ /// assert_eq!(split_iter.next(), Some(b"ipsum".to_vec()));
+ /// assert_eq!(split_iter.next(), Some(b"dolor".to_vec()));
+ /// assert_eq!(split_iter.next(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn split(self, byte: u8) -> Split<Self>
+ where
+ Self: Sized,
+ {
+ Split { buf: self, delim: byte }
+ }
+
+ /// Returns an iterator over the lines of this reader.
+ ///
+ /// The iterator returned from this function will yield instances of
+ /// <code>[io::Result]<[String]></code>. Each string returned will *not* have a newline
+ /// byte (the `0xA` byte) or `CRLF` (`0xD`, `0xA` bytes) at the end.
+ ///
+ /// [io::Result]: self::Result "io::Result"
+ ///
+ /// # Examples
+ ///
+ /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
+ /// this example, we use [`Cursor`] to iterate over all the lines in a byte
+ /// slice.
+ ///
+ /// ```
+ /// use std::io::{self, BufRead};
+ ///
+ /// let cursor = io::Cursor::new(b"lorem\nipsum\r\ndolor");
+ ///
+ /// let mut lines_iter = cursor.lines().map(|l| l.unwrap());
+ /// assert_eq!(lines_iter.next(), Some(String::from("lorem")));
+ /// assert_eq!(lines_iter.next(), Some(String::from("ipsum")));
+ /// assert_eq!(lines_iter.next(), Some(String::from("dolor")));
+ /// assert_eq!(lines_iter.next(), None);
+ /// ```
+ ///
+ /// # Errors
+ ///
+ /// Each line of the iterator has the same error semantics as [`BufRead::read_line`].
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn lines(self) -> Lines<Self>
+ where
+ Self: Sized,
+ {
+ Lines { buf: self }
+ }
+}
+
+/// Adapter to chain together two readers.
+///
+/// This struct is generally created by calling [`chain`] on a reader.
+/// Please see the documentation of [`chain`] for more details.
+///
+/// [`chain`]: Read::chain
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Chain<T, U> {
+ first: T,
+ second: U,
+ done_first: bool,
+}
+
+impl<T, U> Chain<T, U> {
+ /// Consumes the `Chain`, returning the wrapped readers.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut foo_file = File::open("foo.txt")?;
+ /// let mut bar_file = File::open("bar.txt")?;
+ ///
+ /// let chain = foo_file.chain(bar_file);
+ /// let (foo_file, bar_file) = chain.into_inner();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "more_io_inner_methods", since = "1.20.0")]
+ pub fn into_inner(self) -> (T, U) {
+ (self.first, self.second)
+ }
+
+ /// Gets references to the underlying readers in this `Chain`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut foo_file = File::open("foo.txt")?;
+ /// let mut bar_file = File::open("bar.txt")?;
+ ///
+ /// let chain = foo_file.chain(bar_file);
+ /// let (foo_file, bar_file) = chain.get_ref();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "more_io_inner_methods", since = "1.20.0")]
+ pub fn get_ref(&self) -> (&T, &U) {
+ (&self.first, &self.second)
+ }
+
+ /// Gets mutable references to the underlying readers in this `Chain`.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying readers as doing so may corrupt the internal state of this
+ /// `Chain`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut foo_file = File::open("foo.txt")?;
+ /// let mut bar_file = File::open("bar.txt")?;
+ ///
+ /// let mut chain = foo_file.chain(bar_file);
+ /// let (foo_file, bar_file) = chain.get_mut();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "more_io_inner_methods", since = "1.20.0")]
+ pub fn get_mut(&mut self) -> (&mut T, &mut U) {
+ (&mut self.first, &mut self.second)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Read, U: Read> Read for Chain<T, U> {
+ fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+ if !self.done_first {
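+ // A zero-byte read into a non-empty buffer means the first reader
+ // reached EOF; with an empty `buf`, `Ok(0)` carries no EOF signal.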
+ match self.first.read(buf)? {
+ 0 if !buf.is_empty() => self.done_first = true,
+ n => return Ok(n),
+ }
+ }
+ self.second.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> Result<usize> {
+ if !self.done_first {
+ match self.first.read_vectored(bufs)? {
+ 0 if bufs.iter().any(|b| !b.is_empty()) => self.done_first = true,
+ n => return Ok(n),
+ }
+ }
+ self.second.read_vectored(bufs)
+ }
+}
+
+#[stable(feature = "chain_bufread", since = "1.9.0")]
+impl<T: BufRead, U: BufRead> BufRead for Chain<T, U> {
+ fn fill_buf(&mut self) -> Result<&[u8]> {
+ if !self.done_first {
+ match self.first.fill_buf()? {
+ buf if buf.is_empty() => {
+ self.done_first = true;
+ }
+ buf => return Ok(buf),
+ }
+ }
+ self.second.fill_buf()
+ }
+
+ fn consume(&mut self, amt: usize) {
+ if !self.done_first { self.first.consume(amt) } else { self.second.consume(amt) }
+ }
+}
+
+impl<T, U> SizeHint for Chain<T, U> {
+ #[inline]
+ fn lower_bound(&self) -> usize {
+ SizeHint::lower_bound(&self.first) + SizeHint::lower_bound(&self.second)
+ }
+
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ match (SizeHint::upper_bound(&self.first), SizeHint::upper_bound(&self.second)) {
+ (Some(first), Some(second)) => first.checked_add(second),
+ _ => None,
+ }
+ }
+}
+
+/// Reader adapter which limits the bytes read from an underlying reader.
+///
+/// This struct is generally created by calling [`take`] on a reader.
+/// Please see the documentation of [`take`] for more details.
+///
+/// [`take`]: Read::take
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Take<T> {
+ inner: T,
+ limit: u64,
+}
+
+impl<T> Take<T> {
+ /// Returns the number of bytes that can be read before this instance will
+ /// return EOF.
+ ///
+ /// # Note
+ ///
+ /// This instance may reach `EOF` after reading fewer bytes than indicated by
+ /// this method if the underlying [`Read`] instance reaches EOF.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let f = File::open("foo.txt")?;
+ ///
+ /// // read at most five bytes
+ /// let handle = f.take(5);
+ ///
+ /// println!("limit: {}", handle.limit());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn limit(&self) -> u64 {
+ self.limit
+ }
+
+ /// Sets the number of bytes that can be read before this instance will
+ /// return EOF. This is the same as constructing a new `Take` instance, so
+ /// the number of bytes read and the previous limit value don't matter when
+ /// calling this method.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let f = File::open("foo.txt")?;
+ ///
+ /// // read at most five bytes
+ /// let mut handle = f.take(5);
+ /// handle.set_limit(10);
+ ///
+ /// assert_eq!(handle.limit(), 10);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "take_set_limit", since = "1.27.0")]
+ pub fn set_limit(&mut self, limit: u64) {
+ self.limit = limit;
+ }
+
+ /// Consumes the `Take`, returning the wrapped reader.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ ///
+ /// let mut buffer = [0; 5];
+ /// let mut handle = file.take(5);
+ /// handle.read(&mut buffer)?;
+ ///
+ /// let file = handle.into_inner();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "io_take_into_inner", since = "1.15.0")]
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+
+ /// Gets a reference to the underlying reader.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ ///
+ /// let mut buffer = [0; 5];
+ /// let mut handle = file.take(5);
+ /// handle.read(&mut buffer)?;
+ ///
+ /// let file = handle.get_ref();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "more_io_inner_methods", since = "1.20.0")]
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying reader.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying reader as doing so may corrupt the internal limit of this
+ /// `Take`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ ///
+ /// let mut buffer = [0; 5];
+ /// let mut handle = file.take(5);
+ /// handle.read(&mut buffer)?;
+ ///
+ /// let file = handle.get_mut();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "more_io_inner_methods", since = "1.20.0")]
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Read> Read for Take<T> {
+ fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+ // Don't call into inner reader at all at EOF because it may still block
+ if self.limit == 0 {
+ return Ok(0);
+ }
+
+ let max = cmp::min(buf.len() as u64, self.limit) as usize;
+ let n = self.inner.read(&mut buf[..max])?;
+ assert!(n as u64 <= self.limit, "number of read bytes exceeds limit");
+ self.limit -= n as u64;
+ Ok(n)
+ }
+
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> Result<()> {
+ // Don't call into inner reader at all at EOF because it may still block
+ if self.limit == 0 {
+ return Ok(());
+ }
+
+ let prev_filled = buf.filled_len();
+
+ if self.limit <= buf.remaining() as u64 {
+ // if we just use an as cast to convert, limit may wrap around on a 32 bit target
+ let limit = cmp::min(self.limit, usize::MAX as u64) as usize;
+
+ let extra_init = cmp::min(limit as usize, buf.initialized_len() - buf.filled_len());
+
+ // SAFETY: no uninit data is written to ibuf
+ let ibuf = unsafe { &mut buf.unfilled_mut()[..limit] };
+
+ let mut sliced_buf = ReadBuf::uninit(ibuf);
+
+ // SAFETY: extra_init bytes of ibuf are known to be initialized
+ unsafe {
+ sliced_buf.assume_init(extra_init);
+ }
+
+ self.inner.read_buf(&mut sliced_buf)?;
+
+ let new_init = sliced_buf.initialized_len();
+ let filled = sliced_buf.filled_len();
+
+ // sliced_buf / ibuf must drop here
+
+ // SAFETY: new_init bytes of buf's unfilled buffer have been initialized
+ unsafe {
+ buf.assume_init(new_init);
+ }
+
+ buf.add_filled(filled);
+
+ self.limit -= filled as u64;
+ } else {
+ self.inner.read_buf(buf)?;
+
+ // The inner reader may unfill (shrink the filled region), hence the saturating subtraction.
+ self.limit -= buf.filled_len().saturating_sub(prev_filled) as u64;
+ }
+
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: BufRead> BufRead for Take<T> {
+ fn fill_buf(&mut self) -> Result<&[u8]> {
+ // Don't call into inner reader at all at EOF because it may still block
+ if self.limit == 0 {
+ return Ok(&[]);
+ }
+
+ let buf = self.inner.fill_buf()?;
+ let cap = cmp::min(buf.len() as u64, self.limit) as usize;
+ Ok(&buf[..cap])
+ }
+
+ fn consume(&mut self, amt: usize) {
+ // Don't let callers reset the limit by passing an overlarge value
+ let amt = cmp::min(amt as u64, self.limit) as usize;
+ self.limit -= amt as u64;
+ self.inner.consume(amt);
+ }
+}
+
+impl<T> SizeHint for Take<T> {
+ #[inline]
+ fn lower_bound(&self) -> usize {
+ cmp::min(SizeHint::lower_bound(&self.inner) as u64, self.limit) as usize
+ }
+
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ match SizeHint::upper_bound(&self.inner) {
+ Some(upper_bound) => Some(cmp::min(upper_bound as u64, self.limit) as usize),
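+ // The inner reader may be unbounded, but this adapter never yields
+ // more than `limit` bytes; that is a valid upper bound only if it
+ // fits in a `usize`.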
+ None => self.limit.try_into().ok(),
+ }
+ }
+}
+
+/// An iterator over `u8` values of a reader.
+///
+/// This struct is generally created by calling [`bytes`] on a reader.
+/// Please see the documentation of [`bytes`] for more details.
+///
+/// [`bytes`]: Read::bytes
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Bytes<R> {
+ inner: R,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<R: Read> Iterator for Bytes<R> {
+ type Item = Result<u8>;
+
+ fn next(&mut self) -> Option<Result<u8>> {
+ let mut byte = 0;
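+ // Retry reads interrupted by a signal; a zero-byte read means EOF and
+ // ends the iterator.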
+ loop {
+ return match self.inner.read(slice::from_mut(&mut byte)) {
+ Ok(0) => None,
+ Ok(..) => Some(Ok(byte)),
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => Some(Err(e)),
+ };
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ SizeHint::size_hint(&self.inner)
+ }
+}
+
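+/// Internal size-hint plumbing used by `Bytes::size_hint`: readers with a
+/// known amount of remaining data (such as `&[u8]`) specialize these bounds,
+/// while the blanket implementation falls back to `(0, None)`.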
+trait SizeHint {
+ fn lower_bound(&self) -> usize;
+
+ fn upper_bound(&self) -> Option<usize>;
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.lower_bound(), self.upper_bound())
+ }
+}
+
+impl<T> SizeHint for T {
+ #[inline]
+ default fn lower_bound(&self) -> usize {
+ 0
+ }
+
+ #[inline]
+ default fn upper_bound(&self) -> Option<usize> {
+ None
+ }
+}
+
+impl<T> SizeHint for &mut T {
+ #[inline]
+ fn lower_bound(&self) -> usize {
+ SizeHint::lower_bound(*self)
+ }
+
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ SizeHint::upper_bound(*self)
+ }
+}
+
+impl<T> SizeHint for Box<T> {
+ #[inline]
+ fn lower_bound(&self) -> usize {
+ SizeHint::lower_bound(&**self)
+ }
+
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ SizeHint::upper_bound(&**self)
+ }
+}
+
+impl SizeHint for &[u8] {
+ #[inline]
+ fn lower_bound(&self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+/// An iterator over the contents of an instance of `BufRead` split on a
+/// particular byte.
+///
+/// This struct is generally created by calling [`split`] on a `BufRead`.
+/// Please see the documentation of [`split`] for more details.
+///
+/// [`split`]: BufRead::split
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Split<B> {
+ buf: B,
+ delim: u8,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: BufRead> Iterator for Split<B> {
+ type Item = Result<Vec<u8>>;
+
+ fn next(&mut self) -> Option<Result<Vec<u8>>> {
+ let mut buf = Vec::new();
+ match self.buf.read_until(self.delim, &mut buf) {
+ Ok(0) => None,
+ Ok(_n) => {
+ if buf[buf.len() - 1] == self.delim {
+ buf.pop();
+ }
+ Some(Ok(buf))
+ }
+ Err(e) => Some(Err(e)),
+ }
+ }
+}
+
+/// An iterator over the lines of an instance of `BufRead`.
+///
+/// This struct is generally created by calling [`lines`] on a `BufRead`.
+/// Please see the documentation of [`lines`] for more details.
+///
+/// [`lines`]: BufRead::lines
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Lines<B> {
+ buf: B,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: BufRead> Iterator for Lines<B> {
+ type Item = Result<String>;
+
+ fn next(&mut self) -> Option<Result<String>> {
+ let mut buf = String::new();
+ match self.buf.read_line(&mut buf) {
+ Ok(0) => None,
+ Ok(_n) => {
+ if buf.ends_with('\n') {
+ buf.pop();
+ if buf.ends_with('\r') {
+ buf.pop();
+ }
+ }
+ Some(Ok(buf))
+ }
+ Err(e) => Some(Err(e)),
+ }
+ }
+}
diff --git a/library/std/src/io/prelude.rs b/library/std/src/io/prelude.rs
new file mode 100644
index 000000000..d80643101
--- /dev/null
+++ b/library/std/src/io/prelude.rs
@@ -0,0 +1,14 @@
+//! The I/O Prelude.
+//!
+//! The purpose of this module is to alleviate imports of many common I/O traits
+//! by adding a glob import to the top of I/O heavy modules:
+//!
+//! ```
+//! # #![allow(unused_imports)]
+//! use std::io::prelude::*;
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use super::{BufRead, Read, Seek, Write};
diff --git a/library/std/src/io/readbuf.rs b/library/std/src/io/readbuf.rs
new file mode 100644
index 000000000..78d1113f8
--- /dev/null
+++ b/library/std/src/io/readbuf.rs
@@ -0,0 +1,249 @@
+#![unstable(feature = "read_buf", issue = "78485")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::cmp;
+use crate::fmt::{self, Debug, Formatter};
+use crate::mem::MaybeUninit;
+
+/// A wrapper around a byte buffer that is incrementally filled and initialized.
+///
+/// This type is a sort of "double cursor". It tracks three regions in the buffer: a region at the beginning of the
+/// buffer that has been logically filled with data, a region that has been initialized at some point but not yet
+/// logically filled, and a region at the end that is fully uninitialized. The filled region is guaranteed to be a
+/// subset of the initialized region.
+///
+/// In summary, the contents of the buffer can be visualized as:
+/// ```not_rust
+/// [ capacity ]
+/// [ filled | unfilled ]
+/// [ initialized | uninitialized ]
+/// ```
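+///
+/// A minimal usage sketch (the buffer size and contents here are arbitrary,
+/// and this assumes the unstable `read_buf` feature):
+///
+/// ```
+/// #![feature(read_buf)]
+/// use std::io::ReadBuf;
+///
+/// let mut storage = [0u8; 8];
+/// let mut buf = ReadBuf::new(&mut storage);
+///
+/// // A `Read` implementation would normally fill the buffer; `append` does so directly.
+/// buf.append(&[1, 2, 3]);
+/// assert_eq!(buf.filled(), [1, 2, 3]);
+/// assert_eq!(buf.remaining(), 5);
+/// ```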
+pub struct ReadBuf<'a> {
+ buf: &'a mut [MaybeUninit<u8>],
+ filled: usize,
+ initialized: usize,
+}
+
+impl Debug for ReadBuf<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ReadBuf")
+ .field("init", &self.initialized())
+ .field("filled", &self.filled)
+ .field("capacity", &self.capacity())
+ .finish()
+ }
+}
+
+impl<'a> ReadBuf<'a> {
+ /// Creates a new `ReadBuf` from a fully initialized buffer.
+ #[inline]
+ pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> {
+ let len = buf.len();
+
+ ReadBuf {
+ // SAFETY: initialized data never becoming uninitialized is an invariant of ReadBuf
+ buf: unsafe { (buf as *mut [u8]).as_uninit_slice_mut().unwrap() },
+ filled: 0,
+ initialized: len,
+ }
+ }
+
+ /// Creates a new `ReadBuf` from a fully uninitialized buffer.
+ ///
+ /// Use `assume_init` if part of the buffer is known to be already initialized.
+ #[inline]
+ pub fn uninit(buf: &'a mut [MaybeUninit<u8>]) -> ReadBuf<'a> {
+ ReadBuf { buf, filled: 0, initialized: 0 }
+ }
+
+ /// Returns the total capacity of the buffer.
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.buf.len()
+ }
+
+ /// Returns a shared reference to the filled portion of the buffer.
+ #[inline]
+ pub fn filled(&self) -> &[u8] {
+ // SAFETY: We only slice the filled part of the buffer, which is always valid
+ unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) }
+ }
+
+ /// Returns a mutable reference to the filled portion of the buffer.
+ #[inline]
+ pub fn filled_mut(&mut self) -> &mut [u8] {
+ // SAFETY: We only slice the filled part of the buffer, which is always valid
+ unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[0..self.filled]) }
+ }
+
+ /// Returns a shared reference to the initialized portion of the buffer.
+ ///
+ /// This includes the filled portion.
+ #[inline]
+ pub fn initialized(&self) -> &[u8] {
+ // SAFETY: We only slice the initialized part of the buffer, which is always valid
+ unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.initialized]) }
+ }
+
+ /// Returns a mutable reference to the initialized portion of the buffer.
+ ///
+ /// This includes the filled portion.
+ #[inline]
+ pub fn initialized_mut(&mut self) -> &mut [u8] {
+ // SAFETY: We only slice the initialized part of the buffer, which is always valid
+ unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[0..self.initialized]) }
+ }
+
+ /// Returns a mutable reference to the unfilled part of the buffer without ensuring that it has been fully
+ /// initialized.
+ ///
+ /// # Safety
+ ///
+ /// The caller must not de-initialize portions of the buffer that have already been initialized.
+ #[inline]
+ pub unsafe fn unfilled_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ &mut self.buf[self.filled..]
+ }
+
+ /// Returns a mutable reference to the uninitialized part of the buffer.
+ ///
+ /// It is safe to uninitialize any of these bytes.
+ #[inline]
+ pub fn uninitialized_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ &mut self.buf[self.initialized..]
+ }
+
+ /// Returns a mutable reference to the unfilled part of the buffer, ensuring it is fully initialized.
+ ///
+ /// Since `ReadBuf` tracks the region of the buffer that has been initialized, this is effectively "free" after
+ /// the first use.
+ #[inline]
+ pub fn initialize_unfilled(&mut self) -> &mut [u8] {
+ // should optimize out the assertion
+ self.initialize_unfilled_to(self.remaining())
+ }
+
+ /// Returns a mutable reference to the first `n` bytes of the unfilled part of the buffer, ensuring it is
+ /// fully initialized.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `self.remaining()` is less than `n`.
+ #[inline]
+ pub fn initialize_unfilled_to(&mut self, n: usize) -> &mut [u8] {
+ assert!(self.remaining() >= n);
+
+ let extra_init = self.initialized - self.filled;
+ // If we don't have enough initialized, do zeroing
+ if n > extra_init {
+ let uninit = n - extra_init;
+ let unfilled = &mut self.uninitialized_mut()[0..uninit];
+
+ for byte in unfilled.iter_mut() {
+ byte.write(0);
+ }
+
+ // SAFETY: we just initialized uninit bytes, and the previous bytes were already init
+ unsafe {
+ self.assume_init(n);
+ }
+ }
+
+ let filled = self.filled;
+
+ &mut self.initialized_mut()[filled..filled + n]
+ }
+
+ /// Returns the number of bytes at the end of the slice that have not yet been filled.
+ #[inline]
+ pub fn remaining(&self) -> usize {
+ self.capacity() - self.filled
+ }
+
+ /// Clears the buffer, resetting the filled region to empty.
+ ///
+ /// The number of initialized bytes is not changed, and the contents of the buffer are not modified.
+ #[inline]
+ pub fn clear(&mut self) -> &mut Self {
+ self.set_filled(0) // The assertion in `set_filled` is optimized out
+ }
+
+ /// Increases the size of the filled region of the buffer.
+ ///
+ /// The number of initialized bytes is not changed.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the filled region of the buffer would become larger than the initialized region.
+ #[inline]
+ pub fn add_filled(&mut self, n: usize) -> &mut Self {
+ self.set_filled(self.filled + n)
+ }
+
+ /// Sets the size of the filled region of the buffer.
+ ///
+ /// The number of initialized bytes is not changed.
+ ///
+ /// Note that this can be used to *shrink* the filled region of the buffer in addition to growing it (for
+ /// example, by a `Read` implementation that compresses data in-place).
+ ///
+ /// # Panics
+ ///
+ /// Panics if the filled region of the buffer would become larger than the initialized region.
+ #[inline]
+ pub fn set_filled(&mut self, n: usize) -> &mut Self {
+ assert!(n <= self.initialized);
+
+ self.filled = n;
+ self
+ }
+
+ /// Asserts that the first `n` unfilled bytes of the buffer are initialized.
+ ///
+ /// `ReadBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer
+ /// bytes than are already known to be initialized.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized.
+ #[inline]
+ pub unsafe fn assume_init(&mut self, n: usize) -> &mut Self {
+ self.initialized = cmp::max(self.initialized, self.filled + n);
+ self
+ }
+
+ /// Appends data to the buffer, advancing the filled position and possibly also the initialized position.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `self.remaining()` is less than `buf.len()`.
+ #[inline]
+ pub fn append(&mut self, buf: &[u8]) {
+ assert!(self.remaining() >= buf.len());
+
+ // SAFETY: we do not de-initialize any of the elements of the slice
+ unsafe {
+ MaybeUninit::write_slice(&mut self.unfilled_mut()[..buf.len()], buf);
+ }
+
+ // SAFETY: We just added the entire contents of buf to the filled section.
+ unsafe {
+ self.assume_init(buf.len());
+ }
+ self.add_filled(buf.len());
+ }
+
+ /// Returns the number of bytes that have been filled.
+ #[inline]
+ pub fn filled_len(&self) -> usize {
+ self.filled
+ }
+
+ /// Returns the number of bytes that have been initialized.
+ #[inline]
+ pub fn initialized_len(&self) -> usize {
+ self.initialized
+ }
+}
diff --git a/library/std/src/io/readbuf/tests.rs b/library/std/src/io/readbuf/tests.rs
new file mode 100644
index 000000000..3b7a5a56d
--- /dev/null
+++ b/library/std/src/io/readbuf/tests.rs
@@ -0,0 +1,181 @@
+use super::ReadBuf;
+use crate::mem::MaybeUninit;
+
+/// Test that ReadBuf has the correct numbers when created with new
+#[test]
+fn new() {
+ let mut buf = [0; 16];
+ let rbuf = ReadBuf::new(&mut buf);
+
+ assert_eq!(rbuf.filled_len(), 0);
+ assert_eq!(rbuf.initialized_len(), 16);
+ assert_eq!(rbuf.capacity(), 16);
+ assert_eq!(rbuf.remaining(), 16);
+}
+
+/// Test that ReadBuf has the correct numbers when created with uninit
+#[test]
+fn uninit() {
+ let mut buf = [MaybeUninit::uninit(); 16];
+ let rbuf = ReadBuf::uninit(&mut buf);
+
+ assert_eq!(rbuf.filled_len(), 0);
+ assert_eq!(rbuf.initialized_len(), 0);
+ assert_eq!(rbuf.capacity(), 16);
+ assert_eq!(rbuf.remaining(), 16);
+}
+
+#[test]
+fn initialize_unfilled() {
+ let mut buf = [MaybeUninit::uninit(); 16];
+ let mut rbuf = ReadBuf::uninit(&mut buf);
+
+ rbuf.initialize_unfilled();
+
+ assert_eq!(rbuf.initialized_len(), 16);
+}
+
+#[test]
+fn initialize_unfilled_to() {
+ let mut buf = [MaybeUninit::uninit(); 16];
+ let mut rbuf = ReadBuf::uninit(&mut buf);
+
+ rbuf.initialize_unfilled_to(8);
+
+ assert_eq!(rbuf.initialized_len(), 8);
+
+ rbuf.initialize_unfilled_to(4);
+
+ assert_eq!(rbuf.initialized_len(), 8);
+
+ rbuf.set_filled(8);
+
+ rbuf.initialize_unfilled_to(6);
+
+ assert_eq!(rbuf.initialized_len(), 14);
+
+ rbuf.initialize_unfilled_to(8);
+
+ assert_eq!(rbuf.initialized_len(), 16);
+}
+
+#[test]
+fn add_filled() {
+ let mut buf = [0; 16];
+ let mut rbuf = ReadBuf::new(&mut buf);
+
+ rbuf.add_filled(1);
+
+ assert_eq!(rbuf.filled_len(), 1);
+ assert_eq!(rbuf.remaining(), 15);
+}
+
+#[test]
+#[should_panic]
+fn add_filled_panic() {
+ let mut buf = [MaybeUninit::uninit(); 16];
+ let mut rbuf = ReadBuf::uninit(&mut buf);
+
+ rbuf.add_filled(1);
+}
+
+#[test]
+fn set_filled() {
+ let mut buf = [0; 16];
+ let mut rbuf = ReadBuf::new(&mut buf);
+
+ rbuf.set_filled(16);
+
+ assert_eq!(rbuf.filled_len(), 16);
+ assert_eq!(rbuf.remaining(), 0);
+
+ rbuf.set_filled(6);
+
+ assert_eq!(rbuf.filled_len(), 6);
+ assert_eq!(rbuf.remaining(), 10);
+}
+
+#[test]
+#[should_panic]
+fn set_filled_panic() {
+ let mut buf = [MaybeUninit::uninit(); 16];
+ let mut rbuf = ReadBuf::uninit(&mut buf);
+
+ rbuf.set_filled(16);
+}
+
+#[test]
+fn clear() {
+ let mut buf = [255; 16];
+ let mut rbuf = ReadBuf::new(&mut buf);
+
+ rbuf.set_filled(16);
+
+ assert_eq!(rbuf.filled_len(), 16);
+ assert_eq!(rbuf.remaining(), 0);
+
+ rbuf.clear();
+
+ assert_eq!(rbuf.filled_len(), 0);
+ assert_eq!(rbuf.remaining(), 16);
+
+ assert_eq!(rbuf.initialized(), [255; 16]);
+}
+
+#[test]
+fn assume_init() {
+ let mut buf = [MaybeUninit::uninit(); 16];
+ let mut rbuf = ReadBuf::uninit(&mut buf);
+
+ unsafe {
+ rbuf.assume_init(8);
+ }
+
+ assert_eq!(rbuf.initialized_len(), 8);
+
+ rbuf.add_filled(4);
+
+ unsafe {
+ rbuf.assume_init(2);
+ }
+
+ assert_eq!(rbuf.initialized_len(), 8);
+
+ unsafe {
+ rbuf.assume_init(8);
+ }
+
+ assert_eq!(rbuf.initialized_len(), 12);
+}
+
+#[test]
+fn append() {
+ let mut buf = [MaybeUninit::new(255); 16];
+ let mut rbuf = ReadBuf::uninit(&mut buf);
+
+ rbuf.append(&[0; 8]);
+
+ assert_eq!(rbuf.initialized_len(), 8);
+ assert_eq!(rbuf.filled_len(), 8);
+ assert_eq!(rbuf.filled(), [0; 8]);
+
+ rbuf.clear();
+
+ rbuf.append(&[1; 16]);
+
+ assert_eq!(rbuf.initialized_len(), 16);
+ assert_eq!(rbuf.filled_len(), 16);
+ assert_eq!(rbuf.filled(), [1; 16]);
+}
+
+#[test]
+fn filled_mut() {
+ let mut buf = [0; 16];
+ let mut rbuf = ReadBuf::new(&mut buf);
+
+ rbuf.add_filled(8);
+
+ let filled = rbuf.filled().to_vec();
+
+ assert_eq!(&*filled, &*rbuf.filled_mut());
+}
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
new file mode 100644
index 000000000..4d3736f79
--- /dev/null
+++ b/library/std/src/io/stdio.rs
@@ -0,0 +1,1042 @@
+#![cfg_attr(test, allow(unused))]
+
+#[cfg(test)]
+mod tests;
+
+use crate::io::prelude::*;
+
+use crate::cell::{Cell, RefCell};
+use crate::fmt;
+use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
+use crate::pin::Pin;
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::{Arc, Mutex, MutexGuard, OnceLock};
+use crate::sys::stdio;
+use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
+
+type LocalStream = Arc<Mutex<Vec<u8>>>;
+
+thread_local! {
+ /// Used by the test crate to capture the output of the print macros and panics.
+ static OUTPUT_CAPTURE: Cell<Option<LocalStream>> = {
+ Cell::new(None)
+ }
+}
+
+/// Flag to indicate OUTPUT_CAPTURE is used.
+///
+/// If OUTPUT_CAPTURE is None and was never set on any thread, this flag stays
+/// false, and OUTPUT_CAPTURE can be safely ignored on all threads, saving some
+/// time and memory otherwise spent registering an unused thread local.
+///
+/// Note about memory ordering: This contains information about whether a
+/// thread local variable might be in use. Although this is a global flag, the
+/// memory ordering between threads does not matter: we only want this flag to
+/// have a consistent order between set_output_capture and print_to *within
+/// the same thread*. Within the same thread, things always have a perfectly
+/// consistent order. So Ordering::Relaxed is fine.
+static OUTPUT_CAPTURE_USED: AtomicBool = AtomicBool::new(false);
+
+/// A handle to a raw instance of the standard input stream of this process.
+///
+/// This handle is not synchronized or buffered in any fashion. Constructed via
+/// the `std::io::stdio::stdin_raw` function.
+struct StdinRaw(stdio::Stdin);
+
+/// A handle to a raw instance of the standard output stream of this process.
+///
+/// This handle is not synchronized or buffered in any fashion. Constructed via
+/// the `std::io::stdio::stdout_raw` function.
+struct StdoutRaw(stdio::Stdout);
+
+/// A handle to a raw instance of the standard error stream of this process.
+///
+/// This handle is not synchronized or buffered in any fashion. Constructed via
+/// the `std::io::stdio::stderr_raw` function.
+struct StderrRaw(stdio::Stderr);
+
+/// Constructs a new raw handle to the standard input of this process.
+///
+/// The returned handle does not interact with any other handles created by
+/// this function, nor with handles returned by `std::io::stdin`. Data buffered
+/// by the `std::io::stdin` handles is **not** available to raw handles
+/// returned from this function.
+///
+/// The returned handle has no external synchronization or buffering.
+#[unstable(feature = "libstd_sys_internals", issue = "none")]
+const fn stdin_raw() -> StdinRaw {
+ StdinRaw(stdio::Stdin::new())
+}
+
+/// Constructs a new raw handle to the standard output stream of this process.
+///
+/// The returned handle does not interact with any other handles created by
+/// this function, nor with handles returned by `std::io::stdout`. Note that
+/// data is buffered by the
+/// `std::io::stdout` handles so writes which happen via this raw handle may
+/// appear before previous writes.
+///
+/// The returned handle has no external synchronization or buffering layered on
+/// top.
+#[unstable(feature = "libstd_sys_internals", issue = "none")]
+const fn stdout_raw() -> StdoutRaw {
+ StdoutRaw(stdio::Stdout::new())
+}
+
+/// Constructs a new raw handle to the standard error stream of this process.
+///
+/// The returned handle does not interact with any other handles created by
+/// this function, nor with handles returned by `std::io::stderr`.
+///
+/// The returned handle has no external synchronization or buffering layered on
+/// top.
+#[unstable(feature = "libstd_sys_internals", issue = "none")]
+const fn stderr_raw() -> StderrRaw {
+ StderrRaw(stdio::Stderr::new())
+}
+
+impl Read for StdinRaw {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ handle_ebadf(self.0.read(buf), 0)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ handle_ebadf(self.0.read_vectored(bufs), 0)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ handle_ebadf(self.0.read_to_end(buf), 0)
+ }
+
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ handle_ebadf(self.0.read_to_string(buf), 0)
+ }
+}
+
+impl Write for StdoutRaw {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ handle_ebadf(self.0.write(buf), buf.len())
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let total = bufs.iter().map(|b| b.len()).sum();
+ handle_ebadf(self.0.write_vectored(bufs), total)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ handle_ebadf(self.0.flush(), ())
+ }
+
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ handle_ebadf(self.0.write_all(buf), ())
+ }
+
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ handle_ebadf(self.0.write_all_vectored(bufs), ())
+ }
+
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
+ handle_ebadf(self.0.write_fmt(fmt), ())
+ }
+}
+
+impl Write for StderrRaw {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ handle_ebadf(self.0.write(buf), buf.len())
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let total = bufs.iter().map(|b| b.len()).sum();
+ handle_ebadf(self.0.write_vectored(bufs), total)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ handle_ebadf(self.0.flush(), ())
+ }
+
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ handle_ebadf(self.0.write_all(buf), ())
+ }
+
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ handle_ebadf(self.0.write_all_vectored(bufs), ())
+ }
+
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
+ handle_ebadf(self.0.write_fmt(fmt), ())
+ }
+}
+
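+/// Treats an `EBADF`-style error from the underlying stdio handle as success,
+/// returning `default` instead, so I/O on a closed or missing standard stream
+/// is silently ignored.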
+fn handle_ebadf<T>(r: io::Result<T>, default: T) -> io::Result<T> {
+ match r {
+ Err(ref e) if stdio::is_ebadf(e) => Ok(default),
+ r => r,
+ }
+}
+
+/// A handle to the standard input stream of a process.
+///
+/// Each handle is a shared reference to a global buffer of input data to this
+/// process. A handle can be `lock`'d to gain full access to [`BufRead`] methods
+/// (e.g., `.lines()`). Reads to this handle are otherwise locked with respect
+/// to other reads.
+///
+/// This handle implements the `Read` trait, but beware that concurrent reads
+/// of `Stdin` must be executed with care.
+///
+/// Created by the [`io::stdin`] method.
+///
+/// [`io::stdin`]: stdin
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to read bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::io;
+///
+/// fn main() -> io::Result<()> {
+/// let mut buffer = String::new();
+/// let stdin = io::stdin(); // We get `Stdin` here.
+/// stdin.read_line(&mut buffer)?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Stdin {
+ inner: &'static Mutex<BufReader<StdinRaw>>,
+}
+
+/// A locked reference to the [`Stdin`] handle.
+///
+/// This handle implements both the [`Read`] and [`BufRead`] traits, and
+/// is constructed via the [`Stdin::lock`] method.
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to read bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::io::{self, BufRead};
+///
+/// fn main() -> io::Result<()> {
+/// let mut buffer = String::new();
+/// let stdin = io::stdin(); // We get `Stdin` here.
+/// {
+/// let mut handle = stdin.lock(); // We get `StdinLock` here.
+/// handle.read_line(&mut buffer)?;
+/// } // `StdinLock` is dropped here.
+/// Ok(())
+/// }
+/// ```
+#[must_use = "if unused stdin will immediately unlock"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct StdinLock<'a> {
+ inner: MutexGuard<'a, BufReader<StdinRaw>>,
+}
+
+/// Constructs a new handle to the standard input of the current process.
+///
+/// Each handle returned is a reference to a shared global buffer whose access
+/// is synchronized via a mutex. If you need more explicit control over
+/// locking, see the [`Stdin::lock`] method.
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to read bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
+/// # Examples
+///
+/// Using implicit synchronization:
+///
+/// ```no_run
+/// use std::io;
+///
+/// fn main() -> io::Result<()> {
+/// let mut buffer = String::new();
+/// io::stdin().read_line(&mut buffer)?;
+/// Ok(())
+/// }
+/// ```
+///
+/// Using explicit synchronization:
+///
+/// ```no_run
+/// use std::io::{self, BufRead};
+///
+/// fn main() -> io::Result<()> {
+/// let mut buffer = String::new();
+/// let stdin = io::stdin();
+/// let mut handle = stdin.lock();
+///
+/// handle.read_line(&mut buffer)?;
+/// Ok(())
+/// }
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn stdin() -> Stdin {
+ static INSTANCE: OnceLock<Mutex<BufReader<StdinRaw>>> = OnceLock::new();
+ Stdin {
+ inner: INSTANCE.get_or_init(|| {
+ Mutex::new(BufReader::with_capacity(stdio::STDIN_BUF_SIZE, stdin_raw()))
+ }),
+ }
+}
+
+impl Stdin {
+ /// Locks this handle to the standard input stream, returning a readable
+ /// guard.
+ ///
+ /// The lock is released when the returned guard goes out of scope. The
+ /// returned guard also implements the [`Read`] and [`BufRead`] traits for
+ /// accessing the underlying data.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::{self, BufRead};
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut buffer = String::new();
+ /// let stdin = io::stdin();
+ /// let mut handle = stdin.lock();
+ ///
+ /// handle.read_line(&mut buffer)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn lock(&self) -> StdinLock<'static> {
+ // Locks this handle with 'static lifetime. This depends on the
+ // implementation detail that the underlying `Mutex` is static.
+ StdinLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+ }
+
+ /// Locks this handle and reads a line of input, appending it to the specified buffer.
+ ///
+ /// For detailed semantics of this method, see the documentation on
+ /// [`BufRead::read_line`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ ///
+ /// let mut input = String::new();
+ /// match io::stdin().read_line(&mut input) {
+ /// Ok(n) => {
+ /// println!("{n} bytes read");
+ /// println!("{input}");
+ /// }
+ /// Err(error) => println!("error: {error}"),
+ /// }
+ /// ```
+ ///
+ /// You can run the example one of two ways:
+ ///
+ /// - Pipe some text to it, e.g., `printf foo | path/to/executable`
+ /// - Give it text interactively by running the executable directly,
+ /// in which case it will wait for the Enter key to be pressed before
+ /// continuing
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn read_line(&self, buf: &mut String) -> io::Result<usize> {
+ self.lock().read_line(buf)
+ }
+
+ /// Consumes this handle and returns an iterator over input lines.
+ ///
+ /// For detailed semantics of this method, see the documentation on
+ /// [`BufRead::lines`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ ///
+ /// let lines = io::stdin().lines();
+ /// for line in lines {
+ /// println!("got a line: {}", line.unwrap());
+ /// }
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "stdin_forwarders", since = "1.62.0")]
+ pub fn lines(self) -> Lines<StdinLock<'static>> {
+ self.lock().lines()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Stdin {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Stdin").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for Stdin {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.lock().read(buf)
+ }
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.lock().read_vectored(bufs)
+ }
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.lock().is_read_vectored()
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.lock().read_to_end(buf)
+ }
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ self.lock().read_to_string(buf)
+ }
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ self.lock().read_exact(buf)
+ }
+}
+
+// Only used by platform-dependent io::copy specializations, i.e., unused on some platforms.
+#[cfg(any(target_os = "linux", target_os = "android"))]
+impl StdinLock<'_> {
+ pub(crate) fn as_mut_buf(&mut self) -> &mut BufReader<impl Read> {
+ &mut self.inner
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for StdinLock<'_> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.inner.read_to_end(buf)
+ }
+
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ self.inner.read_to_string(buf)
+ }
+
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ self.inner.read_exact(buf)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl BufRead for StdinLock<'_> {
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ self.inner.fill_buf()
+ }
+
+ fn consume(&mut self, n: usize) {
+ self.inner.consume(n)
+ }
+
+ fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.inner.read_until(byte, buf)
+ }
+
+ fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
+ self.inner.read_line(buf)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for StdinLock<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("StdinLock").finish_non_exhaustive()
+ }
+}
+
+/// A handle to the global standard output stream of the current process.
+///
+/// Each handle shares a global buffer of data to be written to the standard
+/// output stream. Access is also synchronized via a lock, and explicit control
+/// over locking is available via the [`lock`] method.
+///
+/// Created by the [`io::stdout`] method.
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
+/// [`lock`]: Stdout::lock
+/// [`io::stdout`]: stdout
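+///
+/// # Examples
+///
+/// A minimal illustration of locking the handle and writing through it, using
+/// the same APIs shown in the examples on [`io::stdout`] and [`lock`]:
+///
+/// ```no_run
+/// use std::io::{self, Write};
+///
+/// fn main() -> io::Result<()> {
+/// let mut stdout = io::stdout().lock();
+/// stdout.write_all(b"hello world")?;
+/// Ok(())
+/// }
+/// ```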
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Stdout {
+ // FIXME: this should be LineWriter or BufWriter depending on the state of
+ // stdout (tty or not). Note that if this is not line-buffered, it
+ // should also flush on panic, or provide some other form of flush-on-abort.
+ inner: Pin<&'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>>,
+}
+
+/// A locked reference to the [`Stdout`] handle.
+///
+/// This handle implements the [`Write`] trait, and is constructed via
+/// the [`Stdout::lock`] method. See its documentation for more.
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+#[must_use = "if unused stdout will immediately unlock"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct StdoutLock<'a> {
+ inner: ReentrantMutexGuard<'a, RefCell<LineWriter<StdoutRaw>>>,
+}
+
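+// The global, lazily-initialized stdout state. It lives at module scope
+// (rather than inside `stdout()`) so that `cleanup()` can reach it during
+// shutdown to flush the buffer and disable buffering.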
+static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
+
+/// Constructs a new handle to the standard output of the current process.
+///
+/// Each handle returned is a reference to a shared global buffer whose access
+/// is synchronized via a mutex. If you need more explicit control over
+/// locking, see the [`Stdout::lock`] method.
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
+/// # Examples
+///
+/// Using implicit synchronization:
+///
+/// ```no_run
+/// use std::io::{self, Write};
+///
+/// fn main() -> io::Result<()> {
+/// io::stdout().write_all(b"hello world")?;
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// Using explicit synchronization:
+///
+/// ```no_run
+/// use std::io::{self, Write};
+///
+/// fn main() -> io::Result<()> {
+/// let stdout = io::stdout();
+/// let mut handle = stdout.lock();
+///
+/// handle.write_all(b"hello world")?;
+///
+/// Ok(())
+/// }
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn stdout() -> Stdout {
+ Stdout {
+ inner: Pin::static_ref(&STDOUT).get_or_init_pin(
+ || unsafe { ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw()))) },
+ |mutex| unsafe { mutex.init() },
+ ),
+ }
+}
+
+pub fn cleanup() {
+ if let Some(instance) = STDOUT.get() {
+ // Flush the data and disable buffering during shutdown
+ // by replacing the line writer by one with zero
+ // buffering capacity.
+ // We use try_lock() instead of lock(), because someone
+ // might have leaked a StdoutLock, which would
+ // otherwise cause a deadlock here.
+ if let Some(lock) = Pin::static_ref(instance).try_lock() {
+ *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
+ }
+ }
+}
+
+impl Stdout {
+ /// Locks this handle to the standard output stream, returning a writable
+ /// guard.
+ ///
+ /// The lock is released when the returned guard goes out of scope. The
+ /// returned guard also implements the `Write` trait for writing data.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io::{self, Write};
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut stdout = io::stdout().lock();
+ ///
+ /// stdout.write_all(b"hello world")?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn lock(&self) -> StdoutLock<'static> {
+ // Locks this handle with 'static lifetime. This depends on the
+ // implementation detail that the underlying `ReentrantMutex` is
+ // static.
+ StdoutLock { inner: self.inner.lock() }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Stdout {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Stdout").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&*self).write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ (&*self).write_vectored(bufs)
+ }
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ io::Write::is_write_vectored(&&*self)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ (&*self).flush()
+ }
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ (&*self).write_all(buf)
+ }
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ (&*self).write_all_vectored(bufs)
+ }
+ fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> io::Result<()> {
+ (&*self).write_fmt(args)
+ }
+}
+
+#[stable(feature = "write_mt", since = "1.48.0")]
+impl Write for &Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.lock().write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.lock().write_vectored(bufs)
+ }
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.lock().is_write_vectored()
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.lock().flush()
+ }
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.lock().write_all(buf)
+ }
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ self.lock().write_all_vectored(bufs)
+ }
+ fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> io::Result<()> {
+ self.lock().write_fmt(args)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for StdoutLock<'_> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.borrow_mut().write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.borrow_mut().write_vectored(bufs)
+ }
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.inner.borrow_mut().is_write_vectored()
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.borrow_mut().flush()
+ }
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.inner.borrow_mut().write_all(buf)
+ }
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ self.inner.borrow_mut().write_all_vectored(bufs)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for StdoutLock<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("StdoutLock").finish_non_exhaustive()
+ }
+}
+
+/// A handle to the standard error stream of a process.
+///
+/// For more information, see the [`io::stderr`] method.
+///
+/// [`io::stderr`]: stderr
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
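+///
+/// # Examples
+///
+/// A minimal illustration, using the same APIs shown in the examples on
+/// [`io::stderr`]:
+///
+/// ```no_run
+/// use std::io::{self, Write};
+///
+/// fn main() -> io::Result<()> {
+/// io::stderr().write_all(b"hello world")?;
+/// Ok(())
+/// }
+/// ```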
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Stderr {
+ inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
+}
+
+/// A locked reference to the [`Stderr`] handle.
+///
+/// This handle implements the [`Write`] trait and is constructed via
+/// the [`Stderr::lock`] method. See its documentation for more.
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+#[must_use = "if unused stderr will immediately unlock"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct StderrLock<'a> {
+ inner: ReentrantMutexGuard<'a, RefCell<StderrRaw>>,
+}
+
+/// Constructs a new handle to the standard error of the current process.
+///
+/// This handle is not buffered.
+///
+/// ### Note: Windows Portability Considerations
+///
+/// When operating in a console, the Windows implementation of this stream does not support
+/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
+/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
+/// # Examples
+///
+/// Using implicit synchronization:
+///
+/// ```no_run
+/// use std::io::{self, Write};
+///
+/// fn main() -> io::Result<()> {
+/// io::stderr().write_all(b"hello world")?;
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// Using explicit synchronization:
+///
+/// ```no_run
+/// use std::io::{self, Write};
+///
+/// fn main() -> io::Result<()> {
+/// let stderr = io::stderr();
+/// let mut handle = stderr.lock();
+///
+/// handle.write_all(b"hello world")?;
+///
+/// Ok(())
+/// }
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn stderr() -> Stderr {
+ // Note that unlike `stdout()` we don't use `at_exit` here to register a
+ // destructor. Stderr is not buffered, so there's no need to run a
+ // destructor to flush the buffer.
+ static INSTANCE: OnceLock<ReentrantMutex<RefCell<StderrRaw>>> = OnceLock::new();
+
+ Stderr {
+ inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
+ || unsafe { ReentrantMutex::new(RefCell::new(stderr_raw())) },
+ |mutex| unsafe { mutex.init() },
+ ),
+ }
+}
+
+impl Stderr {
+ /// Locks this handle to the standard error stream, returning a writable
+ /// guard.
+ ///
+ /// The lock is released when the returned guard goes out of scope. The
+ /// returned guard also implements the [`Write`] trait for writing data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Write};
+ ///
+ /// fn foo() -> io::Result<()> {
+ /// let stderr = io::stderr();
+ /// let mut handle = stderr.lock();
+ ///
+ /// handle.write_all(b"hello world")?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn lock(&self) -> StderrLock<'static> {
+ // Locks this handle with 'static lifetime. This depends on the
+ // implementation detail that the underlying `ReentrantMutex` is
+ // static.
+ StderrLock { inner: self.inner.lock() }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Stderr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Stderr").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&*self).write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ (&*self).write_vectored(bufs)
+ }
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ io::Write::is_write_vectored(&&*self)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ (&*self).flush()
+ }
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ (&*self).write_all(buf)
+ }
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ (&*self).write_all_vectored(bufs)
+ }
+ fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> io::Result<()> {
+ (&*self).write_fmt(args)
+ }
+}
+
+#[stable(feature = "write_mt", since = "1.48.0")]
+impl Write for &Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.lock().write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.lock().write_vectored(bufs)
+ }
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.lock().is_write_vectored()
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.lock().flush()
+ }
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.lock().write_all(buf)
+ }
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ self.lock().write_all_vectored(bufs)
+ }
+ fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> io::Result<()> {
+ self.lock().write_fmt(args)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for StderrLock<'_> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.borrow_mut().write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.borrow_mut().write_vectored(bufs)
+ }
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.inner.borrow_mut().is_write_vectored()
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.borrow_mut().flush()
+ }
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.inner.borrow_mut().write_all(buf)
+ }
+ fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
+ self.inner.borrow_mut().write_all_vectored(bufs)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for StderrLock<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("StderrLock").finish_non_exhaustive()
+ }
+}
+
+/// Sets the thread-local output capture buffer and returns the old one.
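+///
+/// A sketch of the intended usage (illustrative only; it assumes `LocalStream`
+/// is an `Arc<Mutex<Vec<u8>>>`-style sink, as used by the test harness):
+///
+/// ```ignore (internal, unstable API)
+/// let sink = Arc::new(Mutex::new(Vec::new()));
+/// let old = set_output_capture(Some(sink.clone()));
+/// println!("this line goes to the capture buffer");
+/// set_output_capture(old);
+/// let captured = String::from_utf8(sink.lock().unwrap().clone()).unwrap();
+/// ```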
+#[unstable(
+ feature = "internal_output_capture",
+ reason = "this function is meant for use in the test crate \
+ and may disappear in the future",
+ issue = "none"
+)]
+#[doc(hidden)]
+pub fn set_output_capture(sink: Option<LocalStream>) -> Option<LocalStream> {
+ if sink.is_none() && !OUTPUT_CAPTURE_USED.load(Ordering::Relaxed) {
+ // OUTPUT_CAPTURE is definitely None since OUTPUT_CAPTURE_USED is false.
+ return None;
+ }
+ OUTPUT_CAPTURE_USED.store(true, Ordering::Relaxed);
+ OUTPUT_CAPTURE.with(move |slot| slot.replace(sink))
+}
+
+/// Write `args` to the capture buffer if enabled and possible, or `global_s`
+/// otherwise. `label` identifies the stream in a panic message.
+///
+/// This function is used to print error messages, so it takes extra
+/// care to avoid causing a panic when the capture stream is unusable.
+/// For instance, if the TLS key for the capture stream has already been
+/// destroyed, it simply falls back to the global stream.
+///
+/// However, if the actual I/O causes an error, this function does panic.
+fn print_to<T>(args: fmt::Arguments<'_>, global_s: fn() -> T, label: &str)
+where
+ T: Write,
+{
+ if OUTPUT_CAPTURE_USED.load(Ordering::Relaxed)
+ && OUTPUT_CAPTURE.try_with(|s| {
+ // Note that we completely remove a local sink to write to in case
+ // our printing recursively panics/prints, so the recursive
+ // panic/print goes to the global sink instead of our local sink.
+ s.take().map(|w| {
+ let _ = w.lock().unwrap_or_else(|e| e.into_inner()).write_fmt(args);
+ s.set(Some(w));
+ })
+ }) == Ok(Some(()))
+ {
+ // Successfully wrote to capture buffer.
+ return;
+ }
+
+ if let Err(e) = global_s().write_fmt(args) {
+ panic!("failed printing to {label}: {e}");
+ }
+}
+
+#[unstable(
+ feature = "print_internals",
+ reason = "implementation detail which may disappear or be replaced at any time",
+ issue = "none"
+)]
+#[doc(hidden)]
+#[cfg(not(test))]
+pub fn _print(args: fmt::Arguments<'_>) {
+ print_to(args, stdout, "stdout");
+}
+
+#[unstable(
+ feature = "print_internals",
+ reason = "implementation detail which may disappear or be replaced at any time",
+ issue = "none"
+)]
+#[doc(hidden)]
+#[cfg(not(test))]
+pub fn _eprint(args: fmt::Arguments<'_>) {
+ print_to(args, stderr, "stderr");
+}
+
+#[cfg(test)]
+pub use realstd::io::{_eprint, _print};
diff --git a/library/std/src/io/stdio/tests.rs b/library/std/src/io/stdio/tests.rs
new file mode 100644
index 000000000..f89fd27ce
--- /dev/null
+++ b/library/std/src/io/stdio/tests.rs
@@ -0,0 +1,166 @@
+use super::*;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::sync::mpsc::sync_channel;
+use crate::thread;
+
+#[test]
+fn stdout_unwind_safe() {
+ assert_unwind_safe::<Stdout>();
+}
+#[test]
+fn stdoutlock_unwind_safe() {
+ assert_unwind_safe::<StdoutLock<'_>>();
+ assert_unwind_safe::<StdoutLock<'static>>();
+}
+#[test]
+fn stderr_unwind_safe() {
+ assert_unwind_safe::<Stderr>();
+}
+#[test]
+fn stderrlock_unwind_safe() {
+ assert_unwind_safe::<StderrLock<'_>>();
+ assert_unwind_safe::<StderrLock<'static>>();
+}
+
+fn assert_unwind_safe<T: UnwindSafe + RefUnwindSafe>() {}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn panic_doesnt_poison() {
+ thread::spawn(|| {
+ let _a = stdin();
+ let _a = _a.lock();
+ let _a = stdout();
+ let _a = _a.lock();
+ let _a = stderr();
+ let _a = _a.lock();
+ panic!();
+ })
+ .join()
+ .unwrap_err();
+
+ let _a = stdin();
+ let _a = _a.lock();
+ let _a = stdout();
+ let _a = _a.lock();
+ let _a = stderr();
+ let _a = _a.lock();
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_lock_stderr() {
+ test_lock(stderr, || stderr().lock());
+}
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_lock_stdin() {
+ test_lock(stdin, || stdin().lock());
+}
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_lock_stdout() {
+ test_lock(stdout, || stdout().lock());
+}
+
+// Helper trait to make lock testing function generic.
+trait Stdio<'a>: 'static
+where
+ Self::Lock: 'a,
+{
+ type Lock;
+ fn lock(&'a self) -> Self::Lock;
+}
+impl<'a> Stdio<'a> for Stderr {
+ type Lock = StderrLock<'a>;
+ fn lock(&'a self) -> StderrLock<'a> {
+ self.lock()
+ }
+}
+impl<'a> Stdio<'a> for Stdin {
+ type Lock = StdinLock<'a>;
+ fn lock(&'a self) -> StdinLock<'a> {
+ self.lock()
+ }
+}
+impl<'a> Stdio<'a> for Stdout {
+ type Lock = StdoutLock<'a>;
+ fn lock(&'a self) -> StdoutLock<'a> {
+ self.lock()
+ }
+}
+
+// Helper trait to make lock testing function generic.
+trait StdioOwnedLock: 'static {}
+impl StdioOwnedLock for StderrLock<'static> {}
+impl StdioOwnedLock for StdinLock<'static> {}
+impl StdioOwnedLock for StdoutLock<'static> {}
+
+// Tests locking on stdio handles by starting two threads and checking that
+// they block each other appropriately.
+fn test_lock<T, U>(get_handle: fn() -> T, get_locked: fn() -> U)
+where
+ T: for<'a> Stdio<'a>,
+ U: StdioOwnedLock,
+{
+ // State enum to track different phases of the test, primarily when
+ // each lock is acquired and released.
+ #[derive(Debug, PartialEq)]
+ enum State {
+ Start1,
+ Acquire1,
+ Start2,
+ Release1,
+ Acquire2,
+ Release2,
+ }
+ use State::*;
+ // Logging vector to be checked to make sure lock acquisitions and
+ // releases happened in the correct order.
+ let log = Arc::new(Mutex::new(Vec::new()));
+ let ((tx1, rx1), (tx2, rx2)) = (sync_channel(0), sync_channel(0));
+ let th1 = {
+ let (log, tx) = (Arc::clone(&log), tx1);
+ thread::spawn(move || {
+ log.lock().unwrap().push(Start1);
+ let handle = get_handle();
+ {
+ let _locked = handle.lock();
+ log.lock().unwrap().push(Acquire1);
+ tx.send(Acquire1).unwrap(); // notify of acquisition
+ tx.send(Release1).unwrap(); // wait for release command
+ log.lock().unwrap().push(Release1);
+ }
+ tx.send(Acquire1).unwrap(); // wait for th2 acquire
+ {
+ let _locked = handle.lock();
+ log.lock().unwrap().push(Acquire1);
+ }
+ log.lock().unwrap().push(Release1);
+ })
+ };
+ let th2 = {
+ let (log, tx) = (Arc::clone(&log), tx2);
+ thread::spawn(move || {
+ tx.send(Start2).unwrap(); // wait for start command
+ let _locked = get_locked();
+ log.lock().unwrap().push(Acquire2);
+ tx.send(Acquire2).unwrap(); // notify of acquisition
+ tx.send(Release2).unwrap(); // wait for release command
+ log.lock().unwrap().push(Release2);
+ })
+ };
+ assert_eq!(rx1.recv().unwrap(), Acquire1); // wait for th1 acquire
+ log.lock().unwrap().push(Start2);
+ assert_eq!(rx2.recv().unwrap(), Start2); // block th2
+ assert_eq!(rx1.recv().unwrap(), Release1); // release th1
+ assert_eq!(rx2.recv().unwrap(), Acquire2); // wait for th2 acquire
+ assert_eq!(rx1.recv().unwrap(), Acquire1); // block th1
+ assert_eq!(rx2.recv().unwrap(), Release2); // release th2
+ th2.join().unwrap();
+ th1.join().unwrap();
+ assert_eq!(
+ *log.lock().unwrap(),
+ [Start1, Acquire1, Start2, Release1, Acquire2, Release2, Acquire1, Release1]
+ );
+}
diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs
new file mode 100644
index 000000000..f357f33ec
--- /dev/null
+++ b/library/std/src/io/tests.rs
@@ -0,0 +1,623 @@
+use super::{repeat, Cursor, ReadBuf, SeekFrom};
+use crate::cmp::{self, min};
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{BufRead, BufReader, Read, Seek, Write};
+use crate::mem::MaybeUninit;
+use crate::ops::Deref;
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn read_until() {
+ let mut buf = Cursor::new(&b"12"[..]);
+ let mut v = Vec::new();
+ assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 2);
+ assert_eq!(v, b"12");
+
+ let mut buf = Cursor::new(&b"1233"[..]);
+ let mut v = Vec::new();
+ assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 3);
+ assert_eq!(v, b"123");
+ v.truncate(0);
+ assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 1);
+ assert_eq!(v, b"3");
+ v.truncate(0);
+ assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 0);
+ assert_eq!(v, []);
+}
+
+#[test]
+fn split() {
+ let buf = Cursor::new(&b"12"[..]);
+ let mut s = buf.split(b'3');
+ assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']);
+ assert!(s.next().is_none());
+
+ let buf = Cursor::new(&b"1233"[..]);
+ let mut s = buf.split(b'3');
+ assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']);
+ assert_eq!(s.next().unwrap().unwrap(), vec![]);
+ assert!(s.next().is_none());
+}
+
+#[test]
+fn read_line() {
+ let mut buf = Cursor::new(&b"12"[..]);
+ let mut v = String::new();
+ assert_eq!(buf.read_line(&mut v).unwrap(), 2);
+ assert_eq!(v, "12");
+
+ let mut buf = Cursor::new(&b"12\n\n"[..]);
+ let mut v = String::new();
+ assert_eq!(buf.read_line(&mut v).unwrap(), 3);
+ assert_eq!(v, "12\n");
+ v.truncate(0);
+ assert_eq!(buf.read_line(&mut v).unwrap(), 1);
+ assert_eq!(v, "\n");
+ v.truncate(0);
+ assert_eq!(buf.read_line(&mut v).unwrap(), 0);
+ assert_eq!(v, "");
+}
+
+#[test]
+fn lines() {
+ let buf = Cursor::new(&b"12\r"[..]);
+ let mut s = buf.lines();
+ assert_eq!(s.next().unwrap().unwrap(), "12\r".to_string());
+ assert!(s.next().is_none());
+
+ let buf = Cursor::new(&b"12\r\n\n"[..]);
+ let mut s = buf.lines();
+ assert_eq!(s.next().unwrap().unwrap(), "12".to_string());
+ assert_eq!(s.next().unwrap().unwrap(), "".to_string());
+ assert!(s.next().is_none());
+}
+
+#[test]
+fn buf_read_has_data_left() {
+ let mut buf = Cursor::new(&b"abcd"[..]);
+ assert!(buf.has_data_left().unwrap());
+ buf.read_exact(&mut [0; 2]).unwrap();
+ assert!(buf.has_data_left().unwrap());
+ buf.read_exact(&mut [0; 2]).unwrap();
+ assert!(!buf.has_data_left().unwrap());
+}
+
+#[test]
+fn read_to_end() {
+ let mut c = Cursor::new(&b""[..]);
+ let mut v = Vec::new();
+ assert_eq!(c.read_to_end(&mut v).unwrap(), 0);
+ assert_eq!(v, []);
+
+ let mut c = Cursor::new(&b"1"[..]);
+ let mut v = Vec::new();
+ assert_eq!(c.read_to_end(&mut v).unwrap(), 1);
+ assert_eq!(v, b"1");
+
+ let cap = 1024 * 1024;
+ let data = (0..cap).map(|i| (i / 3) as u8).collect::<Vec<_>>();
+ let mut v = Vec::new();
+ let (a, b) = data.split_at(data.len() / 2);
+ assert_eq!(Cursor::new(a).read_to_end(&mut v).unwrap(), a.len());
+ assert_eq!(Cursor::new(b).read_to_end(&mut v).unwrap(), b.len());
+ assert_eq!(v, data);
+}
+
+#[test]
+fn read_to_string() {
+ let mut c = Cursor::new(&b""[..]);
+ let mut v = String::new();
+ assert_eq!(c.read_to_string(&mut v).unwrap(), 0);
+ assert_eq!(v, "");
+
+ let mut c = Cursor::new(&b"1"[..]);
+ let mut v = String::new();
+ assert_eq!(c.read_to_string(&mut v).unwrap(), 1);
+ assert_eq!(v, "1");
+
+ let mut c = Cursor::new(&b"\xff"[..]);
+ let mut v = String::new();
+ assert!(c.read_to_string(&mut v).is_err());
+}
+
+#[test]
+fn read_exact() {
+ let mut buf = [0; 4];
+
+ let mut c = Cursor::new(&b""[..]);
+ assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+
+ let mut c = Cursor::new(&b"123"[..]).chain(Cursor::new(&b"456789"[..]));
+ c.read_exact(&mut buf).unwrap();
+ assert_eq!(&buf, b"1234");
+ c.read_exact(&mut buf).unwrap();
+ assert_eq!(&buf, b"5678");
+ assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+}
+
+#[test]
+fn read_exact_slice() {
+ let mut buf = [0; 4];
+
+ let mut c = &b""[..];
+ assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+
+ let mut c = &b"123"[..];
+ assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+ // make sure the optimized (early returning) method is being used
+ assert_eq!(&buf, &[0; 4]);
+
+ let mut c = &b"1234"[..];
+ c.read_exact(&mut buf).unwrap();
+ assert_eq!(&buf, b"1234");
+
+ let mut c = &b"56789"[..];
+ c.read_exact(&mut buf).unwrap();
+ assert_eq!(&buf, b"5678");
+ assert_eq!(c, b"9");
+}
+
+#[test]
+fn read_buf_exact() {
+ let mut buf = [0; 4];
+ let mut buf = ReadBuf::new(&mut buf);
+
+ let mut c = Cursor::new(&b""[..]);
+ assert_eq!(c.read_buf_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+
+ let mut c = Cursor::new(&b"123456789"[..]);
+ c.read_buf_exact(&mut buf).unwrap();
+ assert_eq!(buf.filled(), b"1234");
+
+ buf.clear();
+
+ c.read_buf_exact(&mut buf).unwrap();
+ assert_eq!(buf.filled(), b"5678");
+
+ buf.clear();
+
+ assert_eq!(c.read_buf_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+}
+
+#[test]
+fn take_eof() {
+ struct R;
+
+ impl Read for R {
+ fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
+ Err(io::const_io_error!(io::ErrorKind::Other, ""))
+ }
+ }
+ impl BufRead for R {
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ Err(io::const_io_error!(io::ErrorKind::Other, ""))
+ }
+ fn consume(&mut self, _amt: usize) {}
+ }
+
+ let mut buf = [0; 1];
+ assert_eq!(0, R.take(0).read(&mut buf).unwrap());
+ assert_eq!(b"", R.take(0).fill_buf().unwrap());
+}
+
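+// Drains two `BufRead`s in lock-step, asserting that they yield identical
+// bytes and that the concatenated output equals `exp`.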
+fn cmp_bufread<Br1: BufRead, Br2: BufRead>(mut br1: Br1, mut br2: Br2, exp: &[u8]) {
+ let mut cat = Vec::new();
+ loop {
+ let consume = {
+ let buf1 = br1.fill_buf().unwrap();
+ let buf2 = br2.fill_buf().unwrap();
+ let minlen = if buf1.len() < buf2.len() { buf1.len() } else { buf2.len() };
+ assert_eq!(buf1[..minlen], buf2[..minlen]);
+ cat.extend_from_slice(&buf1[..minlen]);
+ minlen
+ };
+ if consume == 0 {
+ break;
+ }
+ br1.consume(consume);
+ br2.consume(consume);
+ }
+ assert_eq!(br1.fill_buf().unwrap().len(), 0);
+ assert_eq!(br2.fill_buf().unwrap().len(), 0);
+ assert_eq!(&cat[..], &exp[..])
+}
+
+#[test]
+fn chain_bufread() {
+ let testdata = b"ABCDEFGHIJKL";
+ let chain1 =
+ (&testdata[..3]).chain(&testdata[3..6]).chain(&testdata[6..9]).chain(&testdata[9..]);
+ let chain2 = (&testdata[..4]).chain(&testdata[4..8]).chain(&testdata[8..]);
+ cmp_bufread(chain1, chain2, &testdata[..]);
+}
+
+#[test]
+fn bufreader_size_hint() {
+ let testdata = b"ABCDEFGHIJKL";
+ let mut buf_reader = BufReader::new(&testdata[..]);
+ assert_eq!(buf_reader.buffer().len(), 0);
+
+ let buffer_length = testdata.len();
+ buf_reader.fill_buf().unwrap();
+
+ // Check that size hint matches buffer contents
+ let mut buffered_bytes = buf_reader.bytes();
+ let (lower_bound, _upper_bound) = buffered_bytes.size_hint();
+ assert_eq!(lower_bound, buffer_length);
+
+ // Check that size hint matches buffer contents after advancing
+ buffered_bytes.next().unwrap().unwrap();
+ let (lower_bound, _upper_bound) = buffered_bytes.size_hint();
+ assert_eq!(lower_bound, buffer_length - 1);
+}
+
+#[test]
+fn empty_size_hint() {
+ let size_hint = io::empty().bytes().size_hint();
+ assert_eq!(size_hint, (0, Some(0)));
+}
+
+#[test]
+fn slice_size_hint() {
+ let size_hint = (&[1, 2, 3]).bytes().size_hint();
+ assert_eq!(size_hint, (3, Some(3)));
+}
+
+#[test]
+fn take_size_hint() {
+ let size_hint = (&[1, 2, 3]).take(2).bytes().size_hint();
+ assert_eq!(size_hint, (2, Some(2)));
+
+ let size_hint = (&[1, 2, 3]).take(4).bytes().size_hint();
+ assert_eq!(size_hint, (3, Some(3)));
+
+ let size_hint = io::repeat(0).take(3).bytes().size_hint();
+ assert_eq!(size_hint, (3, Some(3)));
+}
+
+#[test]
+fn chain_empty_size_hint() {
+ let chain = io::empty().chain(io::empty());
+ let size_hint = chain.bytes().size_hint();
+ assert_eq!(size_hint, (0, Some(0)));
+}
+
+#[test]
+fn chain_size_hint() {
+ let testdata = b"ABCDEFGHIJKL";
+ let mut buf_reader_1 = BufReader::new(&testdata[..6]);
+ let mut buf_reader_2 = BufReader::new(&testdata[6..]);
+
+ buf_reader_1.fill_buf().unwrap();
+ buf_reader_2.fill_buf().unwrap();
+
+ let chain = buf_reader_1.chain(buf_reader_2);
+ let size_hint = chain.bytes().size_hint();
+ assert_eq!(size_hint, (testdata.len(), Some(testdata.len())));
+}
+
+#[test]
+fn chain_zero_length_read_is_not_eof() {
+ let a = b"A";
+ let b = b"B";
+ let mut s = String::new();
+ let mut chain = (&a[..]).chain(&b[..]);
+ chain.read(&mut []).unwrap();
+ chain.read_to_string(&mut s).unwrap();
+ assert_eq!("AB", s);
+}
+
+#[bench]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn bench_read_to_end(b: &mut test::Bencher) {
+ b.iter(|| {
+ let mut lr = repeat(1).take(10000000);
+ let mut vec = Vec::with_capacity(1024);
+ super::default_read_to_end(&mut lr, &mut vec)
+ });
+}
+
+#[test]
+fn seek_len() -> io::Result<()> {
+ let mut c = Cursor::new(vec![0; 15]);
+ assert_eq!(c.stream_len()?, 15);
+
+ c.seek(SeekFrom::End(0))?;
+ let old_pos = c.stream_position()?;
+ assert_eq!(c.stream_len()?, 15);
+ assert_eq!(c.stream_position()?, old_pos);
+
+ c.seek(SeekFrom::Start(7))?;
+ c.seek(SeekFrom::Current(2))?;
+ let old_pos = c.stream_position()?;
+ assert_eq!(c.stream_len()?, 15);
+ assert_eq!(c.stream_position()?, old_pos);
+
+ Ok(())
+}
+
+#[test]
+fn seek_position() -> io::Result<()> {
+ // All `asserts` are duplicated here to make sure the method does not
+ // change anything about the seek state.
+ let mut c = Cursor::new(vec![0; 15]);
+ assert_eq!(c.stream_position()?, 0);
+ assert_eq!(c.stream_position()?, 0);
+
+ c.seek(SeekFrom::End(0))?;
+ assert_eq!(c.stream_position()?, 15);
+ assert_eq!(c.stream_position()?, 15);
+
+ c.seek(SeekFrom::Start(7))?;
+ c.seek(SeekFrom::Current(2))?;
+ assert_eq!(c.stream_position()?, 9);
+ assert_eq!(c.stream_position()?, 9);
+
+ c.seek(SeekFrom::End(-3))?;
+ c.seek(SeekFrom::Current(1))?;
+ c.seek(SeekFrom::Current(-5))?;
+ assert_eq!(c.stream_position()?, 8);
+ assert_eq!(c.stream_position()?, 8);
+
+ c.rewind()?;
+ assert_eq!(c.stream_position()?, 0);
+ assert_eq!(c.stream_position()?, 0);
+
+ Ok(())
+}
+
+// A simple example reader which uses the default implementation of
+// read_to_end.
+struct ExampleSliceReader<'a> {
+ slice: &'a [u8],
+}
+
+impl<'a> Read for ExampleSliceReader<'a> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let len = cmp::min(self.slice.len(), buf.len());
+ buf[..len].copy_from_slice(&self.slice[..len]);
+ self.slice = &self.slice[len..];
+ Ok(len)
+ }
+}
+
+#[test]
+fn test_read_to_end_capacity() -> io::Result<()> {
+ let input = &b"foo"[..];
+
+ // read_to_end() takes care not to over-allocate when a buffer is the
+ // exact size needed.
+ let mut vec1 = Vec::with_capacity(input.len());
+ ExampleSliceReader { slice: input }.read_to_end(&mut vec1)?;
+ assert_eq!(vec1.len(), input.len());
+ assert_eq!(vec1.capacity(), input.len(), "did not allocate more");
+
+ Ok(())
+}
+
+#[test]
+fn io_slice_mut_advance_slices() {
+ let mut buf1 = [1; 8];
+ let mut buf2 = [2; 16];
+ let mut buf3 = [3; 8];
+ let mut bufs = &mut [
+ IoSliceMut::new(&mut buf1),
+ IoSliceMut::new(&mut buf2),
+ IoSliceMut::new(&mut buf3),
+ ][..];
+
+ // Only advance within a single buffer.
+ IoSliceMut::advance_slices(&mut bufs, 1);
+ assert_eq!(bufs[0].deref(), [1; 7].as_ref());
+ assert_eq!(bufs[1].deref(), [2; 16].as_ref());
+ assert_eq!(bufs[2].deref(), [3; 8].as_ref());
+
+ // Removing a buffer, leaving others as is.
+ IoSliceMut::advance_slices(&mut bufs, 7);
+ assert_eq!(bufs[0].deref(), [2; 16].as_ref());
+ assert_eq!(bufs[1].deref(), [3; 8].as_ref());
+
+ // Removing a buffer and removing from the next buffer.
+ IoSliceMut::advance_slices(&mut bufs, 18);
+ assert_eq!(bufs[0].deref(), [3; 6].as_ref());
+}
+
+#[test]
+#[should_panic]
+fn io_slice_mut_advance_slices_empty_slice() {
+ let mut empty_bufs = &mut [][..];
+ IoSliceMut::advance_slices(&mut empty_bufs, 1);
+}
+
+#[test]
+#[should_panic]
+fn io_slice_mut_advance_slices_beyond_total_length() {
+ let mut buf1 = [1; 8];
+ let mut bufs = &mut [IoSliceMut::new(&mut buf1)][..];
+
+ IoSliceMut::advance_slices(&mut bufs, 9);
+ assert!(bufs.is_empty());
+}
+
+#[test]
+fn io_slice_advance_slices() {
+ let buf1 = [1; 8];
+ let buf2 = [2; 16];
+ let buf3 = [3; 8];
+ let mut bufs = &mut [IoSlice::new(&buf1), IoSlice::new(&buf2), IoSlice::new(&buf3)][..];
+
+ // Only advance within a single buffer.
+ IoSlice::advance_slices(&mut bufs, 1);
+ assert_eq!(bufs[0].deref(), [1; 7].as_ref());
+ assert_eq!(bufs[1].deref(), [2; 16].as_ref());
+ assert_eq!(bufs[2].deref(), [3; 8].as_ref());
+
+ // Removing a buffer, leaving others as is.
+ IoSlice::advance_slices(&mut bufs, 7);
+ assert_eq!(bufs[0].deref(), [2; 16].as_ref());
+ assert_eq!(bufs[1].deref(), [3; 8].as_ref());
+
+ // Removing a buffer and removing from the next buffer.
+ IoSlice::advance_slices(&mut bufs, 18);
+ assert_eq!(bufs[0].deref(), [3; 6].as_ref());
+}
+
+#[test]
+#[should_panic]
+fn io_slice_advance_slices_empty_slice() {
+ let mut empty_bufs = &mut [][..];
+ IoSlice::advance_slices(&mut empty_bufs, 1);
+}
+
+#[test]
+#[should_panic]
+fn io_slice_advance_slices_beyond_total_length() {
+ let buf1 = [1; 8];
+ let mut bufs = &mut [IoSlice::new(&buf1)][..];
+
+ IoSlice::advance_slices(&mut bufs, 9);
+ assert!(bufs.is_empty());
+}
+
+/// Creates a new test writer that reads from at most `n_bufs` buffers and
+/// accepts at most `per_call` bytes (in total) per call to `write`.
+fn test_writer(n_bufs: usize, per_call: usize) -> TestWriter {
+ TestWriter { n_bufs, per_call, written: Vec::new() }
+}
+
+struct TestWriter {
+ n_bufs: usize,
+ per_call: usize,
+ written: Vec<u8>,
+}
+
+impl Write for TestWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.write_vectored(&[IoSlice::new(buf)])
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let mut left = self.per_call;
+ let mut written = 0;
+ for buf in bufs.iter().take(self.n_bufs) {
+ let n = min(left, buf.len());
+ self.written.extend_from_slice(&buf[0..n]);
+ left -= n;
+ written += n;
+ }
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[test]
+fn test_writer_read_from_one_buf() {
+ let mut writer = test_writer(1, 2);
+
+ assert_eq!(writer.write(&[]).unwrap(), 0);
+ assert_eq!(writer.write_vectored(&[]).unwrap(), 0);
+
+ // Read at most 2 bytes.
+ assert_eq!(writer.write(&[1, 1, 1]).unwrap(), 2);
+ let bufs = &[IoSlice::new(&[2, 2, 2])];
+ assert_eq!(writer.write_vectored(bufs).unwrap(), 2);
+
+ // Only read from first buf.
+ let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4, 4])];
+ assert_eq!(writer.write_vectored(bufs).unwrap(), 1);
+
+ assert_eq!(writer.written, &[1, 1, 2, 2, 3]);
+}
+
+#[test]
+fn test_writer_read_from_multiple_bufs() {
+ let mut writer = test_writer(3, 3);
+
+ // Read at most 3 bytes from two buffers.
+ let bufs = &[IoSlice::new(&[1]), IoSlice::new(&[2, 2, 2])];
+ assert_eq!(writer.write_vectored(bufs).unwrap(), 3);
+
+ // Read at most 3 bytes from three buffers.
+ let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4]), IoSlice::new(&[5, 5])];
+ assert_eq!(writer.write_vectored(bufs).unwrap(), 3);
+
+ assert_eq!(writer.written, &[1, 2, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_write_all_vectored() {
+ #[rustfmt::skip] // Becomes unreadable otherwise.
+ let tests: Vec<(_, &'static [u8])> = vec![
+ (vec![], &[]),
+ (vec![IoSlice::new(&[]), IoSlice::new(&[])], &[]),
+ (vec![IoSlice::new(&[1])], &[1]),
+ (vec![IoSlice::new(&[1, 2])], &[1, 2]),
+ (vec![IoSlice::new(&[1, 2, 3])], &[1, 2, 3]),
+ (vec![IoSlice::new(&[1, 2, 3, 4])], &[1, 2, 3, 4]),
+ (vec![IoSlice::new(&[1, 2, 3, 4, 5])], &[1, 2, 3, 4, 5]),
+ (vec![IoSlice::new(&[1]), IoSlice::new(&[2])], &[1, 2]),
+ (vec![IoSlice::new(&[1]), IoSlice::new(&[2, 2])], &[1, 2, 2]),
+ (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2])], &[1, 1, 2, 2]),
+ (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 2, 2, 2]),
+ (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 2, 2, 2]),
+ (vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 1, 2, 2, 2]),
+ (vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 2, 2, 2, 2]),
+ (vec![IoSlice::new(&[1, 1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 1, 2, 2, 2, 2]),
+ (vec![IoSlice::new(&[1]), IoSlice::new(&[2]), IoSlice::new(&[3])], &[1, 2, 3]),
+ (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3])], &[1, 1, 2, 2, 3, 3]),
+ (vec![IoSlice::new(&[1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 2, 2, 3, 3, 3]),
+ (vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 1, 1, 2, 2, 2, 3, 3, 3]),
+ ];
+
+ let writer_configs = &[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)];
+
+ for (n_bufs, per_call) in writer_configs.iter().copied() {
+ for (mut input, wanted) in tests.clone().into_iter() {
+ let mut writer = test_writer(n_bufs, per_call);
+ assert!(writer.write_all_vectored(&mut *input).is_ok());
+ assert_eq!(&*writer.written, &*wanted);
+ }
+ }
+}
+
+// Issue 94981
+#[test]
+#[should_panic = "number of read bytes exceeds limit"]
+fn test_take_wrong_length() {
+ struct LieAboutSize(bool);
+
+ impl Read for LieAboutSize {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ // Lie about the read size on the first call to `read`.
+ if core::mem::take(&mut self.0) { Ok(buf.len() + 1) } else { Ok(buf.len()) }
+ }
+ }
+
+ let mut buffer = vec![0; 4];
+ let mut reader = LieAboutSize(true).take(4);
+ // Prime the `Take` limit by lying about the read size.
+ let _ = reader.read(&mut buffer[..]);
+}
+
+#[bench]
+fn bench_take_read(b: &mut test::Bencher) {
+ b.iter(|| {
+ let mut buf = [0; 64];
+
+ [255; 128].take(64).read(&mut buf).unwrap();
+ });
+}
+
+#[bench]
+fn bench_take_read_buf(b: &mut test::Bencher) {
+ b.iter(|| {
+ let mut buf = [MaybeUninit::uninit(); 64];
+
+ let mut rbuf = ReadBuf::uninit(&mut buf);
+
+ [255; 128].take(64).read_buf(&mut rbuf).unwrap();
+ });
+}
diff --git a/library/std/src/io/util.rs b/library/std/src/io/util.rs
new file mode 100644
index 000000000..c1300cd67
--- /dev/null
+++ b/library/std/src/io/util.rs
@@ -0,0 +1,270 @@
+#![allow(missing_copy_implementations)]
+
+#[cfg(test)]
+mod tests;
+
+use crate::fmt;
+use crate::io::{
+ self, BufRead, IoSlice, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, SizeHint, Write,
+};
+
+/// A reader which is always at EOF.
+///
+/// This struct is generally created by calling [`empty()`]. Please see
+/// the documentation of [`empty()`] for more details.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[non_exhaustive]
+#[derive(Copy, Clone, Default)]
+pub struct Empty;
+
+/// Constructs a new handle to an empty reader.
+///
+/// All reads from the returned reader will return <code>[Ok]\(0)</code>.
+///
+/// # Examples
+///
+/// A slightly sad example of not reading anything into a buffer:
+///
+/// ```
+/// use std::io::{self, Read};
+///
+/// let mut buffer = String::new();
+/// io::empty().read_to_string(&mut buffer).unwrap();
+/// assert!(buffer.is_empty());
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_io_structs", issue = "78812")]
+pub const fn empty() -> Empty {
+ Empty
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for Empty {
+ #[inline]
+ fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
+ Ok(0)
+ }
+
+ #[inline]
+ fn read_buf(&mut self, _buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ Ok(())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl BufRead for Empty {
+ #[inline]
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ Ok(&[])
+ }
+ #[inline]
+ fn consume(&mut self, _n: usize) {}
+}
+
+#[stable(feature = "empty_seek", since = "1.51.0")]
+impl Seek for Empty {
+ fn seek(&mut self, _pos: SeekFrom) -> io::Result<u64> {
+ Ok(0)
+ }
+
+ fn stream_len(&mut self) -> io::Result<u64> {
+ Ok(0)
+ }
+
+ fn stream_position(&mut self) -> io::Result<u64> {
+ Ok(0)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Empty {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Empty").finish_non_exhaustive()
+ }
+}
+
+impl SizeHint for Empty {
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ Some(0)
+ }
+}
+
+/// A reader which yields one byte over and over and over and over and over and...
+///
+/// This struct is generally created by calling [`repeat()`]. Please
+/// see the documentation of [`repeat()`] for more details.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Repeat {
+ byte: u8,
+}
+
+/// Creates an instance of a reader that infinitely repeats one byte.
+///
+/// All reads from this reader will succeed by filling the specified buffer with
+/// the given byte.
+///
+/// # Examples
+///
+/// ```
+/// use std::io::{self, Read};
+///
+/// let mut buffer = [0; 3];
+/// io::repeat(0b101).read_exact(&mut buffer).unwrap();
+/// assert_eq!(buffer, [0b101, 0b101, 0b101]);
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_io_structs", issue = "78812")]
+pub const fn repeat(byte: u8) -> Repeat {
+ Repeat { byte }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for Repeat {
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ for slot in &mut *buf {
+ *slot = self.byte;
+ }
+ Ok(buf.len())
+ }
+
+ fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ // SAFETY: No uninit bytes are being written
+ for slot in unsafe { buf.unfilled_mut() } {
+ slot.write(self.byte);
+ }
+
+ let remaining = buf.remaining();
+
+ // SAFETY: the entire unfilled portion of buf has been initialized
+ unsafe {
+ buf.assume_init(remaining);
+ }
+
+ buf.add_filled(remaining);
+
+ Ok(())
+ }
+
+ #[inline]
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ let mut nwritten = 0;
+ for buf in bufs {
+ nwritten += self.read(buf)?;
+ }
+ Ok(nwritten)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ true
+ }
+}
+
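+// `Repeat` never returns EOF, so the size hint reports an effectively
+// unbounded number of remaining bytes.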
+impl SizeHint for Repeat {
+ #[inline]
+ fn lower_bound(&self) -> usize {
+ usize::MAX
+ }
+
+ #[inline]
+ fn upper_bound(&self) -> Option<usize> {
+ None
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Repeat {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Repeat").finish_non_exhaustive()
+ }
+}
+
+/// A writer which will move data into the void.
+///
+/// This struct is generally created by calling [`sink()`]. Please
+/// see the documentation of [`sink()`] for more details.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[non_exhaustive]
+#[derive(Copy, Clone, Default)]
+pub struct Sink;
+
+/// Creates an instance of a writer which will successfully consume all data.
+///
+/// All calls to [`write`] on the returned instance will return `Ok(buf.len())`
+/// and the contents of the buffer will not be inspected.
+///
+/// [`write`]: Write::write
+///
+/// # Examples
+///
+/// ```rust
+/// use std::io::{self, Write};
+///
+/// let buffer = vec![1, 2, 3, 5, 8];
+/// let num_bytes = io::sink().write(&buffer).unwrap();
+/// assert_eq!(num_bytes, 5);
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_io_structs", issue = "78812")]
+pub const fn sink() -> Sink {
+ Sink
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for Sink {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let total_len = bufs.iter().map(|b| b.len()).sum();
+ Ok(total_len)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "write_mt", since = "1.48.0")]
+impl Write for &Sink {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let total_len = bufs.iter().map(|b| b.len()).sum();
+ Ok(total_len)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Sink {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Sink").finish_non_exhaustive()
+ }
+}
diff --git a/library/std/src/io/util/tests.rs b/library/std/src/io/util/tests.rs
new file mode 100644
index 000000000..08972a59a
--- /dev/null
+++ b/library/std/src/io/util/tests.rs
@@ -0,0 +1,147 @@
+use crate::cmp::{max, min};
+use crate::io::prelude::*;
+use crate::io::{
+ copy, empty, repeat, sink, BufWriter, Empty, ReadBuf, Repeat, Result, SeekFrom, Sink,
+ DEFAULT_BUF_SIZE,
+};
+
+use crate::mem::MaybeUninit;
+
+#[test]
+fn copy_copies() {
+ let mut r = repeat(0).take(4);
+ let mut w = sink();
+ assert_eq!(copy(&mut r, &mut w).unwrap(), 4);
+
+ let mut r = repeat(0).take(1 << 17);
+ assert_eq!(copy(&mut r as &mut dyn Read, &mut w as &mut dyn Write).unwrap(), 1 << 17);
+}
+
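+// A reader that reports up to `read_size` bytes per call (without writing any
+// data) until `cap` is exhausted, recording the largest buffer it was offered
+// so tests can observe which buffer size `copy` used.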
+struct ShortReader {
+ cap: usize,
+ read_size: usize,
+ observed_buffer: usize,
+}
+
+impl Read for ShortReader {
+ fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+ let bytes = min(self.cap, self.read_size);
+ self.cap -= bytes;
+ self.observed_buffer = max(self.observed_buffer, buf.len());
+ Ok(bytes)
+ }
+}
+
+struct WriteObserver {
+ observed_buffer: usize,
+}
+
+impl Write for WriteObserver {
+ fn write(&mut self, buf: &[u8]) -> Result<usize> {
+ self.observed_buffer = max(self.observed_buffer, buf.len());
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> Result<()> {
+ Ok(())
+ }
+}
+
+#[test]
+fn copy_specializes_bufwriter() {
+ let cap = 117 * 1024;
+ let buf_sz = 16 * 1024;
+ let mut r = ShortReader { cap, observed_buffer: 0, read_size: 1337 };
+ let mut w = BufWriter::with_capacity(buf_sz, WriteObserver { observed_buffer: 0 });
+ assert_eq!(
+ copy(&mut r, &mut w).unwrap(),
+ cap as u64,
+ "expected the whole capacity to be copied"
+ );
+ assert_eq!(r.observed_buffer, buf_sz, "expected a large buffer to be provided to the reader");
+ assert!(w.get_mut().observed_buffer > DEFAULT_BUF_SIZE, "expected coalesced writes");
+}
+
+#[test]
+fn sink_sinks() {
+ let mut s = sink();
+ assert_eq!(s.write(&[]).unwrap(), 0);
+ assert_eq!(s.write(&[0]).unwrap(), 1);
+ assert_eq!(s.write(&[0; 1024]).unwrap(), 1024);
+ assert_eq!(s.by_ref().write(&[0; 1024]).unwrap(), 1024);
+}
+
+#[test]
+fn empty_reads() {
+ let mut e = empty();
+ assert_eq!(e.read(&mut []).unwrap(), 0);
+ assert_eq!(e.read(&mut [0]).unwrap(), 0);
+ assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0);
+ assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0);
+
+ let mut buf = [];
+ let mut buf = ReadBuf::uninit(&mut buf);
+ e.read_buf(&mut buf).unwrap();
+ assert_eq!(buf.filled_len(), 0);
+ assert_eq!(buf.initialized_len(), 0);
+
+ let mut buf = [MaybeUninit::uninit()];
+ let mut buf = ReadBuf::uninit(&mut buf);
+ e.read_buf(&mut buf).unwrap();
+ assert_eq!(buf.filled_len(), 0);
+ assert_eq!(buf.initialized_len(), 0);
+
+ let mut buf = [MaybeUninit::uninit(); 1024];
+ let mut buf = ReadBuf::uninit(&mut buf);
+ e.read_buf(&mut buf).unwrap();
+ assert_eq!(buf.filled_len(), 0);
+ assert_eq!(buf.initialized_len(), 0);
+
+ let mut buf = [MaybeUninit::uninit(); 1024];
+ let mut buf = ReadBuf::uninit(&mut buf);
+ e.by_ref().read_buf(&mut buf).unwrap();
+ assert_eq!(buf.filled_len(), 0);
+ assert_eq!(buf.initialized_len(), 0);
+}
+
+#[test]
+fn empty_seeks() {
+ let mut e = empty();
+ assert!(matches!(e.seek(SeekFrom::Start(0)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::Start(1)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::Start(u64::MAX)), Ok(0)));
+
+ assert!(matches!(e.seek(SeekFrom::End(i64::MIN)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::End(-1)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::End(0)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::End(1)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::End(i64::MAX)), Ok(0)));
+
+ assert!(matches!(e.seek(SeekFrom::Current(i64::MIN)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::Current(-1)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::Current(0)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::Current(1)), Ok(0)));
+ assert!(matches!(e.seek(SeekFrom::Current(i64::MAX)), Ok(0)));
+}
+
+#[test]
+fn repeat_repeats() {
+ let mut r = repeat(4);
+ let mut b = [0; 1024];
+ assert_eq!(r.read(&mut b).unwrap(), 1024);
+ assert!(b.iter().all(|b| *b == 4));
+}
+
+#[test]
+fn take_some_bytes() {
+ assert_eq!(repeat(4).take(100).bytes().count(), 100);
+ assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4);
+ assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20);
+}
+
+#[allow(dead_code)]
+fn const_utils() {
+ const _: Empty = empty();
+ const _: Repeat = repeat(b'c');
+ const _: Sink = sink();
+}
diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs
new file mode 100644
index 000000000..7157b5af0
--- /dev/null
+++ b/library/std/src/keyword_docs.rs
@@ -0,0 +1,2362 @@
+#[doc(keyword = "as")]
+//
+/// Cast between types, or rename an import.
+///
+/// `as` is most commonly used to turn primitive types into other primitive types, but it has other
+/// uses that include turning pointers into addresses, addresses into pointers, and pointers into
+/// other pointers.
+///
+/// ```rust
+/// let thing1: u8 = 89.0 as u8;
+/// assert_eq!('B' as u32, 66);
+/// assert_eq!(thing1 as char, 'Y');
+/// let thing2: f32 = thing1 as f32 + 10.5;
+/// assert_eq!(true as u8 + thing2 as u8, 100);
+/// ```
+///
+/// In general, any cast that can be performed via ascribing the type can also be done using `as`,
+/// so instead of writing `let x: u32 = 123`, you can write `let x = 123 as u32` (note: `let x: u32
+/// = 123` would be best in that situation). The same is not true in the other direction, however;
+/// explicitly using `as` allows a few more coercions that aren't allowed implicitly, such as
+/// changing the type of a raw pointer or turning closures into raw pointers.
+///
+/// `as` can be seen as the primitive for `From` and `Into`: `as` only works with primitives
+/// (`u8`, `bool`, `str`, pointers, ...) whereas `From` and `Into` also work with types like
+/// `String` or `Vec`.
+///
+/// `as` can also be used with the `_` placeholder when the destination type can be inferred. Note
+/// that this can cause inference breakage and usually such code should use an explicit type for
+/// both clarity and stability. This is most useful when converting pointers using `as *const _` or
+/// `as *mut _` though the [`cast`][const-cast] method is recommended over `as *const _` and it is
+/// [the same][mut-cast] for `as *mut _`: those methods make the intent clearer.
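+///
+/// For example, a raw pointer conversion could be written either way; this is
+/// only an illustrative sketch:
+///
+/// ```rust
+/// let x = &10u32;
+/// let p1 = x as *const u32 as *const u8; // chained `as` casts
+/// let p2 = (x as *const u32).cast::<u8>(); // `cast` spells out the intent
+/// assert_eq!(p1, p2);
+/// ```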
+///
+/// `as` is also used to rename imports in [`use`] and [`extern crate`][`crate`] statements:
+///
+/// ```
+/// # #[allow(unused_imports)]
+/// use std::{mem as memory, net as network};
+/// // Now you can use the names `memory` and `network` to refer to `std::mem` and `std::net`.
+/// ```
+/// For more information on what `as` is capable of, see the [Reference].
+///
+/// [Reference]: ../reference/expressions/operator-expr.html#type-cast-expressions
+/// [`crate`]: keyword.crate.html
+/// [`use`]: keyword.use.html
+/// [const-cast]: pointer::cast
+/// [mut-cast]: primitive.pointer.html#method.cast-1
+mod as_keyword {}
+
+#[doc(keyword = "break")]
+//
+/// Exit early from a loop.
+///
+/// When `break` is encountered, execution of the associated loop body is
+/// immediately terminated.
+///
+/// ```rust
+/// let mut last = 0;
+///
+/// for x in 1..100 {
+/// if x > 12 {
+/// break;
+/// }
+/// last = x;
+/// }
+///
+/// assert_eq!(last, 12);
+/// println!("{last}");
+/// ```
+///
+/// A break expression is normally associated with the innermost loop enclosing the
+/// `break` but a label can be used to specify which enclosing loop is affected.
+///
+/// ```rust
+/// 'outer: for i in 1..=5 {
+/// println!("outer iteration (i): {i}");
+///
+/// '_inner: for j in 1..=200 {
+/// println!(" inner iteration (j): {j}");
+/// if j >= 3 {
+/// // breaks from inner loop, lets outer loop continue.
+/// break;
+/// }
+/// if i >= 2 {
+/// // breaks from outer loop, and directly to "Bye".
+/// break 'outer;
+/// }
+/// }
+/// }
+/// println!("Bye.");
+/// ```
+///
+/// When associated with `loop`, a break expression may be used to return a value from that loop.
+/// This is only valid with `loop` and not with any other type of loop.
+/// If no value is specified, `break;` returns `()`.
+/// Every `break` within a loop must return the same type.
+///
+/// ```rust
+/// let (mut a, mut b) = (1, 1);
+/// let result = loop {
+/// if b > 10 {
+/// break b;
+/// }
+/// let c = a + b;
+/// a = b;
+/// b = c;
+/// };
+/// // first number in Fibonacci sequence over 10:
+/// assert_eq!(result, 13);
+/// println!("{result}");
+/// ```
+///
+/// For more details consult the [Reference on "break expression"] and the [Reference on "break and
+/// loop values"].
+///
+/// [Reference on "break expression"]: ../reference/expressions/loop-expr.html#break-expressions
+/// [Reference on "break and loop values"]:
+/// ../reference/expressions/loop-expr.html#break-and-loop-values
+mod break_keyword {}
+
+#[doc(keyword = "const")]
+//
+/// Compile-time constants, compile-time evaluable functions, and raw pointers.
+///
+/// ## Compile-time constants
+///
+/// Sometimes a certain value is used many times throughout a program, and it can become
+/// inconvenient to copy it over and over. What's more, it's not always possible or desirable to
+/// make it a variable that gets carried around to each function that needs it. In these cases, the
+/// `const` keyword provides a convenient alternative to code duplication:
+///
+/// ```rust
+/// const THING: u32 = 0xABAD1DEA;
+///
+/// let foo = 123 + THING;
+/// ```
+///
+/// Constants must be explicitly typed; unlike with `let`, you can't ignore their type and let the
+/// compiler figure it out. Any constant value can be defined in a `const`, which in practice happens
+/// to be most things that would be reasonable to have in a constant (barring `const fn`s). For
+/// example, you can't have a [`File`] as a `const`.
+///
+/// [`File`]: crate::fs::File
+///
+/// The only lifetime allowed in a constant is `'static`, which is the lifetime that encompasses
+/// all others in a Rust program. For example, if you wanted to define a constant string, it would
+/// look like this:
+///
+/// ```rust
+/// const WORDS: &'static str = "hello rust!";
+/// ```
+///
+/// Thanks to static lifetime elision, you usually don't have to explicitly use `'static`:
+///
+/// ```rust
+/// const WORDS: &str = "hello convenience!";
+/// ```
+///
+/// `const` items look remarkably similar to `static` items, which introduces some confusion as
+/// to which one should be used at which times. To put it simply, constants are inlined wherever
+/// they're used, making using them identical to simply replacing the name of the `const` with its
+/// value. Static variables, on the other hand, point to a single location in memory, which all
+/// accesses share. This means that, unlike with constants, they can't have destructors, and act as
+/// a single value across the entire codebase.
+///
+/// Constants, like statics, should always be in `SCREAMING_SNAKE_CASE`.
+///
+/// For more detail on `const`, see the [Rust Book] or the [Reference].
+///
+/// ## Compile-time evaluable functions
+///
+/// The other main use of the `const` keyword is in `const fn`. This marks a function as being
+/// callable in the body of a `const` or `static` item and in array initializers (commonly called
+/// "const contexts"). `const fn` are restricted in the set of operations they can perform, to
+/// ensure that they can be evaluated at compile-time. See the [Reference][const-eval] for more
+/// detail.
+///
+/// Turning a `fn` into a `const fn` has no effect on run-time uses of that function.
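+///
+/// A minimal sketch, using a hypothetical `squared` function callable in both
+/// contexts:
+///
+/// ```rust
+/// const fn squared(x: u32) -> u32 {
+///     x * x
+/// }
+///
+/// const FOUR: u32 = squared(2); // evaluated at compile time
+/// let nine = squared(3); // still callable at run time
+/// assert_eq!(FOUR + nine, 13);
+/// ```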
+///
+/// ## Other uses of `const`
+///
+/// The `const` keyword is also used in raw pointers in combination with `mut`, as seen in `*const
+/// T` and `*mut T`. More about `const` as used in raw pointers can be read at the Rust docs for the [pointer primitive].
+///
+/// [pointer primitive]: pointer
+/// [Rust Book]: ../book/ch03-01-variables-and-mutability.html#constants
+/// [Reference]: ../reference/items/constant-items.html
+/// [const-eval]: ../reference/const_eval.html
+mod const_keyword {}
+
+#[doc(keyword = "continue")]
+//
+/// Skip to the next iteration of a loop.
+///
+/// When `continue` is encountered, the current iteration is terminated, returning control to the
+/// loop head, typically continuing with the next iteration.
+///
+/// ```rust
+/// // Printing odd numbers by skipping even ones
+/// for number in 1..=10 {
+/// if number % 2 == 0 {
+/// continue;
+/// }
+/// println!("{number}");
+/// }
+/// ```
+///
+/// Like `break`, `continue` is normally associated with the innermost enclosing loop, but labels
+/// may be used to specify the affected loop.
+///
+/// ```rust
+/// // Print odd numbers under 30 whose units digit is at most 5
+/// 'tens: for ten in 0..3 {
+/// '_units: for unit in 0..=9 {
+/// if unit % 2 == 0 {
+/// continue;
+/// }
+/// if unit > 5 {
+/// continue 'tens;
+/// }
+/// println!("{}", ten * 10 + unit);
+/// }
+/// }
+/// ```
+///
+/// See [continue expressions] from the reference for more details.
+///
+/// [continue expressions]: ../reference/expressions/loop-expr.html#continue-expressions
+mod continue_keyword {}
+
+#[doc(keyword = "crate")]
+//
+/// A Rust binary or library.
+///
+/// The primary use of the `crate` keyword is as a part of `extern crate` declarations, which are
+/// used to specify a dependency on a crate external to the one it's declared in. Crates are the
+/// fundamental compilation unit of Rust code, and can be seen as libraries or projects. More can
+/// be read about crates in the [Reference].
+///
+/// ```rust ignore
+/// extern crate rand;
+/// extern crate my_crate as thing;
+/// extern crate std; // implicitly added to the root of every Rust project
+/// ```
+///
+/// The `as` keyword can be used to change what the crate is referred to as in your project. If a
+/// crate name includes a dash, it is implicitly imported with the dashes replaced by underscores.
+///
+/// `crate` can also be used in conjunction with `pub` to signify that the item it's attached to
+/// is public only to other members of the same crate it's in.
+///
+/// ```rust
+/// # #[allow(unused_imports)]
+/// pub(crate) use std::io::Error as IoError;
+/// pub(crate) enum CoolMarkerType { }
+/// pub struct PublicThing {
+/// pub(crate) semi_secret_thing: bool,
+/// }
+/// ```
+///
+/// `crate` is also used to represent the absolute path of a module, where `crate` refers to the
+/// root of the current crate. For instance, `crate::foo::bar` refers to the name `bar` inside the
+/// module `foo`, from anywhere else in the same crate.
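+///
+/// A small sketch of such an absolute path, using a hypothetical module `foo`:
+///
+/// ```rust
+/// mod foo {
+///     pub fn bar() {}
+/// }
+///
+/// fn main() {
+///     crate::foo::bar(); // absolute path starting from the crate root
+/// }
+/// ```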
+///
+/// [Reference]: ../reference/items/extern-crates.html
+mod crate_keyword {}
+
+#[doc(keyword = "else")]
+//
+/// What expression to evaluate when an [`if`] condition evaluates to [`false`].
+///
+/// `else` expressions are optional; when one is omitted, the missing branch is assumed to
+/// evaluate to the unit type `()`.
+///
+/// The type that the `else` blocks evaluate to must be compatible with the type that the `if` block
+/// evaluates to.
+///
+/// As can be seen below, `else` must be followed by either: `if`, `if let`, or a block `{}` and it
+/// will return the value of that expression.
+///
+/// ```rust
+/// let result = if true == false {
+/// "oh no"
+/// } else if "something" == "other thing" {
+/// "oh dear"
+/// } else if let Some(200) = "blarg".parse::<i32>().ok() {
+/// "uh oh"
+/// } else {
+/// println!("Sneaky side effect.");
+/// "phew, nothing's broken"
+/// };
+/// ```
+///
+/// Here's another example but here we do not try and return an expression:
+///
+/// ```rust
+/// if true == false {
+/// println!("oh no");
+/// } else if "something" == "other thing" {
+/// println!("oh dear");
+/// } else if let Some(200) = "blarg".parse::<i32>().ok() {
+/// println!("uh oh");
+/// } else {
+/// println!("phew, nothing's broken");
+/// }
+/// ```
+///
+/// The above is _still_ an expression but it will always evaluate to `()`.
+///
+/// There is no practical limit to the number of `else` blocks that can follow an `if` expression;
+/// however, if you have several of them, a [`match`] expression might be preferable.
+///
+/// Read more about control flow in the [Rust Book].
+///
+/// [Rust Book]: ../book/ch03-05-control-flow.html#handling-multiple-conditions-with-else-if
+/// [`match`]: keyword.match.html
+/// [`false`]: keyword.false.html
+/// [`if`]: keyword.if.html
+mod else_keyword {}
+
+#[doc(keyword = "enum")]
+//
+/// A type that can be any one of several variants.
+///
+/// Enums in Rust are similar to those of other compiled languages like C, but have important
+/// differences that make them considerably more powerful. What Rust calls enums are more commonly
+/// known as [Algebraic Data Types][ADT] if you're coming from a functional programming background.
+/// The important detail is that each enum variant can have data to go along with it.
+///
+/// ```rust
+/// # struct Coord;
+/// enum SimpleEnum {
+/// FirstVariant,
+/// SecondVariant,
+/// ThirdVariant,
+/// }
+///
+/// enum Location {
+/// Unknown,
+/// Anonymous,
+/// Known(Coord),
+/// }
+///
+/// enum ComplexEnum {
+/// Nothing,
+/// Something(u32),
+/// LotsOfThings {
+/// usual_struct_stuff: bool,
+/// blah: String,
+/// }
+/// }
+///
+/// enum EmptyEnum { }
+/// ```
+///
+/// The first enum shown is the usual kind of enum you'd find in a C-style language. The second
+/// shows off a hypothetical example of something storing location data, with `Coord` being any
+/// other type that's needed, for example a struct. The third example demonstrates the kind of
+/// data a variant can store, ranging from nothing, to a tuple, to an anonymous struct.
+///
+/// Instantiating enum variants involves explicitly using the enum's name as its namespace,
+/// followed by one of its variants. `SimpleEnum::SecondVariant` would be an example from above.
+/// When data follows along with a variant, such as with Rust's built-in [`Option`] type, the data
+/// is added as the type describes, for example `Option::Some(123)`. The same follows with
+/// struct-like variants, with things looking like `ComplexEnum::LotsOfThings { usual_struct_stuff:
+/// true, blah: "hello!".to_string(), }`. Empty enums are similar to [`!`] in that they cannot be
+/// instantiated at all, and are used mainly to mess with the type system in interesting ways.
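+///
+/// Putting that together, a sketch that reuses the hypothetical types from above:
+///
+/// ```rust
+/// # struct Coord;
+/// # enum SimpleEnum { FirstVariant, SecondVariant, ThirdVariant }
+/// # enum Location { Unknown, Anonymous, Known(Coord) }
+/// # enum ComplexEnum { Nothing, Something(u32), LotsOfThings { usual_struct_stuff: bool, blah: String } }
+/// let simple = SimpleEnum::SecondVariant;
+/// let located = Location::Known(Coord);
+/// let complex = ComplexEnum::LotsOfThings {
+///     usual_struct_stuff: true,
+///     blah: "hello!".to_string(),
+/// };
+/// ```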
+///
+/// For more information, take a look at the [Rust Book] or the [Reference].
+///
+/// [ADT]: https://en.wikipedia.org/wiki/Algebraic_data_type
+/// [Rust Book]: ../book/ch06-01-defining-an-enum.html
+/// [Reference]: ../reference/items/enumerations.html
+mod enum_keyword {}
+
+#[doc(keyword = "extern")]
+//
+/// Link to or import external code.
+///
+/// The `extern` keyword is used in two places in Rust. One is in conjunction with the [`crate`]
+/// keyword to make your Rust code aware of other Rust crates in your project, i.e., `extern crate
+/// lazy_static;`. The other use is in foreign function interfaces (FFI).
+///
+/// `extern` is used in two different contexts within FFI. The first is in the form of external
+/// blocks, which declare the signatures of foreign functions that Rust code can call.
+///
+/// ```rust ignore
+/// #[link(name = "my_c_library")]
+/// extern "C" {
+/// fn my_c_function(x: i32) -> bool;
+/// }
+/// ```
+///
+/// This code would attempt to link with `libmy_c_library.so` on unix-like systems and
+/// `my_c_library.dll` on Windows at runtime, and panic if it can't find something to link to. Rust
+/// code could then use `my_c_function` as if it were any other unsafe Rust function. Working with
+/// non-Rust languages and FFI is inherently unsafe, so wrappers are usually built around C APIs.
+///
+/// The mirror use case of FFI is also done via the `extern` keyword:
+///
+/// ```rust
+/// #[no_mangle]
+/// pub extern "C" fn callable_from_c(x: i32) -> bool {
+/// x % 3 == 0
+/// }
+/// ```
+///
+/// If compiled as a dylib, the resulting .so could then be linked to from a C library, and the
+/// function could be used as if it were from any other library.
+///
+/// For more information on FFI, check the [Rust book] or the [Reference].
+///
+/// [Rust book]:
+/// ../book/ch19-01-unsafe-rust.html#using-extern-functions-to-call-external-code
+/// [Reference]: ../reference/items/external-blocks.html
+/// [`crate`]: keyword.crate.html
+mod extern_keyword {}
+
+#[doc(keyword = "false")]
+//
+/// A value of type [`bool`] representing logical **false**.
+///
+/// `false` is the logical opposite of [`true`].
+///
+/// See the documentation for [`true`] for more information.
+///
+/// [`true`]: keyword.true.html
+mod false_keyword {}
+
+#[doc(keyword = "fn")]
+//
+/// A function or function pointer.
+///
+/// Functions are the primary way code is executed within Rust. Function blocks, usually just
+/// called functions, can be defined in a variety of different places and be assigned many
+/// different attributes and modifiers.
+///
+/// Standalone functions that just sit within a module not attached to anything else are common,
+/// but most functions will end up being inside [`impl`] blocks, either on another type itself, or
+/// as a trait impl for that type.
+///
+/// ```rust
+/// fn standalone_function() {
+/// // code
+/// }
+///
+/// pub fn public_thing(argument: bool) -> String {
+/// // code
+/// # "".to_string()
+/// }
+///
+/// struct Thing {
+/// foo: i32,
+/// }
+///
+/// impl Thing {
+/// pub fn new() -> Self {
+/// Self {
+/// foo: 42,
+/// }
+/// }
+/// }
+/// ```
+///
+/// In addition to presenting fixed types in the form of `fn name(arg: type, ..) -> return_type`,
+/// functions can also declare a list of type parameters along with trait bounds that they fall
+/// into.
+///
+/// ```rust
+/// fn generic_function<T: Clone>(x: T) -> (T, T, T) {
+/// (x.clone(), x.clone(), x.clone())
+/// }
+///
+/// fn generic_where<T>(x: T) -> T
+/// where T: std::ops::Add<Output = T> + Copy
+/// {
+/// x + x + x
+/// }
+/// ```
+///
+/// Declaring trait bounds in the angle brackets is functionally identical to using a `where`
+/// clause. It's up to the programmer to decide which works better in each situation, but `where`
+/// tends to be better when things get longer than one line.
+///
+/// Along with being made public via `pub`, `fn` can also have an [`extern`] added for use in
+/// FFI.
+///
+/// For more information on the various types of functions and how they're used, consult the [Rust
+/// book] or the [Reference].
+///
+/// [`impl`]: keyword.impl.html
+/// [`extern`]: keyword.extern.html
+/// [Rust book]: ../book/ch03-03-how-functions-work.html
+/// [Reference]: ../reference/items/functions.html
+mod fn_keyword {}
+
+#[doc(keyword = "for")]
+//
+/// Iteration with [`in`], trait implementation with [`impl`], or [higher-ranked trait bounds]
+/// (`for<'a>`).
+///
+/// The `for` keyword is used in many syntactic locations:
+///
+/// * `for` is used in for-in-loops (see below).
+/// * `for` is used when implementing traits as in `impl Trait for Type` (see [`impl`] for more info
+/// on that).
+/// * `for` is also used for [higher-ranked trait bounds] as in `for<'a> &'a T: PartialEq<i32>`.
+///
+/// for-in-loops, or to be more precise, iterator loops, are a simple syntactic sugar over a common
+/// practice within Rust, which is to loop over anything that implements [`IntoIterator`] until the
+/// iterator returned by `.into_iter()` returns `None` (or the loop body uses `break`).
+///
+/// ```rust
+/// for i in 0..5 {
+/// println!("{}", i * 2);
+/// }
+///
+/// for i in std::iter::repeat(5) {
+/// println!("turns out {i} never stops being 5");
+/// break; // would loop forever otherwise
+/// }
+///
+/// 'outer: for x in 5..50 {
+/// for y in 0..10 {
+/// if x == y {
+/// break 'outer;
+/// }
+/// }
+/// }
+/// ```
+///
+/// As shown in the example above, `for` loops (along with all other loops) can be tagged, using
+/// similar syntax to lifetimes (only visually similar, entirely distinct in practice). Giving the
+/// same tag to `break` breaks the tagged loop, which is useful for inner loops. It is definitely
+/// not a goto.
+///
+/// A `for` loop expands as shown:
+///
+/// ```rust
+/// # fn code() { }
+/// # let iterator = 0..2;
+/// for loop_variable in iterator {
+/// code()
+/// }
+/// ```
+///
+/// ```rust
+/// # fn code() { }
+/// # let iterator = 0..2;
+/// {
+/// let result = match IntoIterator::into_iter(iterator) {
+/// mut iter => loop {
+/// match iter.next() {
+/// None => break,
+/// Some(loop_variable) => { code(); },
+/// };
+/// },
+/// };
+/// result
+/// }
+/// ```
+///
+/// More details on the functionality shown can be seen at the [`IntoIterator`] docs.
+///
+/// For more information on for-loops, see the [Rust book] or the [Reference].
+///
+/// See also, [`loop`], [`while`].
+///
+/// [`in`]: keyword.in.html
+/// [`impl`]: keyword.impl.html
+/// [`loop`]: keyword.loop.html
+/// [`while`]: keyword.while.html
+/// [higher-ranked trait bounds]: ../reference/trait-bounds.html#higher-ranked-trait-bounds
+/// [Rust book]:
+/// ../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+/// [Reference]: ../reference/expressions/loop-expr.html#iterator-loops
+mod for_keyword {}
+
+#[doc(keyword = "if")]
+//
+/// Evaluate a block if a condition holds.
+///
+/// `if` is a familiar construct to most programmers, and is the main way you'll add conditional
+/// logic to your code. However, unlike in most languages, `if` blocks can also act as expressions.
+///
+/// ```rust
+/// # let rude = true;
+/// if 1 == 2 {
+/// println!("whoops, mathematics broke");
+/// } else {
+/// println!("everything's fine!");
+/// }
+///
+/// let greeting = if rude {
+/// "sup nerd."
+/// } else {
+/// "hello, friend!"
+/// };
+///
+/// if let Ok(x) = "123".parse::<i32>() {
+/// println!("{} double that and you get {}!", greeting, x * 2);
+/// }
+/// ```
+///
+/// Shown above are the three typical forms an `if` block comes in. First is the usual kind of
+/// thing you'd see in many languages, with an optional `else` block. Second uses `if` as an
+/// expression, which is only possible if all branches return the same type. An `if` expression can
+/// be used anywhere an expression is expected. The third kind of `if` block is an `if let` block, which
+/// behaves similarly to using a `match` expression:
+///
+/// ```rust
+/// if let Some(x) = Some(123) {
+/// // code
+/// # let _ = x;
+/// } else {
+/// // something else
+/// }
+///
+/// match Some(123) {
+/// Some(x) => {
+/// // code
+/// # let _ = x;
+/// },
+/// _ => {
+/// // something else
+/// },
+/// }
+/// ```
+///
+/// Each kind of `if` expression can be mixed and matched as needed.
+///
+/// ```rust
+/// if true == false {
+/// println!("oh no");
+/// } else if "something" == "other thing" {
+/// println!("oh dear");
+/// } else if let Some(200) = "blarg".parse::<i32>().ok() {
+/// println!("uh oh");
+/// } else {
+/// println!("phew, nothing's broken");
+/// }
+/// ```
+///
+/// The `if` keyword is used in one other place in Rust, namely as a part of pattern matching
+/// itself, allowing patterns such as `Some(x) if x > 200` to be used.
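+///
+/// A quick sketch of such a match guard:
+///
+/// ```rust
+/// let maybe_num = Some(500);
+/// match maybe_num {
+///     Some(x) if x > 200 => println!("{x} is big!"),
+///     Some(x) => println!("{x} is small."),
+///     None => println!("no number at all"),
+/// }
+/// ```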
+///
+/// For more information on `if` expressions, see the [Rust book] or the [Reference].
+///
+/// [Rust book]: ../book/ch03-05-control-flow.html#if-expressions
+/// [Reference]: ../reference/expressions/if-expr.html
+mod if_keyword {}
+
+#[doc(keyword = "impl")]
+//
+/// Implement some functionality for a type.
+///
+/// The `impl` keyword is primarily used to define implementations on types. Inherent
+/// implementations are standalone, while trait implementations are used to implement traits for
+/// types, or other traits.
+///
+/// Functions and consts can both be defined in an implementation. A function defined in an
+/// `impl` block can be standalone, meaning it would be called like `Foo::bar()`. If the function
+/// takes `self`, `&self`, or `&mut self` as its first argument, it can also be called using
+/// method-call syntax, a familiar feature to any object oriented programmer, like `foo.bar()`.
+///
+/// ```rust
+/// struct Example {
+/// number: i32,
+/// }
+///
+/// impl Example {
+/// fn boo() {
+/// println!("boo! Example::boo() was called!");
+/// }
+///
+/// fn answer(&mut self) {
+/// self.number += 42;
+/// }
+///
+/// fn get_number(&self) -> i32 {
+/// self.number
+/// }
+/// }
+///
+/// trait Thingy {
+/// fn do_thingy(&self);
+/// }
+///
+/// impl Thingy for Example {
+/// fn do_thingy(&self) {
+/// println!("doing a thing! also, number is {}!", self.number);
+/// }
+/// }
+/// ```
+///
+/// For more information on implementations, see the [Rust book][book1] or the [Reference].
+///
+/// The other use of the `impl` keyword is in `impl Trait` syntax, which can be seen as a shorthand
+/// for "a concrete type that implements this trait". Its primary use is working with closures,
+/// which have type definitions generated at compile time that can't be simply typed out.
+///
+/// ```rust
+/// fn thing_returning_closure() -> impl Fn(i32) -> bool {
+/// println!("here's a closure for you!");
+/// |x: i32| x % 3 == 0
+/// }
+/// ```
+///
+/// For more information on `impl Trait` syntax, see the [Rust book][book2].
+///
+/// [book1]: ../book/ch05-03-method-syntax.html
+/// [Reference]: ../reference/items/implementations.html
+/// [book2]: ../book/ch10-02-traits.html#returning-types-that-implement-traits
+mod impl_keyword {}
+
+#[doc(keyword = "in")]
+//
+/// Iterate over a series of values with [`for`].
+///
+/// The expression immediately following `in` must implement the [`IntoIterator`] trait.
+///
+/// ## Literal Examples:
+///
+/// * `for _ in 1..3 {}` - Iterate over an exclusive range up to but excluding 3.
+/// * `for _ in 1..=3 {}` - Iterate over an inclusive range up to and including 3.
+///
+/// (Read more about [range patterns])
+///
+/// [`IntoIterator`]: ../book/ch13-04-performance.html
+/// [range patterns]: ../reference/patterns.html?highlight=range#range-patterns
+/// [`for`]: keyword.for.html
+///
+/// The other use of `in` is with the keyword `pub`. It allows users to declare an item as visible
+/// only within a given scope.
+///
+/// ## Literal Example:
+///
+/// * `pub(in crate::outer_mod) fn outer_mod_visible_fn() {}` - the function is visible only within `outer_mod`
+///
+/// Starting with the 2018 edition, paths for `pub(in path)` must start with `crate`, `self` or
+/// `super`. The 2015 edition may also use paths starting with `::` or modules from the crate root.
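+///
+/// A short sketch of that restricted visibility, using hypothetical `outer_mod`
+/// and `inner_mod` modules:
+///
+/// ```rust
+/// pub mod outer_mod {
+///     pub mod inner_mod {
+///         // Visible only within `outer_mod`.
+///         pub(in crate::outer_mod) fn outer_mod_visible_fn() {}
+///     }
+///     pub fn call_it() {
+///         inner_mod::outer_mod_visible_fn();
+///     }
+/// }
+/// # fn main() { outer_mod::call_it(); }
+/// ```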
+///
+/// For more information, see the [Reference].
+///
+/// [Reference]: ../reference/visibility-and-privacy.html#pubin-path-pubcrate-pubsuper-and-pubself
+mod in_keyword {}
+
+#[doc(keyword = "let")]
+//
+/// Bind a value to a variable.
+///
+/// The primary use for the `let` keyword is in `let` statements, which are used to introduce a new
+/// set of variables into the current scope, as given by a pattern.
+///
+/// ```rust
+/// # #![allow(unused_assignments)]
+/// let thing1: i32 = 100;
+/// let thing2 = 200 + thing1;
+///
+/// let mut changing_thing = true;
+/// changing_thing = false;
+///
+/// let (part1, part2) = ("first", "second");
+///
+/// struct Example {
+/// a: bool,
+/// b: u64,
+/// }
+///
+/// let Example { a, b: _ } = Example {
+/// a: true,
+/// b: 10004,
+/// };
+/// assert!(a);
+/// ```
+///
+/// The pattern is most commonly a single variable, which means no pattern matching is done and
+/// the expression given is bound to the variable. Apart from that, patterns used in `let` bindings
+/// can be as complicated as needed, given that the pattern is exhaustive. See the [Rust
+/// book][book1] for more information on pattern matching. The type of the pattern is optionally
+/// given afterwards, but if left blank is automatically inferred by the compiler if possible.
+///
+/// Variables in Rust are immutable by default, and require the `mut` keyword to be made mutable.
+///
+/// Multiple variables can be defined with the same name, known as shadowing. This doesn't affect
+/// the original variable in any way, other than making it impossible to access directly after the
+/// point of shadowing. It remains in scope and is dropped only when it falls out of scope.
+/// Shadowed variables don't need to have the same type as the variables shadowing them.
+///
+/// ```rust
+/// let shadowing_example = true;
+/// let shadowing_example = 123.4;
+/// let shadowing_example = shadowing_example as u32;
+/// let mut shadowing_example = format!("cool! {shadowing_example}");
+/// shadowing_example += " something else!"; // not shadowing
+/// ```
+///
+/// Other places the `let` keyword is used include along with [`if`], in the form of `if let`
+/// expressions. They're useful if the pattern being matched isn't exhaustive, such as with
+/// enumerations. `while let` also exists, which runs a loop with a pattern matched value until
+/// that pattern can't be matched.
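+///
+/// A brief sketch of both forms:
+///
+/// ```rust
+/// let mut stack = vec![1, 2, 3];
+///
+/// if let Some(top) = stack.last() {
+///     println!("the top of the stack is {top}");
+/// }
+///
+/// while let Some(value) = stack.pop() {
+///     println!("popped {value}");
+/// }
+/// ```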
+///
+/// For more information on the `let` keyword, see the [Rust book][book2] or the [Reference]
+///
+/// [book1]: ../book/ch06-02-match.html
+/// [`if`]: keyword.if.html
+/// [book2]: ../book/ch18-01-all-the-places-for-patterns.html#let-statements
+/// [Reference]: ../reference/statements.html#let-statements
+mod let_keyword {}
+
+#[doc(keyword = "while")]
+//
+/// Loop while a condition is upheld.
+///
+/// A `while` expression is used for predicate loops. The `while` expression runs the conditional
+/// expression before running the loop body, then runs the loop body if the conditional
+/// expression evaluates to `true`, or exits the loop otherwise.
+///
+/// ```rust
+/// let mut counter = 0;
+///
+/// while counter < 10 {
+/// println!("{counter}");
+/// counter += 1;
+/// }
+/// ```
+///
+/// Like the [`for`] expression, we can use `break` and `continue`. A `while` expression
+/// cannot break with a value and always evaluates to `()` unlike [`loop`].
+///
+/// ```rust
+/// let mut i = 1;
+///
+/// while i < 100 {
+/// i *= 2;
+/// if i == 64 {
+/// break; // Exit when `i` is 64.
+/// }
+/// }
+/// ```
+///
+/// As `if` expressions have their pattern matching variant in `if let`, so too do `while`
+/// expressions with `while let`. The `while let` expression matches the pattern against the
+/// expression, then runs the loop body if pattern matching succeeds, or exits the loop otherwise.
+/// We can use `break` and `continue` in `while let` expressions just like in `while`.
+///
+/// ```rust
+/// let mut counter = Some(0);
+///
+/// while let Some(i) = counter {
+/// if i == 10 {
+/// counter = None;
+/// } else {
+/// println!("{i}");
+/// counter = Some (i + 1);
+/// }
+/// }
+/// ```
+///
+/// For more information on `while` and loops in general, see the [reference].
+///
+/// See also, [`for`], [`loop`].
+///
+/// [`for`]: keyword.for.html
+/// [`loop`]: keyword.loop.html
+/// [reference]: ../reference/expressions/loop-expr.html#predicate-loops
+mod while_keyword {}
+
+#[doc(keyword = "loop")]
+//
+/// Loop indefinitely.
+///
+/// `loop` is used to define the simplest kind of loop supported in Rust. It runs the code inside
+/// it until the code uses `break` or the program exits.
+///
+/// ```rust
+/// loop {
+/// println!("hello world forever!");
+/// # break;
+/// }
+///
+/// let mut i = 1;
+/// loop {
+/// println!("i is {i}");
+/// if i > 100 {
+/// break;
+/// }
+/// i *= 2;
+/// }
+/// assert_eq!(i, 128);
+/// ```
+///
+/// Unlike the other kinds of loops in Rust (`while`, `while let`, and `for`), loops can be used as
+/// expressions that return values via `break`.
+///
+/// ```rust
+/// let mut i = 1;
+/// let something = loop {
+/// i *= 2;
+/// if i > 100 {
+/// break i;
+/// }
+/// };
+/// assert_eq!(something, 128);
+/// ```
+///
+/// Every `break` in a loop has to return a value of the same type. When no value is explicitly given,
+/// `break;` returns `()`.
+///
+/// For more information on `loop` and loops in general, see the [Reference].
+///
+/// See also, [`for`], [`while`].
+///
+/// [`for`]: keyword.for.html
+/// [`while`]: keyword.while.html
+/// [Reference]: ../reference/expressions/loop-expr.html
+mod loop_keyword {}
+
+#[doc(keyword = "match")]
+//
+/// Control flow based on pattern matching.
+///
+/// `match` can be used to run code conditionally. Every pattern must
+/// be handled exhaustively either explicitly or by using wildcards like
+/// `_` in the `match`. Since `match` is an expression, values can also be
+/// returned.
+///
+/// ```rust
+/// let opt = Option::None::<usize>;
+/// let x = match opt {
+/// Some(int) => int,
+/// None => 10,
+/// };
+/// assert_eq!(x, 10);
+///
+/// let a_number = Option::Some(10);
+/// match a_number {
+/// Some(x) if x <= 5 => println!("0 to 5 num = {x}"),
+/// Some(x @ 6..=10) => println!("6 to 10 num = {x}"),
+/// None => panic!(),
+/// // all other numbers
+/// _ => panic!(),
+/// }
+/// ```
+///
+/// `match` can be used to gain access to the inner members of an enum
+/// and use them directly.
+///
+/// ```rust
+/// enum Outer {
+/// Double(Option<u8>, Option<String>),
+/// Single(Option<u8>),
+/// Empty
+/// }
+///
+/// let get_inner = Outer::Double(None, Some(String::new()));
+/// match get_inner {
+/// Outer::Double(None, Some(st)) => println!("{st}"),
+/// Outer::Single(opt) => println!("{opt:?}"),
+/// _ => panic!(),
+/// }
+/// ```
+///
+/// For more information on `match` and matching in general, see the [Reference].
+///
+/// [Reference]: ../reference/expressions/match-expr.html
+mod match_keyword {}
+
+#[doc(keyword = "mod")]
+//
+/// Organize code into [modules].
+///
+/// Use `mod` to create new [modules] to encapsulate code, including other
+/// modules:
+///
+/// ```
+/// mod foo {
+/// mod bar {
+/// type MyType = (u8, u8);
+/// fn baz() {}
+/// }
+/// }
+/// ```
+///
+/// Like [`struct`]s and [`enum`]s, a module and its content are private by
+/// default, inaccessible to code outside of the module.
+///
+/// To learn more about allowing access, see the documentation for the [`pub`]
+/// keyword.
+///
+/// [`enum`]: keyword.enum.html
+/// [`pub`]: keyword.pub.html
+/// [`struct`]: keyword.struct.html
+/// [modules]: ../reference/items/modules.html
+mod mod_keyword {}
+
+#[doc(keyword = "move")]
+//
+/// Capture a [closure]'s environment by value.
+///
+/// `move` converts any variables captured by reference or mutable reference
+/// to variables captured by value.
+///
+/// ```rust
+/// let data = vec![1, 2, 3];
+/// let closure = move || println!("captured {data:?} by value");
+///
+/// // data is no longer available, it is owned by the closure
+/// ```
+///
+/// Note: `move` closures may still implement [`Fn`] or [`FnMut`], even though
+/// they capture variables by `move`. This is because the traits implemented by
+/// a closure type are determined by *what* the closure does with captured
+/// values, not *how* it captures them:
+///
+/// ```rust
+/// fn create_fn() -> impl Fn() {
+/// let text = "Fn".to_owned();
+/// move || println!("This is a: {text}")
+/// }
+///
+/// let fn_plain = create_fn();
+/// fn_plain();
+/// ```
+///
+/// `move` is often used when [threads] are involved.
+///
+/// ```rust
+/// let data = vec![1, 2, 3];
+///
+/// std::thread::spawn(move || {
+/// println!("captured {data:?} by value")
+/// }).join().unwrap();
+///
+/// // data was moved to the spawned thread, so we cannot use it here
+/// ```
+///
+/// `move` is also valid before an async block.
+///
+/// ```rust
+/// let capture = "hello".to_owned();
+/// let block = async move {
+/// println!("rust says {capture} from async block");
+/// };
+/// ```
+///
+/// For more information on the `move` keyword, see the [closures][closure] section
+/// of the Rust book or the [threads] section.
+///
+/// [closure]: ../book/ch13-01-closures.html
+/// [threads]: ../book/ch16-01-threads.html#using-move-closures-with-threads
+mod move_keyword {}
+
+#[doc(keyword = "mut")]
+//
+/// A mutable variable, reference, or pointer.
+///
+/// `mut` can be used in several situations. The first is mutable variables,
+/// which can be used anywhere you can bind a value to a variable name. Some
+/// examples:
+///
+/// ```rust
+/// // A mutable variable in the parameter list of a function.
+/// fn foo(mut x: u8, y: u8) -> u8 {
+/// x += y;
+/// x
+/// }
+///
+/// // Modifying a mutable variable.
+/// # #[allow(unused_assignments)]
+/// let mut a = 5;
+/// a = 6;
+///
+/// assert_eq!(foo(3, 4), 7);
+/// assert_eq!(a, 6);
+/// ```
+///
+/// The second is mutable references. They can be created from `mut` variables and must be
+/// unique: while a mutable reference exists, no other mutable or shared reference to the same
+/// value may exist.
+///
+/// ```rust
+/// // Taking a mutable reference.
+/// fn push_two(v: &mut Vec<u8>) {
+/// v.push(2);
+/// }
+///
+/// // A mutable reference cannot be taken to a non-mutable variable.
+/// let mut v = vec![0, 1];
+/// // Passing a mutable reference.
+/// push_two(&mut v);
+///
+/// assert_eq!(v, vec![0, 1, 2]);
+/// ```
+///
+/// ```rust,compile_fail,E0502
+/// let mut v = vec![0, 1];
+/// let mut_ref_v = &mut v;
+/// ##[allow(unused)]
+/// let ref_v = &v;
+/// mut_ref_v.push(2);
+/// ```
+///
+/// Mutable raw pointers work much like mutable references, with the added
+/// possibility of not pointing to a valid object. The syntax is `*mut Type`.
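+///
+/// A tiny sketch of a mutable raw pointer in use:
+///
+/// ```rust
+/// let mut x = 10;
+/// let ptr: *mut i32 = &mut x;
+///
+/// // SAFETY: `ptr` was just created from a live `&mut i32`, so it is valid and
+/// // nothing else accesses `x` while we write through it.
+/// unsafe { *ptr += 1 };
+/// assert_eq!(x, 11);
+/// ```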
+///
+/// More information on mutable references and pointers can be found in the [Reference].
+///
+/// [Reference]: ../reference/types/pointer.html#mutable-references-mut
+mod mut_keyword {}
+
+#[doc(keyword = "pub")]
+//
+/// Make an item visible to others.
+///
+/// The keyword `pub` makes any module, function, or data structure accessible from outside of
+/// the module that defines it. The `pub` keyword may also be used in a `use` declaration to
+/// re-export an identifier from a namespace.
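+///
+/// A brief sketch of both uses, with hypothetical `outer` and `inner` modules:
+///
+/// ```rust
+/// mod outer {
+///     pub mod inner {
+///         pub fn visible() {}
+///     }
+///     // Re-export, so users of `outer` don't have to name `inner`.
+///     pub use self::inner::visible;
+/// }
+///
+/// outer::visible();
+/// outer::inner::visible();
+/// ```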
+///
+/// For more information on the `pub` keyword, please see the visibility section
+/// of the [reference] and for some examples, see [Rust by Example].
+///
+/// [reference]:../reference/visibility-and-privacy.html?highlight=pub#visibility-and-privacy
+/// [Rust by Example]:../rust-by-example/mod/visibility.html
+mod pub_keyword {}
+
+#[doc(keyword = "ref")]
+//
+/// Bind by reference during pattern matching.
+///
+/// `ref` annotates pattern bindings to make them borrow rather than move.
+/// It is **not** a part of the pattern as far as matching is concerned: it does
+/// not affect *whether* a value is matched, only *how* it is matched.
+///
+/// By default, [`match`] statements consume all they can, which can sometimes
+/// be a problem when you don't really need the value to be moved and owned:
+///
+/// ```compile_fail,E0382
+/// let maybe_name = Some(String::from("Alice"));
+/// // The variable 'maybe_name' is consumed here ...
+/// match maybe_name {
+/// Some(n) => println!("Hello, {n}"),
+/// _ => println!("Hello, world"),
+/// }
+/// // ... and is now unavailable.
+/// println!("Hello again, {}", maybe_name.unwrap_or("world".into()));
+/// ```
+///
+/// Using the `ref` keyword, the value is only borrowed, not moved, making it
+/// available for use after the [`match`] statement:
+///
+/// ```
+/// let maybe_name = Some(String::from("Alice"));
+/// // Using `ref`, the value is borrowed, not moved ...
+/// match maybe_name {
+/// Some(ref n) => println!("Hello, {n}"),
+/// _ => println!("Hello, world"),
+/// }
+/// // ... so it's available here!
+/// println!("Hello again, {}", maybe_name.unwrap_or("world".into()));
+/// ```
+///
+/// # `&` vs `ref`
+///
+/// - `&` denotes that your pattern expects a reference to an object. Hence `&`
+/// is a part of said pattern: `&Foo` matches different objects than `Foo` does.
+///
+/// - `ref` indicates that you want a reference to an unpacked value. It is not
+/// matched against: `Foo(ref foo)` matches the same objects as `Foo(foo)`.
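+///
+/// A compact sketch of the difference:
+///
+/// ```rust
+/// let tuple = (5, String::from("five"));
+///
+/// // `ref` binds `text` by reference; the `String` is not moved out of `tuple`.
+/// let (number, ref text) = tuple;
+/// assert_eq!(number, 5);
+/// assert_eq!(text, "five");
+///
+/// // `&` is part of the pattern itself: it matches a reference, so `n` is an `i32`.
+/// let &n = &number;
+/// assert_eq!(n, 5);
+/// ```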
+///
+/// See also the [Reference] for more information.
+///
+/// [`match`]: keyword.match.html
+/// [Reference]: ../reference/patterns.html#identifier-patterns
+mod ref_keyword {}
+
+#[doc(keyword = "return")]
+//
+/// Return a value from a function.
+///
+/// A `return` marks the end of an execution path in a function:
+///
+/// ```
+/// fn foo() -> i32 {
+/// return 3;
+/// }
+/// assert_eq!(foo(), 3);
+/// ```
+///
+/// `return` is not needed when the returned value is the last expression in the
+/// function. In this case the `;` is omitted:
+///
+/// ```
+/// fn foo() -> i32 {
+/// 3
+/// }
+/// assert_eq!(foo(), 3);
+/// ```
+///
+/// `return` returns from the function immediately (an "early return"):
+///
+/// ```no_run
+/// use std::fs::File;
+/// use std::io::{Error, ErrorKind, Read, Result};
+///
+/// fn main() -> Result<()> {
+/// let mut file = match File::open("foo.txt") {
+/// Ok(f) => f,
+/// Err(e) => return Err(e),
+/// };
+///
+/// let mut contents = String::new();
+/// let size = match file.read_to_string(&mut contents) {
+/// Ok(s) => s,
+/// Err(e) => return Err(e),
+/// };
+///
+/// if contents.contains("impossible!") {
+/// return Err(Error::new(ErrorKind::Other, "oh no!"));
+/// }
+///
+/// if size > 9000 {
+/// return Err(Error::new(ErrorKind::Other, "over 9000!"));
+/// }
+///
+/// assert_eq!(contents, "Hello, world!");
+/// Ok(())
+/// }
+/// ```
+mod return_keyword {}
+
+#[doc(keyword = "self")]
+//
+/// The receiver of a method, or the current module.
+///
+/// `self` is used in two situations: referencing the current module and marking
+/// the receiver of a method.
+///
+/// In paths, `self` can be used to refer to the current module, either in a
+/// [`use`] statement or in a path to access an element:
+///
+/// ```
+/// # #![allow(unused_imports)]
+/// use std::io::{self, Read};
+/// ```
+///
+/// Is functionally the same as:
+///
+/// ```
+/// # #![allow(unused_imports)]
+/// use std::io;
+/// use std::io::Read;
+/// ```
+///
+/// Using `self` to access an element in the current module:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # fn main() {}
+/// fn foo() {}
+/// fn bar() {
+/// self::foo()
+/// }
+/// ```
+///
+/// `self` as the current receiver for a method allows omitting the parameter
+/// type most of the time. With the exception of this particularity, `self` is
+/// used much like any other parameter:
+///
+/// ```
+/// struct Foo(i32);
+///
+/// impl Foo {
+/// // No `self`.
+/// fn new() -> Self {
+/// Self(0)
+/// }
+///
+/// // Consuming `self`.
+/// fn consume(self) -> Self {
+/// Self(self.0 + 1)
+/// }
+///
+/// // Borrowing `self`.
+/// fn borrow(&self) -> &i32 {
+/// &self.0
+/// }
+///
+/// // Borrowing `self` mutably.
+/// fn borrow_mut(&mut self) -> &mut i32 {
+/// &mut self.0
+/// }
+/// }
+///
+/// // This method must be called with a `Type::` prefix.
+/// let foo = Foo::new();
+/// assert_eq!(foo.0, 0);
+///
+/// // Those two calls produce the same result.
+/// let foo = Foo::consume(foo);
+/// assert_eq!(foo.0, 1);
+/// let foo = foo.consume();
+/// assert_eq!(foo.0, 2);
+///
+/// // Borrowing is handled automatically with the second syntax.
+/// let borrow_1 = Foo::borrow(&foo);
+/// let borrow_2 = foo.borrow();
+/// assert_eq!(borrow_1, borrow_2);
+///
+/// // Borrowing mutably is handled automatically too with the second syntax.
+/// let mut foo = Foo::new();
+/// *Foo::borrow_mut(&mut foo) += 1;
+/// assert_eq!(foo.0, 1);
+/// *foo.borrow_mut() += 1;
+/// assert_eq!(foo.0, 2);
+/// ```
+///
+/// Note that this automatic conversion when calling `foo.method()` is not
+/// limited to the examples above. See the [Reference] for more information.
+///
+/// [`use`]: keyword.use.html
+/// [Reference]: ../reference/items/associated-items.html#methods
+mod self_keyword {}
+
+// FIXME: Once rustdoc can handle URL conflicts on case insensitive file systems, we can remove the
+// three next lines and put back: `#[doc(keyword = "Self")]`.
+#[doc(alias = "Self")]
+#[allow(rustc::existing_doc_keyword)]
+#[doc(keyword = "SelfTy")]
+//
+/// The implementing type within a [`trait`] or [`impl`] block, or the current type within a type
+/// definition.
+///
+/// Within a type definition:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// struct Node {
+/// elem: i32,
+/// // `Self` is a `Node` here.
+/// next: Option<Box<Self>>,
+/// }
+/// ```
+///
+/// In an [`impl`] block:
+///
+/// ```
+/// struct Foo(i32);
+///
+/// impl Foo {
+/// fn new() -> Self {
+/// Self(0)
+/// }
+/// }
+///
+/// assert_eq!(Foo::new().0, Foo(0).0);
+/// ```
+///
+/// Generic parameters are implicit with `Self`:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// struct Wrap<T> {
+/// elem: T,
+/// }
+///
+/// impl<T> Wrap<T> {
+/// fn new(elem: T) -> Self {
+/// Self { elem }
+/// }
+/// }
+/// ```
+///
+/// In a [`trait`] definition and related [`impl`] block:
+///
+/// ```
+/// trait Example {
+/// fn example() -> Self;
+/// }
+///
+/// struct Foo(i32);
+///
+/// impl Example for Foo {
+/// fn example() -> Self {
+/// Self(42)
+/// }
+/// }
+///
+/// assert_eq!(Foo::example().0, Foo(42).0);
+/// ```
+///
+/// [`impl`]: keyword.impl.html
+/// [`trait`]: keyword.trait.html
+mod self_upper_keyword {}
+
+#[doc(keyword = "static")]
+//
+/// A static item is a value which is valid for the entire duration of your
+/// program (a `'static` lifetime).
+///
+/// On the surface, `static` items seem very similar to [`const`]s: both contain
+/// a value, both require type annotations and both can only be initialized with
+/// constant functions and values. However, `static`s are notably different in
+/// that they represent a location in memory. That means that you can have
+/// references to `static` items and potentially even modify them, making them
+/// essentially global variables.
+///
+/// Static items do not call [`drop`] at the end of the program.
+///
+/// There are two types of `static` items: those declared in association with
+/// the [`mut`] keyword and those without.
+///
+/// Static items cannot be moved:
+///
+/// ```rust,compile_fail,E0507
+/// static VEC: Vec<u32> = vec![];
+///
+/// fn move_vec(v: Vec<u32>) -> Vec<u32> {
+/// v
+/// }
+///
+/// // This line causes an error
+/// move_vec(VEC);
+/// ```
+///
+/// # Simple `static`s
+///
+/// Accessing non-[`mut`] `static` items is considered safe, but some
+/// restrictions apply. Most notably, the type of a `static` value needs to
+/// implement the [`Sync`] trait, ruling out interior mutability containers
+/// like [`RefCell`]. See the [Reference] for more information.
+///
+/// ```rust
+/// static FOO: [i32; 5] = [1, 2, 3, 4, 5];
+///
+/// let r1 = &FOO as *const _;
+/// let r2 = &FOO as *const _;
+/// // With a strictly read-only static, references will have the same address
+/// assert_eq!(r1, r2);
+/// // A static item can be used just like a variable in many cases
+/// println!("{FOO:?}");
+/// ```
+///
+/// # Mutable `static`s
+///
+/// If a `static` item is declared with the [`mut`] keyword, then it is allowed
+/// to be modified by the program. However, accessing mutable `static`s can
+/// cause undefined behavior in a number of ways, for example due to data races
+/// in a multithreaded context. As such, all accesses to mutable `static`s
+/// require an [`unsafe`] block.
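+///
+/// A minimal sketch of such an access, assuming a single-threaded program:
+///
+/// ```rust
+/// static mut COUNTER: u32 = 0;
+///
+/// fn increment() {
+///     // SAFETY: this sketch is single-threaded, so no data race can occur.
+///     unsafe { COUNTER += 1 };
+/// }
+///
+/// increment();
+/// // SAFETY: as above, nothing else is accessing `COUNTER`.
+/// assert_eq!(unsafe { COUNTER }, 1);
+/// ```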
+///
+/// Despite their unsafety, mutable `static`s are necessary in many contexts:
+/// they can be used to represent global state shared by the whole program or in
+/// [`extern`] blocks to bind to variables from C libraries.
+///
+/// In an [`extern`] block:
+///
+/// ```rust,no_run
+/// # #![allow(dead_code)]
+/// extern "C" {
+/// static mut ERROR_MESSAGE: *mut std::os::raw::c_char;
+/// }
+/// ```
+///
+/// Mutable `static`s, just like simple `static`s, have some restrictions that
+/// apply to them. See the [Reference] for more information.
+///
+/// [`const`]: keyword.const.html
+/// [`extern`]: keyword.extern.html
+/// [`mut`]: keyword.mut.html
+/// [`unsafe`]: keyword.unsafe.html
+/// [`RefCell`]: cell::RefCell
+/// [Reference]: ../reference/items/static-items.html
+mod static_keyword {}
+
+#[doc(keyword = "struct")]
+//
+/// A type that is composed of other types.
+///
+/// Structs in Rust come in three flavors: Structs with named fields, tuple structs, and unit
+/// structs.
+///
+/// ```rust
+/// struct Regular {
+/// field1: f32,
+/// field2: String,
+/// pub field3: bool
+/// }
+///
+/// struct Tuple(u32, String);
+///
+/// struct Unit;
+/// ```
+///
+/// Regular structs are the most commonly used. Each field defined within them has a name and a
+/// type, and once defined can be accessed using `example_struct.field` syntax. The fields of a
+/// struct share its mutability, so `foo.bar = 2;` would only be valid if `foo` was mutable. Adding
+/// `pub` to a field makes it visible to code in other modules, as well as allowing it to be
+/// directly accessed and modified.
+///
+/// Tuple structs are similar to regular structs, but their fields have no names. They are used like
+/// tuples, with deconstruction possible via `let TupleStruct(x, y) = foo;` syntax. For accessing
+/// individual variables, the same syntax is used as with regular tuples, namely `foo.0`, `foo.1`,
+/// etc, starting at zero.
+///
+/// Unit structs are most commonly used as markers. They have a size of zero bytes, but unlike empty
+/// enums they can be instantiated, making them isomorphic to the unit type `()`. Unit structs are
+/// useful when you need to implement a trait on something, but don't need to store any data inside
+/// it.
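+///
+/// For example, a sketch of a unit struct (here a hypothetical `EnglishGreeter`)
+/// used purely to carry a trait implementation:
+///
+/// ```rust
+/// trait Greeter {
+///     fn greet(&self) -> &'static str;
+/// }
+///
+/// // No data to store; the type exists only to implement the trait.
+/// struct EnglishGreeter;
+///
+/// impl Greeter for EnglishGreeter {
+///     fn greet(&self) -> &'static str {
+///         "hello"
+///     }
+/// }
+///
+/// assert_eq!(EnglishGreeter.greet(), "hello");
+/// ```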
+///
+/// # Instantiation
+///
+/// Structs can be instantiated in different ways, all of which can be mixed and
+/// matched as needed. The most common way to make a new struct is via a constructor method such as
+/// `new()`, but when that isn't available (or you're writing the constructor itself), struct
+/// literal syntax is used:
+///
+/// ```rust
+/// # struct Foo { field1: f32, field2: String, etc: bool }
+/// let example = Foo {
+/// field1: 42.0,
+/// field2: "blah".to_string(),
+/// etc: true,
+/// };
+/// ```
+///
+/// It's only possible to directly instantiate a struct using struct literal syntax when all of its
+/// fields are visible to you.
+///
+/// There are a handful of shortcuts provided to make writing constructors more convenient, the
+/// most common of which is the field init shorthand. When there is a variable and a field of the
+/// same name, the assignment can be simplified from `field: field` into simply `field`. The
+/// following example of a hypothetical constructor demonstrates this:
+///
+/// ```rust
+/// struct User {
+/// name: String,
+/// admin: bool,
+/// }
+///
+/// impl User {
+/// pub fn new(name: String) -> Self {
+/// Self {
+/// name,
+/// admin: false,
+/// }
+/// }
+/// }
+/// ```
+///
+/// Another shortcut for struct instantiation is available, used when you need to make a new
+/// struct that has the same values as most of a previous struct of the same type, called struct
+/// update syntax:
+///
+/// ```rust
+/// # struct Foo { field1: String, field2: () }
+/// # let thing = Foo { field1: "".to_string(), field2: () };
+/// let updated_thing = Foo {
+/// field1: "a new value".to_string(),
+/// ..thing
+/// };
+/// ```
+///
+/// Tuple structs are instantiated in the same way as tuples themselves, except with the struct's
+/// name as a prefix: `Foo(123, false, 0.1)`.
+///
+/// Empty structs are instantiated with just their name, and don't need anything else. `let thing =
+/// EmptyStruct;`
+///
+/// # Style conventions
+///
+/// Structs are always written in CamelCase, with few exceptions. While the trailing comma on a
+/// struct's list of fields can be omitted, it's usually kept for convenience in adding and
+/// removing fields down the line.
+///
+/// For more information on structs, take a look at the [Rust Book][book] or the
+/// [Reference][reference].
+///
+/// [`PhantomData`]: marker::PhantomData
+/// [book]: ../book/ch05-01-defining-structs.html
+/// [reference]: ../reference/items/structs.html
+mod struct_keyword {}
+
+#[doc(keyword = "super")]
+//
+/// The parent of the current [module].
+///
+/// ```rust
+/// # #![allow(dead_code)]
+/// # fn main() {}
+/// mod a {
+/// pub fn foo() {}
+/// }
+/// mod b {
+/// pub fn foo() {
+/// super::a::foo(); // call a's foo function
+/// }
+/// }
+/// ```
+///
+/// It is also possible to use `super` multiple times: `super::super::foo`,
+/// going up the ancestor chain.
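+///
+/// Sketched out, with hypothetical modules `a`, `b`, and `c`:
+///
+/// ```rust
+/// mod a {
+///     pub fn foo() {}
+///     pub mod b {
+///         pub mod c {
+///             pub fn call_foo() {
+///                 super::super::foo(); // resolves to `a::foo`
+///             }
+///         }
+///     }
+/// }
+/// # fn main() { a::b::c::call_foo(); }
+/// ```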
+///
+/// See the [Reference] for more information.
+///
+/// [module]: ../reference/items/modules.html
+/// [Reference]: ../reference/paths.html#super
+mod super_keyword {}
+
+#[doc(keyword = "trait")]
+//
+/// A common interface for a group of types.
+///
+/// A `trait` is like an interface that data types can implement. When a type
+/// implements a trait it can be treated abstractly as that trait using generics
+/// or trait objects.
+///
+/// Traits can be made up of three varieties of associated items:
+///
+/// - functions and methods
+/// - types
+/// - constants
+///
+/// Traits may also contain additional type parameters. Those type parameters
+/// or the trait itself can be constrained by other traits.
+///
+/// Traits can serve as markers or carry other logical semantics that
+/// aren't expressed through their items. When a type implements that
+/// trait it is promising to uphold its contract. [`Send`] and [`Sync`] are two
+/// such marker traits present in the standard library.
+///
+/// See the [Reference][Ref-Traits] for a lot more information on traits.
+///
+/// # Examples
+///
+/// Traits are declared using the `trait` keyword. Types can implement them
+/// using [`impl`] `Trait` [`for`] `Type`:
+///
+/// ```rust
+/// trait Zero {
+/// const ZERO: Self;
+/// fn is_zero(&self) -> bool;
+/// }
+///
+/// impl Zero for i32 {
+/// const ZERO: Self = 0;
+///
+/// fn is_zero(&self) -> bool {
+/// *self == Self::ZERO
+/// }
+/// }
+///
+/// assert_eq!(i32::ZERO, 0);
+/// assert!(i32::ZERO.is_zero());
+/// assert!(!4.is_zero());
+/// ```
+///
+/// With an associated type:
+///
+/// ```rust
+/// trait Builder {
+/// type Built;
+///
+/// fn build(&self) -> Self::Built;
+/// }
+/// ```
+///
+/// Traits can be generic, with constraints or without:
+///
+/// ```rust
+/// trait MaybeFrom<T> {
+/// fn maybe_from(value: T) -> Option<Self>
+/// where
+/// Self: Sized;
+/// }
+/// ```
+///
+/// Traits can build upon the requirements of other traits. In the example
+/// below `Iterator` is a **supertrait** and `ThreeIterator` is a **subtrait**:
+///
+/// ```rust
+/// trait ThreeIterator: std::iter::Iterator {
+/// fn next_three(&mut self) -> Option<[Self::Item; 3]>;
+/// }
+/// ```
+///
+/// Traits can be used in functions, as parameters:
+///
+/// ```rust
+/// # #![allow(dead_code)]
+/// fn debug_iter<I: Iterator>(it: I) where I::Item: std::fmt::Debug {
+/// for elem in it {
+/// println!("{elem:#?}");
+/// }
+/// }
+///
+/// // u8_len_1, u8_len_2 and u8_len_3 are equivalent
+///
+/// fn u8_len_1(val: impl Into<Vec<u8>>) -> usize {
+/// val.into().len()
+/// }
+///
+/// fn u8_len_2<T: Into<Vec<u8>>>(val: T) -> usize {
+/// val.into().len()
+/// }
+///
+/// fn u8_len_3<T>(val: T) -> usize
+/// where
+/// T: Into<Vec<u8>>,
+/// {
+/// val.into().len()
+/// }
+/// ```
+///
+/// Or as return types:
+///
+/// ```rust
+/// # #![allow(dead_code)]
+/// fn from_zero_to(v: u8) -> impl Iterator<Item = u8> {
+/// (0..v).into_iter()
+/// }
+/// ```
+///
+/// The use of the [`impl`] keyword in this position allows the function writer
+/// to hide the concrete type as an implementation detail which can change
+/// without breaking users' code.
+///
+/// # Trait objects
+///
+/// A *trait object* is an opaque value of another type that implements a set of
+/// traits. A trait object implements all specified traits as well as their
+/// supertraits (if any).
+///
+/// The syntax is the following: `dyn BaseTrait + AutoTrait1 + ... + AutoTraitN`.
+/// Only one `BaseTrait` can be used so this will not compile:
+///
+/// ```rust,compile_fail,E0225
+/// trait A {}
+/// trait B {}
+///
+/// let _: Box<dyn A + B>;
+/// ```
+///
+/// Neither will this, which is a syntax error:
+///
+/// ```rust,compile_fail
+/// trait A {}
+/// trait B {}
+///
+/// let _: Box<dyn A + dyn B>;
+/// ```
+///
+/// On the other hand, this is correct:
+///
+/// ```rust
+/// trait A {}
+///
+/// let _: Box<dyn A + Send + Sync>;
+/// ```
+///
+/// The [Reference][Ref-Trait-Objects] has more information about trait objects,
+/// their limitations and the differences between editions.
+///
+/// # Unsafe traits
+///
+/// Some traits may be unsafe to implement. Using the [`unsafe`] keyword in
+/// front of the trait's declaration is used to mark this:
+///
+/// ```rust
+/// unsafe trait UnsafeTrait {}
+///
+/// unsafe impl UnsafeTrait for i32 {}
+/// ```
+///
+/// # Differences between the 2015 and 2018 editions
+///
+/// In the 2015 edition, parameter patterns were not required in trait function declarations:
+///
+/// ```rust,edition2015
+/// # #![allow(anonymous_parameters)]
+/// trait Tr {
+/// fn f(i32);
+/// }
+/// ```
+///
+/// This behavior is no longer valid in edition 2018.
+///
+/// [`for`]: keyword.for.html
+/// [`impl`]: keyword.impl.html
+/// [`unsafe`]: keyword.unsafe.html
+/// [Ref-Traits]: ../reference/items/traits.html
+/// [Ref-Trait-Objects]: ../reference/types/trait-object.html
+mod trait_keyword {}
+
+#[doc(keyword = "true")]
+//
+/// A value of type [`bool`] representing logical **true**.
+///
+/// Logically `true` is not equal to [`false`].
+///
+/// ## Control structures that check for **true**
+///
+/// Several of Rust's control structures will check for a `bool` condition evaluating to **true**.
+///
+/// * The condition in an [`if`] expression must be of type `bool`.
+/// Whenever that condition evaluates to **true**, the `if` expression takes
+/// on the value of the first block. If however, the condition evaluates
+/// to `false`, the expression takes on value of the `else` block if there is one.
+///
+/// * [`while`] is another control flow construct expecting a `bool`-typed condition.
+/// As long as the condition evaluates to **true**, the `while` loop will continually
+/// evaluate its associated block.
+///
+/// * [`match`] arms can have guard clauses on them, which must evaluate to **true**
+///   for the arm to be taken (see the sketch below).
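+///
+/// A short sketch exercising each of these constructs, purely for illustration:
+///
+/// ```rust
+/// let value = true;
+///
+/// if value {
+///     println!("the `if` branch was taken");
+/// }
+///
+/// let mut counter = 0;
+/// while counter < 3 {
+///     counter += 1;
+/// }
+/// assert_eq!(counter, 3);
+///
+/// match value {
+///     x if x => println!("the guard evaluated to true"),
+///     _ => {}
+/// }
+/// ```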
+///
+/// [`if`]: keyword.if.html
+/// [`while`]: keyword.while.html
+/// [`match`]: ../reference/expressions/match-expr.html#match-guards
+/// [`false`]: keyword.false.html
+mod true_keyword {}
+
+#[doc(keyword = "type")]
+//
+/// Define an alias for an existing type.
+///
+/// The syntax is `type Name = ExistingType;`.
+///
+/// # Examples
+///
+/// `type` does **not** create a new type:
+///
+/// ```rust
+/// type Meters = u32;
+/// type Kilograms = u32;
+///
+/// let m: Meters = 3;
+/// let k: Kilograms = 3;
+///
+/// assert_eq!(m, k);
+/// ```
+///
+/// In traits, `type` is used to declare an [associated type]:
+///
+/// ```rust
+/// trait Iterator {
+/// // associated type declaration
+/// type Item;
+/// fn next(&mut self) -> Option<Self::Item>;
+/// }
+///
+/// struct Once<T>(Option<T>);
+///
+/// impl<T> Iterator for Once<T> {
+/// // associated type definition
+/// type Item = T;
+/// fn next(&mut self) -> Option<Self::Item> {
+/// self.0.take()
+/// }
+/// }
+/// ```
+///
+/// [`trait`]: keyword.trait.html
+/// [associated type]: ../reference/items/associated-items.html#associated-types
+mod type_keyword {}
+
+#[doc(keyword = "unsafe")]
+//
+/// Code or interfaces whose [memory safety] cannot be verified by the type
+/// system.
+///
+/// The `unsafe` keyword has two uses: to declare the existence of contracts the
+/// compiler can't check (`unsafe fn` and `unsafe trait`), and to declare that a
+/// programmer has checked that these contracts have been upheld (`unsafe {}`
+/// and `unsafe impl`, but also `unsafe fn` -- see below). They are not mutually
+/// exclusive, as can be seen in `unsafe fn`.
+///
+/// # Unsafe abilities
+///
+/// **No matter what, Safe Rust can't cause Undefined Behavior**. This is
+/// referred to as [soundness]: a well-typed program actually has the desired
+/// properties. The [Nomicon][nomicon-soundness] has a more detailed explanation
+/// on the subject.
+///
+/// To ensure soundness, Safe Rust is restricted enough that it can be
+/// automatically checked. Sometimes, however, it is necessary to write code
+/// that is correct for reasons which are too clever for the compiler to
+/// understand. In those cases, you need to use Unsafe Rust.
+///
+/// Here are the abilities Unsafe Rust has in addition to Safe Rust:
+///
+/// - Dereference [raw pointers]
+/// - Implement `unsafe` [`trait`]s
+/// - Call `unsafe` functions
+/// - Mutate [`static`]s (including [`extern`]al ones)
+/// - Access fields of [`union`]s
+///
+/// However, this extra power comes with extra responsibilities: it is now up to
+/// you to ensure soundness. The `unsafe` keyword helps by clearly marking the
+/// pieces of code that need to worry about this.
+///
+/// ## The different meanings of `unsafe`
+///
+/// Not all uses of `unsafe` are equivalent: some are here to mark the existence
+/// of a contract the programmer must check, others are to say "I have checked
+/// the contract, go ahead and do this". The following
+/// [discussion on Rust Internals] has more in-depth explanations about this but
+/// here is a summary of the main points:
+///
+/// - `unsafe fn`: calling this function means abiding by a contract the
+/// compiler cannot enforce.
+/// - `unsafe trait`: implementing the [`trait`] means abiding by a
+/// contract the compiler cannot enforce.
+/// - `unsafe {}`: the contract necessary to call the operations inside the
+/// block has been checked by the programmer and is guaranteed to be respected.
+/// - `unsafe impl`: the contract necessary to implement the trait has been
+/// checked by the programmer and is guaranteed to be respected.
+///
+/// `unsafe fn` also acts like an `unsafe {}` block
+/// around the code inside the function. This means it is not just a signal to
+/// the caller, but also promises that the preconditions for the operations
+/// inside the function are upheld. Mixing these two meanings can be confusing
+/// and [proposal]s exist to use `unsafe {}` blocks inside such functions when
+/// performing `unsafe` operations.
+///
+/// See the [Rustonomicon] and the [Reference] for more information.
+///
+/// # Examples
+///
+/// ## Marking elements as `unsafe`
+///
+/// `unsafe` can be used on functions. Note that functions and statics declared
+/// in [`extern`] blocks are implicitly marked as `unsafe` (but not functions
+/// declared as `extern "something" fn ...`). Mutable statics are always unsafe,
+/// wherever they are declared. Methods can also be declared as `unsafe`:
+///
+/// ```rust
+/// # #![allow(dead_code)]
+/// static mut FOO: &str = "hello";
+///
+/// unsafe fn unsafe_fn() {}
+///
+/// extern "C" {
+/// fn unsafe_extern_fn();
+/// static BAR: *mut u32;
+/// }
+///
+/// trait SafeTraitWithUnsafeMethod {
+/// unsafe fn unsafe_method(&self);
+/// }
+///
+/// struct S;
+///
+/// impl S {
+/// unsafe fn unsafe_method_on_struct() {}
+/// }
+/// ```
+///
+/// Traits can also be declared as `unsafe`:
+///
+/// ```rust
+/// unsafe trait UnsafeTrait {}
+/// ```
+///
+/// Since `unsafe fn` and `unsafe trait` indicate that there is a safety
+/// contract that the compiler cannot enforce, documenting it is important. The
+/// standard library has many examples of this, like the following, which is an
+/// extract from [`Vec::set_len`]. The `# Safety` section explains the contract
+/// that must be fulfilled to safely call the function.
+///
+/// ```rust,ignore (stub-to-show-doc-example)
+/// /// Forces the length of the vector to `new_len`.
+/// ///
+/// /// This is a low-level operation that maintains none of the normal
+/// /// invariants of the type. Normally changing the length of a vector
+/// /// is done using one of the safe operations instead, such as
+/// /// `truncate`, `resize`, `extend`, or `clear`.
+/// ///
+/// /// # Safety
+/// ///
+/// /// - `new_len` must be less than or equal to `capacity()`.
+/// /// - The elements at `old_len..new_len` must be initialized.
+/// pub unsafe fn set_len(&mut self, new_len: usize)
+/// ```
+///
+/// ## Using `unsafe {}` blocks and `impl`s
+///
+/// Performing `unsafe` operations requires an `unsafe {}` block:
+///
+/// ```rust
+/// # #![allow(dead_code)]
+/// /// Dereference the given pointer.
+/// ///
+/// /// # Safety
+/// ///
+/// /// `ptr` must be aligned and must not be dangling.
+/// unsafe fn deref_unchecked(ptr: *const i32) -> i32 {
+/// *ptr
+/// }
+///
+/// let a = 3;
+/// let b = &a as *const _;
+/// // SAFETY: `a` has not been dropped and references are always aligned,
+/// // so `b` is a valid address.
+/// unsafe { assert_eq!(*b, deref_unchecked(b)); };
+/// ```
+///
+/// Traits marked as `unsafe` must be [`impl`]emented using `unsafe impl`. This
+/// makes a guarantee to other `unsafe` code that the implementation satisfies
+/// the trait's safety contract. The [Send] and [Sync] traits are examples of
+/// this behaviour in the standard library.
+///
+/// ```rust
+/// /// Implementors of this trait must guarantee an element is always
+/// /// accessible with index 3.
+/// unsafe trait ThreeIndexable<T> {
+/// /// Returns a reference to the element with index 3 in `&self`.
+/// fn three(&self) -> &T;
+/// }
+///
+/// // The implementation of `ThreeIndexable` for `[T; 4]` is `unsafe`
+/// // because the implementor must abide by a contract the compiler cannot
+/// // check but as a programmer we know there will always be a valid element
+/// // at index 3 to access.
+/// unsafe impl<T> ThreeIndexable<T> for [T; 4] {
+/// fn three(&self) -> &T {
+/// // SAFETY: implementing the trait means there always is an element
+/// // with index 3 accessible.
+/// unsafe { self.get_unchecked(3) }
+/// }
+/// }
+///
+/// let a = [1, 2, 4, 8];
+/// assert_eq!(a.three(), &8);
+/// ```
+///
+/// [`extern`]: keyword.extern.html
+/// [`trait`]: keyword.trait.html
+/// [`static`]: keyword.static.html
+/// [`union`]: keyword.union.html
+/// [`impl`]: keyword.impl.html
+/// [raw pointers]: ../reference/types/pointer.html
+/// [memory safety]: ../book/ch19-01-unsafe-rust.html
+/// [Rustonomicon]: ../nomicon/index.html
+/// [nomicon-soundness]: ../nomicon/safe-unsafe-meaning.html
+/// [soundness]: https://rust-lang.github.io/unsafe-code-guidelines/glossary.html#soundness-of-code--of-a-library
+/// [Reference]: ../reference/unsafety.html
+/// [proposal]: https://github.com/rust-lang/rfcs/pull/2585
+/// [discussion on Rust Internals]: https://internals.rust-lang.org/t/what-does-unsafe-mean/6696
+mod unsafe_keyword {}
+
+#[doc(keyword = "use")]
+//
+/// Import or rename items from other crates or modules.
+///
+/// The `use` keyword is most commonly used to shorten the path required to refer to a module item.
+/// The keyword may appear in modules, blocks and even functions, usually at the top.
+///
+/// The most basic usage of the keyword is `use path::to::item;`,
+/// though a number of convenient shortcuts are supported:
+///
+/// * Simultaneously binding a list of paths with a common prefix,
+/// using the glob-like brace syntax `use a::b::{c, d, e::f, g::h::i};`
+/// * Simultaneously binding a list of paths with a common prefix and their common parent module,
+/// using the [`self`] keyword, such as `use a::b::{self, c, d::e};`
+/// * Rebinding the target name as a new local name, using the syntax `use p::q::r as x;`.
+/// This can also be used with the last two features: `use a::b::{self as ab, c as abc}`.
+/// * Binding all paths matching a given prefix,
+/// using the asterisk wildcard syntax `use a::b::*;`.
+/// * Nesting groups of the previous features multiple times,
+/// such as `use a::b::{self as ab, c, d::{*, e::f}};`
+/// * Reexporting with visibility modifiers such as `pub use a::b;`
+/// * Importing with `_` to only import the methods of a trait without binding it to a name
+/// (to avoid conflict for example): `use ::std::io::Read as _;`.
+///
+/// Using path qualifiers like [`crate`], [`super`] or [`self`] is supported: `use crate::a::b;`.
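+///
+/// As an illustrative sketch combining several of the shortcuts above (the imported items are
+/// arbitrary standard-library paths chosen only to demonstrate the syntax):
+///
+/// ```rust
+/// # #![allow(unused_imports)]
+/// use std::collections::{BTreeMap, BTreeSet}; // brace list with a common prefix
+/// use std::io::{self, Read as _};             // `self` plus an anonymous trait import
+/// use std::fmt::Result as FmtResult;          // rebinding under a new local name
+/// ```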
+///
+/// Note that when the wildcard `*` is used on a type, it does not import its methods (though
+/// for `enum`s it imports the variants, as shown in the example below).
+///
+/// ```compile_fail,edition2018
+/// enum ExampleEnum {
+/// VariantA,
+/// VariantB,
+/// }
+///
+/// impl ExampleEnum {
+/// fn new() -> Self {
+/// Self::VariantA
+/// }
+/// }
+///
+/// use ExampleEnum::*;
+///
+/// // Compiles.
+/// let _ = VariantA;
+///
+/// // Does not compile!
+/// let n = new();
+/// ```
+///
+/// For more information on `use` and paths in general, see the [Reference].
+///
+/// The differences in paths and the `use` keyword between the 2015 and 2018 editions
+/// can also be found in the [Reference].
+///
+/// [`crate`]: keyword.crate.html
+/// [`self`]: keyword.self.html
+/// [`super`]: keyword.super.html
+/// [Reference]: ../reference/items/use-declarations.html
+mod use_keyword {}
+
+#[doc(keyword = "where")]
+//
+/// Add constraints that must be upheld to use an item.
+///
+/// `where` allows specifying constraints on lifetime and generic parameters.
+/// The [RFC] introducing `where` contains detailed information about the
+/// keyword.
+///
+/// # Examples
+///
+/// `where` can be used for constraints with traits:
+///
+/// ```rust
+/// fn new<T: Default>() -> T {
+/// T::default()
+/// }
+///
+/// fn new_where<T>() -> T
+/// where
+/// T: Default,
+/// {
+/// T::default()
+/// }
+///
+/// assert_eq!(0.0, new());
+/// assert_eq!(0.0, new_where());
+///
+/// assert_eq!(0, new());
+/// assert_eq!(0, new_where());
+/// ```
+///
+/// `where` can also be used for lifetimes.
+///
+/// This compiles because `longer` outlives `shorter`, thus the constraint is
+/// respected:
+///
+/// ```rust
+/// fn select<'short, 'long>(s1: &'short str, s2: &'long str, second: bool) -> &'short str
+/// where
+/// 'long: 'short,
+/// {
+/// if second { s2 } else { s1 }
+/// }
+///
+/// let outer = String::from("Long living ref");
+/// let longer = &outer;
+/// {
+/// let inner = String::from("Short living ref");
+/// let shorter = &inner;
+///
+/// assert_eq!(select(shorter, longer, false), shorter);
+/// assert_eq!(select(shorter, longer, true), longer);
+/// }
+/// ```
+///
+/// On the other hand, this will not compile because the `where 'b: 'a` clause
+/// is missing: the `'b` lifetime is not known to live at least as long as `'a`
+/// which means this function cannot ensure it always returns a valid reference:
+///
+/// ```rust,compile_fail
+/// fn select<'a, 'b>(s1: &'a str, s2: &'b str, second: bool) -> &'a str
+/// {
+/// if second { s2 } else { s1 }
+/// }
+/// ```
+///
+/// `where` can also be used to express more complicated constraints that cannot
+/// be written with the `<T: Trait>` syntax:
+///
+/// ```rust
+/// fn first_or_default<I>(mut i: I) -> I::Item
+/// where
+/// I: Iterator,
+/// I::Item: Default,
+/// {
+/// i.next().unwrap_or_else(I::Item::default)
+/// }
+///
+/// assert_eq!(first_or_default([1, 2, 3].into_iter()), 1);
+/// assert_eq!(first_or_default(Vec::<i32>::new().into_iter()), 0);
+/// ```
+///
+/// `where` is available anywhere generic and lifetime parameters are available,
+/// as can be seen with the [`Cow`](crate::borrow::Cow) type from the standard
+/// library:
+///
+/// ```rust
+/// # #![allow(dead_code)]
+/// pub enum Cow<'a, B>
+/// where
+/// B: 'a + ToOwned + ?Sized,
+/// {
+/// Borrowed(&'a B),
+/// Owned(<B as ToOwned>::Owned),
+/// }
+/// ```
+///
+/// [RFC]: https://github.com/rust-lang/rfcs/blob/master/text/0135-where.md
+mod where_keyword {}
+
+// 2018 Edition keywords
+
+#[doc(alias = "promise")]
+#[doc(keyword = "async")]
+//
+/// Return a [`Future`] instead of blocking the current thread.
+///
+/// Use `async` in front of a `fn`, a closure, or a block to turn the marked code into a `Future`.
+/// As such, the code will not be run immediately, but will only be evaluated when the returned
+/// future is [`.await`]ed.
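+///
+/// A minimal sketch (the function and variable names are only for illustration):
+///
+/// ```rust
+/// # #![allow(dead_code, unused_variables)]
+/// async fn say_hello() -> String {
+///     String::from("hello")
+/// }
+///
+/// // Calling the function only constructs a `Future`; its body has not run yet.
+/// let greeting = say_hello();
+/// // The body runs once `greeting` is `.await`ed inside another async context
+/// // (or otherwise polled by an executor), which is not shown here.
+/// ```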
+///
+/// We have written an [async book] detailing `async`/`await` and trade-offs compared to using threads.
+///
+/// ## Editions
+///
+/// `async` is a keyword from the 2018 edition onwards.
+///
+/// It is available for use in stable Rust from version 1.39 onwards.
+///
+/// [`Future`]: future::Future
+/// [`.await`]: ../std/keyword.await.html
+/// [async book]: https://rust-lang.github.io/async-book/
+mod async_keyword {}
+
+#[doc(keyword = "await")]
+//
+/// Suspend execution until the result of a [`Future`] is ready.
+///
+/// `.await`ing a future will suspend the current function's execution until the executor
+/// has run the future to completion.
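+///
+/// A minimal sketch (the functions here are illustrative and never actually executed):
+///
+/// ```rust
+/// # #![allow(dead_code)]
+/// async fn one() -> u32 { 1 }
+///
+/// async fn add_one() -> u32 {
+///     // Execution of `add_one` is suspended here until the future returned by
+///     // `one()` has been driven to completion, then resumes with its output.
+///     one().await + 1
+/// }
+/// ```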
+///
+/// Read the [async book] for details on how [`async`]/`await` and executors work.
+///
+/// ## Editions
+///
+/// `await` is a keyword from the 2018 edition onwards.
+///
+/// It is available for use in stable Rust from version 1.39 onwards.
+///
+/// [`Future`]: future::Future
+/// [async book]: https://rust-lang.github.io/async-book/
+/// [`async`]: ../std/keyword.async.html
+mod await_keyword {}
+
+#[doc(keyword = "dyn")]
+//
+/// `dyn` is a prefix of a [trait object]'s type.
+///
+/// The `dyn` keyword is used to highlight that calls to methods on the associated `Trait`
+/// are [dynamically dispatched]. To use the trait this way, it must be 'object safe'.
+///
+/// Unlike generic parameters or `impl Trait`, the compiler does not know the concrete type that
+/// is being passed. That is, the type has been [erased].
+/// As such, a `dyn Trait` reference contains _two_ pointers.
+/// One pointer goes to the data (e.g., an instance of a struct).
+/// Another pointer goes to a map of method call names to function pointers
+/// (known as a virtual method table or vtable).
+///
+/// At run-time, when a method needs to be called on the `dyn Trait`, the vtable is consulted to get
+/// the function pointer and then that function pointer is called.
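+///
+/// A minimal sketch of dynamic dispatch (the trait and types are illustrative only):
+///
+/// ```rust
+/// trait Greet {
+///     fn hello(&self) -> String;
+/// }
+///
+/// struct English;
+///
+/// impl Greet for English {
+///     fn hello(&self) -> String {
+///         String::from("Hello!")
+///     }
+/// }
+///
+/// // `&dyn Greet` is a wide reference: one pointer to the `English` value and one
+/// // pointer to the vtable for `impl Greet for English`; the call to `hello` is
+/// // resolved through that vtable at run time.
+/// fn greet(greeter: &dyn Greet) -> String {
+///     greeter.hello()
+/// }
+///
+/// assert_eq!(greet(&English), "Hello!");
+/// ```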
+///
+/// See the Reference for more information on [trait objects][ref-trait-obj]
+/// and [object safety][ref-obj-safety].
+///
+/// ## Trade-offs
+///
+/// The above indirection is the additional runtime cost of calling a function on a `dyn Trait`.
+/// Methods called by dynamic dispatch generally cannot be inlined by the compiler.
+///
+/// However, `dyn Trait` is likely to produce smaller code than `impl Trait` / generic parameters as
+/// the method won't be duplicated for each concrete type.
+///
+/// [trait object]: ../book/ch17-02-trait-objects.html
+/// [dynamically dispatched]: https://en.wikipedia.org/wiki/Dynamic_dispatch
+/// [ref-trait-obj]: ../reference/types/trait-object.html
+/// [ref-obj-safety]: ../reference/items/traits.html#object-safety
+/// [erased]: https://en.wikipedia.org/wiki/Type_erasure
+mod dyn_keyword {}
+
+#[doc(keyword = "union")]
+//
+/// The [Rust equivalent of a C-style union][union].
+///
+/// A `union` looks like a [`struct`] in terms of declaration, but all of its
+/// fields exist in the same memory, superimposed over one another. For instance,
+/// if we wanted some bits in memory that we sometimes interpret as a `u32` and
+/// sometimes as an `f32`, we could write:
+///
+/// ```rust
+/// union IntOrFloat {
+/// i: u32,
+/// f: f32,
+/// }
+///
+/// let mut u = IntOrFloat { f: 1.0 };
+/// // Reading the fields of a union is always unsafe
+/// assert_eq!(unsafe { u.i }, 1065353216);
+/// // Updating through any of the fields will modify all of them
+/// u.i = 1073741824;
+/// assert_eq!(unsafe { u.f }, 2.0);
+/// ```
+///
+/// # Matching on unions
+///
+/// It is possible to use pattern matching on `union`s. A single field name must
+/// be used and it must match the name of one of the `union`'s fields.
+/// Like reading from a `union`, pattern matching on a `union` requires `unsafe`.
+///
+/// ```rust
+/// union IntOrFloat {
+/// i: u32,
+/// f: f32,
+/// }
+///
+/// let u = IntOrFloat { f: 1.0 };
+///
+/// unsafe {
+/// match u {
+/// IntOrFloat { i: 10 } => println!("Found exactly ten!"),
+/// // Matching the field `f` provides an `f32`.
+/// IntOrFloat { f } => println!("Found f = {f} !"),
+/// }
+/// }
+/// ```
+///
+/// # References to union fields
+///
+/// All fields in a `union` occupy the same place in memory, which means
+/// borrowing one field borrows the entire `union` for the same lifetime:
+///
+/// ```rust,compile_fail,E0502
+/// union IntOrFloat {
+/// i: u32,
+/// f: f32,
+/// }
+///
+/// let mut u = IntOrFloat { f: 1.0 };
+///
+/// let f = unsafe { &u.f };
+/// // This will not compile because the field has already been borrowed, even
+/// // if only immutably
+/// let i = unsafe { &mut u.i };
+///
+/// *i = 10;
+/// println!("f = {f} and i = {i}");
+/// ```
+///
+/// See the [Reference][union] for more information on `union`s.
+///
+/// [`struct`]: keyword.struct.html
+/// [union]: ../reference/items/unions.html
+mod union_keyword {}
diff --git a/library/std/src/lazy.rs b/library/std/src/lazy.rs
new file mode 100644
index 000000000..f8c06c3f9
--- /dev/null
+++ b/library/std/src/lazy.rs
@@ -0,0 +1 @@
+//! Lazy values and one-time initialization of static data.
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
new file mode 100644
index 000000000..20d25a608
--- /dev/null
+++ b/library/std/src/lib.rs
@@ -0,0 +1,633 @@
+//! # The Rust Standard Library
+//!
+//! The Rust Standard Library is the foundation of portable Rust software, a
+//! set of minimal and battle-tested shared abstractions for the [broader Rust
+//! ecosystem][crates.io]. It offers core types, like [`Vec<T>`] and
+//! [`Option<T>`], library-defined [operations on language
+//! primitives](#primitives), [standard macros](#macros), [I/O] and
+//! [multithreading], among [many other things][other].
+//!
+//! `std` is available to all Rust crates by default. Therefore, the
+//! standard library can be accessed in [`use`] statements through the path
+//! `std`, as in [`use std::env`].
+//!
+//! # How to read this documentation
+//!
+//! If you already know the name of what you are looking for, the fastest way to
+//! find it is to use the <a href="#" onclick="focusSearchBar();">search
+//! bar</a> at the top of the page.
+//!
+//! Otherwise, you may want to jump to one of these useful sections:
+//!
+//! * [`std::*` modules](#modules)
+//! * [Primitive types](#primitives)
+//! * [Standard macros](#macros)
+//! * [The Rust Prelude]
+//!
+//! If this is your first time, the documentation for the standard library is
+//! written to be casually perused. Clicking on interesting things should
+//! generally lead you to interesting places. Still, there are important bits
+//! you don't want to miss, so read on for a tour of the standard library and
+//! its documentation!
+//!
+//! Once you are familiar with the contents of the standard library you may
+//! begin to find the verbosity of the prose distracting. At this stage in your
+//! development you may want to press the `[-]` button near the top of the
+//! page to collapse it into a more skimmable view.
+//!
+//! While you are looking at that `[-]` button also notice the `source`
+//! link. Rust's API documentation comes with the source code and you are
+//! encouraged to read it. The standard library source is generally high
+//! quality and a peek behind the curtains is often enlightening.
+//!
+//! # What is in the standard library documentation?
+//!
+//! First of all, The Rust Standard Library is divided into a number of focused
+//! modules, [all listed further down this page](#modules). These modules are
+//! the bedrock upon which all of Rust is forged, and they have mighty names
+//! like [`std::slice`] and [`std::cmp`]. Modules' documentation typically
+//! includes an overview of the module along with examples, and are a smart
+//! place to start familiarizing yourself with the library.
+//!
+//! Second, implicit methods on [primitive types] are documented here. This can
+//! be a source of confusion for two reasons:
+//!
+//! 1. While primitives are implemented by the compiler, the standard library
+//! implements methods directly on the primitive types (and it is the only
+//! library that does so), which are [documented in the section on
+//! primitives](#primitives).
+//! 2. The standard library exports many modules *with the same name as
+//! primitive types*. These define additional items related to the primitive
+//! type, but not the all-important methods.
+//!
+//! So for example there is a [page for the primitive type
+//! `i32`](primitive::i32) that lists all the methods that can be called on
+//! 32-bit integers (very useful), and there is a [page for the module
+//! `std::i32`] that documents the constant values [`MIN`] and [`MAX`] (rarely
+//! useful).
+//!
+//! Note the documentation for the primitives [`str`] and [`[T]`][prim@slice] (also
+//! called 'slice'). Many method calls on [`String`] and [`Vec<T>`] are actually
+//! calls to methods on [`str`] and [`[T]`][prim@slice] respectively, via [deref
+//! coercions][deref-coercions].
+//!
+//! Third, the standard library defines [The Rust Prelude], a small collection
+//! of items - mostly traits - that are imported into every module of every
+//! crate. The traits in the prelude are pervasive, making the prelude
+//! documentation a good entry point to learning about the library.
+//!
+//! And finally, the standard library exports a number of standard macros, and
+//! [lists them on this page](#macros) (technically, not all of the standard
+//! macros are defined by the standard library - some are defined by the
+//! compiler - but they are documented here the same). Like the prelude, the
+//! standard macros are imported by default into all crates.
+//!
+//! # Contributing changes to the documentation
+//!
+//! Check out the rust contribution guidelines [here](
+//! https://rustc-dev-guide.rust-lang.org/contributing.html#writing-documentation).
+//! The source for this documentation can be found on
+//! [GitHub](https://github.com/rust-lang/rust).
+//! To contribute changes, make sure you read the guidelines first, then submit
+//! pull-requests for your suggested changes.
+//!
+//! Contributions are appreciated! If you see a part of the docs that can be
+//! improved, submit a PR, or chat with us first on [Discord][rust-discord]
+//! #docs.
+//!
+//! # A Tour of The Rust Standard Library
+//!
+//! The rest of this crate documentation is dedicated to pointing out notable
+//! features of The Rust Standard Library.
+//!
+//! ## Containers and collections
+//!
+//! The [`option`] and [`result`] modules define optional and error-handling
+//! types, [`Option<T>`] and [`Result<T, E>`]. The [`iter`] module defines
+//! Rust's iterator trait, [`Iterator`], which works with the [`for`] loop to
+//! access collections.
+//!
+//! The standard library exposes three common ways to deal with contiguous
+//! regions of memory:
+//!
+//! * [`Vec<T>`] - A heap-allocated *vector* that is resizable at runtime.
+//! * [`[T; N]`][prim@array] - An inline *array* with a fixed size at compile time.
+//! * [`[T]`][prim@slice] - A dynamically sized *slice* into any other kind of contiguous
+//! storage, whether heap-allocated or not.
+//!
+//! Slices can only be handled through some kind of *pointer*, and as such come
+//! in many flavors such as:
+//!
+//! * `&[T]` - *shared slice*
+//! * `&mut [T]` - *mutable slice*
+//! * [`Box<[T]>`][owned slice] - *owned slice*
+//!
+//! [`str`], a UTF-8 string slice, is a primitive type, and the standard library
+//! defines many methods for it. Rust [`str`]s are typically accessed as
+//! immutable references: `&str`. Use the owned [`String`] for building and
+//! mutating strings.
+//!
+//! For converting to strings use the [`format!`] macro, and for converting from
+//! strings use the [`FromStr`] trait.
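+//!
+//! For example (a small sketch, not an exhaustive tour of either API):
+//!
+//! ```
+//! let formatted = format!("{} and {}", 1, 2);
+//! assert_eq!(formatted, "1 and 2");
+//!
+//! let parsed: i32 = "5".parse().unwrap(); // `parse` is powered by `FromStr`
+//! assert_eq!(parsed, 5);
+//! ```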
+//!
+//! Data may be shared by placing it in a reference-counted box or the [`Rc`]
+//! type, and if further contained in a [`Cell`] or [`RefCell`], may be mutated
+//! as well as shared. Likewise, in a concurrent setting it is common to pair an
+//! atomically-reference-counted box, [`Arc`], with a [`Mutex`] to get the same
+//! effect.
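+//!
+//! For instance, a brief sketch of the `Arc<Mutex<T>>` pairing (the values are chosen only for
+//! illustration):
+//!
+//! ```
+//! use std::sync::{Arc, Mutex};
+//!
+//! let shared = Arc::new(Mutex::new(0));
+//! let handle = Arc::clone(&shared);
+//! *handle.lock().unwrap() += 1;
+//! assert_eq!(*shared.lock().unwrap(), 1);
+//! ```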
+//!
+//! The [`collections`] module defines maps, sets, linked lists and other
+//! typical collection types, including the common [`HashMap<K, V>`].
+//!
+//! ## Platform abstractions and I/O
+//!
+//! Besides basic data types, the standard library is largely concerned with
+//! abstracting over differences in common platforms, most notably Windows and
+//! Unix derivatives.
+//!
+//! Common types of I/O, including [files], [TCP], and [UDP], are defined in the
+//! [`io`], [`fs`], and [`net`] modules.
+//!
+//! The [`thread`] module contains Rust's threading abstractions. [`sync`]
+//! contains further primitive shared memory types, including [`atomic`] and
+//! [`mpsc`], which contains the channel types for message passing.
+//!
+//! [I/O]: io
+//! [`MIN`]: i32::MIN
+//! [`MAX`]: i32::MAX
+//! [page for the module `std::i32`]: crate::i32
+//! [TCP]: net::TcpStream
+//! [The Rust Prelude]: prelude
+//! [UDP]: net::UdpSocket
+//! [`Arc`]: sync::Arc
+//! [owned slice]: boxed
+//! [`Cell`]: cell::Cell
+//! [`FromStr`]: str::FromStr
+//! [`HashMap<K, V>`]: collections::HashMap
+//! [`Mutex`]: sync::Mutex
+//! [`Option<T>`]: option::Option
+//! [`Rc`]: rc::Rc
+//! [`RefCell`]: cell::RefCell
+//! [`Result<T, E>`]: result::Result
+//! [`Vec<T>`]: vec::Vec
+//! [`atomic`]: sync::atomic
+//! [`for`]: ../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+//! [`str`]: prim@str
+//! [`mpsc`]: sync::mpsc
+//! [`std::cmp`]: cmp
+//! [`std::slice`]: mod@slice
+//! [`use std::env`]: env/index.html
+//! [`use`]: ../book/ch07-02-defining-modules-to-control-scope-and-privacy.html
+//! [crates.io]: https://crates.io
+//! [deref-coercions]: ../book/ch15-02-deref.html#implicit-deref-coercions-with-functions-and-methods
+//! [files]: fs::File
+//! [multithreading]: thread
+//! [other]: #what-is-in-the-standard-library-documentation
+//! [primitive types]: ../book/ch03-02-data-types.html
+//! [rust-discord]: https://discord.gg/rust-lang
+//! [array]: prim@array
+//! [slice]: prim@slice
+#![cfg_attr(not(feature = "restricted-std"), stable(feature = "rust1", since = "1.0.0"))]
+#![cfg_attr(feature = "restricted-std", unstable(feature = "restricted_std", issue = "none"))]
+#![doc(
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
+ test(no_crate_inject, attr(deny(warnings))),
+ test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
+)]
+#![doc(cfg_hide(
+ not(test),
+ not(any(test, bootstrap)),
+ no_global_oom_handling,
+ not(no_global_oom_handling)
+))]
+// Don't link to std. We are std.
+#![no_std]
+#![warn(deprecated_in_future)]
+#![warn(missing_docs)]
+#![warn(missing_debug_implementations)]
+#![allow(explicit_outlives_requirements)]
+#![allow(unused_lifetimes)]
+// Tell the compiler to link to either panic_abort or panic_unwind
+#![needs_panic_runtime]
+// Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind`
+#![cfg_attr(not(bootstrap), deny(ffi_unwind_calls))]
+// std may use features in a platform-specific way
+#![allow(unused_features)]
+#![cfg_attr(test, feature(internal_output_capture, print_internals, update_panic_count, rt))]
+#![cfg_attr(
+ all(target_vendor = "fortanix", target_env = "sgx"),
+ feature(slice_index_methods, coerce_unsized, sgx_platform)
+)]
+#![deny(rustc::existing_doc_keyword)]
+//
+// Language features:
+#![feature(alloc_error_handler)]
+#![feature(allocator_internals)]
+#![feature(allow_internal_unsafe)]
+#![feature(allow_internal_unstable)]
+#![feature(box_syntax)]
+#![feature(c_unwind)]
+#![feature(cfg_target_thread_local)]
+#![feature(concat_idents)]
+#![feature(const_mut_refs)]
+#![feature(const_trait_impl)]
+#![feature(decl_macro)]
+#![feature(deprecated_suggestion)]
+#![feature(doc_cfg)]
+#![feature(doc_cfg_hide)]
+#![feature(doc_masked)]
+#![feature(doc_notable_trait)]
+#![feature(dropck_eyepatch)]
+#![feature(exhaustive_patterns)]
+#![feature(intra_doc_pointers)]
+#![feature(label_break_value)]
+#![feature(lang_items)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(linkage)]
+#![feature(min_specialization)]
+#![feature(must_not_suspend)]
+#![feature(needs_panic_runtime)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(platform_intrinsics)]
+#![feature(prelude_import)]
+#![feature(rustc_attrs)]
+#![feature(rustdoc_internals)]
+#![feature(staged_api)]
+#![feature(thread_local)]
+#![feature(try_blocks)]
+//
+// Library features (core):
+#![feature(array_error_internals)]
+#![feature(atomic_mut_ptr)]
+#![feature(char_error_internals)]
+#![feature(char_internals)]
+#![feature(core_intrinsics)]
+#![feature(cstr_from_bytes_until_nul)]
+#![feature(cstr_internals)]
+#![feature(duration_checked_float)]
+#![feature(duration_constants)]
+#![feature(exact_size_is_empty)]
+#![feature(exclusive_wrapper)]
+#![feature(extend_one)]
+#![feature(float_minimum_maximum)]
+#![feature(hasher_prefixfree_extras)]
+#![feature(hashmap_internals)]
+#![feature(int_error_internals)]
+#![feature(is_some_with)]
+#![feature(maybe_uninit_slice)]
+#![feature(maybe_uninit_write_slice)]
+#![feature(mixed_integer_ops)]
+#![feature(nonnull_slice_from_raw_parts)]
+#![feature(panic_can_unwind)]
+#![feature(panic_info_message)]
+#![feature(panic_internals)]
+#![feature(portable_simd)]
+#![feature(prelude_2024)]
+#![feature(provide_any)]
+#![feature(ptr_as_uninit)]
+#![feature(raw_os_nonzero)]
+#![feature(slice_internals)]
+#![feature(slice_ptr_get)]
+#![feature(std_internals)]
+#![feature(str_internals)]
+#![feature(strict_provenance)]
+//
+// Library features (alloc):
+#![feature(alloc_layout_extra)]
+#![feature(allocator_api)]
+#![feature(get_mut_unchecked)]
+#![feature(map_try_insert)]
+#![feature(new_uninit)]
+#![feature(thin_box)]
+#![feature(try_reserve_kind)]
+#![feature(vec_into_raw_parts)]
+#![feature(slice_concat_trait)]
+//
+// Library features (unwind):
+#![feature(panic_unwind)]
+//
+// Only for re-exporting:
+#![feature(assert_matches)]
+#![feature(async_iterator)]
+#![feature(c_variadic)]
+#![feature(cfg_accessible)]
+#![feature(cfg_eval)]
+#![feature(concat_bytes)]
+#![feature(const_format_args)]
+#![feature(core_panic)]
+#![feature(custom_test_frameworks)]
+#![feature(edition_panic)]
+#![feature(format_args_nl)]
+#![feature(log_syntax)]
+#![feature(once_cell)]
+#![feature(saturating_int_impl)]
+#![feature(stdsimd)]
+#![feature(test)]
+#![feature(trace_macros)]
+//
+// Only used in tests/benchmarks:
+#![feature(bench_black_box)]
+//
+// Only for const-ness:
+#![feature(const_io_structs)]
+#![feature(const_ip)]
+#![feature(const_ipv4)]
+#![feature(const_ipv6)]
+#![feature(const_socketaddr)]
+#![feature(thread_local_internals)]
+//
+#![default_lib_allocator]
+
+// Explicitly import the prelude. The compiler uses this same unstable attribute
+// to import the prelude implicitly when building crates that depend on std.
+#[prelude_import]
+#[allow(unused)]
+use prelude::rust_2021::*;
+
+// Access to Bencher, etc.
+#[cfg(test)]
+extern crate test;
+
+#[allow(unused_imports)] // macros from `alloc` are not used on all platforms
+#[macro_use]
+extern crate alloc as alloc_crate;
+#[doc(masked)]
+#[allow(unused_extern_crates)]
+extern crate libc;
+
+// We always need an unwinder currently for backtraces
+#[doc(masked)]
+#[allow(unused_extern_crates)]
+extern crate unwind;
+
+#[doc(masked)]
+#[allow(unused_extern_crates)]
+#[cfg(feature = "miniz_oxide")]
+extern crate miniz_oxide;
+
+// During testing, this crate is not actually the "real" std library, but rather
+// it links to the real std library, which was compiled from this same source
+// code. So any lang items std defines are conditionally excluded (or else they
+// would generate duplicate lang item errors), and any globals it defines are
+// _not_ the globals used by "real" std. So this import, defined only during
+// testing gives test-std access to real-std lang items and globals. See #2912
+#[cfg(test)]
+extern crate std as realstd;
+
+// The standard macros that are not built-in to the compiler.
+#[macro_use]
+mod macros;
+
+// The runtime entry point and a few unstable public functions used by the
+// compiler
+#[macro_use]
+pub mod rt;
+
+// The Rust prelude
+pub mod prelude;
+
+// Public module declarations and re-exports
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::borrow;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::boxed;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::fmt;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::format;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::rc;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::slice;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::str;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::string;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::vec;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::any;
+#[stable(feature = "core_array", since = "1.36.0")]
+pub use core::array;
+#[unstable(feature = "async_iterator", issue = "79024")]
+pub use core::async_iter;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::cell;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::char;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::clone;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::cmp;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::convert;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::default;
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub use core::future;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::hash;
+#[stable(feature = "core_hint", since = "1.27.0")]
+pub use core::hint;
+#[stable(feature = "i128", since = "1.26.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::i128;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::i16;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::i32;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::i64;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::i8;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::intrinsics;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::isize;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::iter;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::marker;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::mem;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::ops;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::option;
+#[stable(feature = "pin", since = "1.33.0")]
+pub use core::pin;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::ptr;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::result;
+#[stable(feature = "i128", since = "1.26.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::u128;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::u16;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::u32;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::u64;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::u8;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::usize;
+
+pub mod f32;
+pub mod f64;
+
+#[macro_use]
+pub mod thread;
+pub mod ascii;
+pub mod backtrace;
+pub mod collections;
+pub mod env;
+pub mod error;
+pub mod ffi;
+pub mod fs;
+pub mod io;
+pub mod net;
+pub mod num;
+pub mod os;
+pub mod panic;
+pub mod path;
+pub mod process;
+pub mod sync;
+pub mod time;
+
+#[unstable(feature = "once_cell", issue = "74465")]
+pub mod lazy;
+
+// Pull in `std_float` crate into libstd. The contents of
+// `std_float` are in a different repository: rust-lang/portable-simd.
+#[path = "../../portable-simd/crates/std_float/src/lib.rs"]
+#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
+#[allow(rustdoc::bare_urls)]
+#[unstable(feature = "portable_simd", issue = "86656")]
+mod std_float;
+
+#[doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
+#[unstable(feature = "portable_simd", issue = "86656")]
+pub mod simd {
+ #[doc(inline)]
+ pub use crate::std_float::StdFloat;
+ #[doc(inline)]
+ pub use core::simd::*;
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub mod task {
+ //! Types and Traits for working with asynchronous tasks.
+
+ #[doc(inline)]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub use core::task::*;
+
+ #[doc(inline)]
+ #[stable(feature = "wake_trait", since = "1.51.0")]
+ pub use alloc::task::*;
+}
+
+#[doc = include_str!("../../stdarch/crates/core_arch/src/core_arch_docs.md")]
+#[stable(feature = "simd_arch", since = "1.27.0")]
+pub mod arch {
+ #[stable(feature = "simd_arch", since = "1.27.0")]
+ // The `no_inline`-attribute is required to make the documentation of all
+ // targets available.
+ // See https://github.com/rust-lang/rust/pull/57808#issuecomment-457390549 for
+ // more information.
+ #[doc(no_inline)] // Note (#82861): required for correct documentation
+ pub use core::arch::*;
+
+ #[stable(feature = "simd_aarch64", since = "1.60.0")]
+ pub use std_detect::is_aarch64_feature_detected;
+ #[stable(feature = "simd_x86", since = "1.27.0")]
+ pub use std_detect::is_x86_feature_detected;
+ #[unstable(feature = "stdsimd", issue = "48556")]
+ pub use std_detect::{
+ is_arm_feature_detected, is_mips64_feature_detected, is_mips_feature_detected,
+ is_powerpc64_feature_detected, is_powerpc_feature_detected, is_riscv_feature_detected,
+ };
+}
+
+// This was stabilized in the crate root so we have to keep it there.
+#[stable(feature = "simd_x86", since = "1.27.0")]
+pub use std_detect::is_x86_feature_detected;
+
+// Platform-abstraction modules
+mod sys;
+mod sys_common;
+
+pub mod alloc;
+
+// Private support modules
+mod panicking;
+
+#[path = "../../backtrace/src/lib.rs"]
+#[allow(dead_code, unused_attributes)]
+mod backtrace_rs;
+
+// Re-export macros defined in libcore.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated, deprecated_in_future)]
+pub use core::{
+ assert_eq, assert_ne, debug_assert, debug_assert_eq, debug_assert_ne, matches, todo, r#try,
+ unimplemented, unreachable, write, writeln,
+};
+
+// Re-export built-in macros defined through libcore.
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow(deprecated)]
+pub use core::{
+ assert, assert_matches, cfg, column, compile_error, concat, concat_idents, const_format_args,
+ env, file, format_args, format_args_nl, include, include_bytes, include_str, line, log_syntax,
+ module_path, option_env, stringify, trace_macros,
+};
+
+#[unstable(
+ feature = "concat_bytes",
+ issue = "87555",
+ reason = "`concat_bytes` is not stable enough for use and is subject to change"
+)]
+pub use core::concat_bytes;
+
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use core::primitive;
+
+// Include a number of private modules that exist solely to provide
+// the rustdoc documentation for primitive types. Using `include!`
+// because rustdoc only looks for these modules at the crate level.
+include!("primitive_docs.rs");
+
+// Include a number of private modules that exist solely to provide
+// the rustdoc documentation for the existing keywords. Using `include!`
+// because rustdoc only looks for these modules at the crate level.
+include!("keyword_docs.rs");
+
+// This is required to avoid an unstable error when `restricted-std` is not
+// enabled. The use of #![feature(restricted_std)] in rustc-std-workspace-std
+// is unconditional, so the unstable feature needs to be defined somewhere.
+#[unstable(feature = "restricted_std", issue = "none")]
+mod __restricted_std_workaround {}
+
+mod sealed {
+ /// This trait being unreachable from outside the crate
+ /// prevents outside implementations of our extension traits.
+ /// This allows adding more trait methods in the future.
+ #[unstable(feature = "sealed", issue = "none")]
+ pub trait Sealed {}
+}
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
new file mode 100644
index 000000000..0cb21ef53
--- /dev/null
+++ b/library/std/src/macros.rs
@@ -0,0 +1,333 @@
+//! Standard library macros
+//!
+//! This module contains a set of macros which are exported from the standard
+//! library. Each macro is available for use when linking against the standard
+//! library.
+
+#[doc = include_str!("../../core/src/macros/panic.md")]
+#[macro_export]
+#[rustc_builtin_macro(std_panic)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow_internal_unstable(edition_panic)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "std_panic_macro")]
+macro_rules! panic {
+ // Expands to either `$crate::panic::panic_2015` or `$crate::panic::panic_2021`
+ // depending on the edition of the caller.
+ ($($arg:tt)*) => {
+ /* compiler built-in */
+ };
+}
+
+/// Prints to the standard output.
+///
+/// Equivalent to the [`println!`] macro except that a newline is not printed at
+/// the end of the message.
+///
+/// Note that stdout is frequently line-buffered by default so it may be
+/// necessary to use [`io::stdout().flush()`][flush] to ensure the output is emitted
+/// immediately.
+///
+/// Use `print!` only for the primary output of your program. Use
+/// [`eprint!`] instead to print error and progress messages.
+///
+/// [flush]: crate::io::Write::flush
+/// [`println!`]: crate::println
+/// [`eprint!`]: crate::eprint
+///
+/// # Panics
+///
+/// Panics if writing to `io::stdout()` fails.
+///
+/// # Examples
+///
+/// ```
+/// use std::io::{self, Write};
+///
+/// print!("this ");
+/// print!("will ");
+/// print!("be ");
+/// print!("on ");
+/// print!("the ");
+/// print!("same ");
+/// print!("line ");
+///
+/// io::stdout().flush().unwrap();
+///
+/// print!("this string has a newline, why not choose println! instead?\n");
+///
+/// io::stdout().flush().unwrap();
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "print_macro")]
+#[allow_internal_unstable(print_internals)]
+macro_rules! print {
+ ($($arg:tt)*) => {{
+ $crate::io::_print($crate::format_args!($($arg)*));
+ }};
+}
+
+/// Prints to the standard output, with a newline.
+///
+/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
+/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
+///
+/// This macro uses the same syntax as [`format!`], but writes to the standard output instead.
+/// See [`std::fmt`] for more information.
+///
+/// Use `println!` only for the primary output of your program. Use
+/// [`eprintln!`] instead to print error and progress messages.
+///
+/// [`std::fmt`]: crate::fmt
+/// [`eprintln!`]: crate::eprintln
+///
+/// # Panics
+///
+/// Panics if writing to [`io::stdout`] fails.
+///
+/// [`io::stdout`]: crate::io::stdout
+///
+/// # Examples
+///
+/// ```
+/// println!(); // prints just a newline
+/// println!("hello there!");
+/// println!("format {} arguments", "some");
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "println_macro")]
+#[allow_internal_unstable(print_internals, format_args_nl)]
+macro_rules! println {
+ () => {
+ $crate::print!("\n")
+ };
+ ($($arg:tt)*) => {{
+ $crate::io::_print($crate::format_args_nl!($($arg)*));
+ }};
+}
+
+/// Prints to the standard error.
+///
+/// Equivalent to the [`print!`] macro, except that output goes to
+/// [`io::stderr`] instead of [`io::stdout`]. See [`print!`] for
+/// example usage.
+///
+/// Use `eprint!` only for error and progress messages. Use `print!`
+/// instead for the primary output of your program.
+///
+/// [`io::stderr`]: crate::io::stderr
+/// [`io::stdout`]: crate::io::stdout
+///
+/// # Panics
+///
+/// Panics if writing to `io::stderr` fails.
+///
+/// # Examples
+///
+/// ```
+/// eprint!("Error: Could not complete task");
+/// ```
+#[macro_export]
+#[stable(feature = "eprint", since = "1.19.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "eprint_macro")]
+#[allow_internal_unstable(print_internals)]
+macro_rules! eprint {
+ ($($arg:tt)*) => {{
+ $crate::io::_eprint($crate::format_args!($($arg)*));
+ }};
+}
+
+/// Prints to the standard error, with a newline.
+///
+/// Equivalent to the [`println!`] macro, except that output goes to
+/// [`io::stderr`] instead of [`io::stdout`]. See [`println!`] for
+/// example usage.
+///
+/// Use `eprintln!` only for error and progress messages. Use `println!`
+/// instead for the primary output of your program.
+///
+/// [`io::stderr`]: crate::io::stderr
+/// [`io::stdout`]: crate::io::stdout
+/// [`println!`]: crate::println
+///
+/// # Panics
+///
+/// Panics if writing to `io::stderr` fails.
+///
+/// # Examples
+///
+/// ```
+/// eprintln!("Error: Could not complete task");
+/// ```
+#[macro_export]
+#[stable(feature = "eprint", since = "1.19.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "eprintln_macro")]
+#[allow_internal_unstable(print_internals, format_args_nl)]
+macro_rules! eprintln {
+ () => {
+ $crate::eprint!("\n")
+ };
+ ($($arg:tt)*) => {{
+ $crate::io::_eprint($crate::format_args_nl!($($arg)*));
+ }};
+}
+
+/// Prints and returns the value of a given expression for quick and dirty
+/// debugging.
+///
+/// An example:
+///
+/// ```rust
+/// let a = 2;
+/// let b = dbg!(a * 2) + 1;
+/// // ^-- prints: [src/main.rs:2] a * 2 = 4
+/// assert_eq!(b, 5);
+/// ```
+///
+/// The macro works by using the `Debug` implementation of the type of
+/// the given expression to print the value to [stderr] along with the
+/// source location of the macro invocation as well as the source code
+/// of the expression.
+///
+/// Invoking the macro on an expression moves and takes ownership of it
+/// before returning the evaluated expression unchanged. If the type
+/// of the expression does not implement `Copy` and you don't want
+/// to give up ownership, you can instead borrow with `dbg!(&expr)`
+/// for some expression `expr`.
+///
+/// The `dbg!` macro works exactly the same in release builds.
+/// This is useful when debugging issues that only occur in release
+/// builds or when debugging in release mode is significantly faster.
+///
+/// Note that the macro is intended as a debugging tool and therefore you
+/// should avoid having uses of it in version control for long periods
+/// (other than in tests and similar).
+/// Debug output from production code is better done with other facilities
+/// such as the [`debug!`] macro from the [`log`] crate.
+///
+/// # Stability
+///
+/// The exact output printed by this macro should not be relied upon
+/// and is subject to future changes.
+///
+/// # Panics
+///
+/// Panics if writing to `io::stderr` fails.
+///
+/// # Further examples
+///
+/// With a method call:
+///
+/// ```rust
+/// fn foo(n: usize) {
+/// if let Some(_) = dbg!(n.checked_sub(4)) {
+/// // ...
+/// }
+/// }
+///
+/// foo(3)
+/// ```
+///
+/// This prints to [stderr]:
+///
+/// ```text,ignore
+/// [src/main.rs:4] n.checked_sub(4) = None
+/// ```
+///
+/// Naive factorial implementation:
+///
+/// ```rust
+/// fn factorial(n: u32) -> u32 {
+/// if dbg!(n <= 1) {
+/// dbg!(1)
+/// } else {
+/// dbg!(n * factorial(n - 1))
+/// }
+/// }
+///
+/// dbg!(factorial(4));
+/// ```
+///
+/// This prints to [stderr]:
+///
+/// ```text,ignore
+/// [src/main.rs:3] n <= 1 = false
+/// [src/main.rs:3] n <= 1 = false
+/// [src/main.rs:3] n <= 1 = false
+/// [src/main.rs:3] n <= 1 = true
+/// [src/main.rs:4] 1 = 1
+/// [src/main.rs:5] n * factorial(n - 1) = 2
+/// [src/main.rs:5] n * factorial(n - 1) = 6
+/// [src/main.rs:5] n * factorial(n - 1) = 24
+/// [src/main.rs:11] factorial(4) = 24
+/// ```
+///
+/// The `dbg!(..)` macro moves the input:
+///
+/// ```compile_fail
+/// /// A wrapper around `usize`, which importantly does not implement `Copy`.
+/// #[derive(Debug)]
+/// struct NoCopy(usize);
+///
+/// let a = NoCopy(42);
+/// let _ = dbg!(a); // <-- `a` is moved here.
+/// let _ = dbg!(a); // <-- `a` is moved again; error!
+/// ```
+///
+/// You can also use `dbg!()` without a value to just print the
+/// file and line whenever it's reached.
+///
+/// Finally, if you want to `dbg!(..)` multiple values, it will treat them as
+/// a tuple (and return it, too):
+///
+/// ```
+/// assert_eq!(dbg!(1usize, 2u32), (1, 2));
+/// ```
+///
+/// However, a single argument with a trailing comma will still not be treated
+/// as a tuple, following the convention of ignoring trailing commas in macro
+/// invocations. You can use a 1-tuple directly if you need one:
+///
+/// ```
+/// assert_eq!(1, dbg!(1u32,)); // trailing comma ignored
+/// assert_eq!((1,), dbg!((1u32,))); // 1-tuple
+/// ```
+///
+/// [stderr]: https://en.wikipedia.org/wiki/Standard_streams#Standard_error_(stderr)
+/// [`debug!`]: https://docs.rs/log/*/log/macro.debug.html
+/// [`log`]: https://crates.io/crates/log
+#[macro_export]
+#[cfg_attr(not(test), rustc_diagnostic_item = "dbg_macro")]
+#[stable(feature = "dbg_macro", since = "1.32.0")]
+macro_rules! dbg {
+ // NOTE: We cannot use `concat!` to make a static string as a format argument
+ // of `eprintln!` because `file!` could contain a `{` or
+ // `$val` expression could be a block (`{ .. }`), in which case the `eprintln!`
+ // will be malformed.
+ () => {
+ $crate::eprintln!("[{}:{}]", $crate::file!(), $crate::line!())
+ };
+ ($val:expr $(,)?) => {
+ // Use of `match` here is intentional because it affects the lifetimes
+ // of temporaries - https://stackoverflow.com/a/48732525/1063961
+ match $val {
+ tmp => {
+ $crate::eprintln!("[{}:{}] {} = {:#?}",
+ $crate::file!(), $crate::line!(), $crate::stringify!($val), &tmp);
+ tmp
+ }
+ }
+ };
+ ($($val:expr),+ $(,)?) => {
+ ($($crate::dbg!($val)),+,)
+ };
+}
+
+#[cfg(test)]
+macro_rules! assert_approx_eq {
+ ($a:expr, $b:expr) => {{
+ let (a, b) = (&$a, &$b);
+ assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b);
+ }};
+}
diff --git a/library/std/src/net/addr.rs b/library/std/src/net/addr.rs
new file mode 100644
index 000000000..53fee952a
--- /dev/null
+++ b/library/std/src/net/addr.rs
@@ -0,0 +1,988 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cmp::Ordering;
+use crate::fmt;
+use crate::hash;
+use crate::io::{self, Write};
+use crate::iter;
+use crate::mem;
+use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+use crate::option;
+use crate::slice;
+use crate::sys::net::netc as c;
+use crate::sys_common::net::LookupHost;
+use crate::sys_common::{FromInner, IntoInner};
+use crate::vec;
+
+/// An internet socket address, either IPv4 or IPv6.
+///
+/// Internet socket addresses consist of an [IP address], a 16-bit port number, as well
+/// as possibly some version-dependent additional information. See [`SocketAddrV4`]'s and
+/// [`SocketAddrV6`]'s respective documentation for more details.
+///
+/// The size of a `SocketAddr` instance may vary depending on the target operating
+/// system.
+///
+/// [IP address]: IpAddr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+///
+/// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+///
+/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket));
+/// assert_eq!(socket.port(), 8080);
+/// assert_eq!(socket.is_ipv4(), true);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum SocketAddr {
+ /// An IPv4 socket address.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ V4(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV4),
+ /// An IPv6 socket address.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ V6(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV6),
+}
+
+/// An IPv4 socket address.
+///
+/// IPv4 socket addresses consist of an [`IPv4` address] and a 16-bit port number, as
+/// stated in [IETF RFC 793].
+///
+/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses.
+///
+/// The size of a `SocketAddrV4` struct may vary depending on the target operating
+/// system. Do not assume that this type has the same memory layout as the underlying
+/// system representation.
+///
+/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793
+/// [`IPv4` address]: Ipv4Addr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{Ipv4Addr, SocketAddrV4};
+///
+/// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+///
+/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket));
+/// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1));
+/// assert_eq!(socket.port(), 8080);
+/// ```
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SocketAddrV4 {
+ ip: Ipv4Addr,
+ port: u16,
+}
+
+/// An IPv6 socket address.
+///
+/// IPv6 socket addresses consist of an [`IPv6` address], a 16-bit port number, as well
+/// as fields containing the traffic class, the flow label, and a scope identifier
+/// (see [IETF RFC 2553, Section 3.3] for more details).
+///
+/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses.
+///
+/// The size of a `SocketAddrV6` struct may vary depending on the target operating
+/// system. Do not assume that this type has the same memory layout as the underlying
+/// system representation.
+///
+/// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+/// [`IPv6` address]: Ipv6Addr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{Ipv6Addr, SocketAddrV6};
+///
+/// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+///
+/// assert_eq!("[2001:db8::1]:8080".parse(), Ok(socket));
+/// assert_eq!(socket.ip(), &Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1));
+/// assert_eq!(socket.port(), 8080);
+/// ```
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SocketAddrV6 {
+ ip: Ipv6Addr,
+ port: u16,
+ flowinfo: u32,
+ scope_id: u32,
+}
+
+impl SocketAddr {
+ /// Creates a new socket address from an [IP address] and a port number.
+ ///
+ /// [IP address]: IpAddr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ #[must_use]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn new(ip: IpAddr, port: u16) -> SocketAddr {
+ match ip {
+ IpAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(a, port)),
+ IpAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new(a, port, 0, 0)),
+ }
+ }
+
+ /// Returns the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+ /// ```
+ #[must_use]
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn ip(&self) -> IpAddr {
+ match *self {
+ SocketAddr::V4(ref a) => IpAddr::V4(*a.ip()),
+ SocketAddr::V6(ref a) => IpAddr::V6(*a.ip()),
+ }
+ }
+
+ /// Changes the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// socket.set_ip(IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1)));
+ /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1)));
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_ip(&mut self, new_ip: IpAddr) {
+ // `match (*self, new_ip)` would have us mutate a copy of self only to throw it away.
+ match (self, new_ip) {
+ (&mut SocketAddr::V4(ref mut a), IpAddr::V4(new_ip)) => a.set_ip(new_ip),
+ (&mut SocketAddr::V6(ref mut a), IpAddr::V6(new_ip)) => a.set_ip(new_ip),
+ (self_, new_ip) => *self_ = Self::new(new_ip, self_.port()),
+ }
+ }
+
+ /// Returns the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn port(&self) -> u16 {
+ match *self {
+ SocketAddr::V4(ref a) => a.port(),
+ SocketAddr::V6(ref a) => a.port(),
+ }
+ }
+
+ /// Changes the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// socket.set_port(1025);
+ /// assert_eq!(socket.port(), 1025);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_port(&mut self, new_port: u16) {
+ match *self {
+ SocketAddr::V4(ref mut a) => a.set_port(new_port),
+ SocketAddr::V6(ref mut a) => a.set_port(new_port),
+ }
+ }
+
+ /// Returns [`true`] if the [IP address] in this `SocketAddr` is an
+ /// [`IPv4` address], and [`false`] otherwise.
+ ///
+ /// [IP address]: IpAddr
+ /// [`IPv4` address]: IpAddr::V4
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ /// assert_eq!(socket.is_ipv4(), true);
+ /// assert_eq!(socket.is_ipv6(), false);
+ /// ```
+ #[must_use]
+ #[stable(feature = "sockaddr_checker", since = "1.16.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn is_ipv4(&self) -> bool {
+ matches!(*self, SocketAddr::V4(_))
+ }
+
+ /// Returns [`true`] if the [IP address] in this `SocketAddr` is an
+ /// [`IPv6` address], and [`false`] otherwise.
+ ///
+ /// [IP address]: IpAddr
+ /// [`IPv6` address]: IpAddr::V6
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr, SocketAddr};
+ ///
+ /// let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 0, 1)), 8080);
+ /// assert_eq!(socket.is_ipv4(), false);
+ /// assert_eq!(socket.is_ipv6(), true);
+ /// ```
+ #[must_use]
+ #[stable(feature = "sockaddr_checker", since = "1.16.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn is_ipv6(&self) -> bool {
+ matches!(*self, SocketAddr::V6(_))
+ }
+}
+
+impl SocketAddrV4 {
+ /// Creates a new socket address from an [`IPv4` address] and a port number.
+ ///
+ /// [`IPv4` address]: Ipv4Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn new(ip: Ipv4Addr, port: u16) -> SocketAddrV4 {
+ SocketAddrV4 { ip, port }
+ }
+
+ /// Returns the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1));
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn ip(&self) -> &Ipv4Addr {
+ &self.ip
+ }
+
+ /// Changes the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// socket.set_ip(Ipv4Addr::new(192, 168, 0, 1));
+ /// assert_eq!(socket.ip(), &Ipv4Addr::new(192, 168, 0, 1));
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_ip(&mut self, new_ip: Ipv4Addr) {
+ self.ip = new_ip;
+ }
+
+ /// Returns the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn port(&self) -> u16 {
+ self.port
+ }
+
+ /// Changes the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV4, Ipv4Addr};
+ ///
+ /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080);
+ /// socket.set_port(4242);
+ /// assert_eq!(socket.port(), 4242);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_port(&mut self, new_port: u16) {
+ self.port = new_port;
+ }
+}
+
+impl SocketAddrV6 {
+ /// Creates a new socket address from an [`IPv6` address], a 16-bit port number,
+ /// and the `flowinfo` and `scope_id` fields.
+ ///
+ /// For more information on the meaning and layout of the `flowinfo` and `scope_id`
+ /// parameters, see [IETF RFC 2553, Section 3.3].
+ ///
+ /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+ /// [`IPv6` address]: Ipv6Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn new(ip: Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> SocketAddrV6 {
+ SocketAddrV6 { ip, port, flowinfo, scope_id }
+ }
+
+ /// Returns the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// assert_eq!(socket.ip(), &Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn ip(&self) -> &Ipv6Addr {
+ &self.ip
+ }
+
+ /// Changes the IP address associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// socket.set_ip(Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0));
+ /// assert_eq!(socket.ip(), &Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0));
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_ip(&mut self, new_ip: Ipv6Addr) {
+ self.ip = new_ip;
+ }
+
+ /// Returns the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// assert_eq!(socket.port(), 8080);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn port(&self) -> u16 {
+ self.port
+ }
+
+ /// Changes the port number associated with this socket address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0);
+ /// socket.set_port(4242);
+ /// assert_eq!(socket.port(), 4242);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_port(&mut self, new_port: u16) {
+ self.port = new_port;
+ }
+
+ /// Returns the flow information associated with this address.
+ ///
+ /// This information corresponds to the `sin6_flowinfo` field in C's `netinet/in.h`,
+ /// as specified in [IETF RFC 2553, Section 3.3].
+ /// It combines information about the flow label and the traffic class as specified
+ /// in [IETF RFC 2460], respectively [Section 6] and [Section 7].
+ ///
+ /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+ /// [IETF RFC 2460]: https://tools.ietf.org/html/rfc2460
+ /// [Section 6]: https://tools.ietf.org/html/rfc2460#section-6
+ /// [Section 7]: https://tools.ietf.org/html/rfc2460#section-7
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0);
+ /// assert_eq!(socket.flowinfo(), 10);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn flowinfo(&self) -> u32 {
+ self.flowinfo
+ }
+
+ /// Changes the flow information associated with this socket address.
+ ///
+ /// See [`SocketAddrV6::flowinfo`]'s documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0);
+ /// socket.set_flowinfo(56);
+ /// assert_eq!(socket.flowinfo(), 56);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_flowinfo(&mut self, new_flowinfo: u32) {
+ self.flowinfo = new_flowinfo;
+ }
+
+ /// Returns the scope ID associated with this address.
+ ///
+ /// This information corresponds to the `sin6_scope_id` field in C's `netinet/in.h`,
+ /// as specified in [IETF RFC 2553, Section 3.3].
+ ///
+ /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78);
+ /// assert_eq!(socket.scope_id(), 78);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")]
+ pub const fn scope_id(&self) -> u32 {
+ self.scope_id
+ }
+
+ /// Changes the scope ID associated with this socket address.
+ ///
+ /// See [`SocketAddrV6::scope_id`]'s documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{SocketAddrV6, Ipv6Addr};
+ ///
+ /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78);
+ /// socket.set_scope_id(42);
+ /// assert_eq!(socket.scope_id(), 42);
+ /// ```
+ #[stable(feature = "sockaddr_setters", since = "1.9.0")]
+ pub fn set_scope_id(&mut self, new_scope_id: u32) {
+ self.scope_id = new_scope_id;
+ }
+}
+
+impl FromInner<c::sockaddr_in> for SocketAddrV4 {
+ fn from_inner(addr: c::sockaddr_in) -> SocketAddrV4 {
+ SocketAddrV4 { ip: Ipv4Addr::from_inner(addr.sin_addr), port: u16::from_be(addr.sin_port) }
+ }
+}
+
+impl FromInner<c::sockaddr_in6> for SocketAddrV6 {
+ fn from_inner(addr: c::sockaddr_in6) -> SocketAddrV6 {
+ SocketAddrV6 {
+ ip: Ipv6Addr::from_inner(addr.sin6_addr),
+ port: u16::from_be(addr.sin6_port),
+ flowinfo: addr.sin6_flowinfo,
+ scope_id: addr.sin6_scope_id,
+ }
+ }
+}
+
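+// In the `IntoInner` conversions below, the remaining platform-specific fields of
+// the C structs (e.g. `sin_zero`, or `sin_len` on BSD-like targets) are left
+// zero-filled via `mem::zeroed()`.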
+impl IntoInner<c::sockaddr_in> for SocketAddrV4 {
+ fn into_inner(self) -> c::sockaddr_in {
+ c::sockaddr_in {
+ sin_family: c::AF_INET as c::sa_family_t,
+ sin_port: self.port.to_be(),
+ sin_addr: self.ip.into_inner(),
+ ..unsafe { mem::zeroed() }
+ }
+ }
+}
+
+impl IntoInner<c::sockaddr_in6> for SocketAddrV6 {
+ fn into_inner(self) -> c::sockaddr_in6 {
+ c::sockaddr_in6 {
+ sin6_family: c::AF_INET6 as c::sa_family_t,
+ sin6_port: self.port.to_be(),
+ sin6_addr: self.ip.into_inner(),
+ sin6_flowinfo: self.flowinfo,
+ sin6_scope_id: self.scope_id,
+ ..unsafe { mem::zeroed() }
+ }
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<SocketAddrV4> for SocketAddr {
+ /// Converts a [`SocketAddrV4`] into a [`SocketAddr::V4`].
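+    ///
+    /// A short sketch of the conversion:
+    ///
+    /// ```
+    /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
+    ///
+    /// let v4 = SocketAddrV4::new(Ipv4Addr::new(192, 0, 2, 1), 443);
+    /// assert_eq!(SocketAddr::from(v4), SocketAddr::V4(v4));
+    /// ```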
+ fn from(sock4: SocketAddrV4) -> SocketAddr {
+ SocketAddr::V4(sock4)
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<SocketAddrV6> for SocketAddr {
+ /// Converts a [`SocketAddrV6`] into a [`SocketAddr::V6`].
+ fn from(sock6: SocketAddrV6) -> SocketAddr {
+ SocketAddr::V6(sock6)
+ }
+}
+
+#[stable(feature = "addr_from_into_ip", since = "1.17.0")]
+impl<I: Into<IpAddr>> From<(I, u16)> for SocketAddr {
+    /// Converts a tuple (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`].
+ ///
+ /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`]
+ /// and creates a [`SocketAddr::V6`] for an [`IpAddr::V6`].
+ ///
+    /// The `u16` is treated as the port of the newly created [`SocketAddr`].
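+    ///
+    /// A brief example of the conversion:
+    ///
+    /// ```
+    /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
+    ///
+    /// let addr = SocketAddr::from((Ipv4Addr::new(127, 0, 0, 1), 8080));
+    /// assert_eq!(addr, SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080)));
+    /// ```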
+ fn from(pieces: (I, u16)) -> SocketAddr {
+ SocketAddr::new(pieces.0.into(), pieces.1)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ SocketAddr::V4(ref a) => a.fmt(f),
+ SocketAddr::V6(ref a) => a.fmt(f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddrV4 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Fast path: if there's no alignment stuff, write to the output buffer
+ // directly
+ if f.precision().is_none() && f.width().is_none() {
+ write!(f, "{}:{}", self.ip(), self.port())
+ } else {
+ const IPV4_SOCKET_BUF_LEN: usize = (3 * 4) // the segments
+ + 3 // the separators
+ + 1 + 5; // the port
+ let mut buf = [0; IPV4_SOCKET_BUF_LEN];
+ let mut buf_slice = &mut buf[..];
+
+ // Unwrap is fine because writing to a sufficiently-sized
+ // buffer is infallible
+ write!(buf_slice, "{}:{}", self.ip(), self.port()).unwrap();
+ let len = IPV4_SOCKET_BUF_LEN - buf_slice.len();
+
+ // This unsafe is OK because we know what is being written to the buffer
+ let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
+ f.pad(buf)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddrV4 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddrV6 {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Fast path: if there's no alignment stuff, write to the output
+ // buffer directly
+ if f.precision().is_none() && f.width().is_none() {
+ match self.scope_id() {
+ 0 => write!(f, "[{}]:{}", self.ip(), self.port()),
+ scope_id => write!(f, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
+ }
+ } else {
+ const IPV6_SOCKET_BUF_LEN: usize = (4 * 8) // The address
+ + 7 // The colon separators
+ + 2 // The brackets
+ + 1 + 10 // The scope id
+ + 1 + 5; // The port
+
+ let mut buf = [0; IPV6_SOCKET_BUF_LEN];
+ let mut buf_slice = &mut buf[..];
+
+ match self.scope_id() {
+ 0 => write!(buf_slice, "[{}]:{}", self.ip(), self.port()),
+ scope_id => write!(buf_slice, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
+ }
+ // Unwrap is fine because writing to a sufficiently-sized
+ // buffer is infallible
+ .unwrap();
+ let len = IPV6_SOCKET_BUF_LEN - buf_slice.len();
+
+ // This unsafe is OK because we know what is being written to the buffer
+ let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
+ f.pad(buf)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddrV6 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl PartialOrd for SocketAddrV4 {
+ fn partial_cmp(&self, other: &SocketAddrV4) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl PartialOrd for SocketAddrV6 {
+ fn partial_cmp(&self, other: &SocketAddrV6) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl Ord for SocketAddrV4 {
+ fn cmp(&self, other: &SocketAddrV4) -> Ordering {
+ self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
+ }
+}
+
+#[stable(feature = "socketaddr_ordering", since = "1.45.0")]
+impl Ord for SocketAddrV6 {
+ fn cmp(&self, other: &SocketAddrV6) -> Ordering {
+ self.ip().cmp(other.ip()).then(self.port().cmp(&other.port()))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for SocketAddrV4 {
+ fn hash<H: hash::Hasher>(&self, s: &mut H) {
+ (self.port, self.ip).hash(s)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for SocketAddrV6 {
+ fn hash<H: hash::Hasher>(&self, s: &mut H) {
+ (self.port, &self.ip, self.flowinfo, self.scope_id).hash(s)
+ }
+}
+
+/// A trait for objects which can be converted or resolved to one or more
+/// [`SocketAddr`] values.
+///
+/// This trait is used for generic address resolution when constructing network
+/// objects. By default it is implemented for the following types:
+///
+/// * [`SocketAddr`]: [`to_socket_addrs`] is the identity function.
+///
+/// * [`SocketAddrV4`], [`SocketAddrV6`], <code>([IpAddr], [u16])</code>,
+/// <code>([Ipv4Addr], [u16])</code>, <code>([Ipv6Addr], [u16])</code>:
+/// [`to_socket_addrs`] constructs a [`SocketAddr`] trivially.
+///
+/// * <code>(&[str], [u16])</code>: <code>&[str]</code> should be either a string representation
+///   of an [`IpAddr`] address as expected by its [`FromStr`] implementation, or a
+///   host name. [`u16`] is the port number.
+///
+/// * <code>&[str]</code>: the string should be either a string representation of a
+///   [`SocketAddr`] as expected by its [`FromStr`] implementation, or a string of
+///   the form `<host_name>:<port>`, where `<port>` is a [`u16`] value.
+///
+/// This trait allows constructing network objects like [`TcpStream`] or
+/// [`UdpSocket`] easily with values of various types for the bind/connection
+/// address. It is needed because sometimes one type is more appropriate than
+/// the other: for simple uses a string like `"localhost:12345"` is much nicer
+/// than manual construction of the corresponding [`SocketAddr`], but sometimes
+/// [`SocketAddr`] value is *the* main source of the address, and converting it to
+/// some other type (e.g., a string) just for it to be converted back to
+/// [`SocketAddr`] in constructor methods is pointless.
+///
+/// Addresses returned by the operating system that are not IP addresses are
+/// silently ignored.
+///
+/// [`FromStr`]: crate::str::FromStr "std::str::FromStr"
+/// [`TcpStream`]: crate::net::TcpStream "net::TcpStream"
+/// [`to_socket_addrs`]: ToSocketAddrs::to_socket_addrs
+/// [`UdpSocket`]: crate::net::UdpSocket "net::UdpSocket"
+///
+/// # Examples
+///
+/// Creating a [`SocketAddr`] iterator that yields one item:
+///
+/// ```
+/// use std::net::{ToSocketAddrs, SocketAddr};
+///
+/// let addr = SocketAddr::from(([127, 0, 0, 1], 443));
+/// let mut addrs_iter = addr.to_socket_addrs().unwrap();
+///
+/// assert_eq!(Some(addr), addrs_iter.next());
+/// assert!(addrs_iter.next().is_none());
+/// ```
+///
+/// Creating a [`SocketAddr`] iterator from a hostname:
+///
+/// ```no_run
+/// use std::net::{SocketAddr, ToSocketAddrs};
+///
+/// // assuming 'localhost' resolves to 127.0.0.1
+/// let mut addrs_iter = "localhost:443".to_socket_addrs().unwrap();
+/// assert_eq!(addrs_iter.next(), Some(SocketAddr::from(([127, 0, 0, 1], 443))));
+/// assert!(addrs_iter.next().is_none());
+///
+/// // assuming 'foo' does not resolve
+/// assert!("foo:443".to_socket_addrs().is_err());
+/// ```
+///
+/// Creating a [`SocketAddr`] iterator that yields multiple items:
+///
+/// ```
+/// use std::net::{SocketAddr, ToSocketAddrs};
+///
+/// let addr1 = SocketAddr::from(([0, 0, 0, 0], 80));
+/// let addr2 = SocketAddr::from(([127, 0, 0, 1], 443));
+/// let addrs = vec![addr1, addr2];
+///
+/// let mut addrs_iter = (&addrs[..]).to_socket_addrs().unwrap();
+///
+/// assert_eq!(Some(addr1), addrs_iter.next());
+/// assert_eq!(Some(addr2), addrs_iter.next());
+/// assert!(addrs_iter.next().is_none());
+/// ```
+///
+/// Attempting to create a [`SocketAddr`] iterator from an improperly formatted
+/// socket address `&str` (missing the port):
+///
+/// ```
+/// use std::io;
+/// use std::net::ToSocketAddrs;
+///
+/// let err = "127.0.0.1".to_socket_addrs().unwrap_err();
+/// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+/// ```
+///
+/// [`TcpStream::connect`] is an example of a function that uses
+/// `ToSocketAddrs` as a trait bound on its parameter in order to accept
+/// different types:
+///
+/// ```no_run
+/// use std::net::{TcpStream, Ipv4Addr};
+///
+/// let stream = TcpStream::connect(("127.0.0.1", 443));
+/// // or
+/// let stream = TcpStream::connect("127.0.0.1:443");
+/// // or
+/// let stream = TcpStream::connect((Ipv4Addr::new(127, 0, 0, 1), 443));
+/// ```
+///
+/// [`TcpStream::connect`]: crate::net::TcpStream::connect
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToSocketAddrs {
+    /// The type of iterator returned by `to_socket_addrs`, yielding the socket
+    /// addresses which this type may correspond to.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Iter: Iterator<Item = SocketAddr>;
+
+ /// Converts this object to an iterator of resolved [`SocketAddr`]s.
+ ///
+ /// The returned iterator might not actually yield any values depending on the
+ /// outcome of any resolution performed.
+ ///
+ /// Note that this function may block the current thread while resolution is
+ /// performed.
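+    ///
+    /// For instance, resolving a host name; shown as a `no_run` sketch, since the
+    /// outcome depends on the local resolver:
+    ///
+    /// ```no_run
+    /// use std::net::ToSocketAddrs;
+    ///
+    /// let addrs = "rust-lang.org:443".to_socket_addrs().expect("resolution failed");
+    /// for addr in addrs {
+    ///     println!("resolved to {addr}");
+    /// }
+    /// ```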
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn to_socket_addrs(&self) -> io::Result<Self::Iter>;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for SocketAddr {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ Ok(Some(*self).into_iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for SocketAddrV4 {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ SocketAddr::V4(*self).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for SocketAddrV6 {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ SocketAddr::V6(*self).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (IpAddr, u16) {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ let (ip, port) = *self;
+ match ip {
+ IpAddr::V4(ref a) => (*a, port).to_socket_addrs(),
+ IpAddr::V6(ref a) => (*a, port).to_socket_addrs(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (Ipv4Addr, u16) {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ let (ip, port) = *self;
+ SocketAddrV4::new(ip, port).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (Ipv6Addr, u16) {
+ type Iter = option::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<option::IntoIter<SocketAddr>> {
+ let (ip, port) = *self;
+ SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs()
+ }
+}
+
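+// Flattens the results of a `LookupHost` resolution into `SocketAddr`s, applying
+// the port from the lookup to every returned address.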
+fn resolve_socket_addr(lh: LookupHost) -> io::Result<vec::IntoIter<SocketAddr>> {
+ let p = lh.port();
+ let v: Vec<_> = lh
+ .map(|mut a| {
+ a.set_port(p);
+ a
+ })
+ .collect();
+ Ok(v.into_iter())
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for (&str, u16) {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ let (host, port) = *self;
+
+ // try to parse the host as a regular IP address first
+ if let Ok(addr) = host.parse::<Ipv4Addr>() {
+ let addr = SocketAddrV4::new(addr, port);
+ return Ok(vec![SocketAddr::V4(addr)].into_iter());
+ }
+ if let Ok(addr) = host.parse::<Ipv6Addr>() {
+ let addr = SocketAddrV6::new(addr, port, 0, 0);
+ return Ok(vec![SocketAddr::V6(addr)].into_iter());
+ }
+
+ resolve_socket_addr((host, port).try_into()?)
+ }
+}
+
+#[stable(feature = "string_u16_to_socket_addrs", since = "1.46.0")]
+impl ToSocketAddrs for (String, u16) {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ (&*self.0, self.1).to_socket_addrs()
+ }
+}
+
+// accepts strings like 'localhost:12345'
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToSocketAddrs for str {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ // try to parse as a regular SocketAddr first
+ if let Ok(addr) = self.parse() {
+ return Ok(vec![addr].into_iter());
+ }
+
+ resolve_socket_addr(self.try_into()?)
+ }
+}
+
+#[stable(feature = "slice_to_socket_addrs", since = "1.8.0")]
+impl<'a> ToSocketAddrs for &'a [SocketAddr] {
+ type Iter = iter::Cloned<slice::Iter<'a, SocketAddr>>;
+
+ fn to_socket_addrs(&self) -> io::Result<Self::Iter> {
+ Ok(self.iter().cloned())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ToSocketAddrs + ?Sized> ToSocketAddrs for &T {
+ type Iter = T::Iter;
+ fn to_socket_addrs(&self) -> io::Result<T::Iter> {
+ (**self).to_socket_addrs()
+ }
+}
+
+#[stable(feature = "string_to_socket_addrs", since = "1.16.0")]
+impl ToSocketAddrs for String {
+ type Iter = vec::IntoIter<SocketAddr>;
+ fn to_socket_addrs(&self) -> io::Result<vec::IntoIter<SocketAddr>> {
+ (&**self).to_socket_addrs()
+ }
+}
diff --git a/library/std/src/net/addr/tests.rs b/library/std/src/net/addr/tests.rs
new file mode 100644
index 000000000..585a17451
--- /dev/null
+++ b/library/std/src/net/addr/tests.rs
@@ -0,0 +1,237 @@
+use crate::net::test::{sa4, sa6, tsa};
+use crate::net::*;
+
+#[test]
+fn to_socket_addr_ipaddr_u16() {
+ let a = Ipv4Addr::new(77, 88, 21, 11);
+ let p = 12345;
+ let e = SocketAddr::V4(SocketAddrV4::new(a, p));
+ assert_eq!(Ok(vec![e]), tsa((a, p)));
+}
+
+#[test]
+fn to_socket_addr_str_u16() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
+ assert_eq!(Ok(vec![a]), tsa(("77.88.21.11", 24352)));
+
+ let a = sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53);
+ assert_eq!(Ok(vec![a]), tsa(("2a02:6b8:0:1::1", 53)));
+
+ let a = sa4(Ipv4Addr::new(127, 0, 0, 1), 23924);
+ #[cfg(not(target_env = "sgx"))]
+ assert!(tsa(("localhost", 23924)).unwrap().contains(&a));
+ #[cfg(target_env = "sgx")]
+ let _ = a;
+}
+
+#[test]
+fn to_socket_addr_str() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
+ assert_eq!(Ok(vec![a]), tsa("77.88.21.11:24352"));
+
+ let a = sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53);
+ assert_eq!(Ok(vec![a]), tsa("[2a02:6b8:0:1::1]:53"));
+
+ let a = sa4(Ipv4Addr::new(127, 0, 0, 1), 23924);
+ #[cfg(not(target_env = "sgx"))]
+ assert!(tsa("localhost:23924").unwrap().contains(&a));
+ #[cfg(target_env = "sgx")]
+ let _ = a;
+}
+
+#[test]
+fn to_socket_addr_string() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 24352);
+ assert_eq!(Ok(vec![a]), tsa(&*format!("{}:{}", "77.88.21.11", "24352")));
+ assert_eq!(Ok(vec![a]), tsa(&format!("{}:{}", "77.88.21.11", "24352")));
+ assert_eq!(Ok(vec![a]), tsa(format!("{}:{}", "77.88.21.11", "24352")));
+
+ let s = format!("{}:{}", "77.88.21.11", "24352");
+ assert_eq!(Ok(vec![a]), tsa(s));
+ // s has been moved into the tsa call
+}
+
+#[test]
+fn bind_udp_socket_bad() {
+ // rust-lang/rust#53957: This is a regression test for a parsing problem
+ // discovered as part of issue rust-lang/rust#23076, where we were
+ // incorrectly parsing invalid input and then that would result in a
+ // successful `UdpSocket` binding when we would expect failure.
+ //
+ // At one time, this test was written as a call to `tsa` with
+ // INPUT_23076. However, that structure yields an unreliable test,
+ // because it ends up passing junk input to the DNS server, and some DNS
+ // servers will respond with `Ok` to such input, with the ip address of
+ // the DNS server itself.
+ //
+ // This form of the test is more robust: even when the DNS server
+ // returns its own address, it is still an error to bind a UDP socket to
+ // a non-local address, and so we still get an error here in that case.
+
+ const INPUT_23076: &str = "1200::AB00:1234::2552:7777:1313:34300";
+
+ assert!(crate::net::UdpSocket::bind(INPUT_23076).is_err())
+}
+
+#[test]
+fn set_ip() {
+ fn ip4(low: u8) -> Ipv4Addr {
+ Ipv4Addr::new(77, 88, 21, low)
+ }
+ fn ip6(low: u16) -> Ipv6Addr {
+ Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, low)
+ }
+
+ let mut v4 = SocketAddrV4::new(ip4(11), 80);
+ assert_eq!(v4.ip(), &ip4(11));
+ v4.set_ip(ip4(12));
+ assert_eq!(v4.ip(), &ip4(12));
+
+ let mut addr = SocketAddr::V4(v4);
+ assert_eq!(addr.ip(), IpAddr::V4(ip4(12)));
+ addr.set_ip(IpAddr::V4(ip4(13)));
+ assert_eq!(addr.ip(), IpAddr::V4(ip4(13)));
+ addr.set_ip(IpAddr::V6(ip6(14)));
+ assert_eq!(addr.ip(), IpAddr::V6(ip6(14)));
+
+ let mut v6 = SocketAddrV6::new(ip6(1), 80, 0, 0);
+ assert_eq!(v6.ip(), &ip6(1));
+ v6.set_ip(ip6(2));
+ assert_eq!(v6.ip(), &ip6(2));
+
+ let mut addr = SocketAddr::V6(v6);
+ assert_eq!(addr.ip(), IpAddr::V6(ip6(2)));
+ addr.set_ip(IpAddr::V6(ip6(3)));
+ assert_eq!(addr.ip(), IpAddr::V6(ip6(3)));
+ addr.set_ip(IpAddr::V4(ip4(4)));
+ assert_eq!(addr.ip(), IpAddr::V4(ip4(4)));
+}
+
+#[test]
+fn set_port() {
+ let mut v4 = SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80);
+ assert_eq!(v4.port(), 80);
+ v4.set_port(443);
+ assert_eq!(v4.port(), 443);
+
+ let mut addr = SocketAddr::V4(v4);
+ assert_eq!(addr.port(), 443);
+ addr.set_port(8080);
+ assert_eq!(addr.port(), 8080);
+
+ let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 0);
+ assert_eq!(v6.port(), 80);
+ v6.set_port(443);
+ assert_eq!(v6.port(), 443);
+
+ let mut addr = SocketAddr::V6(v6);
+ assert_eq!(addr.port(), 443);
+ addr.set_port(8080);
+ assert_eq!(addr.port(), 8080);
+}
+
+#[test]
+fn set_flowinfo() {
+ let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 10, 0);
+ assert_eq!(v6.flowinfo(), 10);
+ v6.set_flowinfo(20);
+ assert_eq!(v6.flowinfo(), 20);
+}
+
+#[test]
+fn set_scope_id() {
+ let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 10);
+ assert_eq!(v6.scope_id(), 10);
+ v6.set_scope_id(20);
+ assert_eq!(v6.scope_id(), 20);
+}
+
+#[test]
+fn is_v4() {
+ let v4 = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80));
+ assert!(v4.is_ipv4());
+ assert!(!v4.is_ipv6());
+}
+
+#[test]
+fn is_v6() {
+ let v6 = SocketAddr::V6(SocketAddrV6::new(
+ Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1),
+ 80,
+ 10,
+ 0,
+ ));
+ assert!(!v6.is_ipv4());
+ assert!(v6.is_ipv6());
+}
+
+#[test]
+fn socket_v4_to_str() {
+ let socket = SocketAddrV4::new(Ipv4Addr::new(192, 168, 0, 1), 8080);
+
+ assert_eq!(format!("{socket}"), "192.168.0.1:8080");
+ assert_eq!(format!("{socket:<20}"), "192.168.0.1:8080 ");
+ assert_eq!(format!("{socket:>20}"), " 192.168.0.1:8080");
+ assert_eq!(format!("{socket:^20}"), " 192.168.0.1:8080 ");
+ assert_eq!(format!("{socket:.10}"), "192.168.0.");
+}
+
+#[test]
+fn socket_v6_to_str() {
+ let mut socket = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0);
+
+ assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1]:53");
+ assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1]:53 ");
+ assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1]:53");
+ assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1]:53 ");
+ assert_eq!(format!("{socket:.15}"), "[2a02:6b8:0:1::");
+
+ socket.set_scope_id(5);
+
+ assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1%5]:53");
+ assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1%5]:53 ");
+ assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1%5]:53");
+ assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1%5]:53 ");
+ assert_eq!(format!("{socket:.18}"), "[2a02:6b8:0:1::1%5");
+}
+
+#[test]
+fn compare() {
+ let v4_1 = "224.120.45.1:23456".parse::<SocketAddrV4>().unwrap();
+ let v4_2 = "224.210.103.5:12345".parse::<SocketAddrV4>().unwrap();
+ let v4_3 = "224.210.103.5:23456".parse::<SocketAddrV4>().unwrap();
+ let v6_1 = "[2001:db8:f00::1002]:23456".parse::<SocketAddrV6>().unwrap();
+ let v6_2 = "[2001:db8:f00::2001]:12345".parse::<SocketAddrV6>().unwrap();
+ let v6_3 = "[2001:db8:f00::2001]:23456".parse::<SocketAddrV6>().unwrap();
+
+ // equality
+ assert_eq!(v4_1, v4_1);
+ assert_eq!(v6_1, v6_1);
+ assert_eq!(SocketAddr::V4(v4_1), SocketAddr::V4(v4_1));
+ assert_eq!(SocketAddr::V6(v6_1), SocketAddr::V6(v6_1));
+ assert!(v4_1 != v4_2);
+ assert!(v6_1 != v6_2);
+
+ // compare different addresses
+ assert!(v4_1 < v4_2);
+ assert!(v6_1 < v6_2);
+ assert!(v4_2 > v4_1);
+ assert!(v6_2 > v6_1);
+
+ // compare the same address with different ports
+ assert!(v4_2 < v4_3);
+ assert!(v6_2 < v6_3);
+ assert!(v4_3 > v4_2);
+ assert!(v6_3 > v6_2);
+
+ // compare different addresses with the same port
+ assert!(v4_1 < v4_3);
+ assert!(v6_1 < v6_3);
+ assert!(v4_3 > v4_1);
+ assert!(v6_3 > v6_1);
+
+ // compare with an inferred right-hand side
+ assert_eq!(v4_1, "224.120.45.1:23456".parse().unwrap());
+ assert_eq!(v6_1, "[2001:db8:f00::1002]:23456".parse().unwrap());
+ assert_eq!(SocketAddr::V4(v4_1), "224.120.45.1:23456".parse().unwrap());
+}
diff --git a/library/std/src/net/ip.rs b/library/std/src/net/ip.rs
new file mode 100644
index 000000000..41ca9ba84
--- /dev/null
+++ b/library/std/src/net/ip.rs
@@ -0,0 +1,2040 @@
+// Tests for this module
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cmp::Ordering;
+use crate::fmt::{self, Write as FmtWrite};
+use crate::io::Write as IoWrite;
+use crate::mem::transmute;
+use crate::sys::net::netc as c;
+use crate::sys_common::{FromInner, IntoInner};
+
+/// An IP address, either IPv4 or IPv6.
+///
+/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their
+/// respective documentation for more details.
+///
+/// # Examples
+///
+/// ```
+/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+///
+/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
+/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+///
+/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4));
+/// assert_eq!("::1".parse(), Ok(localhost_v6));
+///
+/// assert_eq!(localhost_v4.is_ipv6(), false);
+/// assert_eq!(localhost_v4.is_ipv4(), true);
+/// ```
+#[stable(feature = "ip_addr", since = "1.7.0")]
+#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)]
+pub enum IpAddr {
+ /// An IPv4 address.
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr),
+ /// An IPv6 address.
+ #[stable(feature = "ip_addr", since = "1.7.0")]
+ V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr),
+}
+
+/// An IPv4 address.
+///
+/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791].
+/// They are usually represented as four octets.
+///
+/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
+///
+/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791
+///
+/// # Textual representation
+///
+/// `Ipv4Addr` provides a [`FromStr`] implementation. The four octets are in decimal
+/// notation, divided by `.` (this is called "dot-decimal notation").
+/// Notably, octal numbers (which are indicated with a leading `0`) and hexadecimal numbers (which
+/// are indicated with a leading `0x`) are not allowed per [IETF RFC 6943].
+///
+/// [IETF RFC 6943]: https://tools.ietf.org/html/rfc6943#section-3.1.1
+/// [`FromStr`]: crate::str::FromStr
+///
+/// # Examples
+///
+/// ```
+/// use std::net::Ipv4Addr;
+///
+/// let localhost = Ipv4Addr::new(127, 0, 0, 1);
+/// assert_eq!("127.0.0.1".parse(), Ok(localhost));
+/// assert_eq!(localhost.is_loopback(), true);
+/// assert!("012.004.002.000".parse::<Ipv4Addr>().is_err()); // all octets are in octal
+/// assert!("0000000.0.0.0".parse::<Ipv4Addr>().is_err()); // first octet is a zero in octal
+/// assert!("0xcb.0x0.0x71.0x00".parse::<Ipv4Addr>().is_err()); // all octets are in hex
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Ipv4Addr {
+ octets: [u8; 4],
+}
+
+/// An IPv6 address.
+///
+/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291].
+/// They are usually represented as eight 16-bit segments.
+///
+/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+///
+/// # Embedding IPv4 Addresses
+///
+/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
+///
+/// To assist in the transition from IPv4 to IPv6, two types of IPv6 addresses that embed an IPv4 address were defined:
+/// IPv4-compatible and IPv4-mapped addresses. Of these, IPv4-compatible addresses have been officially deprecated.
+///
+/// Neither type of address is assigned any special meaning by this implementation,
+/// other than what the relevant standards prescribe. This means that an address like `::ffff:127.0.0.1`,
+/// while representing an IPv4 loopback address, is not itself an IPv6 loopback address; only `::1` is.
+/// To handle these so-called "IPv4-in-IPv6" addresses, they first have to be converted to their canonical IPv4 address.
+///
+/// ### IPv4-Compatible IPv6 Addresses
+///
+/// IPv4-compatible IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.1], and have been officially deprecated.
+/// The RFC describes the format of an "IPv4-Compatible IPv6 address" as follows:
+///
+/// ```text
+/// | 80 bits | 16 | 32 bits |
+/// +--------------------------------------+--------------------------+
+/// |0000..............................0000|0000| IPv4 address |
+/// +--------------------------------------+----+---------------------+
+/// ```
+/// So `::a.b.c.d` would be an IPv4-compatible IPv6 address representing the IPv4 address `a.b.c.d`.
+///
+/// To convert from an IPv4 address to an IPv4-compatible IPv6 address, use [`Ipv4Addr::to_ipv6_compatible`].
+/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-compatible IPv6 address to the canonical IPv4 address.
+///
+/// [IETF RFC 4291 Section 2.5.5.1]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.1
+///
+/// ### IPv4-Mapped IPv6 Addresses
+///
+/// IPv4-mapped IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.2].
+/// The RFC describes the format of an "IPv4-Mapped IPv6 address" as follows:
+///
+/// ```text
+/// | 80 bits | 16 | 32 bits |
+/// +--------------------------------------+--------------------------+
+/// |0000..............................0000|FFFF| IPv4 address |
+/// +--------------------------------------+----+---------------------+
+/// ```
+/// So `::ffff:a.b.c.d` would be an IPv4-mapped IPv6 address representing the IPv4 address `a.b.c.d`.
+///
+/// To convert from an IPv4 address to an IPv4-mapped IPv6 address, use [`Ipv4Addr::to_ipv6_mapped`].
+/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-mapped IPv6 address to the canonical IPv4 address.
+/// Note that this will also convert the IPv6 loopback address `::1` to `0.0.0.1`. Use
+/// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
+///
+/// [IETF RFC 4291 Section 2.5.5.2]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2
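+///
+/// For example, a small sketch of a round trip through the IPv4-mapped form,
+/// using the conversion methods mentioned above:
+///
+/// ```
+/// use std::net::{Ipv4Addr, Ipv6Addr};
+///
+/// let ipv4 = Ipv4Addr::new(192, 0, 2, 255);
+/// let mapped = ipv4.to_ipv6_mapped();
+///
+/// assert_eq!(mapped, Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff));
+/// assert_eq!(mapped.to_ipv4(), Some(ipv4));
+/// ```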
+///
+/// # Textual representation
+///
+/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent
+/// an IPv6 address in text, but in general, each segment is written in hexadecimal
+/// notation, and segments are separated by `:`. For more information, see
+/// [IETF RFC 5952].
+///
+/// [`FromStr`]: crate::str::FromStr
+/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952
+///
+/// # Examples
+///
+/// ```
+/// use std::net::Ipv6Addr;
+///
+/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+/// assert_eq!("::1".parse(), Ok(localhost));
+/// assert_eq!(localhost.is_loopback(), true);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Ipv6Addr {
+ octets: [u8; 16],
+}
+
+/// Scope of an [IPv6 multicast address] as defined in [IETF RFC 7346 section 2].
+///
+/// # Stability Guarantees
+///
+/// Not all possible values for a multicast scope have been assigned.
+/// Future RFCs may introduce new scopes, which will be added as variants to this enum;
+/// because of this the enum is marked as `#[non_exhaustive]`.
+///
+/// # Examples
+/// ```
+/// #![feature(ip)]
+///
+/// use std::net::Ipv6Addr;
+/// use std::net::Ipv6MulticastScope::*;
+///
+/// // An IPv6 multicast address with global scope (`ff0e::`).
+/// let address = Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0);
+///
+/// // Will print "Global scope".
+/// match address.multicast_scope() {
+/// Some(InterfaceLocal) => println!("Interface-Local scope"),
+/// Some(LinkLocal) => println!("Link-Local scope"),
+/// Some(RealmLocal) => println!("Realm-Local scope"),
+/// Some(AdminLocal) => println!("Admin-Local scope"),
+/// Some(SiteLocal) => println!("Site-Local scope"),
+/// Some(OrganizationLocal) => println!("Organization-Local scope"),
+/// Some(Global) => println!("Global scope"),
+/// Some(_) => println!("Unknown scope"),
+/// None => println!("Not a multicast address!")
+/// }
+///
+/// ```
+///
+/// [IPv6 multicast address]: Ipv6Addr
+/// [IETF RFC 7346 section 2]: https://tools.ietf.org/html/rfc7346#section-2
+#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)]
+#[unstable(feature = "ip", issue = "27709")]
+#[non_exhaustive]
+pub enum Ipv6MulticastScope {
+ /// Interface-Local scope.
+ InterfaceLocal,
+ /// Link-Local scope.
+ LinkLocal,
+ /// Realm-Local scope.
+ RealmLocal,
+ /// Admin-Local scope.
+ AdminLocal,
+ /// Site-Local scope.
+ SiteLocal,
+ /// Organization-Local scope.
+ OrganizationLocal,
+ /// Global scope.
+ Global,
+}
+
+impl IpAddr {
+ /// Returns [`true`] for the special 'unspecified' address.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_unspecified()`] and
+ /// [`Ipv6Addr::is_unspecified()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unspecified(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_unspecified(),
+ IpAddr::V6(ip) => ip.is_unspecified(),
+ }
+ }
+
+ /// Returns [`true`] if this is a loopback address.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_loopback()`] and
+ /// [`Ipv6Addr::is_loopback()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_loopback(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_loopback(),
+ IpAddr::V6(ip) => ip.is_loopback(),
+ }
+ }
+
+ /// Returns [`true`] if the address appears to be globally routable.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_global()`] and
+ /// [`Ipv6Addr::is_global()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_global(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_global(),
+ IpAddr::V6(ip) => ip.is_global(),
+ }
+ }
+
+ /// Returns [`true`] if this is a multicast address.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_multicast()`] and
+ /// [`Ipv6Addr::is_multicast()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_multicast(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_multicast(),
+ IpAddr::V6(ip) => ip.is_multicast(),
+ }
+ }
+
+ /// Returns [`true`] if this address is in a range designated for documentation.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_documentation()`] and
+ /// [`Ipv6Addr::is_documentation()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true);
+ /// assert_eq!(
+ /// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(),
+ /// true
+ /// );
+ /// ```
+ #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_documentation(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_documentation(),
+ IpAddr::V6(ip) => ip.is_documentation(),
+ }
+ }
+
+ /// Returns [`true`] if this address is in a range designated for benchmarking.
+ ///
+ /// See the documentation for [`Ipv4Addr::is_benchmarking()`] and
+ /// [`Ipv6Addr::is_benchmarking()`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(198, 19, 255, 255)).is_benchmarking(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0)).is_benchmarking(), true);
+ /// ```
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_benchmarking(&self) -> bool {
+ match self {
+ IpAddr::V4(ip) => ip.is_benchmarking(),
+ IpAddr::V6(ip) => ip.is_benchmarking(),
+ }
+ }
+
+ /// Returns [`true`] if this address is an [`IPv4` address], and [`false`]
+ /// otherwise.
+ ///
+ /// [`IPv4` address]: IpAddr::V4
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ipaddr_checker", since = "1.16.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_ipv4(&self) -> bool {
+ matches!(self, IpAddr::V4(_))
+ }
+
+ /// Returns [`true`] if this address is an [`IPv6` address], and [`false`]
+ /// otherwise.
+ ///
+ /// [`IPv6` address]: IpAddr::V6
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "ipaddr_checker", since = "1.16.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_ipv6(&self) -> bool {
+ matches!(self, IpAddr::V6(_))
+ }
+
+    /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped IPv6 address,
+    /// otherwise it returns `self` as-is.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_canonical().is_loopback(), true);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).is_loopback(), false);
+ /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).to_canonical().is_loopback(), true);
+ /// ```
+ #[inline]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_unstable(feature = "const_ip", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ pub const fn to_canonical(&self) -> IpAddr {
+ match self {
+ &v4 @ IpAddr::V4(_) => v4,
+ IpAddr::V6(v6) => v6.to_canonical(),
+ }
+ }
+}
+
+impl Ipv4Addr {
+ /// Creates a new IPv4 address from four eight-bit octets.
+ ///
+ /// The result will represent the IP address `a`.`b`.`c`.`d`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(127, 0, 0, 1);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
+ Ipv4Addr { octets: [a, b, c, d] }
+ }
+
+ /// An IPv4 address with the address pointing to localhost: `127.0.0.1`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::LOCALHOST;
+ /// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1);
+
+ /// An IPv4 address representing an unspecified address: `0.0.0.0`
+ ///
+ /// This corresponds to the constant `INADDR_ANY` in other languages.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::UNSPECIFIED;
+ /// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0));
+ /// ```
+ #[doc(alias = "INADDR_ANY")]
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0);
+
+ /// An IPv4 address representing the broadcast address: `255.255.255.255`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::BROADCAST;
+ /// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255);
+
+ /// Returns the four eight-bit integers that make up this address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(127, 0, 0, 1);
+ /// assert_eq!(addr.octets(), [127, 0, 0, 1]);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn octets(&self) -> [u8; 4] {
+ self.octets
+ }
+
+ /// Returns [`true`] for the special 'unspecified' address (`0.0.0.0`).
+ ///
+ /// This property is defined in _UNIX Network Programming, Second Edition_,
+ /// W. Richard Stevens, p. 891; see also [ip7].
+ ///
+ /// [ip7]: https://man7.org/linux/man-pages/man7/ip.7.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true);
+ /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "ip_shared", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unspecified(&self) -> bool {
+ u32::from_be_bytes(self.octets) == 0
+ }
+
+ /// Returns [`true`] if this is a loopback address (`127.0.0.0/8`).
+ ///
+ /// This property is defined by [IETF RFC 1122].
+ ///
+ /// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true);
+ /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_loopback(&self) -> bool {
+ self.octets()[0] == 127
+ }
+
+ /// Returns [`true`] if this is a private address.
+ ///
+ /// The private address ranges are defined in [IETF RFC 1918] and include:
+ ///
+ /// - `10.0.0.0/8`
+ /// - `172.16.0.0/12`
+ /// - `192.168.0.0/16`
+ ///
+ /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false);
+ /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true);
+ /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_private(&self) -> bool {
+ match self.octets() {
+ [10, ..] => true,
+ [172, b, ..] if b >= 16 && b <= 31 => true,
+ [192, 168, ..] => true,
+ _ => false,
+ }
+ }
+
+ /// Returns [`true`] if the address is link-local (`169.254.0.0/16`).
+ ///
+ /// This property is defined by [IETF RFC 3927].
+ ///
+ /// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true);
+ /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true);
+ /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_link_local(&self) -> bool {
+ matches!(self.octets(), [169, 254, ..])
+ }
+
+ /// Returns [`true`] if the address appears to be globally routable.
+ /// See [iana-ipv4-special-registry][ipv4-sr].
+ ///
+ /// The following return [`false`]:
+ ///
+ /// - private addresses (see [`Ipv4Addr::is_private()`])
+ /// - the loopback address (see [`Ipv4Addr::is_loopback()`])
+ /// - the link-local address (see [`Ipv4Addr::is_link_local()`])
+ /// - the broadcast address (see [`Ipv4Addr::is_broadcast()`])
+ /// - addresses used for documentation (see [`Ipv4Addr::is_documentation()`])
+ /// - the unspecified address (see [`Ipv4Addr::is_unspecified()`]), and the whole
+ /// `0.0.0.0/8` block
+ /// - addresses reserved for future protocols, except
+ /// `192.0.0.9/32` and `192.0.0.10/32` which are globally routable
+ /// - addresses reserved for future use (see [`Ipv4Addr::is_reserved()`])
+ /// - addresses reserved for network device benchmarking (see
+ /// [`Ipv4Addr::is_benchmarking()`])
+ ///
+ /// [ipv4-sr]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv4Addr;
+ ///
+ /// // private addresses are not global
+ /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false);
+ ///
+ /// // the 0.0.0.0/8 block is not global
+ /// assert_eq!(Ipv4Addr::new(0, 1, 2, 3).is_global(), false);
+ /// // in particular, the unspecified address is not global
+ /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_global(), false);
+ ///
+ /// // the loopback address is not global
+ /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_global(), false);
+ ///
+ /// // link local addresses are not global
+ /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false);
+ ///
+ /// // the broadcast address is not global
+ /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_global(), false);
+ ///
+ /// // the address space designated for documentation is not global
+ /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false);
+ ///
+ /// // shared addresses are not global
+ /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false);
+ ///
+ /// // addresses reserved for protocol assignment are not global
+ /// assert_eq!(Ipv4Addr::new(192, 0, 0, 0).is_global(), false);
+ /// assert_eq!(Ipv4Addr::new(192, 0, 0, 255).is_global(), false);
+ ///
+ /// // addresses reserved for future use are not global
+ /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false);
+ ///
+ /// // addresses reserved for network device benchmarking are not global
+ /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false);
+ ///
+ /// // All the other addresses are global
+ /// assert_eq!(Ipv4Addr::new(1, 1, 1, 1).is_global(), true);
+ /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_global(&self) -> bool {
+ // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
+ // globally routable addresses in the 192.0.0.0/24 range.
+ if u32::from_be_bytes(self.octets()) == 0xc0000009
+ || u32::from_be_bytes(self.octets()) == 0xc000000a
+ {
+ return true;
+ }
+ !self.is_private()
+ && !self.is_loopback()
+ && !self.is_link_local()
+ && !self.is_broadcast()
+ && !self.is_documentation()
+ && !self.is_shared()
+ // addresses reserved for future protocols (`192.0.0.0/24`)
+ && !(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0)
+ && !self.is_reserved()
+ && !self.is_benchmarking()
+ // Make sure the address is not in 0.0.0.0/8
+ && self.octets()[0] != 0
+ }
+
+ /// Returns [`true`] if this address is part of the Shared Address Space defined in
+ /// [IETF RFC 6598] (`100.64.0.0/10`).
+ ///
+ /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true);
+ /// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true);
+ /// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_shared(&self) -> bool {
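+ // 100.64.0.0/10: the first octet is 100 and the top two bits of the second octet are `01` (64..=127).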
+ self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000)
+ }
+
+ /// Returns [`true`] if this address is part of the `198.18.0.0/15` range, which is reserved for
+ /// network devices benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0`
+ /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`.
+ ///
+ /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544
+ /// [errata 423]: https://www.rfc-editor.org/errata/eid423
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false);
+ /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true);
+ /// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true);
+ /// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_benchmarking(&self) -> bool {
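+ // 198.18.0.0/15: the second octet is 18 or 19 (the mask clears the least significant bit).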
+ self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18
+ }
+
+ /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112]
+ /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the
+ /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since
+ /// it is obviously not reserved for future use.
+ ///
+ /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112
+ ///
+ /// # Warning
+ ///
+ /// As IANA assigns new addresses, this method will be
+ /// updated. This may result in non-reserved addresses being
+ /// treated as reserved in code that relies on an outdated version
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true);
+ /// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true);
+ ///
+ /// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false);
+ /// // The broadcast address is not considered as reserved for future use by this implementation
+ /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_reserved(&self) -> bool {
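+ // 240.0.0.0/4: the four most significant bits of the first octet are set, excluding the broadcast address.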
+ self.octets()[0] & 240 == 240 && !self.is_broadcast()
+ }
+
+ /// Returns [`true`] if this is a multicast address (`224.0.0.0/4`).
+ ///
+ /// Multicast addresses have a most significant octet between `224` and `239`,
+ /// and are defined by [IETF RFC 5771].
+ ///
+ /// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true);
+ /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true);
+ /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_multicast(&self) -> bool {
+ self.octets()[0] >= 224 && self.octets()[0] <= 239
+ }
+
+ /// Returns [`true`] if this is a broadcast address (`255.255.255.255`).
+ ///
+ /// A broadcast address has all octets set to `255` as defined in [IETF RFC 919].
+ ///
+ /// [IETF RFC 919]: https://tools.ietf.org/html/rfc919
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true);
+ /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_broadcast(&self) -> bool {
+ u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets())
+ }
+
+ /// Returns [`true`] if this address is in a range designated for documentation.
+ ///
+ /// This is defined in [IETF RFC 5737]:
+ ///
+ /// - `192.0.2.0/24` (TEST-NET-1)
+ /// - `198.51.100.0/24` (TEST-NET-2)
+ /// - `203.0.113.0/24` (TEST-NET-3)
+ ///
+ /// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true);
+ /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true);
+ /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true);
+ /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_documentation(&self) -> bool {
+ matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _])
+ }
+
+ /// Converts this address to an [IPv4-compatible] [`IPv6` address].
+ ///
+ /// `a.b.c.d` becomes `::a.b.c.d`
+ ///
+ /// Note that IPv4-compatible addresses have been officially deprecated.
+ /// If you don't explicitly need an IPv4-compatible address for legacy reasons, consider using `to_ipv6_mapped` instead.
+ ///
+ /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
+ /// [`IPv6` address]: Ipv6Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(
+ /// Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(),
+ /// Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x2ff)
+ /// );
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv6_compatible(&self) -> Ipv6Addr {
+ let [a, b, c, d] = self.octets();
+ Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d] }
+ }
+
+ /// Converts this address to an [IPv4-mapped] [`IPv6` address].
+ ///
+ /// `a.b.c.d` becomes `::ffff:a.b.c.d`
+ ///
+ /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
+ /// [`IPv6` address]: Ipv6Addr
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(),
+ /// Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff));
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv6_mapped(&self) -> Ipv6Addr {
+ let [a, b, c, d] = self.octets();
+ Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d] }
+ }
+}
+
+#[stable(feature = "ip_addr", since = "1.7.0")]
+impl fmt::Display for IpAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ IpAddr::V4(ip) => ip.fmt(fmt),
+ IpAddr::V6(ip) => ip.fmt(fmt),
+ }
+ }
+}
+
+#[stable(feature = "ip_addr", since = "1.7.0")]
+impl fmt::Debug for IpAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<Ipv4Addr> for IpAddr {
+ /// Copies this address to a new `IpAddr::V4`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr};
+ ///
+ /// let addr = Ipv4Addr::new(127, 0, 0, 1);
+ ///
+ /// assert_eq!(
+ /// IpAddr::V4(addr),
+ /// IpAddr::from(addr)
+ /// )
+ /// ```
+ #[inline]
+ fn from(ipv4: Ipv4Addr) -> IpAddr {
+ IpAddr::V4(ipv4)
+ }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<Ipv6Addr> for IpAddr {
+ /// Copies this address to a new `IpAddr::V6`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr};
+ ///
+ /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
+ ///
+ /// assert_eq!(
+ /// IpAddr::V6(addr),
+ /// IpAddr::from(addr)
+ /// );
+ /// ```
+ #[inline]
+ fn from(ipv6: Ipv6Addr) -> IpAddr {
+ IpAddr::V6(ipv6)
+ }
+}
+
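+/// Writes an `Ipv4Addr` in dotted-decimal notation (`a.b.c.d`), matching the
+/// output checked by the formatting tests below.
+///
+/// # Examples
+///
+/// ```
+/// use std::net::Ipv4Addr;
+///
+/// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).to_string(), "127.0.0.1");
+/// ```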
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Ipv4Addr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let octets = self.octets();
+ // Fast path: if there are no alignment requirements, write the address directly to the formatter
+ if fmt.precision().is_none() && fmt.width().is_none() {
+ write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
+ } else {
+ const IPV4_BUF_LEN: usize = 15; // Long enough for the longest possible IPv4 address
+ let mut buf = [0u8; IPV4_BUF_LEN];
+ let mut buf_slice = &mut buf[..];
+
+ // Note: The call to write should never fail, hence the unwrap
+ write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap();
+ let len = IPV4_BUF_LEN - buf_slice.len();
+
+ // This unsafe is OK because we know what is being written to the buffer
+ let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
+ fmt.pad(buf)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Ipv4Addr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<Ipv4Addr> for IpAddr {
+ #[inline]
+ fn eq(&self, other: &Ipv4Addr) -> bool {
+ match self {
+ IpAddr::V4(v4) => v4 == other,
+ IpAddr::V6(_) => false,
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<IpAddr> for Ipv4Addr {
+ #[inline]
+ fn eq(&self, other: &IpAddr) -> bool {
+ match other {
+ IpAddr::V4(v4) => self == v4,
+ IpAddr::V6(_) => false,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for Ipv4Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<Ipv4Addr> for IpAddr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
+ match self {
+ IpAddr::V4(v4) => v4.partial_cmp(other),
+ IpAddr::V6(_) => Some(Ordering::Greater),
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<IpAddr> for Ipv4Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
+ match other {
+ IpAddr::V4(v4) => self.partial_cmp(v4),
+ IpAddr::V6(_) => Some(Ordering::Less),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for Ipv4Addr {
+ #[inline]
+ fn cmp(&self, other: &Ipv4Addr) -> Ordering {
+ self.octets.cmp(&other.octets)
+ }
+}
+
+impl IntoInner<c::in_addr> for Ipv4Addr {
+ #[inline]
+ fn into_inner(self) -> c::in_addr {
+ // `s_addr` is stored as big-endian on all machines and the array is already in
+ // big-endian order, so a native-endian conversion is used to avoid any byte swapping.
+ c::in_addr { s_addr: u32::from_ne_bytes(self.octets) }
+ }
+}
+impl FromInner<c::in_addr> for Ipv4Addr {
+ fn from_inner(addr: c::in_addr) -> Ipv4Addr {
+ Ipv4Addr { octets: addr.s_addr.to_ne_bytes() }
+ }
+}
+
+#[stable(feature = "ip_u32", since = "1.1.0")]
+impl From<Ipv4Addr> for u32 {
+ /// Converts an `Ipv4Addr` into a host byte order `u32`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
+ /// assert_eq!(0x12345678, u32::from(addr));
+ /// ```
+ #[inline]
+ fn from(ip: Ipv4Addr) -> u32 {
+ u32::from_be_bytes(ip.octets)
+ }
+}
+
+#[stable(feature = "ip_u32", since = "1.1.0")]
+impl From<u32> for Ipv4Addr {
+ /// Converts a host byte order `u32` into an `Ipv4Addr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::from(0x12345678);
+ /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr);
+ /// ```
+ #[inline]
+ fn from(ip: u32) -> Ipv4Addr {
+ Ipv4Addr { octets: ip.to_be_bytes() }
+ }
+}
+
+#[stable(feature = "from_slice_v4", since = "1.9.0")]
+impl From<[u8; 4]> for Ipv4Addr {
+ /// Creates an `Ipv4Addr` from a four element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]);
+ /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr);
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 4]) -> Ipv4Addr {
+ Ipv4Addr { octets }
+ }
+}
+
+#[stable(feature = "ip_from_slice", since = "1.17.0")]
+impl From<[u8; 4]> for IpAddr {
+ /// Creates an `IpAddr::V4` from a four element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv4Addr};
+ ///
+ /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]);
+ /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr);
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 4]) -> IpAddr {
+ IpAddr::V4(Ipv4Addr::from(octets))
+ }
+}
+
+impl Ipv6Addr {
+ /// Creates a new IPv6 address from eight 16-bit segments.
+ ///
+ /// The result will represent the IP address `a:b:c:d:e:f:g:h`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
+ let addr16 = [
+ a.to_be(),
+ b.to_be(),
+ c.to_be(),
+ d.to_be(),
+ e.to_be(),
+ f.to_be(),
+ g.to_be(),
+ h.to_be(),
+ ];
+ Ipv6Addr {
+ // All elements in `addr16` are big endian.
+ // SAFETY: `[u16; 8]` is always safe to transmute to `[u8; 16]`.
+ octets: unsafe { transmute::<_, [u8; 16]>(addr16) },
+ }
+ }
+
+ /// An IPv6 address representing localhost: `::1`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::LOCALHOST;
+ /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+
+ /// An IPv6 address representing the unspecified address: `::`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::UNSPECIFIED;
+ /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
+ /// ```
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
+ pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
+
+ /// Returns the eight 16-bit segments that make up this address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(),
+ /// [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub const fn segments(&self) -> [u16; 8] {
+ // All elements in `self.octets` must be big endian.
+ // SAFETY: `[u8; 16]` is always safe to transmute to `[u16; 8]`.
+ let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.octets) };
+ // We want native endian u16
+ [
+ u16::from_be(a),
+ u16::from_be(b),
+ u16::from_be(c),
+ u16::from_be(d),
+ u16::from_be(e),
+ u16::from_be(f),
+ u16::from_be(g),
+ u16::from_be(h),
+ ]
+ }
+
+ /// Returns [`true`] for the special 'unspecified' address (`::`).
+ ///
+ /// This property is defined in [IETF RFC 4291].
+ ///
+ /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unspecified(&self) -> bool {
+ u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets())
+ }
+
+ /// Returns [`true`] if this is the [loopback address] (`::1`),
+ /// as defined in [IETF RFC 4291 section 2.5.3].
+ ///
+ /// Contrary to IPv4, in IPv6 there is only one loopback address.
+ ///
+ /// [loopback address]: Ipv6Addr::LOCALHOST
+ /// [IETF RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_loopback(&self) -> bool {
+ u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets())
+ }
+
+ /// Returns [`true`] if the address appears to be globally routable.
+ ///
+ /// The following return [`false`]:
+ ///
+ /// - the loopback address
+ /// - link-local and unique local unicast addresses
+ /// - interface-, link-, realm-, admin- and site-local multicast addresses
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), true);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_global(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1).is_global(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_global(&self) -> bool {
+ match self.multicast_scope() {
+ Some(Ipv6MulticastScope::Global) => true,
+ None => self.is_unicast_global(),
+ _ => false,
+ }
+ }
+
+ /// Returns [`true`] if this is a unique local address (`fc00::/7`).
+ ///
+ /// This property is defined in [IETF RFC 4193].
+ ///
+ /// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false);
+ /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unique_local(&self) -> bool {
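+ // fc00::/7: the top seven bits of the first segment are `1111110` (0xfc or 0xfd).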
+ (self.segments()[0] & 0xfe00) == 0xfc00
+ }
+
+ /// Returns [`true`] if this is a unicast address, as defined by [IETF RFC 4291].
+ /// Any address that is not a [multicast address] (`ff00::/8`) is unicast.
+ ///
+ /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+ /// [multicast address]: Ipv6Addr::is_multicast
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// // The unspecified and loopback addresses are unicast.
+ /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_unicast(), true);
+ /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast(), true);
+ ///
+ /// // Any address that is not a multicast address (`ff00::/8`) is unicast.
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast(), true);
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_unicast(), false);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unicast(&self) -> bool {
+ !self.is_multicast()
+ }
+
+ /// Returns `true` if the address is a unicast address with link-local scope,
+ /// as defined in [RFC 4291].
+ ///
+ /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4].
+ /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6],
+ /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format:
+ ///
+ /// ```text
+ /// | 10 bits | 54 bits | 64 bits |
+ /// +----------+-------------------------+----------------------------+
+ /// |1111111010| 0 | interface ID |
+ /// +----------+-------------------------+----------------------------+
+ /// ```
+ /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`,
+ /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated,
+ /// and those addresses will have link-local scope.
+ ///
+ /// Also note that while [RFC 4291 section 2.5.3] says of the [loopback address] (`::1`) that "it is treated as having Link-Local scope",
+ /// this does not mean that the loopback address actually has link-local scope, so this method returns `false` on it.
+ ///
+ /// [RFC 4291]: https://tools.ietf.org/html/rfc4291
+ /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4
+ /// [RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
+ /// [RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6
+ /// [loopback address]: Ipv6Addr::LOCALHOST
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// // The loopback address (`::1`) does not actually have link-local scope.
+ /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast_link_local(), false);
+ ///
+ /// // Only addresses in `fe80::/10` have link-local scope.
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), false);
+ /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
+ ///
+ /// // Addresses outside the stricter `fe80::/64` also have link-local scope.
+ /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0).is_unicast_link_local(), true);
+ /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unicast_link_local(&self) -> bool {
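+ // fe80::/10: the top ten bits of the first segment are `1111111010`.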
+ (self.segments()[0] & 0xffc0) == 0xfe80
+ }
+
+ /// Returns [`true`] if this is an address reserved for documentation
+ /// (`2001:db8::/32`).
+ ///
+ /// This property is defined in [IETF RFC 3849].
+ ///
+ /// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false);
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_documentation(&self) -> bool {
+ (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8)
+ }
+
+ /// Returns [`true`] if this is an address reserved for benchmarking (`2001:2::/48`).
+ ///
+ /// This property is defined in [IETF RFC 5180], where it is mistakenly specified as covering the range `2001:0200::/48`.
+ /// This is corrected in [IETF RFC Errata 1752] to `2001:0002::/48`.
+ ///
+ /// [IETF RFC 5180]: https://tools.ietf.org/html/rfc5180
+ /// [IETF RFC Errata 1752]: https://www.rfc-editor.org/errata_search.php?eid=1752
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc613, 0x0).is_benchmarking(), false);
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0).is_benchmarking(), true);
+ /// ```
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_benchmarking(&self) -> bool {
+ (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0)
+ }
+
+ /// Returns [`true`] if the address is a globally routable unicast address.
+ ///
+ /// The following return [`false`]:
+ ///
+ /// - the loopback address
+ /// - the link-local addresses
+ /// - unique local addresses
+ /// - the unspecified address
+ /// - the address range reserved for documentation
+ ///
+ /// This method returns [`true`] for site-local addresses, as per [RFC 4291 section 2.5.7]:
+ ///
+ /// ```text
+ /// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer
+ /// be supported in new implementations (i.e., new implementations must treat this prefix as
+ /// Global Unicast).
+ /// ```
+ ///
+ /// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn is_unicast_global(&self) -> bool {
+ self.is_unicast()
+ && !self.is_loopback()
+ && !self.is_unicast_link_local()
+ && !self.is_unique_local()
+ && !self.is_unspecified()
+ && !self.is_documentation()
+ }
+
+ /// Returns the address's multicast scope if the address is multicast.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ ///
+ /// use std::net::{Ipv6Addr, Ipv6MulticastScope};
+ ///
+ /// assert_eq!(
+ /// Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(),
+ /// Some(Ipv6MulticastScope::Global)
+ /// );
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use]
+ #[inline]
+ pub const fn multicast_scope(&self) -> Option<Ipv6MulticastScope> {
+ if self.is_multicast() {
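+ // The scope is encoded in the four least significant bits of the first segment.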
+ match self.segments()[0] & 0x000f {
+ 1 => Some(Ipv6MulticastScope::InterfaceLocal),
+ 2 => Some(Ipv6MulticastScope::LinkLocal),
+ 3 => Some(Ipv6MulticastScope::RealmLocal),
+ 4 => Some(Ipv6MulticastScope::AdminLocal),
+ 5 => Some(Ipv6MulticastScope::SiteLocal),
+ 8 => Some(Ipv6MulticastScope::OrganizationLocal),
+ 14 => Some(Ipv6MulticastScope::Global),
+ _ => None,
+ }
+ } else {
+ None
+ }
+ }
+
+ /// Returns [`true`] if this is a multicast address (`ff00::/8`).
+ ///
+ /// This property is defined by [IETF RFC 4291].
+ ///
+ /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(since = "1.7.0", feature = "ip_17")]
+ #[must_use]
+ #[inline]
+ pub const fn is_multicast(&self) -> bool {
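+ // ff00::/8: the most significant byte of the first segment is 0xff.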
+ (self.segments()[0] & 0xff00) == 0xff00
+ }
+
+ /// Converts this address to an [`IPv4` address] if it's an [IPv4-mapped] address,
+ /// as defined in [IETF RFC 4291 section 2.5.5.2], otherwise returns [`None`].
+ ///
+ /// `::ffff:a.b.c.d` becomes `a.b.c.d`.
+ /// All addresses *not* starting with `::ffff` will return `None`.
+ ///
+ /// [`IPv4` address]: Ipv4Addr
+ /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
+ /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4_mapped(), None);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4_mapped(),
+ /// Some(Ipv4Addr::new(192, 10, 2, 255)));
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv4_mapped(&self) -> Option<Ipv4Addr> {
+ match self.octets() {
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
+ Some(Ipv4Addr::new(a, b, c, d))
+ }
+ _ => None,
+ }
+ }
+
+ /// Converts this address to an [`IPv4` address] if it is either
+ /// an [IPv4-compatible] address as defined in [IETF RFC 4291 section 2.5.5.1],
+ /// or an [IPv4-mapped] address as defined in [IETF RFC 4291 section 2.5.5.2],
+ /// otherwise returns [`None`].
+ ///
+ /// Note that this will return an [`IPv4` address] for the IPv6 loopback address `::1`. Use
+ /// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
+ ///
+ /// `::a.b.c.d` and `::ffff:a.b.c.d` become `a.b.c.d`. `::1` becomes `0.0.0.1`.
+ /// All addresses *not* starting with either all zeroes or `::ffff` will return `None`.
+ ///
+ /// [`IPv4` address]: Ipv4Addr
+ /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
+ /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
+ /// [IETF RFC 4291 section 2.5.5.1]: https://tools.ietf.org/html/rfc4291#section-2.5.5.1
+ /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{Ipv4Addr, Ipv6Addr};
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(),
+ /// Some(Ipv4Addr::new(192, 10, 2, 255)));
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
+ /// Some(Ipv4Addr::new(0, 0, 0, 1)));
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_ipv4(&self) -> Option<Ipv4Addr> {
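+ // The sixth segment distinguishes IPv4-compatible (`0`, i.e. `::a.b.c.d`) from
+ // IPv4-mapped (`0xffff`, i.e. `::ffff:a.b.c.d`) addresses.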
+ if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() {
+ let [a, b] = ab.to_be_bytes();
+ let [c, d] = cd.to_be_bytes();
+ Some(Ipv4Addr::new(a, b, c, d))
+ } else {
+ None
+ }
+ }
+
+ /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped address, otherwise it
+ /// returns `self` wrapped in an `IpAddr::V6`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip)]
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false);
+ /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true);
+ /// ```
+ #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+ #[unstable(feature = "ip", issue = "27709")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn to_canonical(&self) -> IpAddr {
+ if let Some(mapped) = self.to_ipv4_mapped() {
+ return IpAddr::V4(mapped);
+ }
+ IpAddr::V6(*self)
+ }
+
+ /// Returns the sixteen eight-bit integers the IPv6 address consists of.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(),
+ /// [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+ /// ```
+ #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")]
+ #[stable(feature = "ipv6_to_octets", since = "1.12.0")]
+ #[must_use]
+ #[inline]
+ pub const fn octets(&self) -> [u8; 16] {
+ self.octets
+ }
+}
+
+/// Writes an `Ipv6Addr`, conforming to the canonical style described by
+/// [RFC 5952](https://tools.ietf.org/html/rfc5952).
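+///
+/// # Examples
+///
+/// A short example of the canonical form produced by this formatter (the
+/// longest run of zero segments is compressed to `::`, as exercised by the
+/// formatting tests below):
+///
+/// ```
+/// use std::net::Ipv6Addr;
+///
+/// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1).to_string(), "2001:db8::1");
+/// ```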
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Ipv6Addr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // If there are no alignment requirements, write out the IP address to
+ // f. Otherwise, write it to a local buffer, then use f.pad.
+ if f.precision().is_none() && f.width().is_none() {
+ let segments = self.segments();
+
+ // Special case for :: and ::1; otherwise they get written with the
+ // IPv4 formatter
+ if self.is_unspecified() {
+ f.write_str("::")
+ } else if self.is_loopback() {
+ f.write_str("::1")
+ } else if let Some(ipv4) = self.to_ipv4() {
+ match segments[5] {
+ // IPv4 Compatible address
+ 0 => write!(f, "::{}", ipv4),
+ // IPv4 Mapped address
+ 0xffff => write!(f, "::ffff:{}", ipv4),
+ _ => unreachable!(),
+ }
+ } else {
+ #[derive(Copy, Clone, Default)]
+ struct Span {
+ start: usize,
+ len: usize,
+ }
+
+ // Find the inner 0 span
+ let zeroes = {
+ let mut longest = Span::default();
+ let mut current = Span::default();
+
+ for (i, &segment) in segments.iter().enumerate() {
+ if segment == 0 {
+ if current.len == 0 {
+ current.start = i;
+ }
+
+ current.len += 1;
+
+ if current.len > longest.len {
+ longest = current;
+ }
+ } else {
+ current = Span::default();
+ }
+ }
+
+ longest
+ };
+
+ /// Write a colon-separated part of the address
+ #[inline]
+ fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result {
+ if let Some((first, tail)) = chunk.split_first() {
+ write!(f, "{:x}", first)?;
+ for segment in tail {
+ f.write_char(':')?;
+ write!(f, "{:x}", segment)?;
+ }
+ }
+ Ok(())
+ }
+
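+ // Per RFC 5952, a run of zero segments is only compressed to `::` if it is at least two segments long.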
+ if zeroes.len > 1 {
+ fmt_subslice(f, &segments[..zeroes.start])?;
+ f.write_str("::")?;
+ fmt_subslice(f, &segments[zeroes.start + zeroes.len..])
+ } else {
+ fmt_subslice(f, &segments)
+ }
+ }
+ } else {
+ // Slow path: write the address to a local buffer, then use f.pad.
+ // Defined recursively by using the fast path to write to the
+ // buffer.
+
+ // This is the largest possible size of an IPv6 address
+ const IPV6_BUF_LEN: usize = (4 * 8) + 7;
+ let mut buf = [0u8; IPV6_BUF_LEN];
+ let mut buf_slice = &mut buf[..];
+
+ // Note: This call to write should never fail, so unwrap is okay.
+ write!(buf_slice, "{}", self).unwrap();
+ let len = IPV6_BUF_LEN - buf_slice.len();
+
+ // This is safe because we know exactly what can be in this buffer
+ let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
+ f.pad(buf)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Ipv6Addr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, fmt)
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<IpAddr> for Ipv6Addr {
+ #[inline]
+ fn eq(&self, other: &IpAddr) -> bool {
+ match other {
+ IpAddr::V4(_) => false,
+ IpAddr::V6(v6) => self == v6,
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialEq<Ipv6Addr> for IpAddr {
+ #[inline]
+ fn eq(&self, other: &Ipv6Addr) -> bool {
+ match self {
+ IpAddr::V4(_) => false,
+ IpAddr::V6(v6) => v6 == other,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for Ipv6Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<Ipv6Addr> for IpAddr {
+ #[inline]
+ fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
+ match self {
+ IpAddr::V4(_) => Some(Ordering::Less),
+ IpAddr::V6(v6) => v6.partial_cmp(other),
+ }
+ }
+}
+
+#[stable(feature = "ip_cmp", since = "1.16.0")]
+impl PartialOrd<IpAddr> for Ipv6Addr {
+ #[inline]
+ fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
+ match other {
+ IpAddr::V4(_) => Some(Ordering::Greater),
+ IpAddr::V6(v6) => self.partial_cmp(v6),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for Ipv6Addr {
+ #[inline]
+ fn cmp(&self, other: &Ipv6Addr) -> Ordering {
+ self.segments().cmp(&other.segments())
+ }
+}
+
+impl IntoInner<c::in6_addr> for Ipv6Addr {
+ fn into_inner(self) -> c::in6_addr {
+ c::in6_addr { s6_addr: self.octets }
+ }
+}
+impl FromInner<c::in6_addr> for Ipv6Addr {
+ #[inline]
+ fn from_inner(addr: c::in6_addr) -> Ipv6Addr {
+ Ipv6Addr { octets: addr.s6_addr }
+ }
+}
+
+#[stable(feature = "i128", since = "1.26.0")]
+impl From<Ipv6Addr> for u128 {
+ /// Converts an `Ipv6Addr` into a host byte order `u128`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// );
+ /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
+ /// ```
+ #[inline]
+ fn from(ip: Ipv6Addr) -> u128 {
+ u128::from_be_bytes(ip.octets)
+ }
+}
+#[stable(feature = "i128", since = "1.26.0")]
+impl From<u128> for Ipv6Addr {
+ /// Converts a host byte order `u128` into an `Ipv6Addr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128);
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// ),
+ /// addr);
+ /// ```
+ #[inline]
+ fn from(ip: u128) -> Ipv6Addr {
+ Ipv6Addr::from(ip.to_be_bytes())
+ }
+}
+
+#[stable(feature = "ipv6_from_octets", since = "1.9.0")]
+impl From<[u8; 16]> for Ipv6Addr {
+ /// Creates an `Ipv6Addr` from a sixteen element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::from([
+ /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
+ /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
+ /// ]);
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x1918, 0x1716,
+ /// 0x1514, 0x1312,
+ /// 0x1110, 0x0f0e,
+ /// 0x0d0c, 0x0b0a
+ /// ),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 16]) -> Ipv6Addr {
+ Ipv6Addr { octets }
+ }
+}
+
+#[stable(feature = "ipv6_from_segments", since = "1.16.0")]
+impl From<[u16; 8]> for Ipv6Addr {
+ /// Creates an `Ipv6Addr` from an eight element 16-bit array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::from([
+ /// 525u16, 524u16, 523u16, 522u16,
+ /// 521u16, 520u16, 519u16, 518u16,
+ /// ]);
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x20d, 0x20c,
+ /// 0x20b, 0x20a,
+ /// 0x209, 0x208,
+ /// 0x207, 0x206
+ /// ),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(segments: [u16; 8]) -> Ipv6Addr {
+ let [a, b, c, d, e, f, g, h] = segments;
+ Ipv6Addr::new(a, b, c, d, e, f, g, h)
+ }
+}
+
+#[stable(feature = "ip_from_slice", since = "1.17.0")]
+impl From<[u8; 16]> for IpAddr {
+ /// Creates an `IpAddr::V6` from a sixteen element byte array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr};
+ ///
+ /// let addr = IpAddr::from([
+ /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
+ /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
+ /// ]);
+ /// assert_eq!(
+ /// IpAddr::V6(Ipv6Addr::new(
+ /// 0x1918, 0x1716,
+ /// 0x1514, 0x1312,
+ /// 0x1110, 0x0f0e,
+ /// 0x0d0c, 0x0b0a
+ /// )),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(octets: [u8; 16]) -> IpAddr {
+ IpAddr::V6(Ipv6Addr::from(octets))
+ }
+}
+
+#[stable(feature = "ip_from_slice", since = "1.17.0")]
+impl From<[u16; 8]> for IpAddr {
+ /// Creates an `IpAddr::V6` from an eight element 16-bit array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::net::{IpAddr, Ipv6Addr};
+ ///
+ /// let addr = IpAddr::from([
+ /// 525u16, 524u16, 523u16, 522u16,
+ /// 521u16, 520u16, 519u16, 518u16,
+ /// ]);
+ /// assert_eq!(
+ /// IpAddr::V6(Ipv6Addr::new(
+ /// 0x20d, 0x20c,
+ /// 0x20b, 0x20a,
+ /// 0x209, 0x208,
+ /// 0x207, 0x206
+ /// )),
+ /// addr
+ /// );
+ /// ```
+ #[inline]
+ fn from(segments: [u16; 8]) -> IpAddr {
+ IpAddr::V6(Ipv6Addr::from(segments))
+ }
+}
diff --git a/library/std/src/net/ip/tests.rs b/library/std/src/net/ip/tests.rs
new file mode 100644
index 000000000..c29509331
--- /dev/null
+++ b/library/std/src/net/ip/tests.rs
@@ -0,0 +1,969 @@
+use crate::net::test::{sa4, sa6, tsa};
+use crate::net::*;
+use crate::str::FromStr;
+
+#[test]
+fn test_from_str_ipv4() {
+ assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse());
+ assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse());
+ assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse());
+
+ // out of range
+ let none: Option<Ipv4Addr> = "256.0.0.1".parse().ok();
+ assert_eq!(None, none);
+ // too short
+ let none: Option<Ipv4Addr> = "255.0.0".parse().ok();
+ assert_eq!(None, none);
+ // too long
+ let none: Option<Ipv4Addr> = "255.0.0.1.2".parse().ok();
+ assert_eq!(None, none);
+ // no number between dots
+ let none: Option<Ipv4Addr> = "255.0..1".parse().ok();
+ assert_eq!(None, none);
+ // octal
+ let none: Option<Ipv4Addr> = "255.0.0.01".parse().ok();
+ assert_eq!(None, none);
+ // octal zero
+ let none: Option<Ipv4Addr> = "255.0.0.00".parse().ok();
+ assert_eq!(None, none);
+ let none: Option<Ipv4Addr> = "255.0.00.0".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn test_from_str_ipv6() {
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse());
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse());
+
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse());
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse());
+
+ assert_eq!(Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)), "2a02:6b8::11:11".parse());
+
+ // too long group
+ let none: Option<Ipv6Addr> = "::00000".parse().ok();
+ assert_eq!(None, none);
+ // too short
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7".parse().ok();
+ assert_eq!(None, none);
+ // too long
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:8:9".parse().ok();
+ assert_eq!(None, none);
+ // triple colon
+ let none: Option<Ipv6Addr> = "1:2:::6:7:8".parse().ok();
+ assert_eq!(None, none);
+ // two double colons
+ let none: Option<Ipv6Addr> = "1:2::6::8".parse().ok();
+ assert_eq!(None, none);
+ // `::` indicating zero groups of zeros
+ let none: Option<Ipv6Addr> = "1:2:3:4::5:6:7:8".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn test_from_str_ipv4_in_ipv6() {
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse());
+ assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)), "::FFFF:192.0.2.33".parse());
+ assert_eq!(
+ Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)),
+ "64:ff9b::192.0.2.33".parse()
+ );
+ assert_eq!(
+ Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)),
+ "2001:db8:122:c000:2:2100:192.0.2.33".parse()
+ );
+
+ // colon after v4
+ let none: Option<Ipv4Addr> = "::127.0.0.1:".parse().ok();
+ assert_eq!(None, none);
+ // not enough groups
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:127.0.0.1".parse().ok();
+ assert_eq!(None, none);
+ // too many groups
+ let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:127.0.0.1".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn test_from_str_socket_addr() {
+ assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
+ assert_eq!(Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
+ assert_eq!(
+ Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)),
+ "[2a02:6b8:0:1::1]:53".parse()
+ );
+ assert_eq!(
+ Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0)),
+ "[2a02:6b8:0:1::1]:53".parse()
+ );
+ assert_eq!(Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)), "[::127.0.0.1]:22".parse());
+ assert_eq!(
+ Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)),
+ "[::127.0.0.1]:22".parse()
+ );
+
+ // without port
+ let none: Option<SocketAddr> = "127.0.0.1".parse().ok();
+ assert_eq!(None, none);
+ // without port
+ let none: Option<SocketAddr> = "127.0.0.1:".parse().ok();
+ assert_eq!(None, none);
+ // wrong brackets around v4
+ let none: Option<SocketAddr> = "[127.0.0.1]:22".parse().ok();
+ assert_eq!(None, none);
+ // port out of range
+ let none: Option<SocketAddr> = "127.0.0.1:123456".parse().ok();
+ assert_eq!(None, none);
+}
+
+#[test]
+fn ipv4_addr_to_string() {
+ assert_eq!(Ipv4Addr::new(127, 0, 0, 1).to_string(), "127.0.0.1");
+ // Short address
+ assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1");
+ // Long address
+ assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127");
+
+ // Test padding
+ assert_eq!(&format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1 ");
+ assert_eq!(&format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), " 1.1.1.1");
+}
+
+#[test]
+fn ipv6_addr_to_string() {
+ // ipv4-mapped address
+ let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
+ assert_eq!(a1.to_string(), "::ffff:192.0.2.128");
+
+ // ipv4-compatible address
+ let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280);
+ assert_eq!(a1.to_string(), "::192.0.2.128");
+
+ // v6 address with no zero segments
+ assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f");
+
+ // longest possible IPv6 length
+ assert_eq!(
+ Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888).to_string(),
+ "1111:2222:3333:4444:5555:6666:7777:8888"
+ );
+ // padding
+ assert_eq!(&format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), "1:2:3:4:5:6:7:8 ");
+ assert_eq!(&format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), " 1:2:3:4:5:6:7:8");
+
+ // reduce a single run of zeros
+ assert_eq!(
+ "ae::ffff:102:304",
+ Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string()
+ );
+
+ // don't reduce just a single zero segment
+ assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string());
+
+ // 'any' address
+ assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string());
+
+ // loopback address
+ assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string());
+
+ // ends in zeros
+ assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string());
+
+ // two runs of zeros, second one is longer
+ assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string());
+
+ // two runs of zeros, equal length
+ assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string());
+
+ // don't prefix `0x` to each segment in `dbg!`.
+ assert_eq!("1::4:5:0:0:8", &format!("{:#?}", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8)));
+}
+
+#[test]
+fn ipv4_to_ipv6() {
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678),
+ Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped()
+ );
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678),
+ Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible()
+ );
+}
+
+#[test]
+fn ipv6_to_ipv4_mapped() {
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4_mapped(),
+ Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
+ );
+ assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4_mapped(), None);
+}
+
+#[test]
+fn ipv6_to_ipv4() {
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(),
+ Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
+ );
+ assert_eq!(
+ Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(),
+ Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
+ );
+ assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None);
+}
+
+#[test]
+fn ip_properties() {
+ macro_rules! ip {
+ ($s:expr) => {
+ IpAddr::from_str($s).unwrap()
+ };
+ }
+
+ macro_rules! check {
+ ($s:expr) => {
+ check!($s, 0);
+ };
+
+ ($s:expr, $mask:expr) => {{
+ let unspec: u8 = 1 << 0;
+ let loopback: u8 = 1 << 1;
+ let global: u8 = 1 << 2;
+ let multicast: u8 = 1 << 3;
+ let doc: u8 = 1 << 4;
+ let benchmarking: u8 = 1 << 5;
+
+ if ($mask & unspec) == unspec {
+ assert!(ip!($s).is_unspecified());
+ } else {
+ assert!(!ip!($s).is_unspecified());
+ }
+
+ if ($mask & loopback) == loopback {
+ assert!(ip!($s).is_loopback());
+ } else {
+ assert!(!ip!($s).is_loopback());
+ }
+
+ if ($mask & global) == global {
+ assert!(ip!($s).is_global());
+ } else {
+ assert!(!ip!($s).is_global());
+ }
+
+ if ($mask & multicast) == multicast {
+ assert!(ip!($s).is_multicast());
+ } else {
+ assert!(!ip!($s).is_multicast());
+ }
+
+ if ($mask & doc) == doc {
+ assert!(ip!($s).is_documentation());
+ } else {
+ assert!(!ip!($s).is_documentation());
+ }
+
+ if ($mask & benchmarking) == benchmarking {
+ assert!(ip!($s).is_benchmarking());
+ } else {
+ assert!(!ip!($s).is_benchmarking());
+ }
+ }};
+ }
+
+ let unspec: u8 = 1 << 0;
+ let loopback: u8 = 1 << 1;
+ let global: u8 = 1 << 2;
+ let multicast: u8 = 1 << 3;
+ let doc: u8 = 1 << 4;
+ let benchmarking: u8 = 1 << 5;
+
+ check!("0.0.0.0", unspec);
+ check!("0.0.0.1");
+ check!("0.1.0.0");
+ check!("10.9.8.7");
+ check!("127.1.2.3", loopback);
+ check!("172.31.254.253");
+ check!("169.254.253.242");
+ check!("192.0.2.183", doc);
+ check!("192.1.2.183", global);
+ check!("192.168.254.253");
+ check!("198.51.100.0", doc);
+ check!("203.0.113.0", doc);
+ check!("203.2.113.0", global);
+ check!("224.0.0.0", global | multicast);
+ check!("239.255.255.255", global | multicast);
+ check!("255.255.255.255");
+ // make sure benchmarking addresses are not global
+ check!("198.18.0.0", benchmarking);
+ check!("198.18.54.2", benchmarking);
+ check!("198.19.255.255", benchmarking);
+ // make sure addresses reserved for protocol assignment are not global
+ check!("192.0.0.0");
+ check!("192.0.0.255");
+ check!("192.0.0.100");
+ // make sure reserved addresses are not global
+ check!("240.0.0.0");
+ check!("251.54.1.76");
+ check!("254.255.255.255");
+ // make sure shared addresses are not global
+ check!("100.64.0.0");
+ check!("100.127.255.255");
+ check!("100.100.100.0");
+
+ check!("::", unspec);
+ check!("::1", loopback);
+ check!("::0.0.0.2", global);
+ check!("1::", global);
+ check!("fc00::");
+ check!("fdff:ffff::");
+ check!("fe80:ffff::");
+ check!("febf:ffff::");
+ check!("fec0::", global);
+ check!("ff01::", multicast);
+ check!("ff02::", multicast);
+ check!("ff03::", multicast);
+ check!("ff04::", multicast);
+ check!("ff05::", multicast);
+ check!("ff08::", multicast);
+ check!("ff0e::", global | multicast);
+ check!("2001:db8:85a3::8a2e:370:7334", doc);
+ check!("2001:2::ac32:23ff:21", global | benchmarking);
+ check!("102:304:506:708:90a:b0c:d0e:f10", global);
+}
+
+#[test]
+fn ipv4_properties() {
+ macro_rules! ip {
+ ($s:expr) => {
+ Ipv4Addr::from_str($s).unwrap()
+ };
+ }
+
+ macro_rules! check {
+ ($s:expr) => {
+ check!($s, 0);
+ };
+
+ ($s:expr, $mask:expr) => {{
+ let unspec: u16 = 1 << 0;
+ let loopback: u16 = 1 << 1;
+ let private: u16 = 1 << 2;
+ let link_local: u16 = 1 << 3;
+ let global: u16 = 1 << 4;
+ let multicast: u16 = 1 << 5;
+ let broadcast: u16 = 1 << 6;
+ let documentation: u16 = 1 << 7;
+ let benchmarking: u16 = 1 << 8;
+ let reserved: u16 = 1 << 10;
+ let shared: u16 = 1 << 11;
+
+ if ($mask & unspec) == unspec {
+ assert!(ip!($s).is_unspecified());
+ } else {
+ assert!(!ip!($s).is_unspecified());
+ }
+
+ if ($mask & loopback) == loopback {
+ assert!(ip!($s).is_loopback());
+ } else {
+ assert!(!ip!($s).is_loopback());
+ }
+
+ if ($mask & private) == private {
+ assert!(ip!($s).is_private());
+ } else {
+ assert!(!ip!($s).is_private());
+ }
+
+ if ($mask & link_local) == link_local {
+ assert!(ip!($s).is_link_local());
+ } else {
+ assert!(!ip!($s).is_link_local());
+ }
+
+ if ($mask & global) == global {
+ assert!(ip!($s).is_global());
+ } else {
+ assert!(!ip!($s).is_global());
+ }
+
+ if ($mask & multicast) == multicast {
+ assert!(ip!($s).is_multicast());
+ } else {
+ assert!(!ip!($s).is_multicast());
+ }
+
+ if ($mask & broadcast) == broadcast {
+ assert!(ip!($s).is_broadcast());
+ } else {
+ assert!(!ip!($s).is_broadcast());
+ }
+
+ if ($mask & documentation) == documentation {
+ assert!(ip!($s).is_documentation());
+ } else {
+ assert!(!ip!($s).is_documentation());
+ }
+
+ if ($mask & benchmarking) == benchmarking {
+ assert!(ip!($s).is_benchmarking());
+ } else {
+ assert!(!ip!($s).is_benchmarking());
+ }
+
+ if ($mask & reserved) == reserved {
+ assert!(ip!($s).is_reserved());
+ } else {
+ assert!(!ip!($s).is_reserved());
+ }
+
+ if ($mask & shared) == shared {
+ assert!(ip!($s).is_shared());
+ } else {
+ assert!(!ip!($s).is_shared());
+ }
+ }};
+ }
+
+ let unspec: u16 = 1 << 0;
+ let loopback: u16 = 1 << 1;
+ let private: u16 = 1 << 2;
+ let link_local: u16 = 1 << 3;
+ let global: u16 = 1 << 4;
+ let multicast: u16 = 1 << 5;
+ let broadcast: u16 = 1 << 6;
+ let documentation: u16 = 1 << 7;
+ let benchmarking: u16 = 1 << 8;
+ let reserved: u16 = 1 << 10;
+ let shared: u16 = 1 << 11;
+
+ check!("0.0.0.0", unspec);
+ check!("0.0.0.1");
+ check!("0.1.0.0");
+ check!("10.9.8.7", private);
+ check!("127.1.2.3", loopback);
+ check!("172.31.254.253", private);
+ check!("169.254.253.242", link_local);
+ check!("192.0.2.183", documentation);
+ check!("192.1.2.183", global);
+ check!("192.168.254.253", private);
+ check!("198.51.100.0", documentation);
+ check!("203.0.113.0", documentation);
+ check!("203.2.113.0", global);
+ check!("224.0.0.0", global | multicast);
+ check!("239.255.255.255", global | multicast);
+ check!("255.255.255.255", broadcast);
+ check!("198.18.0.0", benchmarking);
+ check!("198.18.54.2", benchmarking);
+ check!("198.19.255.255", benchmarking);
+ check!("192.0.0.0");
+ check!("192.0.0.255");
+ check!("192.0.0.100");
+ check!("240.0.0.0", reserved);
+ check!("251.54.1.76", reserved);
+ check!("254.255.255.255", reserved);
+ check!("100.64.0.0", shared);
+ check!("100.127.255.255", shared);
+ check!("100.100.100.0", shared);
+}
+
+#[test]
+fn ipv6_properties() {
+ macro_rules! ip {
+ ($s:expr) => {
+ Ipv6Addr::from_str($s).unwrap()
+ };
+ }
+
+ macro_rules! check {
+ ($s:expr, &[$($octet:expr),*], $mask:expr) => {
+ assert_eq!($s, ip!($s).to_string());
+ let octets = &[$($octet),*];
+ assert_eq!(&ip!($s).octets(), octets);
+ assert_eq!(Ipv6Addr::from(*octets), ip!($s));
+
+ let unspecified: u32 = 1 << 0;
+ let loopback: u32 = 1 << 1;
+ let unique_local: u32 = 1 << 2;
+ let global: u32 = 1 << 3;
+ let unicast_link_local: u32 = 1 << 4;
+ let unicast_global: u32 = 1 << 7;
+ let documentation: u32 = 1 << 8;
+ let benchmarking: u32 = 1 << 16;
+ let multicast_interface_local: u32 = 1 << 9;
+ let multicast_link_local: u32 = 1 << 10;
+ let multicast_realm_local: u32 = 1 << 11;
+ let multicast_admin_local: u32 = 1 << 12;
+ let multicast_site_local: u32 = 1 << 13;
+ let multicast_organization_local: u32 = 1 << 14;
+ let multicast_global: u32 = 1 << 15;
+ let multicast: u32 = multicast_interface_local
+ | multicast_admin_local
+ | multicast_global
+ | multicast_link_local
+ | multicast_realm_local
+ | multicast_site_local
+ | multicast_organization_local;
+
+ if ($mask & unspecified) == unspecified {
+ assert!(ip!($s).is_unspecified());
+ } else {
+ assert!(!ip!($s).is_unspecified());
+ }
+ if ($mask & loopback) == loopback {
+ assert!(ip!($s).is_loopback());
+ } else {
+ assert!(!ip!($s).is_loopback());
+ }
+ if ($mask & unique_local) == unique_local {
+ assert!(ip!($s).is_unique_local());
+ } else {
+ assert!(!ip!($s).is_unique_local());
+ }
+ if ($mask & global) == global {
+ assert!(ip!($s).is_global());
+ } else {
+ assert!(!ip!($s).is_global());
+ }
+ if ($mask & unicast_link_local) == unicast_link_local {
+ assert!(ip!($s).is_unicast_link_local());
+ } else {
+ assert!(!ip!($s).is_unicast_link_local());
+ }
+ if ($mask & unicast_global) == unicast_global {
+ assert!(ip!($s).is_unicast_global());
+ } else {
+ assert!(!ip!($s).is_unicast_global());
+ }
+ if ($mask & documentation) == documentation {
+ assert!(ip!($s).is_documentation());
+ } else {
+ assert!(!ip!($s).is_documentation());
+ }
+ if ($mask & benchmarking) == benchmarking {
+ assert!(ip!($s).is_benchmarking());
+ } else {
+ assert!(!ip!($s).is_benchmarking());
+ }
+ if ($mask & multicast) != 0 {
+ assert!(ip!($s).multicast_scope().is_some());
+ assert!(ip!($s).is_multicast());
+ } else {
+ assert!(ip!($s).multicast_scope().is_none());
+ assert!(!ip!($s).is_multicast());
+ }
+ if ($mask & multicast_interface_local) == multicast_interface_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::InterfaceLocal);
+ }
+ if ($mask & multicast_link_local) == multicast_link_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::LinkLocal);
+ }
+ if ($mask & multicast_realm_local) == multicast_realm_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::RealmLocal);
+ }
+ if ($mask & multicast_admin_local) == multicast_admin_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::AdminLocal);
+ }
+ if ($mask & multicast_site_local) == multicast_site_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::SiteLocal);
+ }
+ if ($mask & multicast_organization_local) == multicast_organization_local {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::OrganizationLocal);
+ }
+ if ($mask & multicast_global) == multicast_global {
+ assert_eq!(ip!($s).multicast_scope().unwrap(),
+ Ipv6MulticastScope::Global);
+ }
+ }
+ }
+
+ let unspecified: u32 = 1 << 0;
+ let loopback: u32 = 1 << 1;
+ let unique_local: u32 = 1 << 2;
+ let global: u32 = 1 << 3;
+ let unicast_link_local: u32 = 1 << 4;
+ let unicast_global: u32 = 1 << 7;
+ let documentation: u32 = 1 << 8;
+ let benchmarking: u32 = 1 << 16;
+ let multicast_interface_local: u32 = 1 << 9;
+ let multicast_link_local: u32 = 1 << 10;
+ let multicast_realm_local: u32 = 1 << 11;
+ let multicast_admin_local: u32 = 1 << 12;
+ let multicast_site_local: u32 = 1 << 13;
+ let multicast_organization_local: u32 = 1 << 14;
+ let multicast_global: u32 = 1 << 15;
+
+ check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified);
+
+ check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback);
+
+ check!("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global);
+
+ check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global);
+
+ check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local);
+
+ check!(
+ "fdff:ffff::",
+ &[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unique_local
+ );
+
+ check!(
+ "fe80:ffff::",
+ &[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_link_local
+ );
+
+ check!("fe80::", &[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local);
+
+ check!(
+ "febf:ffff::",
+ &[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_link_local
+ );
+
+ check!("febf::", &[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local);
+
+ check!(
+ "febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
+ &[
+ 0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff
+ ],
+ unicast_link_local
+ );
+
+ check!(
+ "fe80::ffff:ffff:ffff:ffff",
+ &[
+ 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff
+ ],
+ unicast_link_local
+ );
+
+ check!(
+ "fe80:0:0:1::",
+ &[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_link_local
+ );
+
+ check!(
+ "fec0::",
+ &[0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ unicast_global | global
+ );
+
+ check!(
+ "ff01::",
+ &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_interface_local
+ );
+
+ check!("ff02::", &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_link_local);
+
+ check!("ff03::", &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_realm_local);
+
+ check!("ff04::", &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_admin_local);
+
+ check!("ff05::", &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], multicast_site_local);
+
+ check!(
+ "ff08::",
+ &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_organization_local
+ );
+
+ check!(
+ "ff0e::",
+ &[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ multicast_global | global
+ );
+
+ check!(
+ "2001:db8:85a3::8a2e:370:7334",
+ &[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34],
+ documentation
+ );
+
+ check!(
+ "2001:2::ac32:23ff:21",
+ &[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21],
+ global | unicast_global | benchmarking
+ );
+
+ check!(
+ "102:304:506:708:90a:b0c:d0e:f10",
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ global | unicast_global
+ );
+}
+
+#[test]
+fn to_socket_addr_socketaddr() {
+ let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 12345);
+ assert_eq!(Ok(vec![a]), tsa(a));
+}
+
+#[test]
+fn test_ipv4_to_int() {
+ let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
+ assert_eq!(u32::from(a), 0x11223344);
+}
+
+#[test]
+fn test_int_to_ipv4() {
+ let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
+ assert_eq!(Ipv4Addr::from(0x11223344), a);
+}
+
+#[test]
+fn test_ipv6_to_int() {
+ let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
+ assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128);
+}
+
+#[test]
+fn test_int_to_ipv6() {
+ let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
+ assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a);
+}
+
+#[test]
+fn ipv4_from_constructors() {
+ assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1));
+ assert!(Ipv4Addr::LOCALHOST.is_loopback());
+ assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0));
+ assert!(Ipv4Addr::UNSPECIFIED.is_unspecified());
+ assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255));
+ assert!(Ipv4Addr::BROADCAST.is_broadcast());
+}
+
+#[test]
+fn ipv6_from_constructors() {
+ assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+ assert!(Ipv6Addr::LOCALHOST.is_loopback());
+ assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
+ assert!(Ipv6Addr::UNSPECIFIED.is_unspecified());
+}
+
+#[test]
+fn ipv4_from_octets() {
+ assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1))
+}
+
+#[test]
+fn ipv6_from_segments() {
+ let from_u16s =
+ Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
+ let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff);
+ assert_eq!(new, from_u16s);
+}
+
+#[test]
+fn ipv6_from_octets() {
+ let from_u16s =
+ Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
+ let from_u8s = Ipv6Addr::from([
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
+ 0xff,
+ ]);
+ assert_eq!(from_u16s, from_u8s);
+}
+
+#[test]
+fn cmp() {
+ let v41 = Ipv4Addr::new(100, 64, 3, 3);
+ let v42 = Ipv4Addr::new(192, 0, 2, 2);
+ let v61 = "2001:db8:f00::1002".parse::<Ipv6Addr>().unwrap();
+ let v62 = "2001:db8:f00::2001".parse::<Ipv6Addr>().unwrap();
+ assert!(v41 < v42);
+ assert!(v61 < v62);
+
+ assert_eq!(v41, IpAddr::V4(v41));
+ assert_eq!(v61, IpAddr::V6(v61));
+ assert!(v41 != IpAddr::V4(v42));
+ assert!(v61 != IpAddr::V6(v62));
+
+ assert!(v41 < IpAddr::V4(v42));
+ assert!(v61 < IpAddr::V6(v62));
+ assert!(IpAddr::V4(v41) < v42);
+ assert!(IpAddr::V6(v61) < v62);
+
+ assert!(v41 < IpAddr::V6(v61));
+ assert!(IpAddr::V4(v41) < v61);
+}
+
+#[test]
+fn is_v4() {
+ let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3));
+ assert!(ip.is_ipv4());
+ assert!(!ip.is_ipv6());
+}
+
+#[test]
+fn is_v6() {
+ let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678));
+ assert!(!ip.is_ipv4());
+ assert!(ip.is_ipv6());
+}
+
+#[test]
+fn ipv4_const() {
+ // test that the methods of `Ipv4Addr` are usable in a const context
+
+ const IP_ADDRESS: Ipv4Addr = Ipv4Addr::new(127, 0, 0, 1);
+ assert_eq!(IP_ADDRESS, Ipv4Addr::LOCALHOST);
+
+ const OCTETS: [u8; 4] = IP_ADDRESS.octets();
+ assert_eq!(OCTETS, [127, 0, 0, 1]);
+
+ const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
+ assert!(!IS_UNSPECIFIED);
+
+ const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
+ assert!(IS_LOOPBACK);
+
+ const IS_PRIVATE: bool = IP_ADDRESS.is_private();
+ assert!(!IS_PRIVATE);
+
+ const IS_LINK_LOCAL: bool = IP_ADDRESS.is_link_local();
+ assert!(!IS_LINK_LOCAL);
+
+ const IS_GLOBAL: bool = IP_ADDRESS.is_global();
+ assert!(!IS_GLOBAL);
+
+ const IS_SHARED: bool = IP_ADDRESS.is_shared();
+ assert!(!IS_SHARED);
+
+ const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking();
+ assert!(!IS_BENCHMARKING);
+
+ const IS_RESERVED: bool = IP_ADDRESS.is_reserved();
+ assert!(!IS_RESERVED);
+
+ const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
+ assert!(!IS_MULTICAST);
+
+ const IS_BROADCAST: bool = IP_ADDRESS.is_broadcast();
+ assert!(!IS_BROADCAST);
+
+ const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation();
+ assert!(!IS_DOCUMENTATION);
+
+ const IP_V6_COMPATIBLE: Ipv6Addr = IP_ADDRESS.to_ipv6_compatible();
+ assert_eq!(
+ IP_V6_COMPATIBLE,
+ Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 1])
+ );
+
+ const IP_V6_MAPPED: Ipv6Addr = IP_ADDRESS.to_ipv6_mapped();
+ assert_eq!(
+ IP_V6_MAPPED,
+ Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 1])
+ );
+}
+
+#[test]
+fn ipv6_const() {
+ // test that the methods of `Ipv6Addr` are usable in a const context
+
+ const IP_ADDRESS: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq!(IP_ADDRESS, Ipv6Addr::LOCALHOST);
+
+ const SEGMENTS: [u16; 8] = IP_ADDRESS.segments();
+ assert_eq!(SEGMENTS, [0, 0, 0, 0, 0, 0, 0, 1]);
+
+ const OCTETS: [u8; 16] = IP_ADDRESS.octets();
+ assert_eq!(OCTETS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]);
+
+ const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
+ assert!(!IS_UNSPECIFIED);
+
+ const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
+ assert!(IS_LOOPBACK);
+
+ const IS_GLOBAL: bool = IP_ADDRESS.is_global();
+ assert!(!IS_GLOBAL);
+
+ const IS_UNIQUE_LOCAL: bool = IP_ADDRESS.is_unique_local();
+ assert!(!IS_UNIQUE_LOCAL);
+
+ const IS_UNICAST_LINK_LOCAL: bool = IP_ADDRESS.is_unicast_link_local();
+ assert!(!IS_UNICAST_LINK_LOCAL);
+
+ const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation();
+ assert!(!IS_DOCUMENTATION);
+
+ const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking();
+ assert!(!IS_BENCHMARKING);
+
+ const IS_UNICAST_GLOBAL: bool = IP_ADDRESS.is_unicast_global();
+ assert!(!IS_UNICAST_GLOBAL);
+
+ const MULTICAST_SCOPE: Option<Ipv6MulticastScope> = IP_ADDRESS.multicast_scope();
+ assert_eq!(MULTICAST_SCOPE, None);
+
+ const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
+ assert!(!IS_MULTICAST);
+
+ const IP_V4: Option<Ipv4Addr> = IP_ADDRESS.to_ipv4();
+ assert_eq!(IP_V4.unwrap(), Ipv4Addr::new(0, 0, 0, 1));
+}
+
+#[test]
+fn ip_const() {
+ // test that the methods of `IpAddr` are usable in a const context
+
+ const IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);
+
+ const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified();
+ assert!(!IS_UNSPECIFIED);
+
+ const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback();
+ assert!(IS_LOOPBACK);
+
+ const IS_GLOBAL: bool = IP_ADDRESS.is_global();
+ assert!(!IS_GLOBAL);
+
+ const IS_MULTICAST: bool = IP_ADDRESS.is_multicast();
+ assert!(!IS_MULTICAST);
+
+ const IS_IP_V4: bool = IP_ADDRESS.is_ipv4();
+ assert!(IS_IP_V4);
+
+ const IS_IP_V6: bool = IP_ADDRESS.is_ipv6();
+ assert!(!IS_IP_V6);
+}
+
+#[test]
+fn structural_match() {
+ // test that all IP types can be structurally matched upon
+
+ const IPV4: Ipv4Addr = Ipv4Addr::LOCALHOST;
+ match IPV4 {
+ Ipv4Addr::LOCALHOST => {}
+ _ => unreachable!(),
+ }
+
+ const IPV6: Ipv6Addr = Ipv6Addr::LOCALHOST;
+ match IPV6 {
+ Ipv6Addr::LOCALHOST => {}
+ _ => unreachable!(),
+ }
+
+ const IP: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);
+ match IP {
+ IpAddr::V4(Ipv4Addr::LOCALHOST) => {}
+ _ => unreachable!(),
+ }
+}
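A quick caller-side sketch of the property predicates exercised in the tests above. Most of them (`is_global`, `is_documentation`, `is_benchmarking`, and the multicast-scope helpers) are gated behind the unstable `ip` feature, so the example assumes a nightly toolchain; the addresses are the same documentation and multicast values used by the tests.

#![feature(ip)] // stability gate for the address-classification predicates

use std::net::{IpAddr, Ipv4Addr};

fn main() {
    // TEST-NET-1 is a documentation range, so it is not global.
    let doc = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 183));
    assert!(doc.is_documentation());
    assert!(!doc.is_global());

    // Global-scope multicast is both multicast and global.
    let mcast: IpAddr = "ff0e::".parse().unwrap();
    assert!(mcast.is_multicast());
    assert!(mcast.is_global());
}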
diff --git a/library/std/src/net/mod.rs b/library/std/src/net/mod.rs
new file mode 100644
index 000000000..e7a40bdaf
--- /dev/null
+++ b/library/std/src/net/mod.rs
@@ -0,0 +1,90 @@
+//! Networking primitives for TCP/UDP communication.
+//!
+//! This module provides networking functionality for the Transmission Control and User
+//! Datagram Protocols, as well as types for IP and socket addresses.
+//!
+//! # Organization
+//!
+//! * [`TcpListener`] and [`TcpStream`] provide functionality for communication over TCP
+//! * [`UdpSocket`] provides functionality for communication over UDP
+//! * [`IpAddr`] represents IP addresses of either IPv4 or IPv6; [`Ipv4Addr`] and
+//! [`Ipv6Addr`] are respectively IPv4 and IPv6 addresses
+//! * [`SocketAddr`] represents socket addresses of either IPv4 or IPv6; [`SocketAddrV4`]
+//! and [`SocketAddrV6`] are respectively IPv4 and IPv6 socket addresses
+//! * [`ToSocketAddrs`] is a trait that is used for generic address resolution when interacting
+//! with networking objects like [`TcpListener`], [`TcpStream`] or [`UdpSocket`]
+//! * Other types are return or parameter types for various methods in this module
+//!
+//! Rust disables inheritance of socket objects to child processes by default when possible, for
+//! example through the use of the `CLOEXEC` flag on UNIX systems or the `HANDLE_FLAG_INHERIT`
+//! flag on Windows.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::io::{self, ErrorKind};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::addr::{SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::ip::{IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::parser::AddrParseError;
+#[unstable(feature = "tcplistener_into_incoming", issue = "88339")]
+pub use self::tcp::IntoIncoming;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::tcp::{Incoming, TcpListener, TcpStream};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::udp::UdpSocket;
+
+mod addr;
+mod ip;
+mod parser;
+mod tcp;
+#[cfg(test)]
+mod test;
+mod udp;
+
+/// Possible values which can be passed to the [`TcpStream::shutdown`] method.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum Shutdown {
+ /// The reading portion of the [`TcpStream`] should be shut down.
+ ///
+ /// All currently blocked and future [reads] will return <code>[Ok]\(0)</code>.
+ ///
+ /// [reads]: crate::io::Read "io::Read"
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Read,
+ /// The writing portion of the [`TcpStream`] should be shut down.
+ ///
+ /// All currently blocked and future [writes] will return an error.
+ ///
+ /// [writes]: crate::io::Write "io::Write"
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Write,
+ /// Both the reading and the writing portions of the [`TcpStream`] should be shut down.
+ ///
+ /// See [`Shutdown::Read`] and [`Shutdown::Write`] for more information.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Both,
+}
+
+fn each_addr<A: ToSocketAddrs, F, T>(addr: A, mut f: F) -> io::Result<T>
+where
+ F: FnMut(io::Result<&SocketAddr>) -> io::Result<T>,
+{
+ let addrs = match addr.to_socket_addrs() {
+ Ok(addrs) => addrs,
+ Err(e) => return f(Err(e)),
+ };
+ let mut last_err = None;
+ for addr in addrs {
+ match f(Ok(&addr)) {
+ Ok(l) => return Ok(l),
+ Err(e) => last_err = Some(e),
+ }
+ }
+ Err(last_err.unwrap_or_else(|| {
+ io::const_io_error!(ErrorKind::InvalidInput, "could not resolve to any addresses")
+ }))
+}
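A minimal end-to-end sketch of the pieces defined in this module: `TcpStream::connect` goes through `each_addr`, so a slice of candidate addresses is tried in order, and `Shutdown::Write` makes the peer's read observe end-of-file. The port numbers are illustrative, and the sketch assumes nothing is listening on 127.0.0.1:1, so the first candidate fails and the second succeeds.

use std::io::{Read, Write};
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
use std::thread;

fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let real_addr = listener.local_addr()?;

    // A (presumably) dead address first: `each_addr` tries it, fails, and moves on.
    let candidates = [SocketAddr::from(([127, 0, 0, 1], 1)), real_addr];

    let server = thread::spawn(move || -> std::io::Result<Vec<u8>> {
        let (mut peer, _) = listener.accept()?;
        let mut buf = Vec::new();
        // Returns once the client shuts down its write half.
        peer.read_to_end(&mut buf)?;
        Ok(buf)
    });

    let mut stream = TcpStream::connect(&candidates[..])?;
    stream.write_all(b"hello")?;
    stream.shutdown(Shutdown::Write)?; // the peer's read now sees EOF

    assert_eq!(server.join().unwrap()?, b"hello".to_vec());
    Ok(())
}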
diff --git a/library/std/src/net/parser.rs b/library/std/src/net/parser.rs
new file mode 100644
index 000000000..069b66099
--- /dev/null
+++ b/library/std/src/net/parser.rs
@@ -0,0 +1,388 @@
+//! A private parser implementation of IPv4, IPv6, and socket addresses.
+//!
+//! This module is "publicly exported" through the `FromStr` implementations
+//! below.
+
+#[cfg(test)]
+mod tests;
+
+use crate::error::Error;
+use crate::fmt;
+use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
+use crate::str::FromStr;
+
+trait ReadNumberHelper: crate::marker::Sized {
+ const ZERO: Self;
+ fn checked_mul(&self, other: u32) -> Option<Self>;
+ fn checked_add(&self, other: u32) -> Option<Self>;
+}
+
+macro_rules! impl_helper {
+ ($($t:ty)*) => ($(impl ReadNumberHelper for $t {
+ const ZERO: Self = 0;
+ #[inline]
+ fn checked_mul(&self, other: u32) -> Option<Self> {
+ Self::checked_mul(*self, other.try_into().ok()?)
+ }
+ #[inline]
+ fn checked_add(&self, other: u32) -> Option<Self> {
+ Self::checked_add(*self, other.try_into().ok()?)
+ }
+ })*)
+}
+
+impl_helper! { u8 u16 u32 }
+
+struct Parser<'a> {
+ // Parsing as ASCII, so we can work on a byte slice.
+ state: &'a [u8],
+}
+
+impl<'a> Parser<'a> {
+ fn new(input: &'a str) -> Parser<'a> {
+ Parser { state: input.as_bytes() }
+ }
+
+ /// Run a parser, and restore the pre-parse state if it fails.
+ fn read_atomically<T, F>(&mut self, inner: F) -> Option<T>
+ where
+ F: FnOnce(&mut Parser<'_>) -> Option<T>,
+ {
+ let state = self.state;
+ let result = inner(self);
+ if result.is_none() {
+ self.state = state;
+ }
+ result
+ }
+
+ /// Run a parser, but fail if the entire input wasn't consumed.
+ /// Doesn't run atomically.
+ fn parse_with<T, F>(&mut self, inner: F, kind: AddrKind) -> Result<T, AddrParseError>
+ where
+ F: FnOnce(&mut Parser<'_>) -> Option<T>,
+ {
+ let result = inner(self);
+ if self.state.is_empty() { result } else { None }.ok_or(AddrParseError(kind))
+ }
+
+ /// Peek the next character from the input
+ fn peek_char(&self) -> Option<char> {
+ self.state.first().map(|&b| char::from(b))
+ }
+
+ /// Read the next character from the input
+ fn read_char(&mut self) -> Option<char> {
+ self.state.split_first().map(|(&b, tail)| {
+ self.state = tail;
+ char::from(b)
+ })
+ }
+
+ #[must_use]
+ /// Read the next character from the input if it matches the target.
+ fn read_given_char(&mut self, target: char) -> Option<()> {
+ self.read_atomically(|p| {
+ p.read_char().and_then(|c| if c == target { Some(()) } else { None })
+ })
+ }
+
+ /// Helper for reading separators in an indexed loop. Reads the separator
+ /// character iff index > 0, then runs the parser. In a loop this means the
+ /// separator is only read between items, never before the first one (see
+ /// `read_ipv4_addr` for an example).
+ fn read_separator<T, F>(&mut self, sep: char, index: usize, inner: F) -> Option<T>
+ where
+ F: FnOnce(&mut Parser<'_>) -> Option<T>,
+ {
+ self.read_atomically(move |p| {
+ if index > 0 {
+ p.read_given_char(sep)?;
+ }
+ inner(p)
+ })
+ }
+
+ // Read a number off the front of the input in the given radix, stopping
+ // at the first non-digit character or eof. Fails if the number has more
+ // digits than max_digits or if there is no number.
+ fn read_number<T: ReadNumberHelper>(
+ &mut self,
+ radix: u32,
+ max_digits: Option<usize>,
+ allow_zero_prefix: bool,
+ ) -> Option<T> {
+ self.read_atomically(move |p| {
+ let mut result = T::ZERO;
+ let mut digit_count = 0;
+ let has_leading_zero = p.peek_char() == Some('0');
+
+ while let Some(digit) = p.read_atomically(|p| p.read_char()?.to_digit(radix)) {
+ result = result.checked_mul(radix)?;
+ result = result.checked_add(digit)?;
+ digit_count += 1;
+ if let Some(max_digits) = max_digits {
+ if digit_count > max_digits {
+ return None;
+ }
+ }
+ }
+
+ if digit_count == 0 {
+ None
+ } else if !allow_zero_prefix && has_leading_zero && digit_count > 1 {
+ None
+ } else {
+ Some(result)
+ }
+ })
+ }
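+
+ // Worked examples for the IPv4 octet settings used below (`radix` = 10,
+ // `max_digits` = Some(3), `allow_zero_prefix` = false, `T` = u8):
+ // "254" yields Some(254), "025" is rejected for its leading zero,
+ // "1234" has too many digits, and "256" fails the checked arithmetic.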
+
+ /// Read an IPv4 address.
+ fn read_ipv4_addr(&mut self) -> Option<Ipv4Addr> {
+ self.read_atomically(|p| {
+ let mut groups = [0; 4];
+
+ for (i, slot) in groups.iter_mut().enumerate() {
+ *slot = p.read_separator('.', i, |p| {
+ // Disallow octal number in IP string.
+ // https://tools.ietf.org/html/rfc6943#section-3.1.1
+ p.read_number(10, Some(3), false)
+ })?;
+ }
+
+ Some(groups.into())
+ })
+ }
+
+ /// Read an IPv6 Address.
+ fn read_ipv6_addr(&mut self) -> Option<Ipv6Addr> {
+ /// Read a chunk of an IPv6 address into `groups`. Returns the number
+ /// of groups read, along with a bool indicating if an embedded
+ /// trailing IPv4 address was read. Specifically, read a series of
+ /// colon-separated IPv6 groups (0x0000 - 0xFFFF), with an optional
+ /// trailing embedded IPv4 address.
+ fn read_groups(p: &mut Parser<'_>, groups: &mut [u16]) -> (usize, bool) {
+ let limit = groups.len();
+
+ for (i, slot) in groups.iter_mut().enumerate() {
+ // Try to read a trailing embedded IPv4 address. There must be
+ // at least two groups left.
+ if i < limit - 1 {
+ let ipv4 = p.read_separator(':', i, |p| p.read_ipv4_addr());
+
+ if let Some(v4_addr) = ipv4 {
+ let [one, two, three, four] = v4_addr.octets();
+ groups[i + 0] = u16::from_be_bytes([one, two]);
+ groups[i + 1] = u16::from_be_bytes([three, four]);
+ return (i + 2, true);
+ }
+ }
+
+ let group = p.read_separator(':', i, |p| p.read_number(16, Some(4), true));
+
+ match group {
+ Some(g) => *slot = g,
+ None => return (i, false),
+ }
+ }
+ (groups.len(), false)
+ }
+
+ self.read_atomically(|p| {
+ // Read the front part of the address; either the whole thing, or up
+ // to the first ::
+ let mut head = [0; 8];
+ let (head_size, head_ipv4) = read_groups(p, &mut head);
+
+ if head_size == 8 {
+ return Some(head.into());
+ }
+
+ // IPv4 part is not allowed before `::`
+ if head_ipv4 {
+ return None;
+ }
+
+ // Read `::` if the code above parsed fewer than 8 groups.
+ // `::` indicates one or more groups of 16 bits of zeros.
+ p.read_given_char(':')?;
+ p.read_given_char(':')?;
+
+ // Read the back part of the address. The :: must contain at least one
+ // set of zeroes, so our max length is 7.
+ let mut tail = [0; 7];
+ let limit = 8 - (head_size + 1);
+ let (tail_size, _) = read_groups(p, &mut tail[..limit]);
+
+ // Concat the head and tail of the IP address
+ head[(8 - tail_size)..8].copy_from_slice(&tail[..tail_size]);
+
+ Some(head.into())
+ })
+ }
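+
+ // Worked example: for "1:0:0:4::8", `read_groups` fills `head` with
+ // [1, 0, 0, 4], the "::" is then consumed, and the tail may hold at most
+ // 8 - (4 + 1) = 3 groups. It reads just [8], which is copied into the
+ // final slot, giving 1:0:0:4:0:0:0:8.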
+
+ /// Read an IP Address, either IPv4 or IPv6.
+ fn read_ip_addr(&mut self) -> Option<IpAddr> {
+ self.read_ipv4_addr().map(IpAddr::V4).or_else(move || self.read_ipv6_addr().map(IpAddr::V6))
+ }
+
+ /// Read a `:` followed by a port in base 10.
+ fn read_port(&mut self) -> Option<u16> {
+ self.read_atomically(|p| {
+ p.read_given_char(':')?;
+ p.read_number(10, None, true)
+ })
+ }
+
+ /// Read a `%` followed by a scope ID in base 10.
+ fn read_scope_id(&mut self) -> Option<u32> {
+ self.read_atomically(|p| {
+ p.read_given_char('%')?;
+ p.read_number(10, None, true)
+ })
+ }
+
+ /// Read an IPv4 address with a port.
+ fn read_socket_addr_v4(&mut self) -> Option<SocketAddrV4> {
+ self.read_atomically(|p| {
+ let ip = p.read_ipv4_addr()?;
+ let port = p.read_port()?;
+ Some(SocketAddrV4::new(ip, port))
+ })
+ }
+
+ /// Read an IPv6 address with a port.
+ fn read_socket_addr_v6(&mut self) -> Option<SocketAddrV6> {
+ self.read_atomically(|p| {
+ p.read_given_char('[')?;
+ let ip = p.read_ipv6_addr()?;
+ let scope_id = p.read_scope_id().unwrap_or(0);
+ p.read_given_char(']')?;
+
+ let port = p.read_port()?;
+ Some(SocketAddrV6::new(ip, port, 0, scope_id))
+ })
+ }
+
+ /// Read an IP address with a port
+ fn read_socket_addr(&mut self) -> Option<SocketAddr> {
+ self.read_socket_addr_v4()
+ .map(SocketAddr::V4)
+ .or_else(|| self.read_socket_addr_v6().map(SocketAddr::V6))
+ }
+}
+
+#[stable(feature = "ip_addr", since = "1.7.0")]
+impl FromStr for IpAddr {
+ type Err = AddrParseError;
+ fn from_str(s: &str) -> Result<IpAddr, AddrParseError> {
+ Parser::new(s).parse_with(|p| p.read_ip_addr(), AddrKind::Ip)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for Ipv4Addr {
+ type Err = AddrParseError;
+ fn from_str(s: &str) -> Result<Ipv4Addr, AddrParseError> {
+ // Don't even try to parse if the string is too long: the longest possible
+ // IPv4 address, "255.255.255.255", is 15 characters.
+ if s.len() > 15 {
+ Err(AddrParseError(AddrKind::Ipv4))
+ } else {
+ Parser::new(s).parse_with(|p| p.read_ipv4_addr(), AddrKind::Ipv4)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for Ipv6Addr {
+ type Err = AddrParseError;
+ fn from_str(s: &str) -> Result<Ipv6Addr, AddrParseError> {
+ Parser::new(s).parse_with(|p| p.read_ipv6_addr(), AddrKind::Ipv6)
+ }
+}
+
+#[stable(feature = "socket_addr_from_str", since = "1.5.0")]
+impl FromStr for SocketAddrV4 {
+ type Err = AddrParseError;
+ fn from_str(s: &str) -> Result<SocketAddrV4, AddrParseError> {
+ Parser::new(s).parse_with(|p| p.read_socket_addr_v4(), AddrKind::SocketV4)
+ }
+}
+
+#[stable(feature = "socket_addr_from_str", since = "1.5.0")]
+impl FromStr for SocketAddrV6 {
+ type Err = AddrParseError;
+ fn from_str(s: &str) -> Result<SocketAddrV6, AddrParseError> {
+ Parser::new(s).parse_with(|p| p.read_socket_addr_v6(), AddrKind::SocketV6)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for SocketAddr {
+ type Err = AddrParseError;
+ fn from_str(s: &str) -> Result<SocketAddr, AddrParseError> {
+ Parser::new(s).parse_with(|p| p.read_socket_addr(), AddrKind::Socket)
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum AddrKind {
+ Ip,
+ Ipv4,
+ Ipv6,
+ Socket,
+ SocketV4,
+ SocketV6,
+}
+
+/// An error which can be returned when parsing an IP address or a socket address.
+///
+/// This error is used as the error type for the [`FromStr`] implementation for
+/// [`IpAddr`], [`Ipv4Addr`], [`Ipv6Addr`], [`SocketAddr`], [`SocketAddrV4`], and
+/// [`SocketAddrV6`].
+///
+/// # Potential causes
+///
+/// `AddrParseError` may be returned because the provided string does not parse as the given type,
+/// often because it includes information only handled by a different address type.
+///
+/// ```should_panic
+/// use std::net::IpAddr;
+/// let _foo: IpAddr = "127.0.0.1:8080".parse().expect("Cannot handle the socket port");
+/// ```
+///
+/// [`IpAddr`] doesn't handle the port. Use [`SocketAddr`] instead.
+///
+/// ```
+/// use std::net::SocketAddr;
+///
+/// // No problem: `SocketAddr` handles both the IP address and the port.
+/// let _foo: SocketAddr = "127.0.0.1:8080".parse().expect("unreachable panic");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct AddrParseError(AddrKind);
+
+#[stable(feature = "addr_parse_error_error", since = "1.4.0")]
+impl fmt::Display for AddrParseError {
+ #[allow(deprecated, deprecated_in_future)]
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.write_str(self.description())
+ }
+}
+
+#[stable(feature = "addr_parse_error_error", since = "1.4.0")]
+impl Error for AddrParseError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ match self.0 {
+ AddrKind::Ip => "invalid IP address syntax",
+ AddrKind::Ipv4 => "invalid IPv4 address syntax",
+ AddrKind::Ipv6 => "invalid IPv6 address syntax",
+ AddrKind::Socket => "invalid socket address syntax",
+ AddrKind::SocketV4 => "invalid IPv4 socket address syntax",
+ AddrKind::SocketV6 => "invalid IPv6 socket address syntax",
+ }
+ }
+}
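A short caller-side sketch of the behavior these `FromStr` impls produce; the particular addresses are arbitrary examples.

use std::net::{AddrParseError, Ipv4Addr, SocketAddr, SocketAddrV6};

fn main() {
    // Leading zeros in an octet are rejected to avoid octal ambiguity (RFC 6943).
    assert!("192.168.0.1".parse::<Ipv4Addr>().is_ok());
    assert!("192.168.00.1".parse::<Ipv4Addr>().is_err());

    // IPv6 socket addresses need brackets and may carry a scope ID after `%`.
    let v6: SocketAddrV6 = "[fe80::1%7]:443".parse().unwrap();
    assert_eq!((v6.scope_id(), v6.port()), (7, 443));

    // The error only reports which kind of address failed to parse.
    let err: AddrParseError = "not an address".parse::<SocketAddr>().unwrap_err();
    assert_eq!(err.to_string(), "invalid socket address syntax");
}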
diff --git a/library/std/src/net/parser/tests.rs b/library/std/src/net/parser/tests.rs
new file mode 100644
index 000000000..6d2d48eca
--- /dev/null
+++ b/library/std/src/net/parser/tests.rs
@@ -0,0 +1,149 @@
+// FIXME: These tests are all excellent candidates for AFL fuzz testing
+use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
+use crate::str::FromStr;
+
+const PORT: u16 = 8080;
+const SCOPE_ID: u32 = 1337;
+
+const IPV4: Ipv4Addr = Ipv4Addr::new(192, 168, 0, 1);
+const IPV4_STR: &str = "192.168.0.1";
+const IPV4_STR_PORT: &str = "192.168.0.1:8080";
+const IPV4_STR_WITH_OCTAL: &str = "0127.0.0.1";
+const IPV4_STR_WITH_HEX: &str = "0x10.0.0.1";
+
+const IPV6: Ipv6Addr = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0xc0a8, 0x1);
+const IPV6_STR_FULL: &str = "2001:db8:0:0:0:0:c0a8:1";
+const IPV6_STR_COMPRESS: &str = "2001:db8::c0a8:1";
+const IPV6_STR_V4: &str = "2001:db8::192.168.0.1";
+const IPV6_STR_V4_WITH_OCTAL: &str = "2001:db8::0127.0.0.1";
+const IPV6_STR_V4_WITH_HEX: &str = "2001:db8::0x10.0.0.1";
+const IPV6_STR_PORT: &str = "[2001:db8::c0a8:1]:8080";
+const IPV6_STR_PORT_SCOPE_ID: &str = "[2001:db8::c0a8:1%1337]:8080";
+
+#[test]
+fn parse_ipv4() {
+ let result: Ipv4Addr = IPV4_STR.parse().unwrap();
+ assert_eq!(result, IPV4);
+
+ assert!(Ipv4Addr::from_str(IPV4_STR_PORT).is_err());
+ assert!(Ipv4Addr::from_str(IPV4_STR_WITH_OCTAL).is_err());
+ assert!(Ipv4Addr::from_str(IPV4_STR_WITH_HEX).is_err());
+ assert!(Ipv4Addr::from_str(IPV6_STR_FULL).is_err());
+ assert!(Ipv4Addr::from_str(IPV6_STR_COMPRESS).is_err());
+ assert!(Ipv4Addr::from_str(IPV6_STR_V4).is_err());
+ assert!(Ipv4Addr::from_str(IPV6_STR_PORT).is_err());
+}
+
+#[test]
+fn parse_ipv6() {
+ let result: Ipv6Addr = IPV6_STR_FULL.parse().unwrap();
+ assert_eq!(result, IPV6);
+
+ let result: Ipv6Addr = IPV6_STR_COMPRESS.parse().unwrap();
+ assert_eq!(result, IPV6);
+
+ let result: Ipv6Addr = IPV6_STR_V4.parse().unwrap();
+ assert_eq!(result, IPV6);
+
+ assert!(Ipv6Addr::from_str(IPV6_STR_V4_WITH_OCTAL).is_err());
+ assert!(Ipv6Addr::from_str(IPV6_STR_V4_WITH_HEX).is_err());
+ assert!(Ipv6Addr::from_str(IPV4_STR).is_err());
+ assert!(Ipv6Addr::from_str(IPV4_STR_PORT).is_err());
+ assert!(Ipv6Addr::from_str(IPV6_STR_PORT).is_err());
+}
+
+#[test]
+fn parse_ip() {
+ let result: IpAddr = IPV4_STR.parse().unwrap();
+ assert_eq!(result, IpAddr::from(IPV4));
+
+ let result: IpAddr = IPV6_STR_FULL.parse().unwrap();
+ assert_eq!(result, IpAddr::from(IPV6));
+
+ let result: IpAddr = IPV6_STR_COMPRESS.parse().unwrap();
+ assert_eq!(result, IpAddr::from(IPV6));
+
+ let result: IpAddr = IPV6_STR_V4.parse().unwrap();
+ assert_eq!(result, IpAddr::from(IPV6));
+
+ assert!(IpAddr::from_str(IPV4_STR_PORT).is_err());
+ assert!(IpAddr::from_str(IPV6_STR_PORT).is_err());
+}
+
+#[test]
+fn parse_socket_v4() {
+ let result: SocketAddrV4 = IPV4_STR_PORT.parse().unwrap();
+ assert_eq!(result, SocketAddrV4::new(IPV4, PORT));
+
+ assert!(SocketAddrV4::from_str(IPV4_STR).is_err());
+ assert!(SocketAddrV4::from_str(IPV6_STR_FULL).is_err());
+ assert!(SocketAddrV4::from_str(IPV6_STR_COMPRESS).is_err());
+ assert!(SocketAddrV4::from_str(IPV6_STR_V4).is_err());
+ assert!(SocketAddrV4::from_str(IPV6_STR_PORT).is_err());
+}
+
+#[test]
+fn parse_socket_v6() {
+ assert_eq!(IPV6_STR_PORT.parse(), Ok(SocketAddrV6::new(IPV6, PORT, 0, 0)));
+ assert_eq!(IPV6_STR_PORT_SCOPE_ID.parse(), Ok(SocketAddrV6::new(IPV6, PORT, 0, SCOPE_ID)));
+
+ assert!(SocketAddrV6::from_str(IPV4_STR).is_err());
+ assert!(SocketAddrV6::from_str(IPV4_STR_PORT).is_err());
+ assert!(SocketAddrV6::from_str(IPV6_STR_FULL).is_err());
+ assert!(SocketAddrV6::from_str(IPV6_STR_COMPRESS).is_err());
+ assert!(SocketAddrV6::from_str(IPV6_STR_V4).is_err());
+}
+
+#[test]
+fn parse_socket() {
+ let result: SocketAddr = IPV4_STR_PORT.parse().unwrap();
+ assert_eq!(result, SocketAddr::from((IPV4, PORT)));
+
+ let result: SocketAddr = IPV6_STR_PORT.parse().unwrap();
+ assert_eq!(result, SocketAddr::from((IPV6, PORT)));
+
+ assert!(SocketAddr::from_str(IPV4_STR).is_err());
+ assert!(SocketAddr::from_str(IPV6_STR_FULL).is_err());
+ assert!(SocketAddr::from_str(IPV6_STR_COMPRESS).is_err());
+ assert!(SocketAddr::from_str(IPV6_STR_V4).is_err());
+}
+
+#[test]
+fn ipv6_corner_cases() {
+ let result: Ipv6Addr = "1::".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0));
+
+ let result: Ipv6Addr = "1:1::".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(1, 1, 0, 0, 0, 0, 0, 0));
+
+ let result: Ipv6Addr = "::1".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
+
+ let result: Ipv6Addr = "::1:1".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 1, 1));
+
+ let result: Ipv6Addr = "::".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
+
+ let result: Ipv6Addr = "::192.168.0.1".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc0a8, 0x1));
+
+ let result: Ipv6Addr = "::1:192.168.0.1".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 1, 0xc0a8, 0x1));
+
+ let result: Ipv6Addr = "1:1:1:1:1:1:192.168.0.1".parse().unwrap();
+ assert_eq!(result, Ipv6Addr::new(1, 1, 1, 1, 1, 1, 0xc0a8, 0x1));
+}
+
+// Things that might not seem like failures but are
+#[test]
+fn ipv6_corner_failures() {
+ // No IP address before the ::
+ assert!(Ipv6Addr::from_str("1:192.168.0.1::").is_err());
+
+ // :: must have at least 1 set of zeroes
+ assert!(Ipv6Addr::from_str("1:1:1:1::1:1:1:1").is_err());
+
+ // Need brackets for a port
+ assert!(SocketAddrV6::from_str("1:1:1:1:1:1:1:1:8080").is_err());
+}
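One practical consequence of the corner cases above is worth spelling out: without brackets, a trailing `:port` on an IPv6 string is just another hex group. Below is a sketch of the fallback callers often write, using a hypothetical `parse_endpoint` helper and arbitrary inputs.

use std::net::{IpAddr, SocketAddr};

// Hypothetical helper: accept either "ip" or "ip:port", defaulting the port.
fn parse_endpoint(s: &str, default_port: u16) -> Option<SocketAddr> {
    s.parse::<SocketAddr>()
        .ok()
        .or_else(|| s.parse::<IpAddr>().ok().map(|ip| SocketAddr::new(ip, default_port)))
}

fn main() {
    assert_eq!(parse_endpoint("127.0.0.1:8080", 80).unwrap().port(), 8080);
    assert_eq!(parse_endpoint("2001:db8::1", 80).unwrap().port(), 80);
    // Without brackets, ":8080" parses as one more hex group of the IPv6
    // address rather than as a port, so the default port is used here.
    assert_eq!(parse_endpoint("2001:db8::1:8080", 80).unwrap().port(), 80);
    assert_eq!(parse_endpoint("[2001:db8::1]:8080", 80).unwrap().port(), 8080);
}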
diff --git a/library/std/src/net/tcp.rs b/library/std/src/net/tcp.rs
new file mode 100644
index 000000000..69b72a81c
--- /dev/null
+++ b/library/std/src/net/tcp.rs
@@ -0,0 +1,1050 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::io::prelude::*;
+
+use crate::fmt;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::iter::FusedIterator;
+use crate::net::{Shutdown, SocketAddr, ToSocketAddrs};
+use crate::sys_common::net as net_imp;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::time::Duration;
+
+/// A TCP stream between a local and a remote socket.
+///
+/// After creating a `TcpStream` by either [`connect`]ing to a remote host or
+/// [`accept`]ing a connection on a [`TcpListener`], data can be transmitted
+/// by [reading] from and [writing] to it.
+///
+/// The connection will be closed when the value is dropped. The reading and writing
+/// portions of the connection can also be shut down individually with the [`shutdown`]
+/// method.
+///
+/// The Transmission Control Protocol is specified in [IETF RFC 793].
+///
+/// [`accept`]: TcpListener::accept
+/// [`connect`]: TcpStream::connect
+/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793
+/// [reading]: Read
+/// [`shutdown`]: TcpStream::shutdown
+/// [writing]: Write
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::io::prelude::*;
+/// use std::net::TcpStream;
+///
+/// fn main() -> std::io::Result<()> {
+/// let mut stream = TcpStream::connect("127.0.0.1:34254")?;
+///
+/// stream.write(&[1])?;
+/// stream.read(&mut [0; 128])?;
+/// Ok(())
+/// } // the stream is closed here
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct TcpStream(net_imp::TcpStream);
+
+/// A TCP socket server, listening for connections.
+///
+/// After creating a `TcpListener` by [`bind`]ing it to a socket address, it listens
+/// for incoming TCP connections. These can be accepted by calling [`accept`] or by
+/// iterating over the [`Incoming`] iterator returned by [`incoming`][`TcpListener::incoming`].
+///
+/// The socket will be closed when the value is dropped.
+///
+/// The Transmission Control Protocol is specified in [IETF RFC 793].
+///
+/// [`accept`]: TcpListener::accept
+/// [`bind`]: TcpListener::bind
+/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::net::{TcpListener, TcpStream};
+///
+/// fn handle_client(stream: TcpStream) {
+/// // ...
+/// }
+///
+/// fn main() -> std::io::Result<()> {
+/// let listener = TcpListener::bind("127.0.0.1:80")?;
+///
+/// // accept connections and process them serially
+/// for stream in listener.incoming() {
+/// handle_client(stream?);
+/// }
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct TcpListener(net_imp::TcpListener);
+
+/// An iterator that infinitely [`accept`]s connections on a [`TcpListener`].
+///
+/// This `struct` is created by the [`TcpListener::incoming`] method.
+/// See its documentation for more.
+///
+/// [`accept`]: TcpListener::accept
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Incoming<'a> {
+ listener: &'a TcpListener,
+}
+
+/// An iterator that infinitely [`accept`]s connections on a [`TcpListener`].
+///
+/// This `struct` is created by the [`TcpListener::into_incoming`] method.
+/// See its documentation for more.
+///
+/// [`accept`]: TcpListener::accept
+#[derive(Debug)]
+#[unstable(feature = "tcplistener_into_incoming", issue = "88339")]
+pub struct IntoIncoming {
+ listener: TcpListener,
+}
+
+impl TcpStream {
+ /// Opens a TCP connection to a remote host.
+ ///
+ /// `addr` is an address of the remote host. Anything which implements the
+ /// [`ToSocketAddrs`] trait can be supplied as the address; see that trait's
+ /// documentation for concrete examples.
+ ///
+ /// If `addr` yields multiple addresses, `connect` will be attempted with
+ /// each of the addresses until a connection is successful. If none of
+ /// the addresses result in a successful connection, the error returned from
+ /// the last connection attempt (the last address) is returned.
+ ///
+ /// # Examples
+ ///
+ /// Open a TCP connection to `127.0.0.1:8080`:
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// if let Ok(stream) = TcpStream::connect("127.0.0.1:8080") {
+ /// println!("Connected to the server!");
+ /// } else {
+ /// println!("Couldn't connect to server...");
+ /// }
+ /// ```
+ ///
+ /// Open a TCP connection to `127.0.0.1:8080`. If the connection fails, open
+ /// a TCP connection to `127.0.0.1:8081`:
+ ///
+ /// ```no_run
+ /// use std::net::{SocketAddr, TcpStream};
+ ///
+ /// let addrs = [
+ /// SocketAddr::from(([127, 0, 0, 1], 8080)),
+ /// SocketAddr::from(([127, 0, 0, 1], 8081)),
+ /// ];
+ /// if let Ok(stream) = TcpStream::connect(&addrs[..]) {
+ /// println!("Connected to the server!");
+ /// } else {
+ /// println!("Couldn't connect to server...");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> {
+ super::each_addr(addr, net_imp::TcpStream::connect).map(TcpStream)
+ }
+
+ /// Opens a TCP connection to a remote host with a timeout.
+ ///
+ /// Unlike `connect`, `connect_timeout` takes a single [`SocketAddr`] since
+ /// the timeout must be applied to each address individually.
+ ///
+ /// It is an error to pass a zero `Duration` to this function.
+ ///
+ /// Unlike other methods on `TcpStream`, this does not correspond to a
+ /// single system call. It instead calls `connect` in nonblocking mode and
+ /// then uses an OS-specific mechanism to await the completion of the
+ /// connection request.
+ #[stable(feature = "tcpstream_connect_timeout", since = "1.21.0")]
+ pub fn connect_timeout(addr: &SocketAddr, timeout: Duration) -> io::Result<TcpStream> {
+ net_imp::TcpStream::connect_timeout(addr, timeout).map(TcpStream)
+ }
+
+ /// Returns the socket address of the remote peer of this TCP connection.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, TcpStream};
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// assert_eq!(stream.peer_addr().unwrap(),
+ /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080)));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.0.peer_addr()
+ }
+
+ /// Returns the socket address of the local half of this TCP connection.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::{IpAddr, Ipv4Addr, TcpStream};
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// assert_eq!(stream.local_addr().unwrap().ip(),
+ /// IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.0.socket_addr()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O on the specified
+ /// portions to return immediately with an appropriate value (see the
+ /// documentation of [`Shutdown`]).
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// Calling this function multiple times may result in different behavior,
+ /// depending on the operating system. On Linux, the second call will
+ /// return `Ok(())`, but on macOS, it will return `ErrorKind::NotConnected`.
+ /// This may change in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::{Shutdown, TcpStream};
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.shutdown(Shutdown::Both).expect("shutdown call failed");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.0.shutdown(how)
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `TcpStream` is a reference to the same stream that this
+ /// object references. Both handles will read and write the same stream of
+ /// data, and options set on one stream will be propagated to the other
+ /// stream.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// let stream_clone = stream.try_clone().expect("clone failed...");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_clone(&self) -> io::Result<TcpStream> {
+ self.0.duplicate().map(TcpStream)
+ }
+
+ /// Sets the read timeout to the timeout specified.
+ ///
+ /// If the value specified is [`None`], then [`read`] calls will block
+ /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is
+ /// passed to this method.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// Platforms may return a different error code whenever a read times out as
+ /// a result of setting this option. For example Unix typically returns an
+ /// error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
+ ///
+ /// [`read`]: Read::read
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
+ /// [`TimedOut`]: io::ErrorKind::TimedOut
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_read_timeout(None).expect("set_read_timeout call failed");
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::net::TcpStream;
+ /// use std::time::Duration;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080").unwrap();
+ /// let result = stream.set_read_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput)
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.0.set_read_timeout(dur)
+ }
+
+ /// Sets the write timeout to the timeout specified.
+ ///
+ /// If the value specified is [`None`], then [`write`] calls will block
+ /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is
+ /// passed to this method.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// Platforms may return a different error code whenever a write times out
+ /// as a result of setting this option. For example Unix typically returns
+ /// an error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
+ ///
+ /// [`write`]: Write::write
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
+ /// [`TimedOut`]: io::ErrorKind::TimedOut
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_write_timeout(None).expect("set_write_timeout call failed");
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::net::TcpStream;
+ /// use std::time::Duration;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080").unwrap();
+ /// let result = stream.set_write_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput)
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.0.set_write_timeout(dur)
+ }
+
+ /// Returns the read timeout of this socket.
+ ///
+ /// If the timeout is [`None`], then [`read`] calls will block indefinitely.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// Some platforms do not provide access to the current timeout.
+ ///
+ /// [`read`]: Read::read
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_read_timeout(None).expect("set_read_timeout call failed");
+ /// assert_eq!(stream.read_timeout().unwrap(), None);
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.read_timeout()
+ }
+
+ /// Returns the write timeout of this socket.
+ ///
+ /// If the timeout is [`None`], then [`write`] calls will block indefinitely.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// Some platforms do not provide access to the current timeout.
+ ///
+ /// [`write`]: Write::write
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_write_timeout(None).expect("set_write_timeout call failed");
+ /// assert_eq!(stream.write_timeout().unwrap(), None);
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.write_timeout()
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8000")
+ /// .expect("Couldn't connect to the server...");
+ /// let mut buf = [0; 10];
+ /// let len = stream.peek(&mut buf).expect("peek failed");
+ /// ```
+ #[stable(feature = "peek", since = "1.18.0")]
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.peek(buf)
+ }
+
+ /// Sets the value of the `SO_LINGER` option on this socket.
+ ///
+ /// This value controls how the socket is closed when data remains
+ /// to be sent. If `SO_LINGER` is set, the socket will remain open
+ /// for the specified duration as the system attempts to send pending data.
+ /// Otherwise, the system may close the socket immediately, or wait for a
+ /// default timeout.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(tcp_linger)]
+ ///
+ /// use std::net::TcpStream;
+ /// use std::time::Duration;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_linger(Some(Duration::from_secs(0))).expect("set_linger call failed");
+ /// ```
+ #[unstable(feature = "tcp_linger", issue = "88494")]
+ pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
+ self.0.set_linger(linger)
+ }
+
+ /// Gets the value of the `SO_LINGER` option on this socket.
+ ///
+ /// For more information about this option, see [`TcpStream::set_linger`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(tcp_linger)]
+ ///
+ /// use std::net::TcpStream;
+ /// use std::time::Duration;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_linger(Some(Duration::from_secs(0))).expect("set_linger call failed");
+ /// assert_eq!(stream.linger().unwrap(), Some(Duration::from_secs(0)));
+ /// ```
+ #[unstable(feature = "tcp_linger", issue = "88494")]
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.0.linger()
+ }
+
+ /// Sets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// If set, this option disables the Nagle algorithm. This means that
+ /// segments are always sent as soon as possible, even if there is only a
+ /// small amount of data. When not set, data is buffered until there is a
+ /// sufficient amount to send out, thereby avoiding the frequent sending of
+ /// small packets.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_nodelay(true).expect("set_nodelay call failed");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.0.set_nodelay(nodelay)
+ }
+
+ /// Gets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// For more information about this option, see [`TcpStream::set_nodelay`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_nodelay(true).expect("set_nodelay call failed");
+ /// assert_eq!(stream.nodelay().unwrap_or(false), true);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.0.nodelay()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_ttl(100).expect("set_ttl call failed");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.0.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`TcpStream::set_ttl`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_ttl(100).expect("set_ttl call failed");
+ /// assert_eq!(stream.ttl().unwrap_or(0), 100);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0.ttl()
+ }
+
+ /// Gets the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8080")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.take_error().expect("No error was expected...");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0.take_error()
+ }
+
+ /// Moves this TCP stream into or out of nonblocking mode.
+ ///
+ /// This will result in `read`, `write`, `recv` and `send` operations
+ /// becoming nonblocking, i.e., immediately returning from their calls.
+ /// If the IO operation is successful, `Ok` is returned and no further
+ /// action is required. If the IO operation could not be completed and needs
+ /// to be retried, an error with kind [`io::ErrorKind::WouldBlock`] is
+ /// returned.
+ ///
+ /// On Unix platforms, calling this method corresponds to calling `fcntl`
+ /// `FIONBIO`. On Windows calling this method corresponds to calling
+ /// `ioctlsocket` `FIONBIO`.
+ ///
+ /// # Examples
+ ///
+ /// Reading bytes from a TCP stream in non-blocking mode:
+ ///
+ /// ```no_run
+ /// use std::io::{self, Read};
+ /// use std::net::TcpStream;
+ ///
+ /// let mut stream = TcpStream::connect("127.0.0.1:7878")
+ /// .expect("Couldn't connect to the server...");
+ /// stream.set_nonblocking(true).expect("set_nonblocking call failed");
+ ///
+ /// # fn wait_for_fd() { unimplemented!() }
+ /// let mut buf = vec![];
+ /// loop {
+ /// match stream.read_to_end(&mut buf) {
+ /// Ok(_) => break,
+ /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ /// // wait until network socket is ready, typically implemented
+ /// // via platform-specific APIs such as epoll or IOCP
+ /// wait_for_fd();
+ /// }
+ /// Err(e) => panic!("encountered IO error: {e}"),
+ /// };
+ /// };
+ /// println!("bytes: {buf:?}");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.0.set_nonblocking(nonblocking)
+ }
+}
+
+// In addition to the `impl`s here, `TcpStream` also has `impl`s for
+// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
+// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
+// `AsSocket`/`From<OwnedSocket>`/`Into<OwnedSocket>` and
+// `AsRawSocket`/`IntoRawSocket`/`FromRawSocket` on Windows.
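A minimal sketch of what the conversions listed in the comment above enable on Unix: moving the stream's file descriptor into an `OwnedFd` and wrapping it back up. The function name is made up for illustration and the example assumes a Unix target.

```rust
use std::net::TcpStream;

// Hedged sketch (Unix-only): round-trip a TcpStream through OwnedFd via the
// From/Into conversions mentioned above; the descriptor stays open throughout.
#[cfg(unix)]
fn round_trip(stream: TcpStream) -> TcpStream {
    use std::os::unix::io::OwnedFd;

    let owned: OwnedFd = stream.into(); // Into<OwnedFd>: take ownership of the fd
    TcpStream::from(owned)              // From<OwnedFd>: wrap it back into a stream
}

fn main() {}
```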
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Read for &TcpStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for &TcpStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
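The `Read` and `Write` impls for `&TcpStream` just above are what let a single connection be driven from two threads through shared references, without `try_clone`. A rough sketch, assuming an `Arc`-shared stream and a placeholder address with nothing actually listening:

```rust
use std::io::{Read, Write};
use std::net::TcpStream;
use std::sync::Arc;
use std::thread;

fn main() -> std::io::Result<()> {
    // Placeholder address; a real program would have a peer listening here.
    let stream = Arc::new(TcpStream::connect("127.0.0.1:8080")?);

    let reader = Arc::clone(&stream);
    let handle = thread::spawn(move || {
        // Deref coercion turns &Arc<TcpStream> into &TcpStream, which is itself Read.
        let mut r: &TcpStream = &reader;
        let mut buf = [0u8; 512];
        let n = r.read(&mut buf).unwrap_or(0);
        println!("read {n} bytes");
    });

    // The original handle writes through the Write impl for &TcpStream.
    let mut w: &TcpStream = &stream;
    w.write_all(b"ping")?;
    handle.join().unwrap();
    Ok(())
}
```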
+
+impl AsInner<net_imp::TcpStream> for TcpStream {
+ fn as_inner(&self) -> &net_imp::TcpStream {
+ &self.0
+ }
+}
+
+impl FromInner<net_imp::TcpStream> for TcpStream {
+ fn from_inner(inner: net_imp::TcpStream) -> TcpStream {
+ TcpStream(inner)
+ }
+}
+
+impl IntoInner<net_imp::TcpStream> for TcpStream {
+ fn into_inner(self) -> net_imp::TcpStream {
+ self.0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl TcpListener {
+ /// Creates a new `TcpListener` which will be bound to the specified
+ /// address.
+ ///
+ /// The returned listener is ready for accepting connections.
+ ///
+ /// Binding with a port number of 0 will request that the OS assign a port
+ /// to this listener. The port allocated can be queried via the
+ /// [`TcpListener::local_addr`] method.
+ ///
+ /// The address type can be any implementor of the [`ToSocketAddrs`] trait. See
+ /// its documentation for concrete examples.
+ ///
+ /// If `addr` yields multiple addresses, `bind` will be attempted with
+ /// each of the addresses until one succeeds and returns the listener. If
+ /// none of the addresses succeed in creating a listener, the error returned
+ /// from the last attempt (the last address) is returned.
+ ///
+ /// # Examples
+ ///
+ /// Creates a TCP listener bound to `127.0.0.1:80`:
+ ///
+ /// ```no_run
+ /// use std::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
+ /// ```
+ ///
+ /// Creates a TCP listener bound to `127.0.0.1:80`. If that fails, falls back
+ /// to a TCP listener bound to `127.0.0.1:443`:
+ ///
+ /// ```no_run
+ /// use std::net::{SocketAddr, TcpListener};
+ ///
+ /// let addrs = [
+ /// SocketAddr::from(([127, 0, 0, 1], 80)),
+ /// SocketAddr::from(([127, 0, 0, 1], 443)),
+ /// ];
+ /// let listener = TcpListener::bind(&addrs[..]).unwrap();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
+ super::each_addr(addr, net_imp::TcpListener::bind).map(TcpListener)
+ }
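As noted in the `bind` docs just above, binding to port 0 asks the OS for a free port, which can then be read back through `local_addr`; the `connect_timeout_valid` test later in this diff relies on the same pattern. A small sketch:

```rust
use std::net::{TcpListener, TcpStream};

fn main() -> std::io::Result<()> {
    // Port 0: let the OS choose any free port.
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let addr = listener.local_addr()?; // the OS-assigned address, e.g. 127.0.0.1:54321
    println!("listening on {addr}");

    // A client can connect to the discovered address; the connection simply
    // sits in the listener's backlog until `accept` is called.
    let _client = TcpStream::connect(addr)?;
    Ok(())
}
```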
+
+ /// Returns the local socket address of this listener.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, TcpListener};
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:8080").unwrap();
+ /// assert_eq!(listener.local_addr().unwrap(),
+ /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080)));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.0.socket_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned [`TcpListener`] is a reference to the same socket that this
+ /// object references. Both handles can be used to accept incoming
+ /// connections, and options set on one listener will affect the other.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:8080").unwrap();
+ /// let listener_clone = listener.try_clone().unwrap();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_clone(&self) -> io::Result<TcpListener> {
+ self.0.duplicate().map(TcpListener)
+ }
+
+ /// Accepts a new incoming connection from this listener.
+ ///
+ /// This function will block the calling thread until a new TCP connection
+ /// is established. When established, the corresponding [`TcpStream`] and the
+ /// remote peer's address will be returned.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:8080").unwrap();
+ /// match listener.accept() {
+ /// Ok((_socket, addr)) => println!("new client: {addr:?}"),
+ /// Err(e) => println!("couldn't get client: {e:?}"),
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ // On WASM, `TcpStream` is uninhabited (as it's unsupported) and so
+ // the `a` variable here is technically unused.
+ #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
+ self.0.accept().map(|(a, b)| (TcpStream(a), b))
+ }
+
+ /// Returns an iterator over the connections being received on this
+ /// listener.
+ ///
+ /// The returned iterator will never return [`None`] and will also not yield
+ /// the peer's [`SocketAddr`] structure. Iterating over it is equivalent to
+ /// calling [`TcpListener::accept`] in a loop.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::{TcpListener, TcpStream};
+ ///
+ /// fn handle_connection(stream: TcpStream) {
+ /// //...
+ /// }
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = TcpListener::bind("127.0.0.1:80")?;
+ ///
+ /// for stream in listener.incoming() {
+ /// match stream {
+ /// Ok(stream) => {
+ /// handle_connection(stream);
+ /// }
+ /// Err(e) => { /* connection failed */ }
+ /// }
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn incoming(&self) -> Incoming<'_> {
+ Incoming { listener: self }
+ }
+
+ /// Turns this into an iterator over the connections being received on this
+ /// listener.
+ ///
+ /// The returned iterator will never return [`None`] and will also not yield
+ /// the peer's [`SocketAddr`] structure. Iterating over it is equivalent to
+ /// calling [`TcpListener::accept`] in a loop.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(tcplistener_into_incoming)]
+ /// use std::net::{TcpListener, TcpStream};
+ ///
+ /// fn listen_on(port: u16) -> impl Iterator<Item = TcpStream> {
+ /// let listener = TcpListener::bind(("127.0.0.1", port)).unwrap();
+ /// listener.into_incoming()
+ /// .filter_map(Result::ok) /* Ignore failed connections */
+ /// }
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// for stream in listen_on(80) {
+ /// /* handle the connection here */
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[unstable(feature = "tcplistener_into_incoming", issue = "88339")]
+ pub fn into_incoming(self) -> IntoIncoming {
+ IntoIncoming { listener: self }
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
+ /// listener.set_ttl(100).expect("could not set TTL");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.0.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`TcpListener::set_ttl`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
+ /// listener.set_ttl(100).expect("could not set TTL");
+ /// assert_eq!(listener.ttl().unwrap_or(0), 100);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0.ttl()
+ }
+
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ #[deprecated(since = "1.16.0", note = "this option can only be set before the socket is bound")]
+ #[allow(missing_docs)]
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ self.0.set_only_v6(only_v6)
+ }
+
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ #[deprecated(since = "1.16.0", note = "this option can only be set before the socket is bound")]
+ #[allow(missing_docs)]
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.0.only_v6()
+ }
+
+ /// Gets the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
+ /// listener.take_error().expect("No error was expected");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0.take_error()
+ }
+
+ /// Moves this TCP listener into or out of nonblocking mode.
+ ///
+ /// This will result in the `accept` operation becoming nonblocking,
+ /// i.e., immediately returning from its calls. If the IO operation is
+ /// successful, `Ok` is returned and no further action is required. If the
+ /// IO operation could not be completed and needs to be retried, an error
+ /// with kind [`io::ErrorKind::WouldBlock`] is returned.
+ ///
+ /// On Unix platforms, calling this method corresponds to calling `fcntl`
+ /// `FIONBIO`. On Windows calling this method corresponds to calling
+ /// `ioctlsocket` `FIONBIO`.
+ ///
+ /// # Examples
+ ///
+ /// Bind a TCP listener to an address, listen for connections, and read
+ /// bytes in nonblocking mode:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::net::TcpListener;
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
+ /// listener.set_nonblocking(true).expect("Cannot set non-blocking");
+ ///
+ /// # fn wait_for_fd() { unimplemented!() }
+ /// # fn handle_connection(stream: std::net::TcpStream) { unimplemented!() }
+ /// for stream in listener.incoming() {
+ /// match stream {
+ /// Ok(s) => {
+ /// // do something with the TcpStream
+ /// handle_connection(s);
+ /// }
+ /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ /// // wait until network socket is ready, typically implemented
+ /// // via platform-specific APIs such as epoll or IOCP
+ /// wait_for_fd();
+ /// continue;
+ /// }
+ /// Err(e) => panic!("encountered IO error: {e}"),
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.0.set_nonblocking(nonblocking)
+ }
+}
+
+// In addition to the `impl`s here, `TcpListener` also has `impl`s for
+// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
+// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
+// `AsSocket`/`From<OwnedSocket>`/`Into<OwnedSocket>` and
+// `AsRawSocket`/`IntoRawSocket`/`FromRawSocket` on Windows.
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for Incoming<'a> {
+ type Item = io::Result<TcpStream>;
+ fn next(&mut self) -> Option<io::Result<TcpStream>> {
+ Some(self.listener.accept().map(|p| p.0))
+ }
+}
+
+#[stable(feature = "tcp_listener_incoming_fused_iterator", since = "1.64.0")]
+impl FusedIterator for Incoming<'_> {}
+
+#[unstable(feature = "tcplistener_into_incoming", issue = "88339")]
+impl Iterator for IntoIncoming {
+ type Item = io::Result<TcpStream>;
+ fn next(&mut self) -> Option<io::Result<TcpStream>> {
+ Some(self.listener.accept().map(|p| p.0))
+ }
+}
+
+#[unstable(feature = "tcplistener_into_incoming", issue = "88339")]
+impl FusedIterator for IntoIncoming {}
+
+impl AsInner<net_imp::TcpListener> for TcpListener {
+ fn as_inner(&self) -> &net_imp::TcpListener {
+ &self.0
+ }
+}
+
+impl FromInner<net_imp::TcpListener> for TcpListener {
+ fn from_inner(inner: net_imp::TcpListener) -> TcpListener {
+ TcpListener(inner)
+ }
+}
+
+impl IntoInner<net_imp::TcpListener> for TcpListener {
+ fn into_inner(self) -> net_imp::TcpListener {
+ self.0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
diff --git a/library/std/src/net/tcp/tests.rs b/library/std/src/net/tcp/tests.rs
new file mode 100644
index 000000000..8c0adcfb0
--- /dev/null
+++ b/library/std/src/net/tcp/tests.rs
@@ -0,0 +1,876 @@
+use crate::fmt;
+use crate::io::prelude::*;
+use crate::io::{ErrorKind, IoSlice, IoSliceMut};
+use crate::net::test::{next_test_ip4, next_test_ip6};
+use crate::net::*;
+use crate::sync::mpsc::channel;
+use crate::thread;
+use crate::time::{Duration, Instant};
+
+fn each_ip(f: &mut dyn FnMut(SocketAddr)) {
+ f(next_test_ip4());
+ f(next_test_ip6());
+}
+
+macro_rules! t {
+ ($e:expr) => {
+ match $e {
+ Ok(t) => t,
+ Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
+ }
+ };
+}
+
+#[test]
+fn bind_error() {
+ match TcpListener::bind("1.1.1.1:9999") {
+ Ok(..) => panic!(),
+ Err(e) => assert_eq!(e.kind(), ErrorKind::AddrNotAvailable),
+ }
+}
+
+#[test]
+fn connect_error() {
+ match TcpStream::connect("0.0.0.0:1") {
+ Ok(..) => panic!(),
+ Err(e) => assert!(
+ e.kind() == ErrorKind::ConnectionRefused
+ || e.kind() == ErrorKind::InvalidInput
+ || e.kind() == ErrorKind::AddrInUse
+ || e.kind() == ErrorKind::AddrNotAvailable,
+ "bad error: {} {:?}",
+ e,
+ e.kind()
+ ),
+ }
+}
+
+#[test]
+fn listen_localhost() {
+ let socket_addr = next_test_ip4();
+ let listener = t!(TcpListener::bind(&socket_addr));
+
+ let _t = thread::spawn(move || {
+ let mut stream = t!(TcpStream::connect(&("localhost", socket_addr.port())));
+ t!(stream.write(&[144]));
+ });
+
+ let mut stream = t!(listener.accept()).0;
+ let mut buf = [0];
+ t!(stream.read(&mut buf));
+ assert!(buf[0] == 144);
+}
+
+#[test]
+fn connect_loopback() {
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let _t = thread::spawn(move || {
+ let host = match addr {
+ SocketAddr::V4(..) => "127.0.0.1",
+ SocketAddr::V6(..) => "::1",
+ };
+ let mut stream = t!(TcpStream::connect(&(host, addr.port())));
+ t!(stream.write(&[66]));
+ });
+
+ let mut stream = t!(acceptor.accept()).0;
+ let mut buf = [0];
+ t!(stream.read(&mut buf));
+ assert!(buf[0] == 66);
+ })
+}
+
+#[test]
+fn smoke_test() {
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || {
+ let mut stream = t!(TcpStream::connect(&addr));
+ t!(stream.write(&[99]));
+ tx.send(t!(stream.local_addr())).unwrap();
+ });
+
+ let (mut stream, addr) = t!(acceptor.accept());
+ let mut buf = [0];
+ t!(stream.read(&mut buf));
+ assert!(buf[0] == 99);
+ assert_eq!(addr, t!(rx.recv()));
+ })
+}
+
+#[test]
+fn read_eof() {
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let _t = thread::spawn(move || {
+ let _stream = t!(TcpStream::connect(&addr));
+ // Close
+ });
+
+ let mut stream = t!(acceptor.accept()).0;
+ let mut buf = [0];
+ let nread = t!(stream.read(&mut buf));
+ assert_eq!(nread, 0);
+ let nread = t!(stream.read(&mut buf));
+ assert_eq!(nread, 0);
+ })
+}
+
+#[test]
+fn write_close() {
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || {
+ drop(t!(TcpStream::connect(&addr)));
+ tx.send(()).unwrap();
+ });
+
+ let mut stream = t!(acceptor.accept()).0;
+ rx.recv().unwrap();
+ let buf = [0];
+ match stream.write(&buf) {
+ Ok(..) => {}
+ Err(e) => {
+ assert!(
+ e.kind() == ErrorKind::ConnectionReset
+ || e.kind() == ErrorKind::BrokenPipe
+ || e.kind() == ErrorKind::ConnectionAborted,
+ "unknown error: {e}"
+ );
+ }
+ }
+ })
+}
+
+#[test]
+fn multiple_connect_serial() {
+ each_ip(&mut |addr| {
+ let max = 10;
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let _t = thread::spawn(move || {
+ for _ in 0..max {
+ let mut stream = t!(TcpStream::connect(&addr));
+ t!(stream.write(&[99]));
+ }
+ });
+
+ for stream in acceptor.incoming().take(max) {
+ let mut stream = t!(stream);
+ let mut buf = [0];
+ t!(stream.read(&mut buf));
+ assert_eq!(buf[0], 99);
+ }
+ })
+}
+
+#[test]
+fn multiple_connect_interleaved_greedy_schedule() {
+ const MAX: usize = 10;
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let _t = thread::spawn(move || {
+ let acceptor = acceptor;
+ for (i, stream) in acceptor.incoming().enumerate().take(MAX) {
+ // Start another thread to handle the connection
+ let _t = thread::spawn(move || {
+ let mut stream = t!(stream);
+ let mut buf = [0];
+ t!(stream.read(&mut buf));
+ assert!(buf[0] == i as u8);
+ });
+ }
+ });
+
+ connect(0, addr);
+ });
+
+ fn connect(i: usize, addr: SocketAddr) {
+ if i == MAX {
+ return;
+ }
+
+ let t = thread::spawn(move || {
+ let mut stream = t!(TcpStream::connect(&addr));
+ // Connect again before writing
+ connect(i + 1, addr);
+ t!(stream.write(&[i as u8]));
+ });
+ t.join().ok().expect("thread panicked");
+ }
+}
+
+#[test]
+fn multiple_connect_interleaved_lazy_schedule() {
+ const MAX: usize = 10;
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let _t = thread::spawn(move || {
+ for stream in acceptor.incoming().take(MAX) {
+ // Start another thread to handle the connection
+ let _t = thread::spawn(move || {
+ let mut stream = t!(stream);
+ let mut buf = [0];
+ t!(stream.read(&mut buf));
+ assert!(buf[0] == 99);
+ });
+ }
+ });
+
+ connect(0, addr);
+ });
+
+ fn connect(i: usize, addr: SocketAddr) {
+ if i == MAX {
+ return;
+ }
+
+ let t = thread::spawn(move || {
+ let mut stream = t!(TcpStream::connect(&addr));
+ connect(i + 1, addr);
+ t!(stream.write(&[99]));
+ });
+ t.join().ok().expect("thread panicked");
+ }
+}
+
+#[test]
+fn socket_and_peer_name() {
+ each_ip(&mut |addr| {
+ let listener = t!(TcpListener::bind(&addr));
+ let so_name = t!(listener.local_addr());
+ assert_eq!(addr, so_name);
+ let _t = thread::spawn(move || {
+ t!(listener.accept());
+ });
+
+ let stream = t!(TcpStream::connect(&addr));
+ assert_eq!(addr, t!(stream.peer_addr()));
+ })
+}
+
+#[test]
+fn partial_read() {
+ each_ip(&mut |addr| {
+ let (tx, rx) = channel();
+ let srv = t!(TcpListener::bind(&addr));
+ let _t = thread::spawn(move || {
+ let mut cl = t!(srv.accept()).0;
+ cl.write(&[10]).unwrap();
+ let mut b = [0];
+ t!(cl.read(&mut b));
+ tx.send(()).unwrap();
+ });
+
+ let mut c = t!(TcpStream::connect(&addr));
+ let mut b = [0; 10];
+ assert_eq!(c.read(&mut b).unwrap(), 1);
+ t!(c.write(&[1]));
+ rx.recv().unwrap();
+ })
+}
+
+#[test]
+fn read_vectored() {
+ each_ip(&mut |addr| {
+ let srv = t!(TcpListener::bind(&addr));
+ let mut s1 = t!(TcpStream::connect(&addr));
+ let mut s2 = t!(srv.accept()).0;
+
+ let len = s1.write(&[10, 11, 12]).unwrap();
+ assert_eq!(len, 3);
+
+ let mut a = [];
+ let mut b = [0];
+ let mut c = [0; 3];
+ let len = t!(s2.read_vectored(&mut [
+ IoSliceMut::new(&mut a),
+ IoSliceMut::new(&mut b),
+ IoSliceMut::new(&mut c)
+ ],));
+ assert!(len > 0);
+ assert_eq!(b, [10]);
+ // some implementations don't support readv, so we may only fill the first buffer
+ assert!(len == 1 || c == [11, 12, 0]);
+ })
+}
+
+#[test]
+fn write_vectored() {
+ each_ip(&mut |addr| {
+ let srv = t!(TcpListener::bind(&addr));
+ let mut s1 = t!(TcpStream::connect(&addr));
+ let mut s2 = t!(srv.accept()).0;
+
+ let a = [];
+ let b = [10];
+ let c = [11, 12];
+ t!(s1.write_vectored(&[IoSlice::new(&a), IoSlice::new(&b), IoSlice::new(&c)]));
+
+ let mut buf = [0; 4];
+ let len = t!(s2.read(&mut buf));
+ // some implementations don't support writev, so we may only write the first buffer
+ if len == 1 {
+ assert_eq!(buf, [10, 0, 0, 0]);
+ } else {
+ assert_eq!(len, 3);
+ assert_eq!(buf, [10, 11, 12, 0]);
+ }
+ })
+}
+
+#[test]
+fn double_bind() {
+ each_ip(&mut |addr| {
+ let listener1 = t!(TcpListener::bind(&addr));
+ match TcpListener::bind(&addr) {
+ Ok(listener2) => panic!(
+ "This system (perhaps due to options set by TcpListener::bind) \
+ permits double binding: {:?} and {:?}",
+ listener1, listener2
+ ),
+ Err(e) => {
+ assert!(
+ e.kind() == ErrorKind::ConnectionRefused
+ || e.kind() == ErrorKind::Uncategorized
+ || e.kind() == ErrorKind::AddrInUse,
+ "unknown error: {} {:?}",
+ e,
+ e.kind()
+ );
+ }
+ }
+ })
+}
+
+#[test]
+fn tcp_clone_smoke() {
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let _t = thread::spawn(move || {
+ let mut s = t!(TcpStream::connect(&addr));
+ let mut buf = [0, 0];
+ assert_eq!(s.read(&mut buf).unwrap(), 1);
+ assert_eq!(buf[0], 1);
+ t!(s.write(&[2]));
+ });
+
+ let mut s1 = t!(acceptor.accept()).0;
+ let s2 = t!(s1.try_clone());
+
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ let _t = thread::spawn(move || {
+ let mut s2 = s2;
+ rx1.recv().unwrap();
+ t!(s2.write(&[1]));
+ tx2.send(()).unwrap();
+ });
+ tx1.send(()).unwrap();
+ let mut buf = [0, 0];
+ assert_eq!(s1.read(&mut buf).unwrap(), 1);
+ rx2.recv().unwrap();
+ })
+}
+
+#[test]
+fn tcp_clone_two_read() {
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+ let (tx1, rx) = channel();
+ let tx2 = tx1.clone();
+
+ let _t = thread::spawn(move || {
+ let mut s = t!(TcpStream::connect(&addr));
+ t!(s.write(&[1]));
+ rx.recv().unwrap();
+ t!(s.write(&[2]));
+ rx.recv().unwrap();
+ });
+
+ let mut s1 = t!(acceptor.accept()).0;
+ let s2 = t!(s1.try_clone());
+
+ let (done, rx) = channel();
+ let _t = thread::spawn(move || {
+ let mut s2 = s2;
+ let mut buf = [0, 0];
+ t!(s2.read(&mut buf));
+ tx2.send(()).unwrap();
+ done.send(()).unwrap();
+ });
+ let mut buf = [0, 0];
+ t!(s1.read(&mut buf));
+ tx1.send(()).unwrap();
+
+ rx.recv().unwrap();
+ })
+}
+
+#[test]
+fn tcp_clone_two_write() {
+ each_ip(&mut |addr| {
+ let acceptor = t!(TcpListener::bind(&addr));
+
+ let _t = thread::spawn(move || {
+ let mut s = t!(TcpStream::connect(&addr));
+ let mut buf = [0, 1];
+ t!(s.read(&mut buf));
+ t!(s.read(&mut buf));
+ });
+
+ let mut s1 = t!(acceptor.accept()).0;
+ let s2 = t!(s1.try_clone());
+
+ let (done, rx) = channel();
+ let _t = thread::spawn(move || {
+ let mut s2 = s2;
+ t!(s2.write(&[1]));
+ done.send(()).unwrap();
+ });
+ t!(s1.write(&[2]));
+
+ rx.recv().unwrap();
+ })
+}
+
+#[test]
+// FIXME: https://github.com/fortanix/rust-sgx/issues/110
+#[cfg_attr(target_env = "sgx", ignore)]
+fn shutdown_smoke() {
+ each_ip(&mut |addr| {
+ let a = t!(TcpListener::bind(&addr));
+ let _t = thread::spawn(move || {
+ let mut c = t!(a.accept()).0;
+ let mut b = [0];
+ assert_eq!(c.read(&mut b).unwrap(), 0);
+ t!(c.write(&[1]));
+ });
+
+ let mut s = t!(TcpStream::connect(&addr));
+ t!(s.shutdown(Shutdown::Write));
+ assert!(s.write(&[1]).is_err());
+ let mut b = [0, 0];
+ assert_eq!(t!(s.read(&mut b)), 1);
+ assert_eq!(b[0], 1);
+ })
+}
+
+#[test]
+// FIXME: https://github.com/fortanix/rust-sgx/issues/110
+#[cfg_attr(target_env = "sgx", ignore)]
+fn close_readwrite_smoke() {
+ each_ip(&mut |addr| {
+ let a = t!(TcpListener::bind(&addr));
+ let (tx, rx) = channel::<()>();
+ let _t = thread::spawn(move || {
+ let _s = t!(a.accept());
+ let _ = rx.recv();
+ });
+
+ let mut b = [0];
+ let mut s = t!(TcpStream::connect(&addr));
+ let mut s2 = t!(s.try_clone());
+
+ // closing should prevent reads/writes
+ t!(s.shutdown(Shutdown::Write));
+ assert!(s.write(&[0]).is_err());
+ t!(s.shutdown(Shutdown::Read));
+ assert_eq!(s.read(&mut b).unwrap(), 0);
+
+ // closing should affect previous handles
+ assert!(s2.write(&[0]).is_err());
+ assert_eq!(s2.read(&mut b).unwrap(), 0);
+
+ // closing should affect new handles
+ let mut s3 = t!(s.try_clone());
+ assert!(s3.write(&[0]).is_err());
+ assert_eq!(s3.read(&mut b).unwrap(), 0);
+
+ // make sure these don't die
+ let _ = s2.shutdown(Shutdown::Read);
+ let _ = s2.shutdown(Shutdown::Write);
+ let _ = s3.shutdown(Shutdown::Read);
+ let _ = s3.shutdown(Shutdown::Write);
+ drop(tx);
+ })
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)]
+fn close_read_wakes_up() {
+ each_ip(&mut |addr| {
+ let a = t!(TcpListener::bind(&addr));
+ let (tx1, rx) = channel::<()>();
+ let _t = thread::spawn(move || {
+ let _s = t!(a.accept());
+ let _ = rx.recv();
+ });
+
+ let s = t!(TcpStream::connect(&addr));
+ let s2 = t!(s.try_clone());
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || {
+ let mut s2 = s2;
+ assert_eq!(t!(s2.read(&mut [0])), 0);
+ tx.send(()).unwrap();
+ });
+ // this should wake up the child thread
+ t!(s.shutdown(Shutdown::Read));
+
+ // this test will never finish if the child doesn't wake up
+ rx.recv().unwrap();
+ drop(tx1);
+ })
+}
+
+#[test]
+fn clone_while_reading() {
+ each_ip(&mut |addr| {
+ let accept = t!(TcpListener::bind(&addr));
+
+ // Enqueue a thread to write to a socket
+ let (tx, rx) = channel();
+ let (txdone, rxdone) = channel();
+ let txdone2 = txdone.clone();
+ let _t = thread::spawn(move || {
+ let mut tcp = t!(TcpStream::connect(&addr));
+ rx.recv().unwrap();
+ t!(tcp.write(&[0]));
+ txdone2.send(()).unwrap();
+ });
+
+ // Spawn off a reading clone
+ let tcp = t!(accept.accept()).0;
+ let tcp2 = t!(tcp.try_clone());
+ let txdone3 = txdone.clone();
+ let _t = thread::spawn(move || {
+ let mut tcp2 = tcp2;
+ t!(tcp2.read(&mut [0]));
+ txdone3.send(()).unwrap();
+ });
+
+ // Try to ensure that the reading clone is indeed reading
+ for _ in 0..50 {
+ thread::yield_now();
+ }
+
+ // clone the handle again while it's reading, then let it finish the
+ // read.
+ let _ = t!(tcp.try_clone());
+ tx.send(()).unwrap();
+ rxdone.recv().unwrap();
+ rxdone.recv().unwrap();
+ })
+}
+
+#[test]
+fn clone_accept_smoke() {
+ each_ip(&mut |addr| {
+ let a = t!(TcpListener::bind(&addr));
+ let a2 = t!(a.try_clone());
+
+ let _t = thread::spawn(move || {
+ let _ = TcpStream::connect(&addr);
+ });
+ let _t = thread::spawn(move || {
+ let _ = TcpStream::connect(&addr);
+ });
+
+ t!(a.accept());
+ t!(a2.accept());
+ })
+}
+
+#[test]
+fn clone_accept_concurrent() {
+ each_ip(&mut |addr| {
+ let a = t!(TcpListener::bind(&addr));
+ let a2 = t!(a.try_clone());
+
+ let (tx, rx) = channel();
+ let tx2 = tx.clone();
+
+ let _t = thread::spawn(move || {
+ tx.send(t!(a.accept())).unwrap();
+ });
+ let _t = thread::spawn(move || {
+ tx2.send(t!(a2.accept())).unwrap();
+ });
+
+ let _t = thread::spawn(move || {
+ let _ = TcpStream::connect(&addr);
+ });
+ let _t = thread::spawn(move || {
+ let _ = TcpStream::connect(&addr);
+ });
+
+ rx.recv().unwrap();
+ rx.recv().unwrap();
+ })
+}
+
+#[test]
+fn debug() {
+ #[cfg(not(target_env = "sgx"))]
+ fn render_socket_addr<'a>(addr: &'a SocketAddr) -> impl fmt::Debug + 'a {
+ addr
+ }
+ #[cfg(target_env = "sgx")]
+ fn render_socket_addr<'a>(addr: &'a SocketAddr) -> impl fmt::Debug + 'a {
+ addr.to_string()
+ }
+
+ #[cfg(target_env = "sgx")]
+ use crate::os::fortanix_sgx::io::AsRawFd;
+ #[cfg(unix)]
+ use crate::os::unix::io::AsRawFd;
+ #[cfg(not(windows))]
+ fn render_inner(addr: &dyn AsRawFd) -> impl fmt::Debug {
+ addr.as_raw_fd()
+ }
+ #[cfg(windows)]
+ fn render_inner(addr: &dyn crate::os::windows::io::AsRawSocket) -> impl fmt::Debug {
+ addr.as_raw_socket()
+ }
+
+ let inner_name = if cfg!(windows) { "socket" } else { "fd" };
+ let socket_addr = next_test_ip4();
+
+ let listener = t!(TcpListener::bind(&socket_addr));
+ let compare = format!(
+ "TcpListener {{ addr: {:?}, {}: {:?} }}",
+ render_socket_addr(&socket_addr),
+ inner_name,
+ render_inner(&listener)
+ );
+ assert_eq!(format!("{listener:?}"), compare);
+
+ let stream = t!(TcpStream::connect(&("localhost", socket_addr.port())));
+ let compare = format!(
+ "TcpStream {{ addr: {:?}, peer: {:?}, {}: {:?} }}",
+ render_socket_addr(&stream.local_addr().unwrap()),
+ render_socket_addr(&stream.peer_addr().unwrap()),
+ inner_name,
+ render_inner(&stream)
+ );
+ assert_eq!(format!("{stream:?}"), compare);
+}
+
+// FIXME: re-enable openbsd tests once their socket timeout code
+// no longer has rounding errors.
+// VxWorks ignores SO_SNDTIMEO.
+#[cfg_attr(any(target_os = "netbsd", target_os = "openbsd", target_os = "vxworks"), ignore)]
+#[cfg_attr(target_env = "sgx", ignore)] // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+#[test]
+fn timeouts() {
+ let addr = next_test_ip4();
+ let listener = t!(TcpListener::bind(&addr));
+
+ let stream = t!(TcpStream::connect(&("localhost", addr.port())));
+ let dur = Duration::new(15410, 0);
+
+ assert_eq!(None, t!(stream.read_timeout()));
+
+ t!(stream.set_read_timeout(Some(dur)));
+ assert_eq!(Some(dur), t!(stream.read_timeout()));
+
+ assert_eq!(None, t!(stream.write_timeout()));
+
+ t!(stream.set_write_timeout(Some(dur)));
+ assert_eq!(Some(dur), t!(stream.write_timeout()));
+
+ t!(stream.set_read_timeout(None));
+ assert_eq!(None, t!(stream.read_timeout()));
+
+ t!(stream.set_write_timeout(None));
+ assert_eq!(None, t!(stream.write_timeout()));
+ drop(listener);
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)] // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+fn test_read_timeout() {
+ let addr = next_test_ip4();
+ let listener = t!(TcpListener::bind(&addr));
+
+ let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
+ t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+ let mut buf = [0; 10];
+ let start = Instant::now();
+ let kind = stream.read_exact(&mut buf).err().expect("expected error").kind();
+ assert!(
+ kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut,
+ "unexpected_error: {:?}",
+ kind
+ );
+ assert!(start.elapsed() > Duration::from_millis(400));
+ drop(listener);
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)] // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+fn test_read_with_timeout() {
+ let addr = next_test_ip4();
+ let listener = t!(TcpListener::bind(&addr));
+
+ let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
+ t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+ let mut other_end = t!(listener.accept()).0;
+ t!(other_end.write_all(b"hello world"));
+
+ let mut buf = [0; 11];
+ t!(stream.read(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+
+ let start = Instant::now();
+ let kind = stream.read_exact(&mut buf).err().expect("expected error").kind();
+ assert!(
+ kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut,
+ "unexpected_error: {:?}",
+ kind
+ );
+ assert!(start.elapsed() > Duration::from_millis(400));
+ drop(listener);
+}
+
+// Ensure the `set_read_timeout` and `set_write_timeout` calls return errors
+// when passed zero Durations
+#[test]
+fn test_timeout_zero_duration() {
+ let addr = next_test_ip4();
+
+ let listener = t!(TcpListener::bind(&addr));
+ let stream = t!(TcpStream::connect(&addr));
+
+ let result = stream.set_write_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+
+ let result = stream.set_read_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+
+ drop(listener);
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)]
+fn linger() {
+ let addr = next_test_ip4();
+ let _listener = t!(TcpListener::bind(&addr));
+
+ let stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+ assert_eq!(None, t!(stream.linger()));
+ t!(stream.set_linger(Some(Duration::from_secs(1))));
+ assert_eq!(Some(Duration::from_secs(1)), t!(stream.linger()));
+ t!(stream.set_linger(None));
+ assert_eq!(None, t!(stream.linger()));
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)]
+fn nodelay() {
+ let addr = next_test_ip4();
+ let _listener = t!(TcpListener::bind(&addr));
+
+ let stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+ assert_eq!(false, t!(stream.nodelay()));
+ t!(stream.set_nodelay(true));
+ assert_eq!(true, t!(stream.nodelay()));
+ t!(stream.set_nodelay(false));
+ assert_eq!(false, t!(stream.nodelay()));
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)]
+fn ttl() {
+ let ttl = 100;
+
+ let addr = next_test_ip4();
+ let listener = t!(TcpListener::bind(&addr));
+
+ t!(listener.set_ttl(ttl));
+ assert_eq!(ttl, t!(listener.ttl()));
+
+ let stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+ t!(stream.set_ttl(ttl));
+ assert_eq!(ttl, t!(stream.ttl()));
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)]
+fn set_nonblocking() {
+ let addr = next_test_ip4();
+ let listener = t!(TcpListener::bind(&addr));
+
+ t!(listener.set_nonblocking(true));
+ t!(listener.set_nonblocking(false));
+
+ let mut stream = t!(TcpStream::connect(&("localhost", addr.port())));
+
+ t!(stream.set_nonblocking(false));
+ t!(stream.set_nonblocking(true));
+
+ let mut buf = [0];
+ match stream.read(&mut buf) {
+ Ok(_) => panic!("expected error"),
+ Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
+ Err(e) => panic!("unexpected error {e}"),
+ }
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)] // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+fn peek() {
+ each_ip(&mut |addr| {
+ let (txdone, rxdone) = channel();
+
+ let srv = t!(TcpListener::bind(&addr));
+ let _t = thread::spawn(move || {
+ let mut cl = t!(srv.accept()).0;
+ cl.write(&[1, 3, 3, 7]).unwrap();
+ t!(rxdone.recv());
+ });
+
+ let mut c = t!(TcpStream::connect(&addr));
+ let mut b = [0; 10];
+ for _ in 1..3 {
+ let len = c.peek(&mut b).unwrap();
+ assert_eq!(len, 4);
+ }
+ let len = c.read(&mut b).unwrap();
+ assert_eq!(len, 4);
+
+ t!(c.set_nonblocking(true));
+ match c.peek(&mut b) {
+ Ok(_) => panic!("expected error"),
+ Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
+ Err(e) => panic!("unexpected error {e}"),
+ }
+ t!(txdone.send(()));
+ })
+}
+
+#[test]
+#[cfg_attr(target_env = "sgx", ignore)] // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+fn connect_timeout_valid() {
+ let listener = TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+ TcpStream::connect_timeout(&addr, Duration::from_secs(2)).unwrap();
+}
diff --git a/library/std/src/net/test.rs b/library/std/src/net/test.rs
new file mode 100644
index 000000000..37937b5ea
--- /dev/null
+++ b/library/std/src/net/test.rs
@@ -0,0 +1,60 @@
+#![allow(warnings)] // not used on emscripten
+
+use crate::env;
+use crate::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
+use crate::sync::atomic::{AtomicUsize, Ordering};
+
+static PORT: AtomicUsize = AtomicUsize::new(0);
+
+pub fn next_test_ip4() -> SocketAddr {
+ let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+ SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port))
+}
+
+pub fn next_test_ip6() -> SocketAddr {
+ let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+ SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), port, 0, 0))
+}
+
+pub fn sa4(a: Ipv4Addr, p: u16) -> SocketAddr {
+ SocketAddr::V4(SocketAddrV4::new(a, p))
+}
+
+pub fn sa6(a: Ipv6Addr, p: u16) -> SocketAddr {
+ SocketAddr::V6(SocketAddrV6::new(a, p, 0, 0))
+}
+
+pub fn tsa<A: ToSocketAddrs>(a: A) -> Result<Vec<SocketAddr>, String> {
+ match a.to_socket_addrs() {
+ Ok(a) => Ok(a.collect()),
+ Err(e) => Err(e.to_string()),
+ }
+}
+
+// The bots run multiple builds at the same time, and these builds
+// all want to use ports. This function figures out which workspace
+// it is running in and assigns a port range based on it.
+fn base_port() -> u16 {
+ let cwd = if cfg!(target_env = "sgx") {
+ String::from("sgx")
+ } else {
+ env::current_dir().unwrap().into_os_string().into_string().unwrap()
+ };
+ let dirs = [
+ "32-opt",
+ "32-nopt",
+ "musl-64-opt",
+ "cross-opt",
+ "64-opt",
+ "64-nopt",
+ "64-opt-vg",
+ "64-debug-opt",
+ "all-opt",
+ "snap3",
+ "dist",
+ "sgx",
+ ];
+ dirs.iter().enumerate().find(|&(_, dir)| cwd.contains(dir)).map(|p| p.0).unwrap_or(0) as u16
+ * 1000
+ + 19600
+}
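The workspace-to-port mapping in `base_port` is easiest to see with concrete numbers. A self-contained sketch that mirrors the arithmetic above; the directory paths are made up for illustration:

```rust
// Mirrors the arithmetic of base_port(): first matching dir index * 1000 + 19600.
fn base_port_for(cwd: &str) -> u16 {
    let dirs = [
        "32-opt", "32-nopt", "musl-64-opt", "cross-opt", "64-opt", "64-nopt",
        "64-opt-vg", "64-debug-opt", "all-opt", "snap3", "dist", "sgx",
    ];
    dirs.iter().position(|dir| cwd.contains(dir)).unwrap_or(0) as u16 * 1000 + 19600
}

fn main() {
    assert_eq!(base_port_for("/builds/rust/64-opt"), 23600); // index 4: 4 * 1000 + 19600
    assert_eq!(base_port_for("/tmp/anything-else"), 19600);  // no match: index defaults to 0
}
```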
diff --git a/library/std/src/net/udp.rs b/library/std/src/net/udp.rs
new file mode 100644
index 000000000..864e1b0f3
--- /dev/null
+++ b/library/std/src/net/udp.rs
@@ -0,0 +1,813 @@
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+mod tests;
+
+use crate::fmt;
+use crate::io::{self, ErrorKind};
+use crate::net::{Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs};
+use crate::sys_common::net as net_imp;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::time::Duration;
+
+/// A UDP socket.
+///
+/// After creating a `UdpSocket` by [`bind`]ing it to a socket address, data can be
+/// [sent to] and [received from] any other socket address.
+///
+/// Although UDP is a connectionless protocol, this implementation provides an interface
+/// to set an address where data should be sent and received from. After setting a remote
+/// address with [`connect`], data can be sent to and received from that address with
+/// [`send`] and [`recv`].
+///
+/// As stated in the User Datagram Protocol's specification in [IETF RFC 768], UDP is
+/// an unordered, unreliable protocol; refer to [`TcpListener`] and [`TcpStream`] for TCP
+/// primitives.
+///
+/// [`bind`]: UdpSocket::bind
+/// [`connect`]: UdpSocket::connect
+/// [IETF RFC 768]: https://tools.ietf.org/html/rfc768
+/// [`recv`]: UdpSocket::recv
+/// [received from]: UdpSocket::recv_from
+/// [`send`]: UdpSocket::send
+/// [sent to]: UdpSocket::send_to
+/// [`TcpListener`]: crate::net::TcpListener
+/// [`TcpStream`]: crate::net::TcpStream
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::net::UdpSocket;
+///
+/// fn main() -> std::io::Result<()> {
+/// {
+/// let socket = UdpSocket::bind("127.0.0.1:34254")?;
+///
+/// // Receives a single datagram message on the socket. If `buf` is too small to hold
+/// // the message, it will be cut off.
+/// let mut buf = [0; 10];
+/// let (amt, src) = socket.recv_from(&mut buf)?;
+///
+/// // Redeclare `buf` as a slice of the received data and send the reversed
+/// // data back to the origin.
+/// let buf = &mut buf[..amt];
+/// buf.reverse();
+/// socket.send_to(buf, &src)?;
+/// } // the socket is closed here
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct UdpSocket(net_imp::UdpSocket);
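The struct-level docs above describe the connected mode (`connect`, `send`, `recv`) but only demonstrate `recv_from`/`send_to`. A hedged sketch of the connected flavour; the peer address is a placeholder and nothing is listening there, so `recv` would not return in practice:

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Bind to an ephemeral local port, then fix the remote peer once.
    let socket = UdpSocket::bind("127.0.0.1:0")?;
    socket.connect("127.0.0.1:8080")?; // placeholder peer

    // On a connected socket, send/recv take over from send_to/recv_from.
    socket.send(&[1, 2, 3])?;
    let mut buf = [0u8; 16];
    let n = socket.recv(&mut buf)?; // blocks until the (hypothetical) peer replies
    println!("received {n} bytes: {:?}", &buf[..n]);
    Ok(())
}
```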
+
+impl UdpSocket {
+ /// Creates a UDP socket from the given address.
+ ///
+ /// The address type can be any implementor of the [`ToSocketAddrs`] trait. See
+ /// its documentation for concrete examples.
+ ///
+ /// If `addr` yields multiple addresses, `bind` will be attempted with
+ /// each of the addresses until one succeeds and returns the socket. If none
+ /// of the addresses succeed in creating a socket, the error returned from
+ /// the last attempt (the last address) is returned.
+ ///
+ /// # Examples
+ ///
+ /// Creates a UDP socket bound to `127.0.0.1:3400`:
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:3400").expect("couldn't bind to address");
+ /// ```
+ ///
+ /// Creates a UDP socket bound to `127.0.0.1:3400`. If the socket cannot be
+ /// bound to that address, falls back to a UDP socket bound to `127.0.0.1:3401`:
+ ///
+ /// ```no_run
+ /// use std::net::{SocketAddr, UdpSocket};
+ ///
+ /// let addrs = [
+ /// SocketAddr::from(([127, 0, 0, 1], 3400)),
+ /// SocketAddr::from(([127, 0, 0, 1], 3401)),
+ /// ];
+ /// let socket = UdpSocket::bind(&addrs[..]).expect("couldn't bind to address");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<UdpSocket> {
+ super::each_addr(addr, net_imp::UdpSocket::bind).map(UdpSocket)
+ }
+
+ /// Receives a single datagram message on the socket. On success, returns the number
+ /// of bytes read and the origin.
+ ///
+ /// The function must be called with a valid byte array `buf` of sufficient size to
+ /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// let mut buf = [0; 10];
+ /// let (number_of_bytes, src_addr) = socket.recv_from(&mut buf)
+ /// .expect("Didn't receive data");
+ /// let filled_buf = &mut buf[..number_of_bytes];
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0.recv_from(buf)
+ }
+
+ /// Receives a single datagram message on the socket, without removing it from the
+ /// queue. On success, returns the number of bytes read and the origin.
+ ///
+ /// The function must be called with a valid byte array `buf` of sufficient size to
+ /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recvfrom` system call.
+ ///
+ /// Do not use this function to implement busy waiting; instead use `libc::poll` to
+ /// synchronize IO events on one or more sockets.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// let mut buf = [0; 10];
+ /// let (number_of_bytes, src_addr) = socket.peek_from(&mut buf)
+ /// .expect("Didn't receive data");
+ /// let filled_buf = &mut buf[..number_of_bytes];
+ /// ```
+ #[stable(feature = "peek", since = "1.18.0")]
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0.peek_from(buf)
+ }
+
+ /// Sends data on the socket to the given address. On success, returns the
+ /// number of bytes written.
+ ///
+ /// The address type can be any implementor of the [`ToSocketAddrs`] trait. See its
+ /// documentation for concrete examples.
+ ///
+ /// It is possible for `addr` to yield multiple addresses, but `send_to`
+ /// will only send data to the first address yielded by `addr`.
+ ///
+ /// This will return an error when the IP version of the local socket
+ /// does not match that returned from [`ToSocketAddrs`].
+ ///
+ /// See [Issue #34202] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.send_to(&[0; 10], "127.0.0.1:4242").expect("couldn't send data");
+ /// ```
+ ///
+ /// [Issue #34202]: https://github.com/rust-lang/rust/issues/34202
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn send_to<A: ToSocketAddrs>(&self, buf: &[u8], addr: A) -> io::Result<usize> {
+ match addr.to_socket_addrs()?.next() {
+ Some(addr) => self.0.send_to(buf, &addr),
+ None => {
+ Err(io::const_io_error!(ErrorKind::InvalidInput, "no addresses to send data to"))
+ }
+ }
+ }
+
+ /// Returns the socket address of the remote peer this socket was connected to.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, UdpSocket};
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.connect("192.168.0.1:41203").expect("couldn't connect to address");
+ /// assert_eq!(socket.peer_addr().unwrap(),
+ /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(192, 168, 0, 1), 41203)));
+ /// ```
+ ///
+ /// If the socket isn't connected, it will return a [`NotConnected`] error.
+ ///
+ /// [`NotConnected`]: io::ErrorKind::NotConnected
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// assert_eq!(socket.peer_addr().unwrap_err().kind(),
+ /// std::io::ErrorKind::NotConnected);
+ /// ```
+ #[stable(feature = "udp_peer_addr", since = "1.40.0")]
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.0.peer_addr()
+ }
+
+ /// Returns the socket address that this socket was created from.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, UdpSocket};
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// assert_eq!(socket.local_addr().unwrap(),
+ /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 34254)));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.0.socket_addr()
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UdpSocket` is a reference to the same socket that this
+ /// object references. Both handles will read and write the same port, and
+ /// options set on one socket will be propagated to the other.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// let socket_clone = socket.try_clone().expect("couldn't clone the socket");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_clone(&self) -> io::Result<UdpSocket> {
+ self.0.duplicate().map(UdpSocket)
+ }
+
+ /// Sets the read timeout to the timeout specified.
+ ///
+ /// If the value specified is [`None`], then [`read`] calls will block
+ /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is
+ /// passed to this method.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// Platforms may return a different error code whenever a read times out as
+ /// a result of setting this option. For example Unix typically returns an
+ /// error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
+ ///
+ /// [`read`]: io::Read::read
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
+ /// [`TimedOut`]: io::ErrorKind::TimedOut
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_read_timeout(None).expect("set_read_timeout call failed");
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::net::UdpSocket;
+ /// use std::time::Duration;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").unwrap();
+ /// let result = socket.set_read_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput)
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.0.set_read_timeout(dur)
+ }
+
+ /// Sets the write timeout to the timeout specified.
+ ///
+ /// If the value specified is [`None`], then [`write`] calls will block
+ /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is
+ /// passed to this method.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// Platforms may return a different error code whenever a write times out
+ /// as a result of setting this option. For example Unix typically returns
+ /// an error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
+ ///
+ /// [`write`]: io::Write::write
+ /// [`WouldBlock`]: io::ErrorKind::WouldBlock
+ /// [`TimedOut`]: io::ErrorKind::TimedOut
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_write_timeout(None).expect("set_write_timeout call failed");
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::net::UdpSocket;
+ /// use std::time::Duration;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").unwrap();
+ /// let result = socket.set_write_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput)
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.0.set_write_timeout(dur)
+ }
+
+ /// Returns the read timeout of this socket.
+ ///
+ /// If the timeout is [`None`], then [`read`] calls will block indefinitely.
+ ///
+ /// [`read`]: io::Read::read
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_read_timeout(None).expect("set_read_timeout call failed");
+ /// assert_eq!(socket.read_timeout().unwrap(), None);
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.read_timeout()
+ }
+
+ /// Returns the write timeout of this socket.
+ ///
+ /// If the timeout is [`None`], then [`write`] calls will block indefinitely.
+ ///
+ /// [`write`]: io::Write::write
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_write_timeout(None).expect("set_write_timeout call failed");
+ /// assert_eq!(socket.write_timeout().unwrap(), None);
+ /// ```
+ #[stable(feature = "socket_timeout", since = "1.4.0")]
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.write_timeout()
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_broadcast(false).expect("set_broadcast call failed");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> {
+ self.0.set_broadcast(broadcast)
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see [`UdpSocket::set_broadcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_broadcast(false).expect("set_broadcast call failed");
+ /// assert_eq!(socket.broadcast().unwrap(), false);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.0.broadcast()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+ /// Note that this might not have any effect on IPv6 sockets.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_multicast_loop_v4(false).expect("set_multicast_loop_v4 call failed");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> {
+ self.0.set_multicast_loop_v4(multicast_loop_v4)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see [`UdpSocket::set_multicast_loop_v4`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_multicast_loop_v4(false).expect("set_multicast_loop_v4 call failed");
+ /// assert_eq!(socket.multicast_loop_v4().unwrap(), false);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.0.multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+ /// this socket. The default value is 1, which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+ /// Note that this might not have any effect on IPv6 sockets.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_multicast_ttl_v4(42).expect("set_multicast_ttl_v4 call failed");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> {
+ self.0.set_multicast_ttl_v4(multicast_ttl_v4)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`UdpSocket::set_multicast_ttl_v4`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_multicast_ttl_v4(42).expect("set_multicast_ttl_v4 call failed");
+ /// assert_eq!(socket.multicast_ttl_v4().unwrap(), 42);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.0.multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+ /// Note that this might not have any effect on IPv4 sockets.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_multicast_loop_v6(false).expect("set_multicast_loop_v6 call failed");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> {
+ self.0.set_multicast_loop_v6(multicast_loop_v6)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see [`UdpSocket::set_multicast_loop_v6`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_multicast_loop_v6(false).expect("set_multicast_loop_v6 call failed");
+ /// assert_eq!(socket.multicast_loop_v6().unwrap(), false);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.0.multicast_loop_v6()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_ttl(42).expect("set_ttl call failed");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.0.set_ttl(ttl)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`UdpSocket::set_ttl`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.set_ttl(42).expect("set_ttl call failed");
+ /// assert_eq!(socket.ttl().unwrap(), 42);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0.ttl()
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
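+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the group address `224.0.0.123` and the port are arbitrary,
+ /// illustrative values; `Ipv4Addr::UNSPECIFIED` corresponds to `INADDR_ANY`):
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv4Addr, UdpSocket};
+ ///
+ /// let socket = UdpSocket::bind("0.0.0.0:34254").expect("couldn't bind to address");
+ /// socket.join_multicast_v4(&Ipv4Addr::new(224, 0, 0, 123), &Ipv4Addr::UNSPECIFIED)
+ ///     .expect("join_multicast_v4 call failed");
+ /// ```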
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ self.0.join_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
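+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the link-local group `ff02::123` and the port are arbitrary,
+ /// illustrative values; interface index `0` lets the system choose an interface):
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv6Addr, UdpSocket};
+ ///
+ /// let socket = UdpSocket::bind("[::]:34254").expect("couldn't bind to address");
+ /// socket.join_multicast_v6(&Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 0x123), 0)
+ ///     .expect("join_multicast_v6 call failed");
+ /// ```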
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.0.join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see [`UdpSocket::join_multicast_v4`].
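+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch that leaves a group joined earlier (the group address and
+ /// port are arbitrary, illustrative values):
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv4Addr, UdpSocket};
+ ///
+ /// let socket = UdpSocket::bind("0.0.0.0:34254").expect("couldn't bind to address");
+ /// let group = Ipv4Addr::new(224, 0, 0, 123);
+ /// socket.join_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)
+ ///     .expect("join_multicast_v4 call failed");
+ /// socket.leave_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)
+ ///     .expect("leave_multicast_v4 call failed");
+ /// ```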
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ self.0.leave_multicast_v4(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see [`UdpSocket::join_multicast_v6`].
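+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch that leaves a group joined earlier (the group address and
+ /// port are arbitrary, illustrative values):
+ ///
+ /// ```no_run
+ /// use std::net::{Ipv6Addr, UdpSocket};
+ ///
+ /// let socket = UdpSocket::bind("[::]:34254").expect("couldn't bind to address");
+ /// let group = Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 0x123);
+ /// socket.join_multicast_v6(&group, 0).expect("join_multicast_v6 call failed");
+ /// socket.leave_multicast_v6(&group, 0).expect("leave_multicast_v6 call failed");
+ /// ```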
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.0.leave_multicast_v6(multiaddr, interface)
+ }
+
+ /// Gets the value of the `SO_ERROR` option on this socket.
+ ///
+ /// This will retrieve the stored error in the underlying socket, clearing
+ /// the field in the process. This can be useful for checking errors between
+ /// calls.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// match socket.take_error() {
+ /// Ok(Some(error)) => println!("UdpSocket error: {error:?}"),
+ /// Ok(None) => println!("No error"),
+ /// Err(error) => println!("UdpSocket.take_error failed: {error:?}"),
+ /// }
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0.take_error()
+ }
+
+ /// Connects this UDP socket to a remote address, allowing the `send` and
+ /// `recv` syscalls to be used to send data and also applying filters to only
+ /// receive data from the specified address.
+ ///
+ /// If `addr` yields multiple addresses, `connect` will be attempted with
+ /// each of the addresses until the underlying OS function returns no
+ /// error. Note that a successful `connect` call does not guarantee that a
+ /// remote server is listening on the port; such an error would typically
+ /// only be detected after the first send. If the OS returns an error for
+ /// each of the specified addresses, the error returned from the last
+ /// connection attempt (the last address) is returned.
+ ///
+ /// # Examples
+ ///
+ /// Creates a UDP socket bound to `127.0.0.1:3400` and connects the socket to
+ /// `127.0.0.1:8080`:
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:3400").expect("couldn't bind to address");
+ /// socket.connect("127.0.0.1:8080").expect("connect function failed");
+ /// ```
+ ///
+ /// Unlike in the TCP case, passing an array of addresses to the `connect`
+ /// function of a UDP socket is not a useful thing to do: The OS will be
+ /// unable to determine whether something is listening on the remote
+ /// address without the application sending data.
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> {
+ super::each_addr(addr, |addr| self.0.connect(addr))
+ }
+
+ /// Sends data on the socket to the remote address to which it is connected.
+ ///
+ /// [`UdpSocket::connect`] will connect this socket to a remote address. This
+ /// method will fail if the socket is not connected.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.connect("127.0.0.1:8080").expect("connect function failed");
+ /// socket.send(&[0, 1, 2]).expect("couldn't send message");
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.0.send(buf)
+ }
+
+ /// Receives a single datagram message on the socket from the remote address to
+ /// which it is connected. On success, returns the number of bytes read.
+ ///
+ /// The function must be called with a valid byte array `buf` of sufficient size to
+ /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// [`UdpSocket::connect`] will connect this socket to a remote address. This
+ /// method will fail if the socket is not connected.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.connect("127.0.0.1:8080").expect("connect function failed");
+ /// let mut buf = [0; 10];
+ /// match socket.recv(&mut buf) {
+ /// Ok(received) => println!("received {received} bytes {:?}", &buf[..received]),
+ /// Err(e) => println!("recv function failed: {e:?}"),
+ /// }
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.recv(buf)
+ }
+
+ /// Receives a single datagram message on the socket from the remote address to
+ /// which it is connected, without removing the message from the input queue.
+ /// On success, returns the number of bytes peeked.
+ ///
+ /// The function must be called with a valid byte array `buf` of sufficient size to
+ /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+ ///
+ /// Do not use this function to implement busy waiting; instead use `libc::poll` to
+ /// synchronize IO events on one or more sockets.
+ ///
+ /// [`UdpSocket::connect`] will connect this socket to a remote address. This
+ /// method will fail if the socket is not connected.
+ ///
+ /// # Errors
+ ///
+ /// This method will fail if the socket is not connected. The `connect` method
+ /// will connect this socket to a remote address.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.connect("127.0.0.1:8080").expect("connect function failed");
+ /// let mut buf = [0; 10];
+ /// match socket.peek(&mut buf) {
+ /// Ok(received) => println!("received {received} bytes"),
+ /// Err(e) => println!("peek function failed: {e:?}"),
+ /// }
+ /// ```
+ #[stable(feature = "peek", since = "1.18.0")]
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.peek(buf)
+ }
+
+ /// Moves this UDP socket into or out of nonblocking mode.
+ ///
+ /// This will result in `recv`, `recv_from`, `send`, and `send_to`
+ /// operations becoming nonblocking, i.e., immediately returning from their
+ /// calls. If the IO operation is successful, `Ok` is returned and no
+ /// further action is required. If the IO operation could not be completed
+ /// and needs to be retried, an error with kind
+ /// [`io::ErrorKind::WouldBlock`] is returned.
+ ///
+ /// On Unix platforms, calling this method corresponds to calling `fcntl` with
+ /// the `O_NONBLOCK` flag (or the equivalent `FIONBIO` ioctl). On Windows,
+ /// calling this method corresponds to calling `ioctlsocket` with `FIONBIO`.
+ ///
+ /// # Examples
+ ///
+ /// Creates a UDP socket bound to `127.0.0.1:7878` and reads bytes in
+ /// nonblocking mode:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:7878").unwrap();
+ /// socket.set_nonblocking(true).unwrap();
+ ///
+ /// # fn wait_for_fd() { unimplemented!() }
+ /// let mut buf = [0; 10];
+ /// let (num_bytes_read, _) = loop {
+ /// match socket.recv_from(&mut buf) {
+ /// Ok(n) => break n,
+ /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ /// // wait until network socket is ready, typically implemented
+ /// // via platform-specific APIs such as epoll or IOCP
+ /// wait_for_fd();
+ /// }
+ /// Err(e) => panic!("encountered IO error: {e}"),
+ /// }
+ /// };
+ /// println!("bytes: {:?}", &buf[..num_bytes_read]);
+ /// ```
+ #[stable(feature = "net2_mutators", since = "1.9.0")]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.0.set_nonblocking(nonblocking)
+ }
+}
+
+// In addition to the `impl`s here, `UdpSocket` also has `impl`s for
+// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
+// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
+// `AsSocket`/`From<OwnedSocket>`/`Into<OwnedSocket>` and
+// `AsRawSocket`/`IntoRawSocket`/`FromRawSocket` on Windows.
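+//
+// For example, on Unix the raw file descriptor can be borrowed as follows
+// (illustrative sketch only; the port is arbitrary):
+//
+//     use std::net::UdpSocket;
+//     use std::os::unix::io::AsRawFd;
+//
+//     let socket = UdpSocket::bind("127.0.0.1:34254").unwrap();
+//     let fd = socket.as_raw_fd();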
+
+impl AsInner<net_imp::UdpSocket> for UdpSocket {
+ fn as_inner(&self) -> &net_imp::UdpSocket {
+ &self.0
+ }
+}
+
+impl FromInner<net_imp::UdpSocket> for UdpSocket {
+ fn from_inner(inner: net_imp::UdpSocket) -> UdpSocket {
+ UdpSocket(inner)
+ }
+}
+
+impl IntoInner<net_imp::UdpSocket> for UdpSocket {
+ fn into_inner(self) -> net_imp::UdpSocket {
+ self.0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
diff --git a/library/std/src/net/udp/tests.rs b/library/std/src/net/udp/tests.rs
new file mode 100644
index 000000000..f82904ffb
--- /dev/null
+++ b/library/std/src/net/udp/tests.rs
@@ -0,0 +1,365 @@
+use crate::io::ErrorKind;
+use crate::net::test::{next_test_ip4, next_test_ip6};
+use crate::net::*;
+use crate::sync::mpsc::channel;
+use crate::thread;
+use crate::time::{Duration, Instant};
+
+fn each_ip(f: &mut dyn FnMut(SocketAddr, SocketAddr)) {
+ f(next_test_ip4(), next_test_ip4());
+ f(next_test_ip6(), next_test_ip6());
+}
+
+macro_rules! t {
+ ($e:expr) => {
+ match $e {
+ Ok(t) => t,
+ Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
+ }
+ };
+}
+
+#[test]
+fn bind_error() {
+ match UdpSocket::bind("1.1.1.1:9999") {
+ Ok(..) => panic!(),
+ Err(e) => assert_eq!(e.kind(), ErrorKind::AddrNotAvailable),
+ }
+}
+
+#[test]
+fn socket_smoke_test_ip4() {
+ each_ip(&mut |server_ip, client_ip| {
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+
+ let _t = thread::spawn(move || {
+ let client = t!(UdpSocket::bind(&client_ip));
+ rx1.recv().unwrap();
+ t!(client.send_to(&[99], &server_ip));
+ tx2.send(()).unwrap();
+ });
+
+ let server = t!(UdpSocket::bind(&server_ip));
+ tx1.send(()).unwrap();
+ let mut buf = [0];
+ let (nread, src) = t!(server.recv_from(&mut buf));
+ assert_eq!(nread, 1);
+ assert_eq!(buf[0], 99);
+ assert_eq!(src, client_ip);
+ rx2.recv().unwrap();
+ })
+}
+
+#[test]
+fn socket_name() {
+ each_ip(&mut |addr, _| {
+ let server = t!(UdpSocket::bind(&addr));
+ assert_eq!(addr, t!(server.local_addr()));
+ })
+}
+
+#[test]
+fn socket_peer() {
+ each_ip(&mut |addr1, addr2| {
+ let server = t!(UdpSocket::bind(&addr1));
+ assert_eq!(server.peer_addr().unwrap_err().kind(), ErrorKind::NotConnected);
+ t!(server.connect(&addr2));
+ assert_eq!(addr2, t!(server.peer_addr()));
+ })
+}
+
+#[test]
+fn udp_clone_smoke() {
+ each_ip(&mut |addr1, addr2| {
+ let sock1 = t!(UdpSocket::bind(&addr1));
+ let sock2 = t!(UdpSocket::bind(&addr2));
+
+ let _t = thread::spawn(move || {
+ let mut buf = [0, 0];
+ assert_eq!(sock2.recv_from(&mut buf).unwrap(), (1, addr1));
+ assert_eq!(buf[0], 1);
+ t!(sock2.send_to(&[2], &addr1));
+ });
+
+ let sock3 = t!(sock1.try_clone());
+
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ let _t = thread::spawn(move || {
+ rx1.recv().unwrap();
+ t!(sock3.send_to(&[1], &addr2));
+ tx2.send(()).unwrap();
+ });
+ tx1.send(()).unwrap();
+ let mut buf = [0, 0];
+ assert_eq!(sock1.recv_from(&mut buf).unwrap(), (1, addr2));
+ rx2.recv().unwrap();
+ })
+}
+
+#[test]
+fn udp_clone_two_read() {
+ each_ip(&mut |addr1, addr2| {
+ let sock1 = t!(UdpSocket::bind(&addr1));
+ let sock2 = t!(UdpSocket::bind(&addr2));
+ let (tx1, rx) = channel();
+ let tx2 = tx1.clone();
+
+ let _t = thread::spawn(move || {
+ t!(sock2.send_to(&[1], &addr1));
+ rx.recv().unwrap();
+ t!(sock2.send_to(&[2], &addr1));
+ rx.recv().unwrap();
+ });
+
+ let sock3 = t!(sock1.try_clone());
+
+ let (done, rx) = channel();
+ let _t = thread::spawn(move || {
+ let mut buf = [0, 0];
+ t!(sock3.recv_from(&mut buf));
+ tx2.send(()).unwrap();
+ done.send(()).unwrap();
+ });
+ let mut buf = [0, 0];
+ t!(sock1.recv_from(&mut buf));
+ tx1.send(()).unwrap();
+
+ rx.recv().unwrap();
+ })
+}
+
+#[test]
+fn udp_clone_two_write() {
+ each_ip(&mut |addr1, addr2| {
+ let sock1 = t!(UdpSocket::bind(&addr1));
+ let sock2 = t!(UdpSocket::bind(&addr2));
+
+ let (tx, rx) = channel();
+ let (serv_tx, serv_rx) = channel();
+
+ let _t = thread::spawn(move || {
+ let mut buf = [0, 1];
+ rx.recv().unwrap();
+ t!(sock2.recv_from(&mut buf));
+ serv_tx.send(()).unwrap();
+ });
+
+ let sock3 = t!(sock1.try_clone());
+
+ let (done, rx) = channel();
+ let tx2 = tx.clone();
+ let _t = thread::spawn(move || {
+ if sock3.send_to(&[1], &addr2).is_ok() {
+ let _ = tx2.send(());
+ }
+ done.send(()).unwrap();
+ });
+ if sock1.send_to(&[2], &addr2).is_ok() {
+ let _ = tx.send(());
+ }
+ drop(tx);
+
+ rx.recv().unwrap();
+ serv_rx.recv().unwrap();
+ })
+}
+
+#[test]
+fn debug() {
+ let name = if cfg!(windows) { "socket" } else { "fd" };
+ let socket_addr = next_test_ip4();
+
+ let udpsock = t!(UdpSocket::bind(&socket_addr));
+ let udpsock_inner = udpsock.0.socket().as_raw();
+ let compare = format!("UdpSocket {{ addr: {socket_addr:?}, {name}: {udpsock_inner:?} }}");
+ assert_eq!(format!("{udpsock:?}"), compare);
+}
+
+// FIXME: re-enable the openbsd/netbsd tests once their socket timeout code
+// no longer has rounding errors.
+// VxWorks ignores SO_SNDTIMEO.
+#[cfg_attr(any(target_os = "netbsd", target_os = "openbsd", target_os = "vxworks"), ignore)]
+#[test]
+fn timeouts() {
+ let addr = next_test_ip4();
+
+ let stream = t!(UdpSocket::bind(&addr));
+ let dur = Duration::new(15410, 0);
+
+ assert_eq!(None, t!(stream.read_timeout()));
+
+ t!(stream.set_read_timeout(Some(dur)));
+ assert_eq!(Some(dur), t!(stream.read_timeout()));
+
+ assert_eq!(None, t!(stream.write_timeout()));
+
+ t!(stream.set_write_timeout(Some(dur)));
+ assert_eq!(Some(dur), t!(stream.write_timeout()));
+
+ t!(stream.set_read_timeout(None));
+ assert_eq!(None, t!(stream.read_timeout()));
+
+ t!(stream.set_write_timeout(None));
+ assert_eq!(None, t!(stream.write_timeout()));
+}
+
+#[test]
+fn test_read_timeout() {
+ let addr = next_test_ip4();
+
+ let stream = t!(UdpSocket::bind(&addr));
+ t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+ let mut buf = [0; 10];
+
+ let start = Instant::now();
+ loop {
+ let kind = stream.recv_from(&mut buf).err().expect("expected error").kind();
+ if kind != ErrorKind::Interrupted {
+ assert!(
+ kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut,
+ "unexpected_error: {:?}",
+ kind
+ );
+ break;
+ }
+ }
+ assert!(start.elapsed() > Duration::from_millis(400));
+}
+
+#[test]
+fn test_read_with_timeout() {
+ let addr = next_test_ip4();
+
+ let stream = t!(UdpSocket::bind(&addr));
+ t!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+ t!(stream.send_to(b"hello world", &addr));
+
+ let mut buf = [0; 11];
+ t!(stream.recv_from(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+
+ let start = Instant::now();
+ loop {
+ let kind = stream.recv_from(&mut buf).err().expect("expected error").kind();
+ if kind != ErrorKind::Interrupted {
+ assert!(
+ kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut,
+ "unexpected_error: {:?}",
+ kind
+ );
+ break;
+ }
+ }
+ assert!(start.elapsed() > Duration::from_millis(400));
+}
+
+// Ensure the `set_read_timeout` and `set_write_timeout` calls return errors
+// when passed zero Durations
+#[test]
+fn test_timeout_zero_duration() {
+ let addr = next_test_ip4();
+
+ let socket = t!(UdpSocket::bind(&addr));
+
+ let result = socket.set_write_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+
+ let result = socket.set_read_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+}
+
+#[test]
+fn connect_send_recv() {
+ let addr = next_test_ip4();
+
+ let socket = t!(UdpSocket::bind(&addr));
+ t!(socket.connect(addr));
+
+ t!(socket.send(b"hello world"));
+
+ let mut buf = [0; 11];
+ t!(socket.recv(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+}
+
+#[test]
+fn connect_send_peek_recv() {
+ each_ip(&mut |addr, _| {
+ let socket = t!(UdpSocket::bind(&addr));
+ t!(socket.connect(addr));
+
+ t!(socket.send(b"hello world"));
+
+ for _ in 1..3 {
+ let mut buf = [0; 11];
+ let size = t!(socket.peek(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ }
+
+ let mut buf = [0; 11];
+ let size = t!(socket.recv(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ })
+}
+
+#[test]
+fn peek_from() {
+ each_ip(&mut |addr, _| {
+ let socket = t!(UdpSocket::bind(&addr));
+ t!(socket.send_to(b"hello world", &addr));
+
+ for _ in 1..3 {
+ let mut buf = [0; 11];
+ let (size, _) = t!(socket.peek_from(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ }
+
+ let mut buf = [0; 11];
+ let (size, _) = t!(socket.recv_from(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ })
+}
+
+#[test]
+fn ttl() {
+ let ttl = 100;
+
+ let addr = next_test_ip4();
+
+ let stream = t!(UdpSocket::bind(&addr));
+
+ t!(stream.set_ttl(ttl));
+ assert_eq!(ttl, t!(stream.ttl()));
+}
+
+#[test]
+fn set_nonblocking() {
+ each_ip(&mut |addr, _| {
+ let socket = t!(UdpSocket::bind(&addr));
+
+ t!(socket.set_nonblocking(true));
+ t!(socket.set_nonblocking(false));
+
+ t!(socket.connect(addr));
+
+ t!(socket.set_nonblocking(false));
+ t!(socket.set_nonblocking(true));
+
+ let mut buf = [0];
+ match socket.recv(&mut buf) {
+ Ok(_) => panic!("expected error"),
+ Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
+ Err(e) => panic!("unexpected error {e}"),
+ }
+ })
+}
diff --git a/library/std/src/num.rs b/library/std/src/num.rs
new file mode 100644
index 000000000..46064bd28
--- /dev/null
+++ b/library/std/src/num.rs
@@ -0,0 +1,53 @@
+//! Additional functionality for numerics.
+//!
+//! This module provides some extra types that are useful when doing numerical
+//! work. See the individual documentation for each piece for more information.
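+//!
+//! # Examples
+//!
+//! An illustrative sketch of wrapping arithmetic with [`Wrapping`]:
+//!
+//! ```
+//! use std::num::Wrapping;
+//!
+//! // `u8` arithmetic that wraps around on overflow instead of panicking.
+//! let max = Wrapping(u8::MAX);
+//! assert_eq!(max + Wrapping(1), Wrapping(0));
+//! ```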
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![allow(missing_docs)]
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(test)]
+mod benches;
+
+#[unstable(feature = "saturating_int_impl", issue = "87920")]
+pub use core::num::Saturating;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::num::Wrapping;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::num::{FpCategory, ParseFloatError, ParseIntError, TryFromIntError};
+
+#[stable(feature = "signed_nonzero", since = "1.34.0")]
+pub use core::num::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize};
+#[stable(feature = "nonzero", since = "1.28.0")]
+pub use core::num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize};
+
+#[stable(feature = "int_error_matching", since = "1.55.0")]
+pub use core::num::IntErrorKind;
+
+#[cfg(test)]
+use crate::fmt;
+#[cfg(test)]
+use crate::ops::{Add, Div, Mul, Rem, Sub};
+
+/// Helper function for testing numeric operations
+#[cfg(test)]
+pub fn test_num<T>(ten: T, two: T)
+where
+ T: PartialEq
+ + Add<Output = T>
+ + Sub<Output = T>
+ + Mul<Output = T>
+ + Div<Output = T>
+ + Rem<Output = T>
+ + fmt::Debug
+ + Copy,
+{
+ assert_eq!(ten.add(two), ten + two);
+ assert_eq!(ten.sub(two), ten - two);
+ assert_eq!(ten.mul(two), ten * two);
+ assert_eq!(ten.div(two), ten / two);
+ assert_eq!(ten.rem(two), ten % two);
+}
diff --git a/library/std/src/num/benches.rs b/library/std/src/num/benches.rs
new file mode 100644
index 000000000..233ea0506
--- /dev/null
+++ b/library/std/src/num/benches.rs
@@ -0,0 +1,9 @@
+use test::Bencher;
+
+#[bench]
+fn bench_pow_function(b: &mut Bencher) {
+ let v = (0..1024).collect::<Vec<u32>>();
+ b.iter(|| {
+ v.iter().fold(0u32, |old, new| old.pow(*new as u32));
+ });
+}
diff --git a/library/std/src/num/tests.rs b/library/std/src/num/tests.rs
new file mode 100644
index 000000000..df0df3f23
--- /dev/null
+++ b/library/std/src/num/tests.rs
@@ -0,0 +1,230 @@
+use crate::ops::Mul;
+
+#[test]
+fn test_saturating_add_uint() {
+ assert_eq!(3_usize.saturating_add(5_usize), 8_usize);
+ assert_eq!(3_usize.saturating_add(usize::MAX - 1), usize::MAX);
+ assert_eq!(usize::MAX.saturating_add(usize::MAX), usize::MAX);
+ assert_eq!((usize::MAX - 2).saturating_add(1), usize::MAX - 1);
+}
+
+#[test]
+fn test_saturating_sub_uint() {
+ assert_eq!(5_usize.saturating_sub(3_usize), 2_usize);
+ assert_eq!(3_usize.saturating_sub(5_usize), 0_usize);
+ assert_eq!(0_usize.saturating_sub(1_usize), 0_usize);
+ assert_eq!((usize::MAX - 1).saturating_sub(usize::MAX), 0);
+}
+
+#[test]
+fn test_saturating_add_int() {
+ assert_eq!(3i32.saturating_add(5), 8);
+ assert_eq!(3isize.saturating_add(isize::MAX - 1), isize::MAX);
+ assert_eq!(isize::MAX.saturating_add(isize::MAX), isize::MAX);
+ assert_eq!((isize::MAX - 2).saturating_add(1), isize::MAX - 1);
+ assert_eq!(3i32.saturating_add(-5), -2);
+ assert_eq!(isize::MIN.saturating_add(-1), isize::MIN);
+ assert_eq!((-2isize).saturating_add(-isize::MAX), isize::MIN);
+}
+
+#[test]
+fn test_saturating_sub_int() {
+ assert_eq!(3i32.saturating_sub(5), -2);
+ assert_eq!(isize::MIN.saturating_sub(1), isize::MIN);
+ assert_eq!((-2isize).saturating_sub(isize::MAX), isize::MIN);
+ assert_eq!(3i32.saturating_sub(-5), 8);
+ assert_eq!(3isize.saturating_sub(-(isize::MAX - 1)), isize::MAX);
+ assert_eq!(isize::MAX.saturating_sub(-isize::MAX), isize::MAX);
+ assert_eq!((isize::MAX - 2).saturating_sub(-1), isize::MAX - 1);
+}
+
+#[test]
+fn test_checked_add() {
+ let five_less = usize::MAX - 5;
+ assert_eq!(five_less.checked_add(0), Some(usize::MAX - 5));
+ assert_eq!(five_less.checked_add(1), Some(usize::MAX - 4));
+ assert_eq!(five_less.checked_add(2), Some(usize::MAX - 3));
+ assert_eq!(five_less.checked_add(3), Some(usize::MAX - 2));
+ assert_eq!(five_less.checked_add(4), Some(usize::MAX - 1));
+ assert_eq!(five_less.checked_add(5), Some(usize::MAX));
+ assert_eq!(five_less.checked_add(6), None);
+ assert_eq!(five_less.checked_add(7), None);
+}
+
+#[test]
+fn test_checked_sub() {
+ assert_eq!(5_usize.checked_sub(0), Some(5));
+ assert_eq!(5_usize.checked_sub(1), Some(4));
+ assert_eq!(5_usize.checked_sub(2), Some(3));
+ assert_eq!(5_usize.checked_sub(3), Some(2));
+ assert_eq!(5_usize.checked_sub(4), Some(1));
+ assert_eq!(5_usize.checked_sub(5), Some(0));
+ assert_eq!(5_usize.checked_sub(6), None);
+ assert_eq!(5_usize.checked_sub(7), None);
+}
+
+#[test]
+fn test_checked_mul() {
+ let third = usize::MAX / 3;
+ assert_eq!(third.checked_mul(0), Some(0));
+ assert_eq!(third.checked_mul(1), Some(third));
+ assert_eq!(third.checked_mul(2), Some(third * 2));
+ assert_eq!(third.checked_mul(3), Some(third * 3));
+ assert_eq!(third.checked_mul(4), None);
+}
+
+macro_rules! test_is_power_of_two {
+ ($test_name:ident, $T:ident) => {
+ #[test]
+ fn $test_name() {
+ assert_eq!((0 as $T).is_power_of_two(), false);
+ assert_eq!((1 as $T).is_power_of_two(), true);
+ assert_eq!((2 as $T).is_power_of_two(), true);
+ assert_eq!((3 as $T).is_power_of_two(), false);
+ assert_eq!((4 as $T).is_power_of_two(), true);
+ assert_eq!((5 as $T).is_power_of_two(), false);
+ assert_eq!(($T::MAX / 2 + 1).is_power_of_two(), true);
+ }
+ };
+}
+
+test_is_power_of_two! { test_is_power_of_two_u8, u8 }
+test_is_power_of_two! { test_is_power_of_two_u16, u16 }
+test_is_power_of_two! { test_is_power_of_two_u32, u32 }
+test_is_power_of_two! { test_is_power_of_two_u64, u64 }
+test_is_power_of_two! { test_is_power_of_two_uint, usize }
+
+macro_rules! test_next_power_of_two {
+ ($test_name:ident, $T:ident) => {
+ #[test]
+ fn $test_name() {
+ assert_eq!((0 as $T).next_power_of_two(), 1);
+ let mut next_power = 1;
+ for i in 1 as $T..40 {
+ assert_eq!(i.next_power_of_two(), next_power);
+ if i == next_power {
+ next_power *= 2
+ }
+ }
+ }
+ };
+}
+
+test_next_power_of_two! { test_next_power_of_two_u8, u8 }
+test_next_power_of_two! { test_next_power_of_two_u16, u16 }
+test_next_power_of_two! { test_next_power_of_two_u32, u32 }
+test_next_power_of_two! { test_next_power_of_two_u64, u64 }
+test_next_power_of_two! { test_next_power_of_two_uint, usize }
+
+macro_rules! test_checked_next_power_of_two {
+ ($test_name:ident, $T:ident) => {
+ #[test]
+ fn $test_name() {
+ assert_eq!((0 as $T).checked_next_power_of_two(), Some(1));
+ let smax = $T::MAX >> 1;
+ assert_eq!(smax.checked_next_power_of_two(), Some(smax + 1));
+ assert_eq!((smax + 1).checked_next_power_of_two(), Some(smax + 1));
+ assert_eq!((smax + 2).checked_next_power_of_two(), None);
+ assert_eq!(($T::MAX - 1).checked_next_power_of_two(), None);
+ assert_eq!($T::MAX.checked_next_power_of_two(), None);
+ let mut next_power = 1;
+ for i in 1 as $T..40 {
+ assert_eq!(i.checked_next_power_of_two(), Some(next_power));
+ if i == next_power {
+ next_power *= 2
+ }
+ }
+ }
+ };
+}
+
+test_checked_next_power_of_two! { test_checked_next_power_of_two_u8, u8 }
+test_checked_next_power_of_two! { test_checked_next_power_of_two_u16, u16 }
+test_checked_next_power_of_two! { test_checked_next_power_of_two_u32, u32 }
+test_checked_next_power_of_two! { test_checked_next_power_of_two_u64, u64 }
+test_checked_next_power_of_two! { test_checked_next_power_of_two_uint, usize }
+
+#[test]
+fn test_pow() {
+ fn naive_pow<T: Mul<Output = T> + Copy>(one: T, base: T, exp: usize) -> T {
+ (0..exp).fold(one, |acc, _| acc * base)
+ }
+ macro_rules! assert_pow {
+ (($num:expr, $exp:expr) => $expected:expr) => {{
+ let result = $num.pow($exp);
+ assert_eq!(result, $expected);
+ assert_eq!(result, naive_pow(1, $num, $exp));
+ }};
+ }
+ assert_pow!((3u32, 0 ) => 1);
+ assert_pow!((5u32, 1 ) => 5);
+ assert_pow!((-4i32, 2 ) => 16);
+ assert_pow!((8u32, 3 ) => 512);
+ assert_pow!((2u64, 50) => 1125899906842624);
+}
+
+#[test]
+fn test_uint_to_str_overflow() {
+ let mut u8_val: u8 = 255;
+ assert_eq!(u8_val.to_string(), "255");
+
+ u8_val = u8_val.wrapping_add(1);
+ assert_eq!(u8_val.to_string(), "0");
+
+ let mut u16_val: u16 = 65_535;
+ assert_eq!(u16_val.to_string(), "65535");
+
+ u16_val = u16_val.wrapping_add(1);
+ assert_eq!(u16_val.to_string(), "0");
+
+ let mut u32_val: u32 = 4_294_967_295;
+ assert_eq!(u32_val.to_string(), "4294967295");
+
+ u32_val = u32_val.wrapping_add(1);
+ assert_eq!(u32_val.to_string(), "0");
+
+ let mut u64_val: u64 = 18_446_744_073_709_551_615;
+ assert_eq!(u64_val.to_string(), "18446744073709551615");
+
+ u64_val = u64_val.wrapping_add(1);
+ assert_eq!(u64_val.to_string(), "0");
+}
+
+fn from_str<T: crate::str::FromStr>(t: &str) -> Option<T> {
+ crate::str::FromStr::from_str(t).ok()
+}
+
+#[test]
+fn test_uint_from_str_overflow() {
+ let mut u8_val: u8 = 255;
+ assert_eq!(from_str::<u8>("255"), Some(u8_val));
+ assert_eq!(from_str::<u8>("256"), None);
+
+ u8_val = u8_val.wrapping_add(1);
+ assert_eq!(from_str::<u8>("0"), Some(u8_val));
+ assert_eq!(from_str::<u8>("-1"), None);
+
+ let mut u16_val: u16 = 65_535;
+ assert_eq!(from_str::<u16>("65535"), Some(u16_val));
+ assert_eq!(from_str::<u16>("65536"), None);
+
+ u16_val = u16_val.wrapping_add(1);
+ assert_eq!(from_str::<u16>("0"), Some(u16_val));
+ assert_eq!(from_str::<u16>("-1"), None);
+
+ let mut u32_val: u32 = 4_294_967_295;
+ assert_eq!(from_str::<u32>("4294967295"), Some(u32_val));
+ assert_eq!(from_str::<u32>("4294967296"), None);
+
+ u32_val = u32_val.wrapping_add(1);
+ assert_eq!(from_str::<u32>("0"), Some(u32_val));
+ assert_eq!(from_str::<u32>("-1"), None);
+
+ let mut u64_val: u64 = 18_446_744_073_709_551_615;
+ assert_eq!(from_str::<u64>("18446744073709551615"), Some(u64_val));
+ assert_eq!(from_str::<u64>("18446744073709551616"), None);
+
+ u64_val = u64_val.wrapping_add(1);
+ assert_eq!(from_str::<u64>("0"), Some(u64_val));
+ assert_eq!(from_str::<u64>("-1"), None);
+}
diff --git a/library/std/src/os/android/fs.rs b/library/std/src/os/android/fs.rs
new file mode 100644
index 000000000..1beb3cf6e
--- /dev/null
+++ b/library/std/src/os/android/fs.rs
@@ -0,0 +1,117 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::android::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
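+///
+/// # Examples
+///
+/// A minimal sketch (the path is a placeholder; the example only applies on
+/// Android targets):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::android::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+///     let meta = fs::metadata("some_file")?;
+///     println!("inode: {}, mode: {:o}", meta.st_ino(), meta.st_mode());
+///     Ok(())
+/// }
+/// ```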
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/android/mod.rs b/library/std/src/os/android/mod.rs
new file mode 100644
index 000000000..dbb0127f3
--- /dev/null
+++ b/library/std/src/os/android/mod.rs
@@ -0,0 +1,6 @@
+//! Android-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/android/raw.rs b/library/std/src/os/android/raw.rs
new file mode 100644
index 000000000..a255d0320
--- /dev/null
+++ b/library/std/src/os/android/raw.rs
@@ -0,0 +1,219 @@
+//! Android-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_long;
+
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use self::arch::{blkcnt_t, blksize_t, dev_t, ino_t, mode_t, nlink_t, off_t, stat, time_t};
+
+#[cfg(any(target_arch = "arm", target_arch = "x86"))]
+mod arch {
+ use crate::os::raw::{c_longlong, c_uchar, c_uint, c_ulong, c_ulonglong};
+ use crate::os::unix::raw::{gid_t, uid_t};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type dev_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type mode_t = u32;
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: c_ulonglong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad0: [c_uchar; 4],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: c_ulonglong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad3: [c_uchar; 4],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: c_longlong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: c_ulonglong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: c_ulonglong,
+ }
+}
+
+#[cfg(target_arch = "aarch64")]
+mod arch {
+ use crate::os::raw::{c_uchar, c_ulong};
+ use crate::os::unix::raw::{gid_t, uid_t};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type dev_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type mode_t = u32;
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad0: [c_uchar; 4],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad3: [c_uchar; 4],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+mod arch {
+ use crate::os::raw::{c_long, c_uint, c_ulong};
+ use crate::os::unix::raw::{gid_t, uid_t};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type dev_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type mode_t = u32;
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_ulong,
+ __unused: [c_long; 3],
+ }
+}
diff --git a/library/std/src/os/dragonfly/fs.rs b/library/std/src/os/dragonfly/fs.rs
new file mode 100644
index 000000000..1424fc4c6
--- /dev/null
+++ b/library/std/src/os/dragonfly/fs.rs
@@ -0,0 +1,132 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::dragonfly::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
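+///
+/// # Examples
+///
+/// A minimal sketch (the path is a placeholder; the example only applies on
+/// DragonFly BSD targets):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::dragonfly::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+///     let meta = fs::metadata("some_file")?;
+///     println!("flags: {:#x}, generation: {}", meta.st_flags(), meta.st_gen());
+///     Ok(())
+/// }
+/// ```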
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_flags(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gen(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_lspare(&self) -> u32;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_gen(&self) -> u32 {
+ self.as_inner().as_inner().st_gen as u32
+ }
+ fn st_flags(&self) -> u32 {
+ self.as_inner().as_inner().st_flags as u32
+ }
+ fn st_lspare(&self) -> u32 {
+ self.as_inner().as_inner().st_lspare as u32
+ }
+}
diff --git a/library/std/src/os/dragonfly/mod.rs b/library/std/src/os/dragonfly/mod.rs
new file mode 100644
index 000000000..350b5fca7
--- /dev/null
+++ b/library/std/src/os/dragonfly/mod.rs
@@ -0,0 +1,6 @@
+//! Dragonfly-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/dragonfly/raw.rs b/library/std/src/os/dragonfly/raw.rs
new file mode 100644
index 000000000..071bf6199
--- /dev/null
+++ b/library/std/src/os/dragonfly/raw.rs
@@ -0,0 +1,83 @@
+//! Dragonfly-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+}
diff --git a/library/std/src/os/emscripten/fs.rs b/library/std/src/os/emscripten/fs.rs
new file mode 100644
index 000000000..d5ec8e03c
--- /dev/null
+++ b/library/std/src/os/emscripten/fs.rs
@@ -0,0 +1,117 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::emscripten::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
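+///
+/// # Examples
+///
+/// A minimal sketch (the path is a placeholder; the example only applies on
+/// Emscripten targets):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::emscripten::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+///     let meta = fs::metadata("some_file")?;
+///     println!("size: {} bytes, mtime: {}", meta.st_size(), meta.st_mtime());
+///     Ok(())
+/// }
+/// ```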
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat64 as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/emscripten/mod.rs b/library/std/src/os/emscripten/mod.rs
new file mode 100644
index 000000000..d35307162
--- /dev/null
+++ b/library/std/src/os/emscripten/mod.rs
@@ -0,0 +1,6 @@
+//! Emscripten-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/emscripten/raw.rs b/library/std/src/os/emscripten/raw.rs
new file mode 100644
index 000000000..d23011c73
--- /dev/null
+++ b/library/std/src/os/emscripten/raw.rs
@@ -0,0 +1,80 @@
+//! Emscripten-specific raw type definitions
+//!
+//! These are essentially the same as the Linux definitions, except that the
+//! musl-specific `stat64` structure from the `libc` crate is used.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::{c_long, c_short, c_uint, c_ulong};
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_ulong;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = c_long;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: c_short,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+}
diff --git a/library/std/src/os/espidf/fs.rs b/library/std/src/os/espidf/fs.rs
new file mode 100644
index 000000000..88701dafe
--- /dev/null
+++ b/library/std/src/os/espidf/fs.rs
@@ -0,0 +1,117 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::espidf::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
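+///
+/// # Example
+///
+/// A minimal sketch of reading the raw fields (only meaningful when compiling
+/// for the ESP-IDF target; `foo.txt` is a placeholder path):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::espidf::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+///     let meta = fs::metadata("foo.txt")?;
+///     // `st_size` and `st_blocks` come straight from the underlying stat call.
+///     println!("{} bytes in {} blocks", meta.st_size(), meta.st_blocks());
+///     Ok(())
+/// }
+/// ```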
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_spare4(&self) -> [u32; 2];
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ 0
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ 0
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ 0
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_spare4(&self) -> [u32; 2] {
+ let spare4 = self.as_inner().as_inner().st_spare4;
+ [spare4[0] as u32, spare4[1] as u32]
+ }
+}
diff --git a/library/std/src/os/espidf/mod.rs b/library/std/src/os/espidf/mod.rs
new file mode 100644
index 000000000..a9cef9709
--- /dev/null
+++ b/library/std/src/os/espidf/mod.rs
@@ -0,0 +1,6 @@
+//! Definitions for the ESP-IDF framework.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/espidf/raw.rs b/library/std/src/os/espidf/raw.rs
new file mode 100644
index 000000000..7df0e74b2
--- /dev/null
+++ b/library/std/src/os/espidf/raw.rs
@@ -0,0 +1,69 @@
+//! Raw type definitions for the ESP-IDF framework.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+
+use crate::os::raw::c_long;
+use crate::os::unix::raw::{gid_t, uid_t};
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = libc::pthread_t;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = libc::blkcnt_t;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = libc::blksize_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = libc::dev_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = libc::ino_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = libc::mode_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = libc::nlink_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = libc::off_t;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = libc::time_t;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_spare4: [c_long; 2usize],
+}
diff --git a/library/std/src/os/fd/mod.rs b/library/std/src/os/fd/mod.rs
new file mode 100644
index 000000000..a45694753
--- /dev/null
+++ b/library/std/src/os/fd/mod.rs
@@ -0,0 +1,16 @@
+//! Owned and borrowed Unix-like file descriptors.
+
+#![stable(feature = "io_safety", since = "1.63.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+// `RawFd`, `AsRawFd`, etc.
+pub mod raw;
+
+// `OwnedFd`, `AsFd`, etc.
+pub mod owned;
+
+// Implementations for `AsRawFd` etc. for network types.
+mod net;
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/os/fd/net.rs b/library/std/src/os/fd/net.rs
new file mode 100644
index 000000000..843f45f7f
--- /dev/null
+++ b/library/std/src/os/fd/net.rs
@@ -0,0 +1,46 @@
+use crate::os::fd::owned::OwnedFd;
+use crate::os::fd::raw::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use crate::sys_common::{self, AsInner, FromInner, IntoInner};
+use crate::{net, sys};
+
+macro_rules! impl_as_raw_fd {
+ ($($t:ident)*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl AsRawFd for net::$t {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.as_inner().socket().as_raw_fd()
+ }
+ }
+ )*};
+}
+impl_as_raw_fd! { TcpStream TcpListener UdpSocket }
+
+macro_rules! impl_from_raw_fd {
+ ($($t:ident)*) => {$(
+ #[stable(feature = "from_raw_os", since = "1.1.0")]
+ impl FromRawFd for net::$t {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> net::$t {
+ unsafe {
+ let socket = sys::net::Socket::from_inner(FromInner::from_inner(OwnedFd::from_raw_fd(fd)));
+ net::$t::from_inner(sys_common::net::$t::from_inner(socket))
+ }
+ }
+ }
+ )*};
+}
+impl_from_raw_fd! { TcpStream TcpListener UdpSocket }
+
+macro_rules! impl_into_raw_fd {
+ ($($t:ident)*) => {$(
+ #[stable(feature = "into_raw_os", since = "1.4.0")]
+ impl IntoRawFd for net::$t {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_socket().into_inner().into_inner().into_raw_fd()
+ }
+ }
+ )*};
+}
+impl_into_raw_fd! { TcpStream TcpListener UdpSocket }
diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs
new file mode 100644
index 000000000..a463bc41d
--- /dev/null
+++ b/library/std/src/os/fd/owned.rs
@@ -0,0 +1,388 @@
+//! Owned and borrowed Unix-like file descriptors.
+
+#![stable(feature = "io_safety", since = "1.63.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use super::raw::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+use crate::fmt;
+use crate::fs;
+use crate::marker::PhantomData;
+use crate::mem::forget;
+#[cfg(not(any(target_arch = "wasm32", target_env = "sgx")))]
+use crate::sys::cvt;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+/// A borrowed file descriptor.
+///
+/// This has a lifetime parameter to tie it to the lifetime of something that
+/// owns the file descriptor.
+///
+/// This uses `repr(transparent)` and has the representation of a host file
+/// descriptor, so it can be used in FFI in places where a file descriptor is
+/// passed as an argument, it is not captured or consumed, and it never has the
+/// value `-1`.
+///
+/// This type's `.to_owned()` implementation returns another `BorrowedFd`
+/// rather than an `OwnedFd`. It just makes a trivial copy of the raw file
+/// descriptor, which is then borrowed under the same lifetime.
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
+// 32-bit c_int. Below is -2, in two's complement, but that only works out
+// because c_int is 32 bits.
+#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
+#[rustc_nonnull_optimization_guaranteed]
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub struct BorrowedFd<'fd> {
+ fd: RawFd,
+ _phantom: PhantomData<&'fd OwnedFd>,
+}
+
+/// An owned file descriptor.
+///
+/// This closes the file descriptor on drop.
+///
+/// This uses `repr(transparent)` and has the representation of a host file
+/// descriptor, so it can be used in FFI in places where a file descriptor is
+/// passed as a consumed argument or returned as an owned value, and it never
+/// has the value `-1`.
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
+// 32-bit c_int. Below is -2, in two's complement, but that only works out
+// because c_int is 32 bits.
+#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
+#[rustc_nonnull_optimization_guaranteed]
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub struct OwnedFd {
+ fd: RawFd,
+}
+
+impl BorrowedFd<'_> {
+ /// Return a `BorrowedFd` holding the given raw file descriptor.
+ ///
+ /// # Safety
+ ///
+ /// The resource pointed to by `fd` must remain open for the duration of
+ /// the returned `BorrowedFd`, and it must not have the value `-1`.
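+    ///
+    /// # Example
+    ///
+    /// A minimal sketch that borrows the standard input descriptor for as long
+    /// as the `stdin` handle is kept alive:
+    ///
+    /// ```no_run
+    /// use std::io;
+    /// # #[cfg(unix)]
+    /// # use std::os::unix::io::{AsRawFd, BorrowedFd};
+    ///
+    /// let stdin = io::stdin();
+    /// # #[cfg(unix)] {
+    /// let raw = stdin.as_raw_fd();
+    /// // SAFETY: `stdin` keeps the descriptor open while `borrowed` is in use.
+    /// let borrowed: BorrowedFd<'_> = unsafe { BorrowedFd::borrow_raw(raw) };
+    /// assert_eq!(borrowed.as_raw_fd(), raw);
+    /// # }
+    /// ```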
+ #[inline]
+ #[rustc_const_stable(feature = "io_safety", since = "1.63.0")]
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub const unsafe fn borrow_raw(fd: RawFd) -> Self {
+ assert!(fd != u32::MAX as RawFd);
+ // SAFETY: we just asserted that the value is in the valid range and isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
+ unsafe { Self { fd, _phantom: PhantomData } }
+ }
+}
+
+impl OwnedFd {
+ /// Creates a new `OwnedFd` instance that shares the same underlying file
+ /// description as the existing `OwnedFd` instance.
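+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming a readable `foo.txt` (placeholder path):
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    /// # use std::io;
+    /// # #[cfg(unix)]
+    /// # use std::os::unix::io::OwnedFd;
+    ///
+    /// let file = File::open("foo.txt")?;
+    /// # #[cfg(unix)] {
+    /// let fd: OwnedFd = file.into();
+    /// // Both handles refer to the same open file description.
+    /// let cloned: OwnedFd = fd.try_clone()?;
+    /// # }
+    /// # Ok::<(), io::Error>(())
+    /// ```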
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub fn try_clone(&self) -> crate::io::Result<Self> {
+ self.as_fd().try_clone_to_owned()
+ }
+}
+
+impl BorrowedFd<'_> {
+ /// Creates a new `OwnedFd` instance that shares the same underlying file
+ /// description as the existing `BorrowedFd` instance.
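+    ///
+    /// # Example
+    ///
+    /// A minimal sketch that duplicates a borrowed descriptor into an owned one
+    /// (again assuming a readable `foo.txt` as a placeholder):
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    /// # use std::io;
+    /// # #[cfg(unix)]
+    /// # use std::os::unix::io::AsFd;
+    ///
+    /// let file = File::open("foo.txt")?;
+    /// # #[cfg(unix)] {
+    /// // The duplicate is an independent handle; closing it leaves `file` open.
+    /// let owned = file.as_fd().try_clone_to_owned()?;
+    /// # }
+    /// # Ok::<(), io::Error>(())
+    /// ```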
+ #[cfg(not(target_arch = "wasm32"))]
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub fn try_clone_to_owned(&self) -> crate::io::Result<OwnedFd> {
+ // We want to atomically duplicate this file descriptor and set the
+ // CLOEXEC flag, and currently that's done via F_DUPFD_CLOEXEC. This
+ // is a POSIX flag that was added to Linux in 2.6.24.
+ #[cfg(not(target_os = "espidf"))]
+ let cmd = libc::F_DUPFD_CLOEXEC;
+
+ // For ESP-IDF, F_DUPFD is used instead, because the CLOEXEC semantics
+ // will never be supported, as this is a bare metal framework with
+ // no capabilities for multi-process execution. While F_DUPFD is also
+ // not supported yet, it might be (currently it returns ENOSYS).
+ #[cfg(target_os = "espidf")]
+ let cmd = libc::F_DUPFD;
+
+ let fd = cvt(unsafe { libc::fcntl(self.as_raw_fd(), cmd, 0) })?;
+ Ok(unsafe { OwnedFd::from_raw_fd(fd) })
+ }
+
+ /// Creates a new `OwnedFd` instance that shares the same underlying file
+ /// description as the existing `BorrowedFd` instance.
+ #[cfg(target_arch = "wasm32")]
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub fn try_clone_to_owned(&self) -> crate::io::Result<OwnedFd> {
+ Err(crate::io::const_io_error!(
+ crate::io::ErrorKind::Unsupported,
+ "operation not supported on WASI yet",
+ ))
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsRawFd for BorrowedFd<'_> {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsRawFd for OwnedFd {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl IntoRawFd for OwnedFd {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ let fd = self.fd;
+ forget(self);
+ fd
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl FromRawFd for OwnedFd {
+ /// Constructs a new instance of `Self` from the given raw file descriptor.
+ ///
+ /// # Safety
+ ///
+ /// The resource pointed to by `fd` must be open and suitable for assuming
+ /// ownership. The resource must not require any cleanup other than `close`.
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> Self {
+ assert_ne!(fd, u32::MAX as RawFd);
+ // SAFETY: we just asserted that the value is in the valid range and isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
+ unsafe { Self { fd } }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl Drop for OwnedFd {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ // Note that errors are ignored when closing a file descriptor. The
+ // reason for this is that if an error occurs we don't actually know if
+ // the file descriptor was closed or not, and if we retried (for
+ // something like EINTR), we might close another valid file descriptor
+ // opened after we closed ours.
+ let _ = libc::close(self.fd);
+ }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Debug for BorrowedFd<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BorrowedFd").field("fd", &self.fd).finish()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Debug for OwnedFd {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OwnedFd").field("fd", &self.fd).finish()
+ }
+}
+
+/// A trait to borrow the file descriptor from an underlying object.
+///
+/// This is only available on unix platforms and must be imported in order to
+/// call the method. Windows platforms have a corresponding `AsHandle` and
+/// `AsSocket` set of traits.
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub trait AsFd {
+ /// Borrows the file descriptor.
+ ///
+ /// # Example
+ ///
+ /// ```rust,no_run
+ /// use std::fs::File;
+ /// # use std::io;
+ /// # #[cfg(target_os = "wasi")]
+ /// # use std::os::wasi::io::{AsFd, BorrowedFd};
+ /// # #[cfg(unix)]
+ /// # use std::os::unix::io::{AsFd, BorrowedFd};
+ ///
+ /// let mut f = File::open("foo.txt")?;
+ /// # #[cfg(any(unix, target_os = "wasi"))]
+ /// let borrowed_fd: BorrowedFd<'_> = f.as_fd();
+ /// # Ok::<(), io::Error>(())
+ /// ```
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ fn as_fd(&self) -> BorrowedFd<'_>;
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T: AsFd> AsFd for &T {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ T::as_fd(self)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T: AsFd> AsFd for &mut T {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ T::as_fd(self)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for BorrowedFd<'_> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ *self
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for OwnedFd {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ // Safety: `OwnedFd` and `BorrowedFd` have the same validity
+        // invariants, and the `BorrowedFd` is bounded by the lifetime
+ // of `&self`.
+ unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for fs::File {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<fs::File> for OwnedFd {
+ #[inline]
+ fn from(file: fs::File) -> OwnedFd {
+ file.into_inner().into_inner().into_inner()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for fs::File {
+ #[inline]
+ fn from(owned_fd: OwnedFd) -> Self {
+ Self::from_inner(FromInner::from_inner(FromInner::from_inner(owned_fd)))
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for crate::net::TcpStream {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().socket().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::net::TcpStream> for OwnedFd {
+ #[inline]
+ fn from(tcp_stream: crate::net::TcpStream) -> OwnedFd {
+ tcp_stream.into_inner().into_socket().into_inner().into_inner().into()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for crate::net::TcpStream {
+ #[inline]
+ fn from(owned_fd: OwnedFd) -> Self {
+ Self::from_inner(FromInner::from_inner(FromInner::from_inner(FromInner::from_inner(
+ owned_fd,
+ ))))
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for crate::net::TcpListener {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().socket().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::net::TcpListener> for OwnedFd {
+ #[inline]
+ fn from(tcp_listener: crate::net::TcpListener) -> OwnedFd {
+ tcp_listener.into_inner().into_socket().into_inner().into_inner().into()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for crate::net::TcpListener {
+ #[inline]
+ fn from(owned_fd: OwnedFd) -> Self {
+ Self::from_inner(FromInner::from_inner(FromInner::from_inner(FromInner::from_inner(
+ owned_fd,
+ ))))
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for crate::net::UdpSocket {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().socket().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::net::UdpSocket> for OwnedFd {
+ #[inline]
+ fn from(udp_socket: crate::net::UdpSocket) -> OwnedFd {
+ udp_socket.into_inner().into_socket().into_inner().into_inner().into()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for crate::net::UdpSocket {
+ #[inline]
+ fn from(owned_fd: OwnedFd) -> Self {
+ Self::from_inner(FromInner::from_inner(FromInner::from_inner(FromInner::from_inner(
+ owned_fd,
+ ))))
+ }
+}
+
+#[stable(feature = "asfd_ptrs", since = "1.64.0")]
+/// This impl allows implementing traits that require `AsFd` on Arc.
+/// ```
+/// # #[cfg(any(unix, target_os = "wasi"))] mod group_cfg {
+/// # #[cfg(target_os = "wasi")]
+/// # use std::os::wasi::io::AsFd;
+/// # #[cfg(unix)]
+/// # use std::os::unix::io::AsFd;
+/// use std::net::UdpSocket;
+/// use std::sync::Arc;
+///
+/// trait MyTrait: AsFd {}
+/// impl MyTrait for Arc<UdpSocket> {}
+/// impl MyTrait for Box<UdpSocket> {}
+/// # }
+/// ```
+impl<T: AsFd> AsFd for crate::sync::Arc<T> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ (**self).as_fd()
+ }
+}
+
+#[stable(feature = "asfd_ptrs", since = "1.64.0")]
+impl<T: AsFd> AsFd for Box<T> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ (**self).as_fd()
+ }
+}
diff --git a/library/std/src/os/fd/raw.rs b/library/std/src/os/fd/raw.rs
new file mode 100644
index 000000000..081915ed1
--- /dev/null
+++ b/library/std/src/os/fd/raw.rs
@@ -0,0 +1,259 @@
+//! Raw Unix-like file descriptors.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fs;
+use crate::io;
+use crate::os::raw;
+#[cfg(all(doc, not(target_arch = "wasm32")))]
+use crate::os::unix::io::AsFd;
+#[cfg(unix)]
+use crate::os::unix::io::OwnedFd;
+#[cfg(target_os = "wasi")]
+use crate::os::wasi::io::OwnedFd;
+use crate::sys_common::{AsInner, IntoInner};
+
+/// Raw file descriptors.
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type RawFd = raw::c_int;
+
+/// A trait to extract the raw file descriptor from an underlying object.
+///
+/// This is only available on unix and WASI platforms and must be imported in
+/// order to call the method. Windows platforms have a corresponding
+/// `AsRawHandle` and `AsRawSocket` set of traits.
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait AsRawFd {
+ /// Extracts the raw file descriptor.
+ ///
+ /// This function is typically used to **borrow** an owned file descriptor.
+ /// When used in this way, this method does **not** pass ownership of the
+ /// raw file descriptor to the caller, and the file descriptor is only
+ /// guaranteed to be valid while the original object has not yet been
+ /// destroyed.
+ ///
+ /// However, borrowing is not strictly required. See [`AsFd::as_fd`]
+ /// for an API which strictly borrows a file descriptor.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// # use std::io;
+ /// #[cfg(unix)]
+ /// use std::os::unix::io::{AsRawFd, RawFd};
+ /// #[cfg(target_os = "wasi")]
+ /// use std::os::wasi::io::{AsRawFd, RawFd};
+ ///
+ /// let mut f = File::open("foo.txt")?;
+ /// // Note that `raw_fd` is only valid as long as `f` exists.
+ /// #[cfg(any(unix, target_os = "wasi"))]
+ /// let raw_fd: RawFd = f.as_raw_fd();
+ /// # Ok::<(), io::Error>(())
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_raw_fd(&self) -> RawFd;
+}
+
+/// A trait to express the ability to construct an object from a raw file
+/// descriptor.
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+pub trait FromRawFd {
+ /// Constructs a new instance of `Self` from the given raw file
+ /// descriptor.
+ ///
+ /// This function is typically used to **consume ownership** of the
+ /// specified file descriptor. When used in this way, the returned object
+ /// will take responsibility for closing it when the object goes out of
+ /// scope.
+ ///
+ /// However, consuming ownership is not strictly required. Use a
+ /// [`From<OwnedFd>::from`] implementation for an API which strictly
+ /// consumes ownership.
+ ///
+ /// # Safety
+ ///
+ /// The `fd` passed in must be a valid and open file descriptor.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// # use std::io;
+ /// #[cfg(unix)]
+ /// use std::os::unix::io::{FromRawFd, IntoRawFd, RawFd};
+ /// #[cfg(target_os = "wasi")]
+ /// use std::os::wasi::io::{FromRawFd, IntoRawFd, RawFd};
+ ///
+ /// let f = File::open("foo.txt")?;
+ /// # #[cfg(any(unix, target_os = "wasi"))]
+ /// let raw_fd: RawFd = f.into_raw_fd();
+ /// // SAFETY: no other functions should call `from_raw_fd`, so there
+ /// // is only one owner for the file descriptor.
+ /// # #[cfg(any(unix, target_os = "wasi"))]
+ /// let f = unsafe { File::from_raw_fd(raw_fd) };
+ /// # Ok::<(), io::Error>(())
+ /// ```
+ #[stable(feature = "from_raw_os", since = "1.1.0")]
+ unsafe fn from_raw_fd(fd: RawFd) -> Self;
+}
+
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw file descriptor.
+#[cfg_attr(not(bootstrap), rustc_allowed_through_unstable_modules)]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+pub trait IntoRawFd {
+ /// Consumes this object, returning the raw underlying file descriptor.
+ ///
+ /// This function is typically used to **transfer ownership** of the underlying
+ /// file descriptor to the caller. When used in this way, callers are then the unique
+ /// owners of the file descriptor and must close it once it's no longer needed.
+ ///
+ /// However, transferring ownership is not strictly required. Use a
+ /// [`Into<OwnedFd>::into`] implementation for an API which strictly
+ /// transfers ownership.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// # use std::io;
+ /// #[cfg(unix)]
+ /// use std::os::unix::io::{IntoRawFd, RawFd};
+ /// #[cfg(target_os = "wasi")]
+ /// use std::os::wasi::io::{IntoRawFd, RawFd};
+ ///
+ /// let f = File::open("foo.txt")?;
+ /// #[cfg(any(unix, target_os = "wasi"))]
+ /// let raw_fd: RawFd = f.into_raw_fd();
+ /// # Ok::<(), io::Error>(())
+ /// ```
+ #[stable(feature = "into_raw_os", since = "1.4.0")]
+ fn into_raw_fd(self) -> RawFd;
+}
+
+#[stable(feature = "raw_fd_reflexive_traits", since = "1.48.0")]
+impl AsRawFd for RawFd {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ *self
+ }
+}
+#[stable(feature = "raw_fd_reflexive_traits", since = "1.48.0")]
+impl IntoRawFd for RawFd {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self
+ }
+}
+#[stable(feature = "raw_fd_reflexive_traits", since = "1.48.0")]
+impl FromRawFd for RawFd {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> RawFd {
+ fd
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRawFd for fs::File {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.as_inner().as_raw_fd()
+ }
+}
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+impl FromRawFd for fs::File {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> fs::File {
+ unsafe { fs::File::from(OwnedFd::from_raw_fd(fd)) }
+ }
+}
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawFd for fs::File {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_inner().into_raw_fd()
+ }
+}
+
+#[stable(feature = "asraw_stdio", since = "1.21.0")]
+impl AsRawFd for io::Stdin {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ libc::STDIN_FILENO
+ }
+}
+
+#[stable(feature = "asraw_stdio", since = "1.21.0")]
+impl AsRawFd for io::Stdout {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ libc::STDOUT_FILENO
+ }
+}
+
+#[stable(feature = "asraw_stdio", since = "1.21.0")]
+impl AsRawFd for io::Stderr {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ libc::STDERR_FILENO
+ }
+}
+
+#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
+impl<'a> AsRawFd for io::StdinLock<'a> {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ libc::STDIN_FILENO
+ }
+}
+
+#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
+impl<'a> AsRawFd for io::StdoutLock<'a> {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ libc::STDOUT_FILENO
+ }
+}
+
+#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
+impl<'a> AsRawFd for io::StderrLock<'a> {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ libc::STDERR_FILENO
+ }
+}
+
+/// This impl allows implementing traits that require `AsRawFd` on Arc.
+/// ```
+/// # #[cfg(any(unix, target_os = "wasi"))] mod group_cfg {
+/// # #[cfg(target_os = "wasi")]
+/// # use std::os::wasi::io::AsRawFd;
+/// # #[cfg(unix)]
+/// # use std::os::unix::io::AsRawFd;
+/// use std::net::UdpSocket;
+/// use std::sync::Arc;
/// trait MyTrait: AsRawFd {}
+/// impl MyTrait for Arc<UdpSocket> {}
+/// impl MyTrait for Box<UdpSocket> {}
+/// # }
+/// ```
+#[stable(feature = "asrawfd_ptrs", since = "1.63.0")]
+impl<T: AsRawFd> AsRawFd for crate::sync::Arc<T> {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ (**self).as_raw_fd()
+ }
+}
+
+#[stable(feature = "asrawfd_ptrs", since = "1.63.0")]
+impl<T: AsRawFd> AsRawFd for Box<T> {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ (**self).as_raw_fd()
+ }
+}
diff --git a/library/std/src/os/fd/tests.rs b/library/std/src/os/fd/tests.rs
new file mode 100644
index 000000000..b39863644
--- /dev/null
+++ b/library/std/src/os/fd/tests.rs
@@ -0,0 +1,53 @@
+#[cfg(any(unix, target_os = "wasi"))]
+#[test]
+fn test_raw_fd() {
+ #[cfg(unix)]
+ use crate::os::unix::io::{AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
+ #[cfg(target_os = "wasi")]
+ use crate::os::wasi::io::{AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
+
+ let raw_fd: RawFd = crate::io::stdin().as_raw_fd();
+
+ let stdin_as_file = unsafe { crate::fs::File::from_raw_fd(raw_fd) };
+ assert_eq!(stdin_as_file.as_raw_fd(), raw_fd);
+ assert_eq!(unsafe { BorrowedFd::borrow_raw(raw_fd).as_raw_fd() }, raw_fd);
+ assert_eq!(stdin_as_file.into_raw_fd(), 0);
+}
+
+#[cfg(any(unix, target_os = "wasi"))]
+#[test]
+fn test_fd() {
+ #[cfg(unix)]
+ use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+ #[cfg(target_os = "wasi")]
+ use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+
+ let stdin = crate::io::stdin();
+ let fd: BorrowedFd<'_> = stdin.as_fd();
+ let raw_fd: RawFd = fd.as_raw_fd();
+ let owned_fd: OwnedFd = unsafe { OwnedFd::from_raw_fd(raw_fd) };
+
+ let stdin_as_file = crate::fs::File::from(owned_fd);
+
+ assert_eq!(stdin_as_file.as_fd().as_raw_fd(), raw_fd);
+ assert_eq!(Into::<OwnedFd>::into(stdin_as_file).into_raw_fd(), raw_fd);
+}
+
+#[cfg(any(unix, target_os = "wasi"))]
+#[test]
+fn test_niche_optimizations() {
+ use crate::mem::size_of;
+ #[cfg(unix)]
+ use crate::os::unix::io::{BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+ #[cfg(target_os = "wasi")]
+ use crate::os::wasi::io::{BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+
+ assert_eq!(size_of::<Option<OwnedFd>>(), size_of::<RawFd>());
+ assert_eq!(size_of::<Option<BorrowedFd<'static>>>(), size_of::<RawFd>());
+ unsafe {
+ assert_eq!(OwnedFd::from_raw_fd(RawFd::MIN).into_raw_fd(), RawFd::MIN);
+ assert_eq!(OwnedFd::from_raw_fd(RawFd::MAX).into_raw_fd(), RawFd::MAX);
+ assert_eq!(Some(OwnedFd::from_raw_fd(RawFd::MIN)).unwrap().into_raw_fd(), RawFd::MIN);
+ assert_eq!(Some(OwnedFd::from_raw_fd(RawFd::MAX)).unwrap().into_raw_fd(), RawFd::MAX);
+ }
+}
diff --git a/library/std/src/os/fortanix_sgx/arch.rs b/library/std/src/os/fortanix_sgx/arch.rs
new file mode 100644
index 000000000..8358cb9e8
--- /dev/null
+++ b/library/std/src/os/fortanix_sgx/arch.rs
@@ -0,0 +1,80 @@
+//! SGX-specific access to architectural features.
+//!
+//! The functionality in this module is further documented in the Intel
+//! Software Developer's Manual, Volume 3, Chapter 40.
+#![unstable(feature = "sgx_platform", issue = "56975")]
+
+use crate::mem::MaybeUninit;
+use core::arch::asm;
+
+/// Wrapper struct to force 16-byte alignment.
+#[repr(align(16))]
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct Align16<T>(pub T);
+
+/// Wrapper struct to force 128-byte alignment.
+#[repr(align(128))]
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct Align128<T>(pub T);
+
+/// Wrapper struct to force 512-byte alignment.
+#[repr(align(512))]
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct Align512<T>(pub T);
+
+const ENCLU_EREPORT: u32 = 0;
+const ENCLU_EGETKEY: u32 = 1;
+
+/// Call the `EGETKEY` instruction to obtain a 128-bit secret key.
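+///
+/// # Example
+///
+/// A minimal sketch; the zeroed request is only a placeholder, as a real
+/// `KEYREQUEST` must be laid out according to the SGX specification:
+///
+/// ```no_run
+/// #![feature(sgx_platform)]
+/// use std::os::fortanix_sgx::arch::{egetkey, Align512};
+///
+/// let request = Align512([0u8; 512]); // placeholder KEYREQUEST
+/// match egetkey(&request) {
+///     Ok(key) => println!("derived a {}-byte key", key.0.len()),
+///     Err(code) => println!("EGETKEY failed with error code {code}"),
+/// }
+/// ```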
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn egetkey(request: &Align512<[u8; 512]>) -> Result<Align16<[u8; 16]>, u32> {
+ unsafe {
+ let mut out = MaybeUninit::uninit();
+ let error;
+
+ asm!(
+ // rbx is reserved by LLVM
+ "xchg %rbx, {0}",
+ "enclu",
+ "mov {0}, %rbx",
+ inout(reg) request => _,
+ inlateout("eax") ENCLU_EGETKEY => error,
+ in("rcx") out.as_mut_ptr(),
+ options(att_syntax, nostack),
+ );
+
+ match error {
+ 0 => Ok(out.assume_init()),
+ err => Err(err),
+ }
+ }
+}
+
+/// Call the `EREPORT` instruction.
+///
+/// This creates a cryptographic report describing the contents of the current
+/// enclave. The report may be verified by the enclave described in
+/// `targetinfo`.
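+///
+/// # Example
+///
+/// A minimal sketch; the zeroed inputs are placeholders, as a real
+/// `TARGETINFO` comes from the enclave that will verify the report:
+///
+/// ```no_run
+/// #![feature(sgx_platform)]
+/// use std::os::fortanix_sgx::arch::{ereport, Align128, Align512};
+///
+/// let targetinfo = Align512([0u8; 512]); // placeholder TARGETINFO
+/// let reportdata = Align128([0u8; 64]);  // caller-chosen data bound into the report
+/// let report = ereport(&targetinfo, &reportdata);
+/// assert_eq!(report.0.len(), 432);
+/// ```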
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn ereport(
+ targetinfo: &Align512<[u8; 512]>,
+ reportdata: &Align128<[u8; 64]>,
+) -> Align512<[u8; 432]> {
+ unsafe {
+ let mut report = MaybeUninit::uninit();
+
+ asm!(
+ // rbx is reserved by LLVM
+ "xchg %rbx, {0}",
+ "enclu",
+ "mov {0}, %rbx",
+ inout(reg) targetinfo => _,
+ in("eax") ENCLU_EREPORT,
+ in("rcx") reportdata,
+ in("rdx") report.as_mut_ptr(),
+ options(att_syntax, preserves_flags, nostack),
+ );
+
+ report.assume_init()
+ }
+}
diff --git a/library/std/src/os/fortanix_sgx/ffi.rs b/library/std/src/os/fortanix_sgx/ffi.rs
new file mode 100644
index 000000000..ac1db0e5e
--- /dev/null
+++ b/library/std/src/os/fortanix_sgx/ffi.rs
@@ -0,0 +1,41 @@
+//! SGX-specific extension to the primitives in the `std::ffi` module
+//!
+//! # Examples
+//!
+//! ```
+//! use std::ffi::OsString;
+//! use std::os::fortanix_sgx::ffi::OsStringExt;
+//!
+//! let bytes = b"foo".to_vec();
+//!
+//! // OsStringExt::from_vec
+//! let os_string = OsString::from_vec(bytes);
+//! assert_eq!(os_string.to_str(), Some("foo"));
+//!
+//! // OsStringExt::into_vec
+//! let bytes = os_string.into_vec();
+//! assert_eq!(bytes, b"foo");
+//! ```
+//!
+//! ```
+//! use std::ffi::OsStr;
+//! use std::os::fortanix_sgx::ffi::OsStrExt;
+//!
+//! let bytes = b"foo";
+//!
+//! // OsStrExt::from_bytes
+//! let os_str = OsStr::from_bytes(bytes);
+//! assert_eq!(os_str.to_str(), Some("foo"));
+//!
+//! // OsStrExt::as_bytes
+//! let bytes = os_str.as_bytes();
+//! assert_eq!(bytes, b"foo");
+//! ```
+
+#![unstable(feature = "sgx_platform", issue = "56975")]
+
+#[path = "../unix/ffi/os_str.rs"]
+mod os_str;
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub use self::os_str::{OsStrExt, OsStringExt};
diff --git a/library/std/src/os/fortanix_sgx/io.rs b/library/std/src/os/fortanix_sgx/io.rs
new file mode 100644
index 000000000..7223ade68
--- /dev/null
+++ b/library/std/src/os/fortanix_sgx/io.rs
@@ -0,0 +1,144 @@
+//! SGX-specific extensions to general I/O primitives
+//!
+//! SGX file descriptors behave differently from Unix file descriptors. See the
+//! description of [`TryIntoRawFd`] for more details.
+#![unstable(feature = "sgx_platform", issue = "56975")]
+
+use crate::net;
+pub use crate::sys::abi::usercalls::raw::Fd as RawFd;
+use crate::sys::{self, AsInner, FromInner, IntoInner, TryIntoInner};
+
+/// A trait to extract the raw SGX file descriptor from an underlying
+/// object.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait AsRawFd {
+ /// Extracts the raw file descriptor.
+ ///
+ /// This method does **not** pass ownership of the raw file descriptor
+ /// to the caller. The descriptor is only guaranteed to be valid while
+ /// the original object has not yet been destroyed.
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ fn as_raw_fd(&self) -> RawFd;
+}
+
+/// A trait to express the ability to construct an object from a raw file
+/// descriptor.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait FromRawFd {
+ /// An associated type that contains relevant metadata for `Self`.
+ type Metadata: Default;
+
+ /// Constructs a new instance of `Self` from the given raw file
+ /// descriptor and metadata.
+ ///
+ /// This function **consumes ownership** of the specified file
+ /// descriptor. The returned object will take responsibility for closing
+ /// it when the object goes out of scope.
+ ///
+ /// This function is also unsafe as the primitives currently returned
+ /// have the contract that they are the sole owner of the file
+ /// descriptor they are wrapping. Usage of this function could
+ /// accidentally allow violating this contract which can cause memory
+ /// unsafety in code that relies on it being true.
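+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming `fd` is a raw usercall descriptor obtained
+    /// elsewhere (the value used here is purely illustrative):
+    ///
+    /// ```no_run
+    /// #![feature(sgx_platform)]
+    /// use std::net::TcpStream;
+    /// use std::os::fortanix_sgx::io::{FromRawFd, RawFd, TcpStreamMetadata};
+    ///
+    /// let fd: RawFd = 3; // hypothetical descriptor, for illustration only
+    /// // SAFETY: the caller must be the sole owner of `fd`.
+    /// let stream = unsafe { TcpStream::from_raw_fd(fd, TcpStreamMetadata::default()) };
+    /// ```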
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ unsafe fn from_raw_fd(fd: RawFd, metadata: Self::Metadata) -> Self;
+}
+
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw file descriptor.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait TryIntoRawFd: Sized {
+ /// Consumes this object, returning the raw underlying file descriptor, if
+ /// this object is not cloned.
+ ///
+ /// This function **transfers ownership** of the underlying file descriptor
+ /// to the caller. Callers are then the unique owners of the file descriptor
+ /// and must close the descriptor once it's no longer needed.
+ ///
+ /// Unlike other platforms, on SGX, the file descriptor is shared between
+ /// all clones of an object. To avoid race conditions, this function will
+ /// only return `Ok` when called on the final clone.
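+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (the address is a placeholder):
+    ///
+    /// ```no_run
+    /// #![feature(sgx_platform)]
+    /// use std::net::TcpStream;
+    /// use std::os::fortanix_sgx::io::TryIntoRawFd;
+    ///
+    /// let stream = TcpStream::connect("example.com:80").unwrap();
+    /// match stream.try_into_raw_fd() {
+    ///     Ok(fd) => println!("sole owner; raw usercall fd {fd}"),
+    ///     Err(stream) => println!("another clone still exists: {stream:?}"),
+    /// }
+    /// ```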
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ fn try_into_raw_fd(self) -> Result<RawFd, Self>;
+}
+
+impl AsRawFd for net::TcpStream {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ *self.as_inner().as_inner().as_inner().as_inner()
+ }
+}
+
+impl AsRawFd for net::TcpListener {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ *self.as_inner().as_inner().as_inner().as_inner()
+ }
+}
+
+/// Metadata for `TcpStream`.
+#[derive(Debug, Clone, Default)]
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct TcpStreamMetadata {
+ /// Local address of the TCP stream
+ pub local_addr: Option<String>,
+ /// Peer address of the TCP stream
+ pub peer_addr: Option<String>,
+}
+
+impl FromRawFd for net::TcpStream {
+ type Metadata = TcpStreamMetadata;
+
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd, metadata: Self::Metadata) -> net::TcpStream {
+ let fd = sys::fd::FileDesc::from_inner(fd);
+ let socket = sys::net::Socket::from_inner((fd, metadata.local_addr));
+ net::TcpStream::from_inner(sys::net::TcpStream::from_inner((socket, metadata.peer_addr)))
+ }
+}
+
+/// Metadata for `TcpListener`.
+#[derive(Debug, Clone, Default)]
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct TcpListenerMetadata {
+ /// Local address of the TCP listener
+ pub local_addr: Option<String>,
+}
+
+impl FromRawFd for net::TcpListener {
+ type Metadata = TcpListenerMetadata;
+
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd, metadata: Self::Metadata) -> net::TcpListener {
+ let fd = sys::fd::FileDesc::from_inner(fd);
+ let socket = sys::net::Socket::from_inner((fd, metadata.local_addr));
+ net::TcpListener::from_inner(sys::net::TcpListener::from_inner(socket))
+ }
+}
+
+impl TryIntoRawFd for net::TcpStream {
+ #[inline]
+ fn try_into_raw_fd(self) -> Result<RawFd, Self> {
+ let (socket, peer_addr) = self.into_inner().into_inner();
+ match socket.try_into_inner() {
+ Ok(fd) => Ok(fd.into_inner()),
+ Err(socket) => {
+ let sys = sys::net::TcpStream::from_inner((socket, peer_addr));
+ Err(net::TcpStream::from_inner(sys))
+ }
+ }
+ }
+}
+
+impl TryIntoRawFd for net::TcpListener {
+ #[inline]
+ fn try_into_raw_fd(self) -> Result<RawFd, Self> {
+ match self.into_inner().into_inner().try_into_inner() {
+ Ok(fd) => Ok(fd.into_inner()),
+ Err(socket) => {
+ let sys = sys::net::TcpListener::from_inner(socket);
+ Err(net::TcpListener::from_inner(sys))
+ }
+ }
+ }
+}
diff --git a/library/std/src/os/fortanix_sgx/mod.rs b/library/std/src/os/fortanix_sgx/mod.rs
new file mode 100644
index 000000000..a40dabe19
--- /dev/null
+++ b/library/std/src/os/fortanix_sgx/mod.rs
@@ -0,0 +1,53 @@
+//! Functionality specific to the `x86_64-fortanix-unknown-sgx` target.
+//!
+//! This includes functions to deal with memory isolation, usercalls, and the
+//! SGX instruction set.
+
+#![deny(missing_docs)]
+#![unstable(feature = "sgx_platform", issue = "56975")]
+
+/// Low-level interfaces to usercalls. See the [ABI documentation] for more
+/// information.
+///
+/// [ABI documentation]: https://docs.rs/fortanix-sgx-abi/
+pub mod usercalls {
+ pub use crate::sys::abi::usercalls::*;
+
+ /// Primitives for allocating memory in userspace as well as copying data
+ /// to and from user memory.
+ pub mod alloc {
+ pub use crate::sys::abi::usercalls::alloc::*;
+ }
+
+ /// Lowest-level interfaces to usercalls and usercall ABI type definitions.
+ pub mod raw {
+ pub use crate::sys::abi::usercalls::raw::{
+ accept_stream, alloc, async_queues, bind_stream, close, connect_stream, exit, flush,
+ free, insecure_time, launch_thread, read, read_alloc, send, wait, write,
+ };
+ pub use crate::sys::abi::usercalls::raw::{do_usercall, Usercalls as UsercallNrs};
+
+ // fortanix-sgx-abi re-exports
+ pub use crate::sys::abi::usercalls::raw::Error;
+ pub use crate::sys::abi::usercalls::raw::{ByteBuffer, FifoDescriptor, Return, Usercall};
+ pub use crate::sys::abi::usercalls::raw::{Fd, Result, Tcs};
+ pub use crate::sys::abi::usercalls::raw::{
+ EV_RETURNQ_NOT_EMPTY, EV_UNPARK, EV_USERCALLQ_NOT_FULL, FD_STDERR, FD_STDIN, FD_STDOUT,
+ RESULT_SUCCESS, USERCALL_USER_DEFINED, WAIT_INDEFINITE, WAIT_NO,
+ };
+ }
+}
+
+/// Functions for querying mapping information for pointers.
+pub mod mem {
+ pub use crate::sys::abi::mem::*;
+}
+
+pub mod arch;
+pub mod ffi;
+pub mod io;
+
+/// Functions for querying thread-related information.
+pub mod thread {
+ pub use crate::sys::abi::thread::current;
+}
diff --git a/library/std/src/os/freebsd/fs.rs b/library/std/src/os/freebsd/fs.rs
new file mode 100644
index 000000000..8db3a950c
--- /dev/null
+++ b/library/std/src/os/freebsd/fs.rs
@@ -0,0 +1,154 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::freebsd::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
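+///
+/// # Example
+///
+/// A minimal sketch of the FreeBSD-specific accessors (FreeBSD only; `foo.txt`
+/// is a placeholder path):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::freebsd::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+///     let meta = fs::metadata("foo.txt")?;
+///     // `st_birthtime` and `st_flags` are FreeBSD-specific fields of the raw stat.
+///     println!("born at {}s, flags {:#x}", meta.st_birthtime(), meta.st_flags());
+///     Ok(())
+/// }
+/// ```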
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_flags(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gen(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_lspare(&self) -> u32;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+        // The accessor methods below use `libc::stat`, so they work fine when
+        // libc is built with the FreeBSD 12 ABI. This method, however, exposes
+        // the pre-FreeBSD-12 `raw::stat` layout, which would no longer match.
+ #[cfg(freebsd12)]
+ panic!("as_raw_stat not supported with FreeBSD 12 ABI");
+ #[cfg(not(freebsd12))]
+ unsafe {
+ &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat)
+ }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_birthtime(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime as i64
+ }
+ fn st_birthtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_gen(&self) -> u32 {
+ self.as_inner().as_inner().st_gen as u32
+ }
+ fn st_flags(&self) -> u32 {
+ self.as_inner().as_inner().st_flags as u32
+ }
+ #[cfg(freebsd12)]
+ fn st_lspare(&self) -> u32 {
+ panic!("st_lspare not supported with FreeBSD 12 ABI");
+ }
+ #[cfg(not(freebsd12))]
+ fn st_lspare(&self) -> u32 {
+ self.as_inner().as_inner().st_lspare as u32
+ }
+}
diff --git a/library/std/src/os/freebsd/mod.rs b/library/std/src/os/freebsd/mod.rs
new file mode 100644
index 000000000..c072fae55
--- /dev/null
+++ b/library/std/src/os/freebsd/mod.rs
@@ -0,0 +1,6 @@
+//! FreeBSD-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/freebsd/raw.rs b/library/std/src/os/freebsd/raw.rs
new file mode 100644
index 000000000..ab0bf7923
--- /dev/null
+++ b/library/std/src/os/freebsd/raw.rs
@@ -0,0 +1,86 @@
+//! FreeBSD-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ #[cfg(target_arch = "x86")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [u8; 8],
+}
diff --git a/library/std/src/os/fuchsia/fs.rs b/library/std/src/os/fuchsia/fs.rs
new file mode 100644
index 000000000..b48a46f91
--- /dev/null
+++ b/library/std/src/os/fuchsia/fs.rs
@@ -0,0 +1,95 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/fuchsia/mod.rs b/library/std/src/os/fuchsia/mod.rs
new file mode 100644
index 000000000..cd1b8233e
--- /dev/null
+++ b/library/std/src/os/fuchsia/mod.rs
@@ -0,0 +1,6 @@
+//! Fuchsia-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/fuchsia/raw.rs b/library/std/src/os/fuchsia/raw.rs
new file mode 100644
index 000000000..060d6e86b
--- /dev/null
+++ b/library/std/src/os/fuchsia/raw.rs
@@ -0,0 +1,293 @@
+//! Fuchsia-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_ulong;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_ulong;
+
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+
+#[cfg(any(
+ target_arch = "x86",
+ target_arch = "le32",
+ target_arch = "powerpc",
+ target_arch = "arm"
+))]
+mod arch {
+ use crate::os::raw::{c_long, c_short, c_uint};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: c_short,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ }
+}
+
+#[cfg(target_arch = "mips")]
+mod arch {
+ use crate::os::raw::{c_long, c_ulong};
+
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad1: [c_long; 3],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad2: [c_long; 2],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad5: [c_long; 14],
+ }
+}
+
+#[cfg(target_arch = "mips64")]
+mod arch {
+ pub use libc::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+}
+
+#[cfg(target_arch = "aarch64")]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_int; 2],
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad0: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_long; 3],
+ }
+}
diff --git a/library/std/src/os/haiku/fs.rs b/library/std/src/os/haiku/fs.rs
new file mode 100644
index 000000000..a23a2af8f
--- /dev/null
+++ b/library/std/src/os/haiku/fs.rs
@@ -0,0 +1,127 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::haiku::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_crtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_crtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_crtime(&self) -> i64 {
+ self.as_inner().as_inner().st_crtime as i64
+ }
+ fn st_crtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_crtime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
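
A short sketch of the Haiku-specific accessors, notably the creation time (`st_crtime`); `"some_file"` is a placeholder path and the code only compiles where `std::os::haiku` exists, i.e. on a Haiku target.

```rust
use std::fs;
use std::io;
use std::os::haiku::fs::MetadataExt;

fn main() -> io::Result<()> {
    let meta = fs::metadata("some_file")?;
    // Haiku additionally records a creation time alongside atime/mtime/ctime.
    println!("created: {}.{:09} s since the epoch", meta.st_crtime(), meta.st_crtime_nsec());
    Ok(())
}
```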
diff --git a/library/std/src/os/haiku/mod.rs b/library/std/src/os/haiku/mod.rs
new file mode 100644
index 000000000..73f500cad
--- /dev/null
+++ b/library/std/src/os/haiku/mod.rs
@@ -0,0 +1,6 @@
+//! Haiku-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/haiku/raw.rs b/library/std/src/os/haiku/raw.rs
new file mode 100644
index 000000000..afbb66ccb
--- /dev/null
+++ b/library/std/src/os/haiku/raw.rs
@@ -0,0 +1,79 @@
+//! Haiku-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.53.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+use crate::os::unix::raw::{gid_t, uid_t};
+
+// Use the direct definition of usize, instead of uintptr_t as libc does
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = i32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = i32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = i32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i32;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_crtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_crtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_type: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+}
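
As the deprecation note above says, new code should take these definitions from the `libc` crate rather than from `std::os::haiku::raw`. A minimal sketch of that, assuming `libc` is declared as a dependency:

```rust
// Works with the libc definitions directly instead of the deprecated aliases.
fn device_and_inode(st: &libc::stat) -> (libc::dev_t, libc::ino_t) {
    (st.st_dev, st.st_ino)
}
```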
diff --git a/library/std/src/os/hermit/ffi.rs b/library/std/src/os/hermit/ffi.rs
new file mode 100644
index 000000000..19761fd99
--- /dev/null
+++ b/library/std/src/os/hermit/ffi.rs
@@ -0,0 +1,41 @@
+//! HermitCore-specific extension to the primitives in the `std::ffi` module
+//!
+//! # Examples
+//!
+//! ```
+//! use std::ffi::OsString;
+//! use std::os::hermit::ffi::OsStringExt;
+//!
+//! let bytes = b"foo".to_vec();
+//!
+//! // OsStringExt::from_vec
+//! let os_string = OsString::from_vec(bytes);
+//! assert_eq!(os_string.to_str(), Some("foo"));
+//!
+//! // OsStringExt::into_vec
+//! let bytes = os_string.into_vec();
+//! assert_eq!(bytes, b"foo");
+//! ```
+//!
+//! ```
+//! use std::ffi::OsStr;
+//! use std::os::hermit::ffi::OsStrExt;
+//!
+//! let bytes = b"foo";
+//!
+//! // OsStrExt::from_bytes
+//! let os_str = OsStr::from_bytes(bytes);
+//! assert_eq!(os_str.to_str(), Some("foo"));
+//!
+//! // OsStrExt::as_bytes
+//! let bytes = os_str.as_bytes();
+//! assert_eq!(bytes, b"foo");
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[path = "../unix/ffi/os_str.rs"]
+mod os_str;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::os_str::{OsStrExt, OsStringExt};
diff --git a/library/std/src/os/hermit/mod.rs b/library/std/src/os/hermit/mod.rs
new file mode 100644
index 000000000..4657b545a
--- /dev/null
+++ b/library/std/src/os/hermit/mod.rs
@@ -0,0 +1,13 @@
+#![stable(feature = "rust1", since = "1.0.0")]
+
+pub mod ffi;
+
+/// A prelude for conveniently writing platform-specific code.
+///
+/// Includes all extension traits, and some important type definitions.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod prelude {
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::ffi::{OsStrExt, OsStringExt};
+}
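
For completeness, a sketch of what pulling in this prelude looks like in user code on a HermitCore target; the method comes from the re-exported `OsStrExt`.

```rust
use std::ffi::OsStr;
use std::os::hermit::prelude::*;

fn main() {
    // OsStrExt is in scope via the prelude, so as_bytes() is available.
    assert_eq!(OsStr::new("foo").as_bytes(), b"foo");
}
```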
diff --git a/library/std/src/os/horizon/fs.rs b/library/std/src/os/horizon/fs.rs
new file mode 100644
index 000000000..132552210
--- /dev/null
+++ b/library/std/src/os/horizon/fs.rs
@@ -0,0 +1,95 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atim.tv_sec
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atim.tv_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtim.tv_sec
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtim.tv_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctim.tv_sec
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctim.tv_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
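
A brief sketch of reading a timestamp pair on a Horizon target; as the impl above shows, the seconds and nanoseconds come from the underlying `timespec` fields. `"some_file"` is a placeholder.

```rust
use std::fs;
use std::io;
use std::os::horizon::fs::MetadataExt;

fn main() -> io::Result<()> {
    let meta = fs::metadata("some_file")?;
    // Whole seconds and the nanosecond remainder of the last modification.
    println!("mtime: {}.{:09}", meta.st_mtime(), meta.st_mtime_nsec());
    Ok(())
}
```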
diff --git a/library/std/src/os/horizon/mod.rs b/library/std/src/os/horizon/mod.rs
new file mode 100644
index 000000000..326d0ae9c
--- /dev/null
+++ b/library/std/src/os/horizon/mod.rs
@@ -0,0 +1,6 @@
+//! Definitions for Horizon OS
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub(crate) mod raw;
diff --git a/library/std/src/os/horizon/raw.rs b/library/std/src/os/horizon/raw.rs
new file mode 100644
index 000000000..929fa7db1
--- /dev/null
+++ b/library/std/src/os/horizon/raw.rs
@@ -0,0 +1,70 @@
+//! Horizon OS raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+use crate::os::unix::raw::{gid_t, uid_t};
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = libc::pthread_t;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = libc::blkcnt_t;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = libc::blksize_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = libc::dev_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = libc::ino_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = libc::mode_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = libc::nlink_t;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = libc::off_t;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = libc::time_t;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_spare4: [c_long; 2usize],
+}
diff --git a/library/std/src/os/illumos/fs.rs b/library/std/src/os/illumos/fs.rs
new file mode 100644
index 000000000..63be48b81
--- /dev/null
+++ b/library/std/src/os/illumos/fs.rs
@@ -0,0 +1,116 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::illumos::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
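
A small sketch favouring the accessor methods over the deprecated `as_raw_stat` on an illumos target; `"some_file"` is a placeholder path.

```rust
use std::fs;
use std::io;
use std::os::illumos::fs::MetadataExt;

fn main() -> io::Result<()> {
    let meta = fs::metadata("some_file")?;
    // The accessors avoid the deprecated as_raw_stat() and its raw::stat cast.
    println!("mode: {:o}, hard links: {}", meta.st_mode(), meta.st_nlink());
    Ok(())
}
```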
diff --git a/library/std/src/os/illumos/mod.rs b/library/std/src/os/illumos/mod.rs
new file mode 100644
index 000000000..e61926f89
--- /dev/null
+++ b/library/std/src/os/illumos/mod.rs
@@ -0,0 +1,6 @@
+//! illumos-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/illumos/raw.rs b/library/std/src/os/illumos/raw.rs
new file mode 100644
index 000000000..2bea9ebb3
--- /dev/null
+++ b/library/std/src/os/illumos/raw.rs
@@ -0,0 +1,74 @@
+//! illumos-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by the standard library, the `libc` \
+ crate on crates.io should be used instead for the correct definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+use crate::os::unix::raw::{gid_t, uid_t};
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = u32;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [u8; 16],
+}
diff --git a/library/std/src/os/ios/fs.rs b/library/std/src/os/ios/fs.rs
new file mode 100644
index 000000000..4a4637ce0
--- /dev/null
+++ b/library/std/src/os/ios/fs.rs
@@ -0,0 +1,142 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::ios::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_flags(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gen(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_lspare(&self) -> u32;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_birthtime(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime as i64
+ }
+ fn st_birthtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_gen(&self) -> u32 {
+ self.as_inner().as_inner().st_gen as u32
+ }
+ fn st_flags(&self) -> u32 {
+ self.as_inner().as_inner().st_flags as u32
+ }
+ fn st_lspare(&self) -> u32 {
+ self.as_inner().as_inner().st_lspare as u32
+ }
+}
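
A sketch of the Apple-specific fields exposed above (birth time, flags, generation number) on an iOS target; `"some_file"` is a placeholder.

```rust
use std::fs;
use std::io;
use std::os::ios::fs::MetadataExt;

fn main() -> io::Result<()> {
    let meta = fs::metadata("some_file")?;
    // Birth (creation) time is tracked in addition to the usual ctime.
    println!("born: {}.{:09}", meta.st_birthtime(), meta.st_birthtime_nsec());
    println!("flags: {:#x}, gen: {}", meta.st_flags(), meta.st_gen());
    Ok(())
}
```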
diff --git a/library/std/src/os/ios/mod.rs b/library/std/src/os/ios/mod.rs
new file mode 100644
index 000000000..fdefa1f6b
--- /dev/null
+++ b/library/std/src/os/ios/mod.rs
@@ -0,0 +1,6 @@
+//! iOS-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/ios/raw.rs b/library/std/src/os/ios/raw.rs
new file mode 100644
index 000000000..af12aeebe
--- /dev/null
+++ b/library/std/src/os/ios/raw.rs
@@ -0,0 +1,83 @@
+//! iOS-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_qspare: [i64; 2],
+}
diff --git a/library/std/src/os/l4re/fs.rs b/library/std/src/os/l4re/fs.rs
new file mode 100644
index 000000000..6d6a535b1
--- /dev/null
+++ b/library/std/src/os/l4re/fs.rs
@@ -0,0 +1,382 @@
+//! L4Re-specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::l4re::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned [`stat`] are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ ///
+ /// [`stat`]: struct@crate::os::linux::raw::stat
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let stat = meta.as_raw_stat();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(since = "1.8.0", note = "other methods of this trait are now preferred")]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ /// Returns the device ID on which this file resides.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_dev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ /// Returns the inode number.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ino());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ /// Returns the file type and mode.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mode());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ /// Returns the number of hard links to file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_nlink());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ /// Returns the user ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_uid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ /// Returns the group ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_gid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ /// Returns the device ID that this file represents. Only relevant for special files.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_rdev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ /// Returns the size of the file (if it is a regular file or a symbolic link) in bytes.
+ ///
+ /// The size of a symbolic link is the length of the pathname it contains,
+ /// without a terminating null byte.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_size());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ /// Returns the last access time of the file, in seconds since the Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ /// Returns the last access time of the file, in nanoseconds since [`st_atime`].
+ ///
+ /// [`st_atime`]: Self::st_atime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ /// Returns the last modification time of the file, in seconds since the Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ /// Returns the last modification time of the file, in nanoseconds since [`st_mtime`].
+ ///
+ /// [`st_mtime`]: Self::st_mtime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ /// Returns the last status change time of the file, in seconds since the Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ /// Returns the last status change time of the file, in nanoseconds since [`st_ctime`].
+ ///
+ /// [`st_ctime`]: Self::st_ctime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ /// Returns the "preferred" block size for efficient filesystem I/O.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blksize());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ /// Returns the number of blocks allocated to the file, in 512-byte units.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blocks());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat64 as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
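
Since the individual methods are documented with examples above, here is one combined sketch: estimating on-disk usage from `st_blocks`, which the docs define as 512-byte units. It assumes an L4Re target and a placeholder path.

```rust
use std::fs;
use std::io;
use std::os::l4re::fs::MetadataExt;

fn main() -> io::Result<()> {
    let meta = fs::metadata("some_file")?;
    // st_blocks counts 512-byte units regardless of st_blksize.
    let on_disk = meta.st_blocks() * 512;
    println!("logical: {} bytes, on disk: ~{} bytes", meta.st_size(), on_disk);
    Ok(())
}
```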
diff --git a/library/std/src/os/l4re/mod.rs b/library/std/src/os/l4re/mod.rs
new file mode 100644
index 000000000..14c2425c1
--- /dev/null
+++ b/library/std/src/os/l4re/mod.rs
@@ -0,0 +1,7 @@
+//! L4Re-specific definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![doc(cfg(target_os = "l4re"))]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/l4re/raw.rs b/library/std/src/os/l4re/raw.rs
new file mode 100644
index 000000000..699e8be33
--- /dev/null
+++ b/library/std/src/os/l4re/raw.rs
@@ -0,0 +1,365 @@
+//! L4Re-specific raw type definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_ulong;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_ulong;
+
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+
+#[cfg(any(
+ target_arch = "x86",
+ target_arch = "le32",
+ target_arch = "m68k",
+ target_arch = "powerpc",
+ target_arch = "sparc",
+ target_arch = "arm",
+ target_arch = "asmjs",
+ target_arch = "wasm32"
+))]
+mod arch {
+ use crate::os::raw::{c_long, c_short, c_uint};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: c_short,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ }
+}
+
+#[cfg(target_arch = "mips")]
+mod arch {
+ use crate::os::raw::{c_long, c_ulong};
+
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad1: [c_long; 3],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad2: [c_long; 2],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad5: [c_long; 14],
+ }
+}
+
+#[cfg(target_arch = "hexagon")]
+mod arch {
+ use crate::os::raw::{c_int, c_long, c_uint};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = c_long;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = c_uint;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad3: [c_int; 2],
+ }
+}
+
+#[cfg(any(
+ target_arch = "mips64",
+ target_arch = "s390x",
+ target_arch = "sparc64",
+ target_arch = "riscv64",
+ target_arch = "riscv32"
+))]
+mod arch {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub use libc::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+}
+
+#[cfg(target_arch = "aarch64")]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = i32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = c_long;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_int; 2],
+ }
+}
+
+#[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad0: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_long; 3],
+ }
+}
diff --git a/library/std/src/os/linux/fs.rs b/library/std/src/os/linux/fs.rs
new file mode 100644
index 000000000..479bbcc17
--- /dev/null
+++ b/library/std/src/os/linux/fs.rs
@@ -0,0 +1,397 @@
+//! Linux-specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::linux::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned [`stat`] are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ ///
+ /// [`stat`]: struct@crate::os::linux::raw::stat
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let stat = meta.as_raw_stat();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(since = "1.8.0", note = "other methods of this trait are now preferred")]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ /// Returns the device ID on which this file resides.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_dev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ /// Returns the inode number.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ino());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ /// Returns the file type and mode.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mode());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ /// Returns the number of hard links to the file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_nlink());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ /// Returns the user ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_uid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ /// Returns the group ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_gid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ /// Returns the device ID that this file represents. Only relevant for special files.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_rdev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ /// Returns the size of the file (if it is a regular file or a symbolic link) in bytes.
+ ///
+ /// The size of a symbolic link is the length of the pathname it contains,
+ /// without a terminating null byte.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_size());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ /// Returns the last access time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ /// Returns the last access time of the file, in nanoseconds since [`st_atime`].
+ ///
+ /// [`st_atime`]: Self::st_atime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ /// Returns the last modification time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ /// Returns the last modification time of the file, in nanoseconds since [`st_mtime`].
+ ///
+ /// [`st_mtime`]: Self::st_mtime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ /// Returns the last status change time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ /// Returns the last status change time of the file, in nanoseconds since [`st_ctime`].
+ ///
+ /// [`st_ctime`]: Self::st_ctime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ /// Returns the "preferred" block size for efficient filesystem I/O.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blksize());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ /// Returns the number of blocks allocated to the file, in 512-byte units.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blocks());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat64 as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ let file_attr = self.as_inner();
+ #[cfg(all(target_env = "gnu", target_pointer_width = "32"))]
+ if let Some(atime) = file_attr.stx_atime() {
+ return atime.tv_sec;
+ }
+ file_attr.as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ let file_attr = self.as_inner();
+ #[cfg(all(target_env = "gnu", target_pointer_width = "32"))]
+ if let Some(mtime) = file_attr.stx_mtime() {
+ return mtime.tv_sec;
+ }
+ file_attr.as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ let file_attr = self.as_inner();
+ #[cfg(all(target_env = "gnu", target_pointer_width = "32"))]
+ if let Some(ctime) = file_attr.stx_ctime() {
+ return ctime.tv_sec;
+ }
+ file_attr.as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/linux/mod.rs b/library/std/src/os/linux/mod.rs
new file mode 100644
index 000000000..8e7776f66
--- /dev/null
+++ b/library/std/src/os/linux/mod.rs
@@ -0,0 +1,8 @@
+//! Linux-specific definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![doc(cfg(target_os = "linux"))]
+
+pub mod fs;
+pub mod process;
+pub mod raw;
diff --git a/library/std/src/os/linux/process.rs b/library/std/src/os/linux/process.rs
new file mode 100644
index 000000000..540363c03
--- /dev/null
+++ b/library/std/src/os/linux/process.rs
@@ -0,0 +1,165 @@
+//! Linux-specific extensions to primitives in the [`std::process`] module.
+//!
+//! [`std::process`]: crate::process
+
+#![unstable(feature = "linux_pidfd", issue = "82971")]
+
+use crate::io::Result;
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+use crate::process;
+use crate::sealed::Sealed;
+#[cfg(not(doc))]
+use crate::sys::fd::FileDesc;
+use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
+
+#[cfg(doc)]
+struct FileDesc;
+
+/// This type represents a file descriptor that refers to a process.
+///
+/// A `PidFd` can be obtained by setting the corresponding option on [`Command`]
+/// with [`create_pidfd`]. Subsequently, the created pidfd can be retrieved
+/// from the [`Child`] by calling [`pidfd`] or [`take_pidfd`].
+///
+/// Example:
+/// ```no_run
+/// #![feature(linux_pidfd)]
+/// use std::os::linux::process::{CommandExt, ChildExt};
+/// use std::process::Command;
+///
+/// let mut child = Command::new("echo")
+/// .create_pidfd(true)
+/// .spawn()
+/// .expect("Failed to spawn child");
+///
+/// let pidfd = child
+/// .take_pidfd()
+/// .expect("Failed to retrieve pidfd");
+///
+/// // The file descriptor will be closed when `pidfd` is dropped.
+/// ```
+/// Refer to the man page of [`pidfd_open(2)`] for further details.
+///
+/// [`Command`]: process::Command
+/// [`create_pidfd`]: CommandExt::create_pidfd
+/// [`Child`]: process::Child
+/// [`pidfd`]: fn@ChildExt::pidfd
+/// [`take_pidfd`]: ChildExt::take_pidfd
+/// [`pidfd_open(2)`]: https://man7.org/linux/man-pages/man2/pidfd_open.2.html
+#[derive(Debug)]
+pub struct PidFd {
+ inner: FileDesc,
+}
+
+impl AsInner<FileDesc> for PidFd {
+ fn as_inner(&self) -> &FileDesc {
+ &self.inner
+ }
+}
+
+impl FromInner<FileDesc> for PidFd {
+ fn from_inner(inner: FileDesc) -> PidFd {
+ PidFd { inner }
+ }
+}
+
+impl IntoInner<FileDesc> for PidFd {
+ fn into_inner(self) -> FileDesc {
+ self.inner
+ }
+}
+
+impl AsRawFd for PidFd {
+ fn as_raw_fd(&self) -> RawFd {
+ self.as_inner().as_raw_fd()
+ }
+}
+
+impl FromRawFd for PidFd {
+ unsafe fn from_raw_fd(fd: RawFd) -> Self {
+ Self::from_inner(FileDesc::from_raw_fd(fd))
+ }
+}
+
+impl IntoRawFd for PidFd {
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_raw_fd()
+ }
+}
+
+impl AsFd for PidFd {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().as_fd()
+ }
+}
+
+impl From<OwnedFd> for PidFd {
+ fn from(fd: OwnedFd) -> Self {
+ Self::from_inner(FileDesc::from_inner(fd))
+ }
+}
+
+impl From<PidFd> for OwnedFd {
+ fn from(pid_fd: PidFd) -> Self {
+ pid_fd.into_inner().into_inner()
+ }
+}
+
+/// OS-specific extensions for [`Child`].
+///
+/// [`Child`]: process::Child
+pub trait ChildExt: Sealed {
+ /// Obtains a reference to the [`PidFd`] created for this [`Child`], if available.
+ ///
+ /// A pidfd will only be available if its creation was requested with
+ /// [`create_pidfd`] when the corresponding [`Command`] was created.
+ ///
+ /// Even if requested, a pidfd may not be available due to an older
+ /// version of Linux being in use, or if some other error occurred.
+ ///
+ /// [`Command`]: process::Command
+ /// [`create_pidfd`]: CommandExt::create_pidfd
+ /// [`Child`]: process::Child
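+ ///
+ /// A minimal sketch of borrowing the pidfd; it assumes pidfd creation was
+ /// requested via [`create_pidfd`] and that the running kernel supports it.
+ ///
+ /// ```no_run
+ /// #![feature(linux_pidfd)]
+ /// use std::os::linux::process::{ChildExt, CommandExt};
+ /// use std::process::Command;
+ ///
+ /// let child = Command::new("echo")
+ /// .create_pidfd(true)
+ /// .spawn()
+ /// .expect("Failed to spawn child");
+ ///
+ /// // Borrow the pidfd; it stays owned by (and is closed with) the `Child`.
+ /// let pidfd = child.pidfd().expect("pidfd was not created");
+ /// println!("{:?}", pidfd);
+ /// ```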
+ fn pidfd(&self) -> Result<&PidFd>;
+
+ /// Takes ownership of the [`PidFd`] created for this [`Child`], if available.
+ ///
+ /// A pidfd will only be available if its creation was requested with
+ /// [`create_pidfd`] when the corresponding [`Command`] was created.
+ ///
+ /// Even if requested, a pidfd may not be available due to an older
+ /// version of Linux being in use, or if some other error occurred.
+ ///
+ /// [`Command`]: process::Command
+ /// [`create_pidfd`]: CommandExt::create_pidfd
+ /// [`Child`]: process::Child
+ fn take_pidfd(&mut self) -> Result<PidFd>;
+}
+
+/// OS-specific extensions for [`Command`].
+///
+/// [`Command`]: process::Command
+pub trait CommandExt: Sealed {
+ /// Sets whether a [`PidFd`](struct@PidFd) should be created for the [`Child`]
+ /// spawned by this [`Command`].
+ /// By default, no pidfd will be created.
+ ///
+ /// The pidfd can be retrieved from the child with [`pidfd`] or [`take_pidfd`].
+ ///
+ /// A pidfd will only be created if it is possible to do so
+ /// in a guaranteed race-free manner (e.g. if the `clone3` system call
+ /// is supported). Otherwise, [`pidfd`] will return an error.
+ ///
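+ /// A minimal sketch of opting in to pidfd creation; whether a pidfd is
+ /// actually available afterwards depends on the running kernel.
+ ///
+ /// ```no_run
+ /// #![feature(linux_pidfd)]
+ /// use std::os::linux::process::{ChildExt, CommandExt};
+ /// use std::process::Command;
+ ///
+ /// let child = Command::new("echo")
+ /// .create_pidfd(true)
+ /// .spawn()
+ /// .expect("Failed to spawn child");
+ ///
+ /// // `pidfd` returns an error if the pidfd could not be created.
+ /// match child.pidfd() {
+ /// Ok(pidfd) => println!("created {:?}", pidfd),
+ /// Err(e) => println!("no pidfd: {}", e),
+ /// }
+ /// ```
+ ///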
+ /// [`Command`]: process::Command
+ /// [`Child`]: process::Child
+ /// [`pidfd`]: fn@ChildExt::pidfd
+ /// [`take_pidfd`]: ChildExt::take_pidfd
+ fn create_pidfd(&mut self, val: bool) -> &mut process::Command;
+}
+
+impl CommandExt for process::Command {
+ fn create_pidfd(&mut self, val: bool) -> &mut process::Command {
+ self.as_inner_mut().create_pidfd(val);
+ self
+ }
+}
diff --git a/library/std/src/os/linux/raw.rs b/library/std/src/os/linux/raw.rs
new file mode 100644
index 000000000..c73791d14
--- /dev/null
+++ b/library/std/src/os/linux/raw.rs
@@ -0,0 +1,366 @@
+//! Linux-specific raw type definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_ulong;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_ulong;
+
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+
+#[cfg(any(
+ target_arch = "x86",
+ target_arch = "le32",
+ target_arch = "m68k",
+ target_arch = "powerpc",
+ target_arch = "sparc",
+ target_arch = "arm",
+ target_arch = "asmjs",
+ target_arch = "wasm32"
+))]
+mod arch {
+ use crate::os::raw::{c_long, c_short, c_uint};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: c_short,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ }
+}
+
+#[cfg(target_arch = "mips")]
+mod arch {
+ use crate::os::raw::{c_long, c_ulong};
+
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad1: [c_long; 3],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad2: [c_long; 2],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad5: [c_long; 14],
+ }
+}
+
+#[cfg(target_arch = "hexagon")]
+mod arch {
+ use crate::os::raw::{c_int, c_long, c_uint};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = c_long;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = c_uint;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad3: [c_int; 2],
+ }
+}
+
+#[cfg(any(
+ target_arch = "mips64",
+ target_arch = "s390x",
+ target_arch = "sparc64",
+ target_arch = "riscv64",
+ target_arch = "riscv32"
+))]
+mod arch {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub use libc::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+}
+
+#[cfg(target_arch = "aarch64")]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = i32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = c_long;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_int; 2],
+ }
+}
+
+#[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad0: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_long; 3],
+ }
+}
diff --git a/library/std/src/os/macos/fs.rs b/library/std/src/os/macos/fs.rs
new file mode 100644
index 000000000..91915da6a
--- /dev/null
+++ b/library/std/src/os/macos/fs.rs
@@ -0,0 +1,148 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::macos::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
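+///
+/// # Examples
+///
+/// A minimal usage sketch; the path is illustrative and `st_birthtime` is one
+/// of the macOS-specific accessors provided below.
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::macos::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+/// let meta = fs::metadata("some_file")?;
+/// println!("{}", meta.st_birthtime());
+/// Ok(())
+/// }
+/// ```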
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_flags(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gen(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_lspare(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_qspare(&self) -> [u64; 2];
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_birthtime(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime as i64
+ }
+ fn st_birthtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_gen(&self) -> u32 {
+ self.as_inner().as_inner().st_gen as u32
+ }
+ fn st_flags(&self) -> u32 {
+ self.as_inner().as_inner().st_flags as u32
+ }
+ fn st_lspare(&self) -> u32 {
+ self.as_inner().as_inner().st_lspare as u32
+ }
+ fn st_qspare(&self) -> [u64; 2] {
+ let qspare = self.as_inner().as_inner().st_qspare;
+ [qspare[0] as u64, qspare[1] as u64]
+ }
+}
diff --git a/library/std/src/os/macos/mod.rs b/library/std/src/os/macos/mod.rs
new file mode 100644
index 000000000..791d703b1
--- /dev/null
+++ b/library/std/src/os/macos/mod.rs
@@ -0,0 +1,6 @@
+//! macOS-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/macos/raw.rs b/library/std/src/os/macos/raw.rs
new file mode 100644
index 000000000..0b21f6ee5
--- /dev/null
+++ b/library/std/src/os/macos/raw.rs
@@ -0,0 +1,83 @@
+//! macOS-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_qspare: [i64; 2],
+}
diff --git a/library/std/src/os/mod.rs b/library/std/src/os/mod.rs
new file mode 100644
index 000000000..6fbaa42c7
--- /dev/null
+++ b/library/std/src/os/mod.rs
@@ -0,0 +1,150 @@
+//! OS-specific functionality.
+
+#![stable(feature = "os", since = "1.0.0")]
+#![allow(missing_docs, nonstandard_style, missing_debug_implementations)]
+
+pub mod raw;
+
+// The code below could be written clearer using `cfg_if!`. However, the items below are
+// publicly exported by `std` and external tools can have trouble analysing them because of the use
+// of a macro that is not vendored by Rust and included in the toolchain.
+// See https://github.com/rust-analyzer/rust-analyzer/issues/6038.
+
+// On certain platforms right now the "main modules" modules that are
+// documented don't compile (missing things in `libc` which is empty),
+// so just omit them with an empty module and add the "unstable" attribute.
+
+// Unix, linux, wasi and windows are handled a bit differently.
+#[cfg(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+))]
+#[unstable(issue = "none", feature = "std_internals")]
+pub mod unix {}
+#[cfg(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+))]
+#[unstable(issue = "none", feature = "std_internals")]
+pub mod linux {}
+#[cfg(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+))]
+#[unstable(issue = "none", feature = "std_internals")]
+pub mod wasi {}
+#[cfg(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+))]
+#[unstable(issue = "none", feature = "std_internals")]
+pub mod windows {}
+
+// unix
+#[cfg(not(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+)))]
+#[cfg(target_os = "hermit")]
+#[path = "hermit/mod.rs"]
+pub mod unix;
+#[cfg(not(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+)))]
+#[cfg(all(not(target_os = "hermit"), any(unix, doc)))]
+pub mod unix;
+
+// linux
+#[cfg(not(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+)))]
+#[cfg(any(target_os = "linux", doc))]
+pub mod linux;
+
+// wasi
+#[cfg(not(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+)))]
+#[cfg(any(target_os = "wasi", doc))]
+pub mod wasi;
+
+// windows
+#[cfg(not(all(
+ doc,
+ any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+ )
+)))]
+#[cfg(any(windows, doc))]
+pub mod windows;
+
+// Others.
+#[cfg(target_os = "android")]
+pub mod android;
+#[cfg(target_os = "dragonfly")]
+pub mod dragonfly;
+#[cfg(target_os = "emscripten")]
+pub mod emscripten;
+#[cfg(target_os = "espidf")]
+pub mod espidf;
+#[cfg(all(target_vendor = "fortanix", target_env = "sgx"))]
+pub mod fortanix_sgx;
+#[cfg(target_os = "freebsd")]
+pub mod freebsd;
+#[cfg(target_os = "fuchsia")]
+pub mod fuchsia;
+#[cfg(target_os = "haiku")]
+pub mod haiku;
+#[cfg(target_os = "horizon")]
+pub mod horizon;
+#[cfg(target_os = "illumos")]
+pub mod illumos;
+#[cfg(target_os = "ios")]
+pub mod ios;
+#[cfg(target_os = "l4re")]
+pub mod l4re;
+#[cfg(target_os = "macos")]
+pub mod macos;
+#[cfg(target_os = "netbsd")]
+pub mod netbsd;
+#[cfg(target_os = "openbsd")]
+pub mod openbsd;
+#[cfg(target_os = "redox")]
+pub mod redox;
+#[cfg(target_os = "solaris")]
+pub mod solaris;
+#[cfg(target_os = "solid_asp3")]
+pub mod solid;
+#[cfg(target_os = "vxworks")]
+pub mod vxworks;
+
+#[cfg(any(unix, target_os = "wasi", doc))]
+mod fd;
diff --git a/library/std/src/os/netbsd/fs.rs b/library/std/src/os/netbsd/fs.rs
new file mode 100644
index 000000000..fe0be069e
--- /dev/null
+++ b/library/std/src/os/netbsd/fs.rs
@@ -0,0 +1,137 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::netbsd::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
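+///
+/// # Examples
+///
+/// A minimal usage sketch; the path is illustrative and `st_flags` is one of
+/// the NetBSD-specific accessors provided below.
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::netbsd::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+/// let meta = fs::metadata("some_file")?;
+/// println!("{}", meta.st_flags());
+/// Ok(())
+/// }
+/// ```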
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_flags(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gen(&self) -> u32;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atimensec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtimensec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctimensec as i64
+ }
+ fn st_birthtime(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime as i64
+ }
+ fn st_birthtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtimensec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_gen(&self) -> u32 {
+ self.as_inner().as_inner().st_gen as u32
+ }
+ fn st_flags(&self) -> u32 {
+ self.as_inner().as_inner().st_flags as u32
+ }
+}
diff --git a/library/std/src/os/netbsd/mod.rs b/library/std/src/os/netbsd/mod.rs
new file mode 100644
index 000000000..497a51a1d
--- /dev/null
+++ b/library/std/src/os/netbsd/mod.rs
@@ -0,0 +1,6 @@
+//! NetBSD-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/netbsd/raw.rs b/library/std/src/os/netbsd/raw.rs
new file mode 100644
index 000000000..18057291f
--- /dev/null
+++ b/library/std/src/os/netbsd/raw.rs
@@ -0,0 +1,83 @@
+//! NetBSD-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+use crate::os::unix::raw::{gid_t, uid_t};
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ st_spare: [u32; 2],
+}
diff --git a/library/std/src/os/openbsd/fs.rs b/library/std/src/os/openbsd/fs.rs
new file mode 100644
index 000000000..b8d8d31c5
--- /dev/null
+++ b/library/std/src/os/openbsd/fs.rs
@@ -0,0 +1,137 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::openbsd::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
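+///
+/// # Examples
+///
+/// A minimal usage sketch; the path is illustrative and `st_gen` is one of the
+/// OpenBSD-specific accessors provided below.
+///
+/// ```no_run
+/// use std::fs;
+/// use std::io;
+/// use std::os::openbsd::fs::MetadataExt;
+///
+/// fn main() -> io::Result<()> {
+/// let meta = fs::metadata("some_file")?;
+/// println!("{}", meta.st_gen());
+/// Ok(())
+/// }
+/// ```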
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_birthtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_flags(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gen(&self) -> u32;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_birthtime(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime as i64
+ }
+ fn st_birthtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_birthtime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_gen(&self) -> u32 {
+ self.as_inner().as_inner().st_gen as u32
+ }
+ fn st_flags(&self) -> u32 {
+ self.as_inner().as_inner().st_flags as u32
+ }
+}
diff --git a/library/std/src/os/openbsd/mod.rs b/library/std/src/os/openbsd/mod.rs
new file mode 100644
index 000000000..497a51a1d
--- /dev/null
+++ b/library/std/src/os/openbsd/mod.rs
@@ -0,0 +1,6 @@
+//! OpenBSD-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/openbsd/raw.rs b/library/std/src/os/openbsd/raw.rs
new file mode 100644
index 000000000..6711fb51b
--- /dev/null
+++ b/library/std/src/os/openbsd/raw.rs
@@ -0,0 +1,81 @@
+//! OpenBSD-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = usize;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+}
diff --git a/library/std/src/os/raw/mod.rs b/library/std/src/os/raw/mod.rs
new file mode 100644
index 000000000..19d0ffb2e
--- /dev/null
+++ b/library/std/src/os/raw/mod.rs
@@ -0,0 +1,31 @@
+//! Compatibility module for C platform-specific types. Use [`core::ffi`] instead.
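+//!
+//! A small illustrative example (the items below are plain type aliases for
+//! the corresponding [`core::ffi`] types):
+//!
+//! ```
+//! use std::os::raw::{c_char, c_int};
+//!
+//! let n: c_int = 42;
+//! let c: c_char = b'A' as c_char;
+//! assert_eq!(n, 42 as c_int);
+//! assert_eq!(c as u8, b'A');
+//! ```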
+
+#![stable(feature = "raw_os", since = "1.1.0")]
+
+#[cfg(test)]
+mod tests;
+
+macro_rules! alias_core_ffi {
+ ($($t:ident)*) => {$(
+ #[stable(feature = "raw_os", since = "1.1.0")]
+ #[doc = include_str!(concat!("../../../../core/src/ffi/", stringify!($t), ".md"))]
+ // Make this type alias appear cfg-dependent so that Clippy does not suggest
+ // replacing expressions like `0 as c_char` with `0_i8`/`0_u8`. This #[cfg(all())] can be
+ // removed after the false positive in https://github.com/rust-lang/rust-clippy/issues/8093
+ // is fixed.
+ #[cfg(all())]
+ #[doc(cfg(all()))]
+ pub type $t = core::ffi::$t;
+ )*}
+}
+
+alias_core_ffi! {
+ c_char c_schar c_uchar
+ c_short c_ushort
+ c_int c_uint
+ c_long c_ulong
+ c_longlong c_ulonglong
+ c_float
+ c_double
+ c_void
+}
diff --git a/library/std/src/os/raw/tests.rs b/library/std/src/os/raw/tests.rs
new file mode 100644
index 000000000..e7bb7d7e7
--- /dev/null
+++ b/library/std/src/os/raw/tests.rs
@@ -0,0 +1,15 @@
+use crate::any::TypeId;
+
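+// Uses `TypeId` to check that each `std::os::raw` alias names exactly the same
+// type as the corresponding `libc` type on the current target.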
+macro_rules! ok {
+ ($($t:ident)*) => {$(
+ assert!(TypeId::of::<libc::$t>() == TypeId::of::<raw::$t>(),
+ "{} is wrong", stringify!($t));
+ )*}
+}
+
+#[test]
+fn same() {
+ use crate::os::raw;
+ ok!(c_char c_schar c_uchar c_short c_ushort c_int c_uint c_long c_ulong
+ c_longlong c_ulonglong c_float c_double);
+}
diff --git a/library/std/src/os/redox/fs.rs b/library/std/src/os/redox/fs.rs
new file mode 100644
index 000000000..682ca6a2c
--- /dev/null
+++ b/library/std/src/os/redox/fs.rs
@@ -0,0 +1,382 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::redox::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned [`stat`] are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait provides the
+ /// cross-Unix abstractions over the raw stat.
+ ///
+ /// [`stat`]: crate::os::redox::raw::stat
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let stat = meta.as_raw_stat();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ /// Returns the device ID on which this file resides.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_dev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ /// Returns the inode number.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ino());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ /// Returns the file type and mode.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mode());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ /// Returns the number of hard links to the file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_nlink());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ /// Returns the user ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_uid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ /// Returns the group ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_gid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ /// Returns the device ID that this file represents. Only relevant for special files.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_rdev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ /// Returns the size of the file (if it is a regular file or a symbolic link) in bytes.
+ ///
+ /// The size of a symbolic link is the length of the pathname it contains,
+ /// without a terminating null byte.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_size());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ /// Returns the last access time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ /// Returns the last access time of the file, in nanoseconds since [`st_atime`].
+ ///
+ /// [`st_atime`]: Self::st_atime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ /// Returns the last modification time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ /// Returns the last modification time of the file, in nanoseconds since [`st_mtime`].
+ ///
+ /// [`st_mtime`]: Self::st_mtime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ /// Returns the last status change time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ /// Returns the last status change time of the file, in nanoseconds since [`st_ctime`].
+ ///
+ /// [`st_ctime`]: Self::st_ctime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ /// Returns the "preferred" block size for efficient filesystem I/O.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blksize());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ /// Returns the number of blocks allocated to the file, in 512-byte units.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::redox::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blocks());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/redox/mod.rs b/library/std/src/os/redox/mod.rs
new file mode 100644
index 000000000..d786759c6
--- /dev/null
+++ b/library/std/src/os/redox/mod.rs
@@ -0,0 +1,6 @@
+//! Redox-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/redox/raw.rs b/library/std/src/os/redox/raw.rs
new file mode 100644
index 000000000..7b1cd8ae8
--- /dev/null
+++ b/library/std/src/os/redox/raw.rs
@@ -0,0 +1,78 @@
+//! Redox-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::{c_char, c_int, c_long, c_ulong, c_void};
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = c_long;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type gid_t = c_int;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = c_int;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type uid_t = c_int;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = *mut c_void;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = c_ulong;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = c_ulong;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = c_ulong;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = c_ulong;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = c_long;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = c_long;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub _pad: [c_char; 24],
+}
diff --git a/library/std/src/os/solaris/fs.rs b/library/std/src/os/solaris/fs.rs
new file mode 100644
index 000000000..093143737
--- /dev/null
+++ b/library/std/src/os/solaris/fs.rs
@@ -0,0 +1,117 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::solaris::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait provides the
+ /// cross-Unix abstractions over the raw stat.
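+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, mirroring the accessor examples of the other platform
+ /// modules; `"some_file"` is a placeholder path.
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::solaris::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let stat = meta.as_raw_stat();
+ /// Ok(())
+ /// }
+ /// ```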
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[deprecated(
+ since = "1.8.0",
+ note = "deprecated in favor of the accessor \
+ methods of this trait"
+ )]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
diff --git a/library/std/src/os/solaris/mod.rs b/library/std/src/os/solaris/mod.rs
new file mode 100644
index 000000000..e4cfd5329
--- /dev/null
+++ b/library/std/src/os/solaris/mod.rs
@@ -0,0 +1,6 @@
+//! Solaris-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/solaris/raw.rs b/library/std/src/os/solaris/raw.rs
new file mode 100644
index 000000000..63426c969
--- /dev/null
+++ b/library/std/src/os/solaris/raw.rs
@@ -0,0 +1,76 @@
+//! Solaris-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_long;
+use crate::os::unix::raw::{gid_t, uid_t};
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type blksize_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type ino_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type nlink_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type time_t = i64;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = u32;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [u8; 16],
+}
diff --git a/library/std/src/os/solid/ffi.rs b/library/std/src/os/solid/ffi.rs
new file mode 100644
index 000000000..aaa2070a6
--- /dev/null
+++ b/library/std/src/os/solid/ffi.rs
@@ -0,0 +1,41 @@
+//! SOLID-specific extension to the primitives in the `std::ffi` module
+//!
+//! # Examples
+//!
+//! ```
+//! use std::ffi::OsString;
+//! use std::os::solid::ffi::OsStringExt;
+//!
+//! let bytes = b"foo".to_vec();
+//!
+//! // OsStringExt::from_vec
+//! let os_string = OsString::from_vec(bytes);
+//! assert_eq!(os_string.to_str(), Some("foo"));
+//!
+//! // OsStringExt::into_vec
+//! let bytes = os_string.into_vec();
+//! assert_eq!(bytes, b"foo");
+//! ```
+//!
+//! ```
+//! use std::ffi::OsStr;
+//! use std::os::solid::ffi::OsStrExt;
+//!
+//! let bytes = b"foo";
+//!
+//! // OsStrExt::from_bytes
+//! let os_str = OsStr::from_bytes(bytes);
+//! assert_eq!(os_str.to_str(), Some("foo"));
+//!
+//! // OsStrExt::as_bytes
+//! let bytes = os_str.as_bytes();
+//! assert_eq!(bytes, b"foo");
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[path = "../unix/ffi/os_str.rs"]
+mod os_str;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::os_str::{OsStrExt, OsStringExt};
diff --git a/library/std/src/os/solid/io.rs b/library/std/src/os/solid/io.rs
new file mode 100644
index 000000000..33cc5a015
--- /dev/null
+++ b/library/std/src/os/solid/io.rs
@@ -0,0 +1,113 @@
+//! SOLID-specific extensions to general I/O primitives
+
+#![deny(unsafe_op_in_unsafe_fn)]
+#![unstable(feature = "solid_ext", issue = "none")]
+
+use crate::net;
+use crate::sys;
+use crate::sys_common::{self, AsInner, FromInner, IntoInner};
+
+/// Raw file descriptors.
+pub type RawFd = i32;
+
+/// A trait to extract the raw SOLID Sockets file descriptor from an underlying
+/// object.
+pub trait AsRawFd {
+ /// Extracts the raw file descriptor.
+ ///
+ /// This method does **not** pass ownership of the raw file descriptor
+ /// to the caller. The descriptor is only guaranteed to be valid while
+ /// the original object has not yet been destroyed.
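+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch; the address is a placeholder and the call only
+ /// borrows the descriptor.
+ ///
+ /// ```no_run
+ /// #![feature(solid_ext)]
+ /// use std::net::TcpStream;
+ /// use std::os::solid::io::AsRawFd;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:34254").expect("couldn't connect");
+ /// let fd = stream.as_raw_fd();
+ /// println!("raw socket fd: {fd}");
+ /// ```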
+ fn as_raw_fd(&self) -> RawFd;
+}
+
+/// A trait to express the ability to construct an object from a raw file
+/// descriptor.
+pub trait FromRawFd {
+ /// Constructs a new instance of `Self` from the given raw file
+ /// descriptor.
+ ///
+ /// This function **consumes ownership** of the specified file
+ /// descriptor. The returned object will take responsibility for closing
+ /// it when the object goes out of scope.
+ ///
+ /// This function is also unsafe as the primitives currently returned
+ /// have the contract that they are the sole owner of the file
+ /// descriptor they are wrapping. Usage of this function could
+ /// accidentally allow violating this contract, which can cause memory
+ /// unsafety in code that relies on it being true.
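+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of handing a descriptor off and reclaiming it;
+ /// the address is a placeholder.
+ ///
+ /// ```no_run
+ /// #![feature(solid_ext)]
+ /// use std::net::TcpStream;
+ /// use std::os::solid::io::{FromRawFd, IntoRawFd};
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:34254").expect("couldn't connect");
+ /// let fd = stream.into_raw_fd();
+ /// // SAFETY: we are the sole owner of `fd`, which `into_raw_fd` just handed over.
+ /// let stream = unsafe { TcpStream::from_raw_fd(fd) };
+ /// ```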
+ unsafe fn from_raw_fd(fd: RawFd) -> Self;
+}
+
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw file descriptor.
+pub trait IntoRawFd {
+ /// Consumes this object, returning the raw underlying file descriptor.
+ ///
+ /// This function **transfers ownership** of the underlying file descriptor
+ /// to the caller. Callers are then the unique owners of the file descriptor
+ /// and must close the descriptor once it's no longer needed.
+ fn into_raw_fd(self) -> RawFd;
+}
+
+#[stable(feature = "raw_fd_reflexive_traits", since = "1.48.0")]
+impl AsRawFd for RawFd {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ *self
+ }
+}
+#[stable(feature = "raw_fd_reflexive_traits", since = "1.48.0")]
+impl IntoRawFd for RawFd {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self
+ }
+}
+#[stable(feature = "raw_fd_reflexive_traits", since = "1.48.0")]
+impl FromRawFd for RawFd {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> RawFd {
+ fd
+ }
+}
+
+macro_rules! impl_as_raw_fd {
+ ($($t:ident)*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl AsRawFd for net::$t {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ *self.as_inner().socket().as_inner()
+ }
+ }
+ )*};
+}
+impl_as_raw_fd! { TcpStream TcpListener UdpSocket }
+
+macro_rules! impl_from_raw_fd {
+ ($($t:ident)*) => {$(
+ #[stable(feature = "from_raw_os", since = "1.1.0")]
+ impl FromRawFd for net::$t {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> net::$t {
+ let socket = sys::net::Socket::from_inner(fd);
+ net::$t::from_inner(sys_common::net::$t::from_inner(socket))
+ }
+ }
+ )*};
+}
+impl_from_raw_fd! { TcpStream TcpListener UdpSocket }
+
+macro_rules! impl_into_raw_fd {
+ ($($t:ident)*) => {$(
+ #[stable(feature = "into_raw_os", since = "1.4.0")]
+ impl IntoRawFd for net::$t {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_socket().into_inner()
+ }
+ }
+ )*};
+}
+impl_into_raw_fd! { TcpStream TcpListener UdpSocket }
diff --git a/library/std/src/os/solid/mod.rs b/library/std/src/os/solid/mod.rs
new file mode 100644
index 000000000..4328ba7c3
--- /dev/null
+++ b/library/std/src/os/solid/mod.rs
@@ -0,0 +1,17 @@
+#![stable(feature = "rust1", since = "1.0.0")]
+
+pub mod ffi;
+pub mod io;
+
+/// A prelude for conveniently writing platform-specific code.
+///
+/// Includes all extension traits, and some important type definitions.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod prelude {
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::ffi::{OsStrExt, OsStringExt};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
+}
diff --git a/library/std/src/os/unix/ffi/mod.rs b/library/std/src/os/unix/ffi/mod.rs
new file mode 100644
index 000000000..5b49f5076
--- /dev/null
+++ b/library/std/src/os/unix/ffi/mod.rs
@@ -0,0 +1,42 @@
+//! Unix-specific extensions to primitives in the [`std::ffi`] module.
+//!
+//! # Examples
+//!
+//! ```
+//! use std::ffi::OsString;
+//! use std::os::unix::ffi::OsStringExt;
+//!
+//! let bytes = b"foo".to_vec();
+//!
+//! // OsStringExt::from_vec
+//! let os_string = OsString::from_vec(bytes);
+//! assert_eq!(os_string.to_str(), Some("foo"));
+//!
+//! // OsStringExt::into_vec
+//! let bytes = os_string.into_vec();
+//! assert_eq!(bytes, b"foo");
+//! ```
+//!
+//! ```
+//! use std::ffi::OsStr;
+//! use std::os::unix::ffi::OsStrExt;
+//!
+//! let bytes = b"foo";
+//!
+//! // OsStrExt::from_bytes
+//! let os_str = OsStr::from_bytes(bytes);
+//! assert_eq!(os_str.to_str(), Some("foo"));
+//!
+//! // OsStrExt::as_bytes
+//! let bytes = os_str.as_bytes();
+//! assert_eq!(bytes, b"foo");
+//! ```
+//!
+//! [`std::ffi`]: crate::ffi
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+mod os_str;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::os_str::{OsStrExt, OsStringExt};
diff --git a/library/std/src/os/unix/ffi/os_str.rs b/library/std/src/os/unix/ffi/os_str.rs
new file mode 100644
index 000000000..650f712bc
--- /dev/null
+++ b/library/std/src/os/unix/ffi/os_str.rs
@@ -0,0 +1,70 @@
+use crate::ffi::{OsStr, OsString};
+use crate::mem;
+use crate::sealed::Sealed;
+use crate::sys::os_str::Buf;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+// Note: this file is currently reused in other `std::os::{platform}::ffi` modules to reduce duplication.
+// Keep this in mind when applying changes to this file that only apply to `unix`.
+
+/// Platform-specific extensions to [`OsString`].
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait OsStringExt: Sealed {
+ /// Creates an [`OsString`] from a byte vector.
+ ///
+ /// See the module documentation for an example.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn from_vec(vec: Vec<u8>) -> Self;
+
+ /// Yields the underlying byte vector of this [`OsString`].
+ ///
+ /// See the module documentation for an example.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn into_vec(self) -> Vec<u8>;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl OsStringExt for OsString {
+ #[inline]
+ fn from_vec(vec: Vec<u8>) -> OsString {
+ FromInner::from_inner(Buf { inner: vec })
+ }
+ #[inline]
+ fn into_vec(self) -> Vec<u8> {
+ self.into_inner().inner
+ }
+}
+
+/// Platform-specific extensions to [`OsStr`].
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait OsStrExt: Sealed {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ /// Creates an [`OsStr`] from a byte slice.
+ ///
+ /// See the module documentation for an example.
+ fn from_bytes(slice: &[u8]) -> &Self;
+
+ /// Gets the underlying byte view of the [`OsStr`] slice.
+ ///
+ /// See the module documentation for an example.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_bytes(&self) -> &[u8];
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl OsStrExt for OsStr {
+ #[inline]
+ fn from_bytes(slice: &[u8]) -> &OsStr {
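+ // SAFETY (assumption): `OsStr` is a thin wrapper around the platform
+ // `Slice` buffer, which itself wraps a `[u8]`, so `&[u8]` and `&OsStr`
+ // are treated as layout-compatible here.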
+ unsafe { mem::transmute(slice) }
+ }
+ #[inline]
+ fn as_bytes(&self) -> &[u8] {
+ &self.as_inner().inner
+ }
+}
diff --git a/library/std/src/os/unix/fs.rs b/library/std/src/os/unix/fs.rs
new file mode 100644
index 000000000..3fc6cc44c
--- /dev/null
+++ b/library/std/src/os/unix/fs.rs
@@ -0,0 +1,1022 @@
+//! Unix-specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use super::platform::fs::MetadataExt as _;
+use crate::fs::{self, OpenOptions, Permissions};
+use crate::io;
+use crate::os::unix::io::{AsFd, AsRawFd};
+use crate::path::Path;
+use crate::sys;
+use crate::sys_common::{AsInner, AsInnerMut, FromInner};
+// Used for `File::read` on intra-doc links
+use crate::ffi::OsStr;
+use crate::sealed::Sealed;
+#[allow(unused_imports)]
+use io::{Read, Write};
+
+/// Unix-specific extensions to [`fs::File`].
+#[stable(feature = "file_offset", since = "1.15.0")]
+pub trait FileExt {
+ /// Reads a number of bytes starting from a given offset.
+ ///
+ /// Returns the number of bytes read.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// Note that similar to [`File::read`], it is not an error to return with a
+ /// short read.
+ ///
+ /// [`File::read`]: fs::File::read
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs::File;
+ /// use std::os::unix::prelude::FileExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut buf = [0u8; 8];
+ /// let file = File::open("foo.txt")?;
+ ///
+ /// // We now read 8 bytes from the offset 10.
+ /// let num_bytes_read = file.read_at(&mut buf, 10)?;
+ /// println!("read {num_bytes_read} bytes: {buf:?}");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_offset", since = "1.15.0")]
+ fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize>;
+
+ /// Reads the exact number of bytes required to fill `buf` from the given offset.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// Similar to [`io::Read::read_exact`] but uses [`read_at`] instead of `read`.
+ ///
+ /// [`read_at`]: FileExt::read_at
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters an error of the kind
+ /// [`io::ErrorKind::Interrupted`] then the error is ignored and the operation
+ /// will continue.
+ ///
+ /// If this function encounters an "end of file" before completely filling
+ /// the buffer, it returns an error of the kind [`io::ErrorKind::UnexpectedEof`].
+ /// The contents of `buf` are unspecified in this case.
+ ///
+ /// If any other read error is encountered then this function immediately
+ /// returns. The contents of `buf` are unspecified in this case.
+ ///
+ /// If this function returns an error, it is unspecified how many bytes it
+ /// has read, but it will never read more than would be necessary to
+ /// completely fill the buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs::File;
+ /// use std::os::unix::prelude::FileExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut buf = [0u8; 8];
+ /// let file = File::open("foo.txt")?;
+ ///
+ /// // We now read exactly 8 bytes from the offset 10.
+ /// file.read_exact_at(&mut buf, 10)?;
+ /// println!("read {} bytes: {:?}", buf.len(), buf);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rw_exact_all_at", since = "1.33.0")]
+ fn read_exact_at(&self, mut buf: &mut [u8], mut offset: u64) -> io::Result<()> {
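+ // Keep issuing `read_at` calls, shrinking the buffer and advancing the
+ // offset, until the buffer is full, EOF is reached, or a real error occurs.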
+ while !buf.is_empty() {
+ match self.read_at(buf, offset) {
+ Ok(0) => break,
+ Ok(n) => {
+ let tmp = buf;
+ buf = &mut tmp[n..];
+ offset += n as u64;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ if !buf.is_empty() {
+ Err(io::const_io_error!(io::ErrorKind::UnexpectedEof, "failed to fill whole buffer",))
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Writes a number of bytes starting from a given offset.
+ ///
+ /// Returns the number of bytes written.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// When writing beyond the end of the file, the file is appropriately
+ /// extended and the intermediate bytes are initialized with the value 0.
+ ///
+ /// Note that similar to [`File::write`], it is not an error to return a
+ /// short write.
+ ///
+ /// [`File::write`]: fs::File::write
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io;
+ /// use std::os::unix::prelude::FileExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let file = File::open("foo.txt")?;
+ ///
+ /// // We now write at the offset 10.
+ /// file.write_at(b"sushi", 10)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_offset", since = "1.15.0")]
+ fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>;
+
+ /// Attempts to write an entire buffer starting from a given offset.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// This method will continuously call [`write_at`] until there is no more data
+ /// to be written or an error of non-[`io::ErrorKind::Interrupted`] kind is
+ /// returned. This method will not return until the entire buffer has been
+ /// successfully written or such an error occurs. The first error that is
+ /// not of [`io::ErrorKind::Interrupted`] kind generated from this method will be
+ /// returned.
+ ///
+ /// # Errors
+ ///
+ /// This function will return the first error of
+ /// non-[`io::ErrorKind::Interrupted`] kind that [`write_at`] returns.
+ ///
+ /// [`write_at`]: FileExt::write_at
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io;
+ /// use std::os::unix::prelude::FileExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let file = File::open("foo.txt")?;
+ ///
+ /// // We now write at the offset 10.
+ /// file.write_all_at(b"sushi", 10)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "rw_exact_all_at", since = "1.33.0")]
+ fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> io::Result<()> {
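+ // Repeatedly call `write_at`, advancing past what was written each time;
+ // a write of zero bytes is reported as `WriteZero`.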
+ while !buf.is_empty() {
+ match self.write_at(buf, offset) {
+ Ok(0) => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ ));
+ }
+ Ok(n) => {
+ buf = &buf[n..];
+ offset += n as u64
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ Ok(())
+ }
+}
+
+#[stable(feature = "file_offset", since = "1.15.0")]
+impl FileExt for fs::File {
+ fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
+ self.as_inner().read_at(buf, offset)
+ }
+ fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ self.as_inner().write_at(buf, offset)
+ }
+}
+
+/// Unix-specific extensions to [`fs::Permissions`].
+#[stable(feature = "fs_ext", since = "1.1.0")]
+pub trait PermissionsExt {
+ /// Returns the underlying raw `st_mode` bits that contain the standard
+ /// Unix permissions for this file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::os::unix::fs::PermissionsExt;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f = File::create("foo.txt")?;
+ /// let metadata = f.metadata()?;
+ /// let permissions = metadata.permissions();
+ ///
+ /// println!("permissions: {:o}", permissions.mode());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "fs_ext", since = "1.1.0")]
+ fn mode(&self) -> u32;
+
+ /// Sets the underlying raw bits for this set of permissions.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::os::unix::fs::PermissionsExt;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let f = File::create("foo.txt")?;
+ /// let metadata = f.metadata()?;
+ /// let mut permissions = metadata.permissions();
+ ///
+ /// permissions.set_mode(0o644); // Read/write for owner and read for others.
+ /// assert_eq!(permissions.mode(), 0o644);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "fs_ext", since = "1.1.0")]
+ fn set_mode(&mut self, mode: u32);
+
+ /// Creates a new instance of `Permissions` from the given set of Unix
+ /// permission bits.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::Permissions;
+ /// use std::os::unix::fs::PermissionsExt;
+ ///
+ /// // Read/write for owner and read for others.
+ /// let permissions = Permissions::from_mode(0o644);
+ /// assert_eq!(permissions.mode(), 0o644);
+ /// ```
+ #[stable(feature = "fs_ext", since = "1.1.0")]
+ fn from_mode(mode: u32) -> Self;
+}
+
+#[stable(feature = "fs_ext", since = "1.1.0")]
+impl PermissionsExt for Permissions {
+ fn mode(&self) -> u32 {
+ self.as_inner().mode()
+ }
+
+ fn set_mode(&mut self, mode: u32) {
+ *self = Permissions::from_inner(FromInner::from_inner(mode));
+ }
+
+ fn from_mode(mode: u32) -> Permissions {
+ Permissions::from_inner(FromInner::from_inner(mode))
+ }
+}
+
+/// Unix-specific extensions to [`fs::OpenOptions`].
+#[stable(feature = "fs_ext", since = "1.1.0")]
+pub trait OpenOptionsExt {
+ /// Sets the mode bits that a new file will be created with.
+ ///
+ /// If a new file is created as part of an `OpenOptions::open` call then this
+ /// specified `mode` will be used as the permission bits for the new file.
+ /// If no `mode` is set, the default of `0o666` will be used.
+ /// The operating system masks out bits with the system's `umask`, to produce
+ /// the final permissions.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ /// use std::os::unix::fs::OpenOptionsExt;
+ ///
+ /// # fn main() {
+ /// let mut options = OpenOptions::new();
+ /// options.mode(0o644); // Give read/write for owner and read for others.
+ /// let file = options.open("foo.txt");
+ /// # }
+ /// ```
+ #[stable(feature = "fs_ext", since = "1.1.0")]
+ fn mode(&mut self, mode: u32) -> &mut Self;
+
+ /// Pass custom flags to the `flags` argument of `open`.
+ ///
+ /// The bits that define the access mode are masked out with `O_ACCMODE`, to
+ /// ensure they do not interfere with the access mode set by Rust's options.
+ ///
+ /// Custom flags can only set flags, not remove flags set by Rust's options.
+ /// This option overwrites any previously set custom flags.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # #![feature(rustc_private)]
+ /// extern crate libc;
+ /// use std::fs::OpenOptions;
+ /// use std::os::unix::fs::OpenOptionsExt;
+ ///
+ /// # fn main() {
+ /// let mut options = OpenOptions::new();
+ /// options.write(true);
+ /// if cfg!(unix) {
+ /// options.custom_flags(libc::O_NOFOLLOW);
+ /// }
+ /// let file = options.open("foo.txt");
+ /// # }
+ /// ```
+ #[stable(feature = "open_options_ext", since = "1.10.0")]
+ fn custom_flags(&mut self, flags: i32) -> &mut Self;
+}
+
+#[stable(feature = "fs_ext", since = "1.1.0")]
+impl OpenOptionsExt for OpenOptions {
+ fn mode(&mut self, mode: u32) -> &mut OpenOptions {
+ self.as_inner_mut().mode(mode);
+ self
+ }
+
+ fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions {
+ self.as_inner_mut().custom_flags(flags);
+ self
+ }
+}
+
+/// Unix-specific extensions to [`fs::Metadata`].
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Returns the ID of the device containing the file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let dev_id = meta.dev();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn dev(&self) -> u64;
+ /// Returns the inode number.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let inode = meta.ino();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn ino(&self) -> u64;
+ /// Returns the `st_mode` bits of this file, which encode its permissions and file type.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let mode = meta.mode();
+ /// let user_has_write_access = mode & 0o200;
+ /// let user_has_read_write_access = mode & 0o600;
+ /// let group_has_read_access = mode & 0o040;
+ /// let others_have_exec_access = mode & 0o001;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn mode(&self) -> u32;
+ /// Returns the number of hard links pointing to this file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let nb_hard_links = meta.nlink();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn nlink(&self) -> u64;
+ /// Returns the user ID of the owner of this file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let user_id = meta.uid();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn uid(&self) -> u32;
+ /// Returns the group ID of the owner of this file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let group_id = meta.gid();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn gid(&self) -> u32;
+ /// Returns the device ID of this file (if it is a special one).
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let device_id = meta.rdev();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn rdev(&self) -> u64;
+ /// Returns the total size of this file in bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let file_size = meta.size();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn size(&self) -> u64;
+ /// Returns the last access time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let last_access_time = meta.atime();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn atime(&self) -> i64;
+ /// Returns the last access time of the file, in nanoseconds since [`atime`].
+ ///
+ /// [`atime`]: MetadataExt::atime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let nano_last_access_time = meta.atime_nsec();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn atime_nsec(&self) -> i64;
+ /// Returns the last modification time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let last_modification_time = meta.mtime();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn mtime(&self) -> i64;
+ /// Returns the last modification time of the file, in nanoseconds since [`mtime`].
+ ///
+ /// [`mtime`]: MetadataExt::mtime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let nano_last_modification_time = meta.mtime_nsec();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn mtime_nsec(&self) -> i64;
+ /// Returns the last status change time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let last_status_change_time = meta.ctime();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn ctime(&self) -> i64;
+ /// Returns the last status change time of the file, in nanoseconds since [`ctime`].
+ ///
+ /// [`ctime`]: MetadataExt::ctime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let nano_last_status_change_time = meta.ctime_nsec();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn ctime_nsec(&self) -> i64;
+ /// Returns the block size for filesystem I/O.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let block_size = meta.blksize();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn blksize(&self) -> u64;
+ /// Returns the number of blocks allocated to the file, in 512-byte units.
+ ///
+ /// Please note that this may be smaller than `st_size / 512` when the file has holes.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::MetadataExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let blocks = meta.blocks();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn blocks(&self) -> u64;
+ #[cfg(target_os = "vxworks")]
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn attrib(&self) -> u8;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for fs::Metadata {
+ fn dev(&self) -> u64 {
+ self.st_dev()
+ }
+ fn ino(&self) -> u64 {
+ self.st_ino()
+ }
+ fn mode(&self) -> u32 {
+ self.st_mode()
+ }
+ fn nlink(&self) -> u64 {
+ self.st_nlink()
+ }
+ fn uid(&self) -> u32 {
+ self.st_uid()
+ }
+ fn gid(&self) -> u32 {
+ self.st_gid()
+ }
+ fn rdev(&self) -> u64 {
+ self.st_rdev()
+ }
+ fn size(&self) -> u64 {
+ self.st_size()
+ }
+ fn atime(&self) -> i64 {
+ self.st_atime()
+ }
+ fn atime_nsec(&self) -> i64 {
+ self.st_atime_nsec()
+ }
+ fn mtime(&self) -> i64 {
+ self.st_mtime()
+ }
+ fn mtime_nsec(&self) -> i64 {
+ self.st_mtime_nsec()
+ }
+ fn ctime(&self) -> i64 {
+ self.st_ctime()
+ }
+ fn ctime_nsec(&self) -> i64 {
+ self.st_ctime_nsec()
+ }
+ fn blksize(&self) -> u64 {
+ self.st_blksize()
+ }
+ fn blocks(&self) -> u64 {
+ self.st_blocks()
+ }
+ #[cfg(target_os = "vxworks")]
+ fn attrib(&self) -> u8 {
+ self.st_attrib()
+ }
+}
+
+/// Unix-specific extensions for [`fs::FileType`].
+///
+/// Adds support for special Unix file types such as block/character devices,
+/// pipes, and sockets.
+#[stable(feature = "file_type_ext", since = "1.5.0")]
+pub trait FileTypeExt {
+ /// Returns `true` if this file type is a block device.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::FileTypeExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("block_device_file")?;
+ /// let file_type = meta.file_type();
+ /// assert!(file_type.is_block_device());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_type_ext", since = "1.5.0")]
+ fn is_block_device(&self) -> bool;
+ /// Returns `true` if this file type is a char device.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::FileTypeExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("char_device_file")?;
+ /// let file_type = meta.file_type();
+ /// assert!(file_type.is_char_device());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_type_ext", since = "1.5.0")]
+ fn is_char_device(&self) -> bool;
+ /// Returns `true` if this file type is a fifo.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::FileTypeExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("fifo_file")?;
+ /// let file_type = meta.file_type();
+ /// assert!(file_type.is_fifo());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_type_ext", since = "1.5.0")]
+ fn is_fifo(&self) -> bool;
+ /// Returns `true` if this file type is a socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::os::unix::fs::FileTypeExt;
+ /// use std::io;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("unix.socket")?;
+ /// let file_type = meta.file_type();
+ /// assert!(file_type.is_socket());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_type_ext", since = "1.5.0")]
+ fn is_socket(&self) -> bool;
+}
+
+#[stable(feature = "file_type_ext", since = "1.5.0")]
+impl FileTypeExt for fs::FileType {
+ fn is_block_device(&self) -> bool {
+ self.as_inner().is(libc::S_IFBLK)
+ }
+ fn is_char_device(&self) -> bool {
+ self.as_inner().is(libc::S_IFCHR)
+ }
+ fn is_fifo(&self) -> bool {
+ self.as_inner().is(libc::S_IFIFO)
+ }
+ fn is_socket(&self) -> bool {
+ self.as_inner().is(libc::S_IFSOCK)
+ }
+}
+
+/// Unix-specific extension methods for [`fs::DirEntry`].
+#[stable(feature = "dir_entry_ext", since = "1.1.0")]
+pub trait DirEntryExt {
+ /// Returns the underlying `d_ino` field in the contained `dirent`
+ /// structure.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs;
+ /// use std::os::unix::fs::DirEntryExt;
+ ///
+ /// if let Ok(entries) = fs::read_dir(".") {
+ /// for entry in entries {
+ /// if let Ok(entry) = entry {
+ /// // Here, `entry` is a `DirEntry`.
+ /// println!("{:?}: {}", entry.file_name(), entry.ino());
+ /// }
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "dir_entry_ext", since = "1.1.0")]
+ fn ino(&self) -> u64;
+}
+
+#[stable(feature = "dir_entry_ext", since = "1.1.0")]
+impl DirEntryExt for fs::DirEntry {
+ fn ino(&self) -> u64 {
+ self.as_inner().ino()
+ }
+}
+
+/// Sealed Unix-specific extension methods for [`fs::DirEntry`].
+#[unstable(feature = "dir_entry_ext2", issue = "85573")]
+pub trait DirEntryExt2: Sealed {
+ /// Returns a reference to the underlying `OsStr` of this entry's filename.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(dir_entry_ext2)]
+ /// use std::os::unix::fs::DirEntryExt2;
+ /// use std::{fs, io};
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut entries = fs::read_dir(".")?.collect::<Result<Vec<_>, io::Error>>()?;
+ /// entries.sort_unstable_by(|a, b| a.file_name_ref().cmp(b.file_name_ref()));
+ ///
+ /// for p in entries {
+ /// println!("{p:?}");
+ /// }
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ fn file_name_ref(&self) -> &OsStr;
+}
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl Sealed for fs::DirEntry {}
+
+#[unstable(feature = "dir_entry_ext2", issue = "85573")]
+impl DirEntryExt2 for fs::DirEntry {
+ fn file_name_ref(&self) -> &OsStr {
+ self.as_inner().file_name_os_str()
+ }
+}
+
+/// Creates a new symbolic link on the filesystem.
+///
+/// The `link` path will be a symbolic link pointing to the `original` path.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::os::unix::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::symlink("a.txt", "b.txt")?;
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "symlink", since = "1.1.0")]
+pub fn symlink<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> {
+ sys::fs::symlink(original.as_ref(), link.as_ref())
+}
+
+/// Unix-specific extensions to [`fs::DirBuilder`].
+#[stable(feature = "dir_builder", since = "1.6.0")]
+pub trait DirBuilderExt {
+ /// Sets the mode to create new directories with. This option defaults to
+ /// 0o777.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::DirBuilder;
+ /// use std::os::unix::fs::DirBuilderExt;
+ ///
+ /// let mut builder = DirBuilder::new();
+ /// builder.mode(0o755);
+ /// ```
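+ ///
+ /// A slightly fuller sketch that also creates the directory (the path here is
+ /// a placeholder):
+ ///
+ /// ```no_run
+ /// use std::fs::DirBuilder;
+ /// use std::os::unix::fs::DirBuilderExt;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ ///     DirBuilder::new().mode(0o750).create("private_dir")?;
+ ///     Ok(())
+ /// }
+ /// ```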
+ #[stable(feature = "dir_builder", since = "1.6.0")]
+ fn mode(&mut self, mode: u32) -> &mut Self;
+}
+
+#[stable(feature = "dir_builder", since = "1.6.0")]
+impl DirBuilderExt for fs::DirBuilder {
+ fn mode(&mut self, mode: u32) -> &mut fs::DirBuilder {
+ self.as_inner_mut().set_mode(mode);
+ self
+ }
+}
+
+/// Change the owner and group of the specified path.
+///
+/// Specifying either the uid or gid as `None` will leave it unchanged.
+///
+/// Changing the owner typically requires privileges, such as root or a specific capability.
+/// Changing the group typically requires either being the owner and a member of the group, or
+/// having privileges.
+///
+/// If called on a symbolic link, this will change the owner and group of the link target. To
+/// change the owner and group of the link itself, see [`lchown`].
+///
+/// # Examples
+///
+/// ```no_run
+/// #![feature(unix_chown)]
+/// use std::os::unix::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::chown("/sandbox", Some(0), Some(0))?;
+/// Ok(())
+/// }
+/// ```
+#[unstable(feature = "unix_chown", issue = "88989")]
+pub fn chown<P: AsRef<Path>>(dir: P, uid: Option<u32>, gid: Option<u32>) -> io::Result<()> {
+ sys::fs::chown(dir.as_ref(), uid.unwrap_or(u32::MAX), gid.unwrap_or(u32::MAX))
+}
+
+/// Change the owner and group of the file referenced by the specified open file descriptor.
+///
+/// For semantics and required privileges, see [`chown`].
+///
+/// # Examples
+///
+/// ```no_run
+/// #![feature(unix_chown)]
+/// use std::os::unix::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// let f = std::fs::File::open("/file")?;
+/// fs::fchown(&f, Some(0), Some(0))?;
+/// Ok(())
+/// }
+/// ```
+#[unstable(feature = "unix_chown", issue = "88989")]
+pub fn fchown<F: AsFd>(fd: F, uid: Option<u32>, gid: Option<u32>) -> io::Result<()> {
+ sys::fs::fchown(fd.as_fd().as_raw_fd(), uid.unwrap_or(u32::MAX), gid.unwrap_or(u32::MAX))
+}
+
+/// Change the owner and group of the specified path, without dereferencing symbolic links.
+///
+/// Identical to [`chown`], except that if called on a symbolic link, this will change the owner
+/// and group of the link itself rather than the owner and group of the link target.
+///
+/// # Examples
+///
+/// ```no_run
+/// #![feature(unix_chown)]
+/// use std::os::unix::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::lchown("/symlink", Some(0), Some(0))?;
+/// Ok(())
+/// }
+/// ```
+#[unstable(feature = "unix_chown", issue = "88989")]
+pub fn lchown<P: AsRef<Path>>(dir: P, uid: Option<u32>, gid: Option<u32>) -> io::Result<()> {
+ sys::fs::lchown(dir.as_ref(), uid.unwrap_or(u32::MAX), gid.unwrap_or(u32::MAX))
+}
+
+/// Change the root directory of the current process to the specified path.
+///
+/// This typically requires privileges, such as root or a specific capability.
+///
+/// This does not change the current working directory; you should call
+/// [`std::env::set_current_dir`][`crate::env::set_current_dir`] afterwards.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::os::unix::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::chroot("/sandbox")?;
+/// std::env::set_current_dir("/")?;
+/// // continue working in sandbox
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "unix_chroot", since = "1.56.0")]
+#[cfg(not(any(target_os = "fuchsia", target_os = "vxworks")))]
+pub fn chroot<P: AsRef<Path>>(dir: P) -> io::Result<()> {
+ sys::fs::chroot(dir.as_ref())
+}
diff --git a/library/std/src/os/unix/io/fd.rs b/library/std/src/os/unix/io/fd.rs
new file mode 100644
index 000000000..d4cb69645
--- /dev/null
+++ b/library/std/src/os/unix/io/fd.rs
@@ -0,0 +1,8 @@
+//! Owned and borrowed file descriptors.
+
+// Tests for this module
+#[cfg(test)]
+mod tests;
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub use crate::os::fd::owned::*;
diff --git a/library/std/src/os/unix/io/fd/tests.rs b/library/std/src/os/unix/io/fd/tests.rs
new file mode 100644
index 000000000..84d2a7a1a
--- /dev/null
+++ b/library/std/src/os/unix/io/fd/tests.rs
@@ -0,0 +1,11 @@
+use crate::mem::size_of;
+use crate::os::unix::io::RawFd;
+
+#[test]
+fn test_raw_fd_layout() {
+ // `OwnedFd` and `BorrowedFd` use `rustc_layout_scalar_valid_range_start`
+ // and `rustc_layout_scalar_valid_range_end`, with values that depend on
+ // the bit width of `RawFd`. If this ever changes, those values will need
+ // to be updated.
+ assert_eq!(size_of::<RawFd>(), 4);
+}
diff --git a/library/std/src/os/unix/io/mod.rs b/library/std/src/os/unix/io/mod.rs
new file mode 100644
index 000000000..3ab5606f8
--- /dev/null
+++ b/library/std/src/os/unix/io/mod.rs
@@ -0,0 +1,86 @@
+//! Unix-specific extensions to general I/O primitives.
+//!
+//! Just like raw pointers, raw file descriptors point to resources with
+//! dynamic lifetimes, and they can dangle if they outlive their resources
+//! or be forged if they're created from invalid values.
+//!
+//! This module provides three types for representing file descriptors,
+//! with different ownership properties: raw, borrowed, and owned, which are
+//! analogous to types used for representing pointers:
+//!
+//! | Type | Analogous to |
+//! | ------------------ | ------------ |
+//! | [`RawFd`] | `*const _` |
+//! | [`BorrowedFd<'a>`] | `&'a _` |
+//! | [`OwnedFd`] | `Box<_>` |
+//!
+//! Like raw pointers, `RawFd` values are primitive values. And in new code,
+//! they should be considered unsafe to do I/O on (analogous to dereferencing
+//! them). Rust did not always provide this guidance, so existing code in the
+//! Rust ecosystem often doesn't mark `RawFd` usage as unsafe. Once the
+//! `io_safety` feature is stable, libraries will be encouraged to migrate,
+//! either by adding `unsafe` to APIs that dereference `RawFd` values, or by
+//! migrating to `BorrowedFd` or `OwnedFd` instead.
+//!
+//! Like references, `BorrowedFd` values are tied to a lifetime, to ensure
+//! that they don't outlive the resource they point to. These are safe to
+//! use. `BorrowedFd` values may be used in APIs which provide safe access to
+//! any system call except for:
+//!
+//! - `close`, because that would end the dynamic lifetime of the resource
+//! without ending the lifetime of the file descriptor.
+//!
+//! - `dup2`/`dup3`, in the second argument, because this argument is
+//! closed and assigned a new resource, which may break the assumptions of
+//! other code using that file descriptor.
+//!
+//! `BorrowedFd` values may be used in APIs which provide safe access to `dup`
+//! system calls, so types implementing `AsFd` or `From<OwnedFd>` should not
+//! assume they always have exclusive access to the underlying file
+//! description.
+//!
+//! `BorrowedFd` values may also be used with `mmap`, since `mmap` uses the
+//! provided file descriptor in a manner similar to `dup` and does not require
+//! the `BorrowedFd` passed to it to live for the lifetime of the resulting
+//! mapping. That said, `mmap` is unsafe for other reasons: it operates on raw
+//! pointers, and it can have undefined behavior if the underlying storage is
+//! mutated. Mutations may come from other processes, or from the same process
+//! if the API provides `BorrowedFd` access, since as mentioned earlier,
+//! `BorrowedFd` values may be used in APIs which provide safe access to any
+//! system call. Consequently, code using `mmap` and presenting a safe API must
+//! take full responsibility for ensuring that safe Rust code cannot cause
+//! undefined behavior through it.
+//!
+//! Like boxes, `OwnedFd` values conceptually own the resource they point to,
+//! and free (close) it when they are dropped.
+//!
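+//! As a rough sketch of how these types compose (the file name below is just a
+//! placeholder), a function can accept a `BorrowedFd` from any type that
+//! implements `AsFd`, while an `OwnedFd` takes over closing the descriptor:
+//!
+//! ```no_run
+//! use std::fs::File;
+//! use std::io;
+//! use std::os::unix::io::{AsFd, BorrowedFd, OwnedFd};
+//!
+//! fn inspect(fd: BorrowedFd<'_>) {
+//!     // Safe I/O may be performed through `fd` while the borrow lasts,
+//!     // but the descriptor cannot be closed through it.
+//!     let _ = fd;
+//! }
+//!
+//! fn main() -> io::Result<()> {
+//!     let file = File::open("some_file")?;
+//!     inspect(file.as_fd());            // borrow the descriptor
+//!     let owned: OwnedFd = file.into(); // take ownership of it
+//!     drop(owned);                      // the descriptor is closed here
+//!     Ok(())
+//! }
+//! ```
+//!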
+//! ## `/proc/self/mem` and similar OS features
+//!
+//! Some platforms have special files, such as `/proc/self/mem`, which
+//! provide read and write access to the process's memory. Such reads
+//! and writes happen outside the control of the Rust compiler, so they do not
+//! uphold Rust's memory safety guarantees.
+//!
+//! This does not mean that all APIs that might allow `/proc/self/mem`
+//! to be opened and read from or written to must be `unsafe`. Rust's safety guarantees
+//! only cover what the program itself can do, and not what entities outside
+//! the program can do to it. `/proc/self/mem` is considered to be such an
+//! external entity, along with debugging interfaces, and people with physical access to
+//! the hardware. This is true even in cases where the program is controlling
+//! the external entity.
+//!
+//! If you desire to comprehensively prevent programs from reaching out and
+//! causing external entities to reach back in and violate memory safety, it's
+//! necessary to use *sandboxing*, which is outside the scope of `std`.
+//!
+//! [`BorrowedFd<'a>`]: crate::os::unix::io::BorrowedFd
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+mod fd;
+mod raw;
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub use fd::*;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use raw::*;
diff --git a/library/std/src/os/unix/io/raw.rs b/library/std/src/os/unix/io/raw.rs
new file mode 100644
index 000000000..a4d2ba797
--- /dev/null
+++ b/library/std/src/os/unix/io/raw.rs
@@ -0,0 +1,6 @@
+//! Unix-specific extensions to general I/O primitives.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use crate::os::fd::raw::*;
diff --git a/library/std/src/os/unix/mod.rs b/library/std/src/os/unix/mod.rs
new file mode 100644
index 000000000..411cc0925
--- /dev/null
+++ b/library/std/src/os/unix/mod.rs
@@ -0,0 +1,126 @@
+//! Platform-specific extensions to `std` for Unix platforms.
+//!
+//! Provides access to platform-level information on Unix platforms, and
+//! exposes Unix-specific functions that would otherwise be inappropriate as
+//! part of the core `std` library.
+//!
+//! It exposes more ways to deal with platform-specific strings ([`OsStr`],
+//! [`OsString`]), allows setting permissions more granularly, lets you extract
+//! low-level file descriptors from files and sockets, and provides
+//! platform-specific helpers for spawning processes.
+//!
+//! # Examples
+//!
+//! ```no_run
+//! use std::fs::File;
+//! use std::os::unix::prelude::*;
+//!
+//! fn main() -> std::io::Result<()> {
+//! let f = File::create("foo.txt")?;
+//! let fd = f.as_raw_fd();
+//!
+//! // use fd with native unix bindings
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! [`OsStr`]: crate::ffi::OsStr
+//! [`OsString`]: crate::ffi::OsString
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![doc(cfg(unix))]
+
+// Use linux as the default platform when documenting on other platforms like Windows
+#[cfg(doc)]
+use crate::os::linux as platform;
+
+#[cfg(not(doc))]
+mod platform {
+ #[cfg(target_os = "android")]
+ pub use crate::os::android::*;
+ #[cfg(target_os = "dragonfly")]
+ pub use crate::os::dragonfly::*;
+ #[cfg(target_os = "emscripten")]
+ pub use crate::os::emscripten::*;
+ #[cfg(target_os = "espidf")]
+ pub use crate::os::espidf::*;
+ #[cfg(target_os = "freebsd")]
+ pub use crate::os::freebsd::*;
+ #[cfg(target_os = "fuchsia")]
+ pub use crate::os::fuchsia::*;
+ #[cfg(target_os = "haiku")]
+ pub use crate::os::haiku::*;
+ #[cfg(target_os = "horizon")]
+ pub use crate::os::horizon::*;
+ #[cfg(target_os = "illumos")]
+ pub use crate::os::illumos::*;
+ #[cfg(target_os = "ios")]
+ pub use crate::os::ios::*;
+ #[cfg(target_os = "l4re")]
+ pub use crate::os::l4re::*;
+ #[cfg(target_os = "linux")]
+ pub use crate::os::linux::*;
+ #[cfg(target_os = "macos")]
+ pub use crate::os::macos::*;
+ #[cfg(target_os = "netbsd")]
+ pub use crate::os::netbsd::*;
+ #[cfg(target_os = "openbsd")]
+ pub use crate::os::openbsd::*;
+ #[cfg(target_os = "redox")]
+ pub use crate::os::redox::*;
+ #[cfg(target_os = "solaris")]
+ pub use crate::os::solaris::*;
+ #[cfg(target_os = "vxworks")]
+ pub use crate::os::vxworks::*;
+}
+
+pub mod ffi;
+pub mod fs;
+pub mod io;
+pub mod net;
+pub mod process;
+pub mod raw;
+pub mod thread;
+
+#[unstable(feature = "peer_credentials_unix_socket", issue = "42839", reason = "unstable")]
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+pub mod ucred;
+
+/// A prelude for conveniently writing platform-specific code.
+///
+/// Includes all extension traits, and some important type definitions.
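+///
+/// A short sketch of a typical import (the file name is a placeholder; `mode`
+/// comes from `MetadataExt`):
+///
+/// ```no_run
+/// use std::os::unix::prelude::*;
+///
+/// fn main() -> std::io::Result<()> {
+///     let meta = std::fs::metadata("some_file")?;
+///     println!("mode: {:o}", meta.mode());
+///     Ok(())
+/// }
+/// ```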
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod prelude {
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::ffi::{OsStrExt, OsStringExt};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::fs::DirEntryExt;
+ #[doc(no_inline)]
+ #[stable(feature = "file_offset", since = "1.15.0")]
+ pub use super::fs::FileExt;
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::fs::{FileTypeExt, MetadataExt, OpenOptionsExt, PermissionsExt};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::process::{CommandExt, ExitStatusExt};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::thread::JoinHandleExt;
+}
diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
new file mode 100644
index 000000000..9aeae4b2c
--- /dev/null
+++ b/library/std/src/os/unix/net/addr.rs
@@ -0,0 +1,350 @@
+use crate::ffi::OsStr;
+use crate::os::unix::ffi::OsStrExt;
+use crate::path::Path;
+use crate::sys::cvt;
+use crate::{ascii, fmt, io, mem, ptr};
+
+// FIXME(#43348): Make libc adapt #[doc(cfg(...))] so we don't need these fake definitions here?
+#[cfg(not(unix))]
+#[allow(non_camel_case_types)]
+mod libc {
+ pub use libc::c_int;
+ pub type socklen_t = u32;
+ pub struct sockaddr;
+ #[derive(Clone)]
+ pub struct sockaddr_un;
+}
+
+fn sun_path_offset(addr: &libc::sockaddr_un) -> usize {
+ // Work with an actual instance of the type since using a null pointer is UB
+ let base = (addr as *const libc::sockaddr_un).addr();
+ let path = (&addr.sun_path as *const libc::c_char).addr();
+ path - base
+}
+
+pub(super) fn sockaddr_un(path: &Path) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> {
+ // SAFETY: All zeros is a valid representation for `sockaddr_un`.
+ let mut addr: libc::sockaddr_un = unsafe { mem::zeroed() };
+ addr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+ let bytes = path.as_os_str().as_bytes();
+
+ if bytes.contains(&0) {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "paths must not contain interior null bytes",
+ ));
+ }
+
+ if bytes.len() >= addr.sun_path.len() {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "path must be shorter than SUN_LEN",
+ ));
+ }
+ // SAFETY: `bytes` and `addr.sun_path` are not overlapping and
+ // both point to valid memory.
+ // NOTE: We zeroed the memory above, so the path is already null
+ // terminated.
+ unsafe {
+ ptr::copy_nonoverlapping(bytes.as_ptr(), addr.sun_path.as_mut_ptr().cast(), bytes.len())
+ };
+
+ let mut len = sun_path_offset(&addr) + bytes.len();
+ match bytes.get(0) {
+ Some(&0) | None => {}
+ Some(_) => len += 1,
+ }
+ Ok((addr, len as libc::socklen_t))
+}
+
+enum AddressKind<'a> {
+ Unnamed,
+ Pathname(&'a Path),
+ Abstract(&'a [u8]),
+}
+
+struct AsciiEscaped<'a>(&'a [u8]);
+
+impl<'a> fmt::Display for AsciiEscaped<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "\"")?;
+ for byte in self.0.iter().cloned().flat_map(ascii::escape_default) {
+ write!(fmt, "{}", byte as char)?;
+ }
+ write!(fmt, "\"")
+ }
+}
+
+/// An address associated with a Unix socket.
+///
+/// # Examples
+///
+/// ```
+/// use std::os::unix::net::UnixListener;
+///
+/// let socket = match UnixListener::bind("/tmp/sock") {
+/// Ok(sock) => sock,
+/// Err(e) => {
+/// println!("Couldn't bind: {e:?}");
+/// return
+/// }
+/// };
+/// let addr = socket.local_addr().expect("Couldn't get local address");
+/// ```
+#[derive(Clone)]
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub struct SocketAddr {
+ pub(super) addr: libc::sockaddr_un,
+ pub(super) len: libc::socklen_t,
+}
+
+impl SocketAddr {
+ pub(super) fn new<F>(f: F) -> io::Result<SocketAddr>
+ where
+ F: FnOnce(*mut libc::sockaddr, *mut libc::socklen_t) -> libc::c_int,
+ {
+ unsafe {
+ let mut addr: libc::sockaddr_un = mem::zeroed();
+ let mut len = mem::size_of::<libc::sockaddr_un>() as libc::socklen_t;
+ cvt(f(&mut addr as *mut _ as *mut _, &mut len))?;
+ SocketAddr::from_parts(addr, len)
+ }
+ }
+
+ pub(super) fn from_parts(
+ addr: libc::sockaddr_un,
+ mut len: libc::socklen_t,
+ ) -> io::Result<SocketAddr> {
+ if len == 0 {
+ // When there is a datagram from an unnamed Unix socket,
+ // Linux returns zero bytes of address.
+ len = sun_path_offset(&addr) as libc::socklen_t; // i.e., zero-length address
+ } else if addr.sun_family != libc::AF_UNIX as libc::sa_family_t {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "file descriptor did not correspond to a Unix socket",
+ ));
+ }
+
+ Ok(SocketAddr { addr, len })
+ }
+
+ /// Constructs a `SocketAddr` with the family `AF_UNIX` and the provided path.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if the path is longer than `SUN_LEN` or if it contains
+ /// NULL bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::os::unix::net::SocketAddr;
+ /// use std::path::Path;
+ ///
+ /// # fn main() -> std::io::Result<()> {
+ /// let address = SocketAddr::from_pathname("/path/to/socket")?;
+ /// assert_eq!(address.as_pathname(), Some(Path::new("/path/to/socket")));
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// Creating a `SocketAddr` with a NULL byte results in an error.
+ ///
+ /// ```
+ /// use std::os::unix::net::SocketAddr;
+ ///
+ /// assert!(SocketAddr::from_pathname("/path/with/\0/bytes").is_err());
+ /// ```
+ #[stable(feature = "unix_socket_creation", since = "1.61.0")]
+ pub fn from_pathname<P>(path: P) -> io::Result<SocketAddr>
+ where
+ P: AsRef<Path>,
+ {
+ sockaddr_un(path.as_ref()).map(|(addr, len)| SocketAddr { addr, len })
+ }
+
+ /// Returns `true` if the address is unnamed.
+ ///
+ /// # Examples
+ ///
+ /// A named address:
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixListener::bind("/tmp/sock")?;
+ /// let addr = socket.local_addr().expect("Couldn't get local address");
+ /// assert_eq!(addr.is_unnamed(), false);
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// An unnamed address:
+ ///
+ /// ```
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixDatagram::unbound()?;
+ /// let addr = socket.local_addr().expect("Couldn't get local address");
+ /// assert_eq!(addr.is_unnamed(), true);
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn is_unnamed(&self) -> bool {
+ matches!(self.address(), AddressKind::Unnamed)
+ }
+
+ /// Returns the contents of this address if it is a `pathname` address.
+ ///
+ /// # Examples
+ ///
+ /// With a pathname:
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ /// use std::path::Path;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixListener::bind("/tmp/sock")?;
+ /// let addr = socket.local_addr().expect("Couldn't get local address");
+ /// assert_eq!(addr.as_pathname(), Some(Path::new("/tmp/sock")));
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// Without a pathname:
+ ///
+ /// ```
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixDatagram::unbound()?;
+ /// let addr = socket.local_addr().expect("Couldn't get local address");
+ /// assert_eq!(addr.as_pathname(), None);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ #[must_use]
+ pub fn as_pathname(&self) -> Option<&Path> {
+ if let AddressKind::Pathname(path) = self.address() { Some(path) } else { None }
+ }
+
+ /// Returns the contents of this address if it is in the abstract namespace,
+ /// without the leading null byte.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixListener, SocketAddr};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let namespace = b"hidden";
+ /// let namespace_addr = SocketAddr::from_abstract_namespace(&namespace[..])?;
+ /// let socket = UnixListener::bind_addr(&namespace_addr)?;
+ /// let local_addr = socket.local_addr().expect("Couldn't get local address");
+ /// assert_eq!(local_addr.as_abstract_namespace(), Some(&namespace[..]));
+ /// Ok(())
+ /// }
+ /// ```
+ #[doc(cfg(any(target_os = "android", target_os = "linux")))]
+ #[cfg(any(doc, target_os = "android", target_os = "linux",))]
+ #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ pub fn as_abstract_namespace(&self) -> Option<&[u8]> {
+ if let AddressKind::Abstract(name) = self.address() { Some(name) } else { None }
+ }
+
+ fn address(&self) -> AddressKind<'_> {
+ let len = self.len as usize - sun_path_offset(&self.addr);
+ let path = unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.addr.sun_path) };
+
+ // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses
+ if len == 0
+ || (cfg!(not(any(target_os = "linux", target_os = "android")))
+ && self.addr.sun_path[0] == 0)
+ {
+ AddressKind::Unnamed
+ } else if self.addr.sun_path[0] == 0 {
+ AddressKind::Abstract(&path[1..len])
+ } else {
+ AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref())
+ }
+ }
+
+ /// Creates an abstract domain socket address from a namespace.
+ ///
+ /// Unlike traditional path-based Unix sockets, an abstract address does not
+ /// create a file. The advantage of this is that the address disappears when
+ /// the socket bound to it is closed, so no filesystem cleanup is required.
+ ///
+ /// The leading null byte for the abstract namespace is automatically added.
+ ///
+ /// This is a Linux-specific extension. See more at [`unix(7)`].
+ ///
+ /// [`unix(7)`]: https://man7.org/linux/man-pages/man7/unix.7.html
+ ///
+ /// # Errors
+ ///
+ /// This will return an error if the given namespace is too long.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixListener, SocketAddr};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let addr = SocketAddr::from_abstract_namespace(b"hidden")?;
+ /// let listener = match UnixListener::bind_addr(&addr) {
+ /// Ok(sock) => sock,
+ /// Err(err) => {
+ /// println!("Couldn't bind: {err:?}");
+ /// return Err(err);
+ /// }
+ /// };
+ /// Ok(())
+ /// }
+ /// ```
+ #[doc(cfg(any(target_os = "android", target_os = "linux")))]
+ #[cfg(any(doc, target_os = "android", target_os = "linux",))]
+ #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ pub fn from_abstract_namespace(namespace: &[u8]) -> io::Result<SocketAddr> {
+ unsafe {
+ let mut addr: libc::sockaddr_un = mem::zeroed();
+ addr.sun_family = libc::AF_UNIX as libc::sa_family_t;
+
+ if namespace.len() + 1 > addr.sun_path.len() {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "namespace must be shorter than SUN_LEN",
+ ));
+ }
+
+ crate::ptr::copy_nonoverlapping(
+ namespace.as_ptr(),
+ addr.sun_path.as_mut_ptr().offset(1) as *mut u8,
+ namespace.len(),
+ );
+ let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t;
+ SocketAddr::from_parts(addr, len)
+ }
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl fmt::Debug for SocketAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.address() {
+ AddressKind::Unnamed => write!(fmt, "(unnamed)"),
+ AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)),
+ AddressKind::Pathname(path) => write!(fmt, "{path:?} (pathname)"),
+ }
+ }
+}
diff --git a/library/std/src/os/unix/net/ancillary.rs b/library/std/src/os/unix/net/ancillary.rs
new file mode 100644
index 000000000..7cc901a79
--- /dev/null
+++ b/library/std/src/os/unix/net/ancillary.rs
@@ -0,0 +1,674 @@
+// FIXME: This is currently disabled on *BSD.
+
+use super::{sockaddr_un, SocketAddr};
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::marker::PhantomData;
+use crate::mem::{size_of, zeroed};
+use crate::os::unix::io::RawFd;
+use crate::path::Path;
+use crate::ptr::{eq, read_unaligned};
+use crate::slice::from_raw_parts;
+use crate::sys::net::Socket;
+
+// FIXME(#43348): Make libc adapt #[doc(cfg(...))] so we don't need these fake definitions here?
+#[cfg(all(doc, not(target_os = "linux"), not(target_os = "android"), not(target_os = "netbsd")))]
+#[allow(non_camel_case_types)]
+mod libc {
+ pub use libc::c_int;
+ pub struct ucred;
+ pub struct cmsghdr;
+ pub type pid_t = i32;
+ pub type gid_t = u32;
+ pub type uid_t = u32;
+}
+
+pub(super) fn recv_vectored_with_ancillary_from(
+ socket: &Socket,
+ bufs: &mut [IoSliceMut<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+) -> io::Result<(usize, bool, io::Result<SocketAddr>)> {
+ unsafe {
+ let mut msg_name: libc::sockaddr_un = zeroed();
+ let mut msg: libc::msghdr = zeroed();
+ msg.msg_name = &mut msg_name as *mut _ as *mut _;
+ msg.msg_namelen = size_of::<libc::sockaddr_un>() as libc::socklen_t;
+ msg.msg_iov = bufs.as_mut_ptr().cast();
+ msg.msg_iovlen = bufs.len() as _;
+ msg.msg_controllen = ancillary.buffer.len() as _;
+ // macOS requires that the control pointer be null when the length is 0.
+ if msg.msg_controllen > 0 {
+ msg.msg_control = ancillary.buffer.as_mut_ptr().cast();
+ }
+
+ let count = socket.recv_msg(&mut msg)?;
+
+ ancillary.length = msg.msg_controllen as usize;
+ ancillary.truncated = msg.msg_flags & libc::MSG_CTRUNC == libc::MSG_CTRUNC;
+
+ let truncated = msg.msg_flags & libc::MSG_TRUNC == libc::MSG_TRUNC;
+ let addr = SocketAddr::from_parts(msg_name, msg.msg_namelen);
+
+ Ok((count, truncated, addr))
+ }
+}
+
+pub(super) fn send_vectored_with_ancillary_to(
+ socket: &Socket,
+ path: Option<&Path>,
+ bufs: &[IoSlice<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+) -> io::Result<usize> {
+ unsafe {
+ let (mut msg_name, msg_namelen) =
+ if let Some(path) = path { sockaddr_un(path)? } else { (zeroed(), 0) };
+
+ let mut msg: libc::msghdr = zeroed();
+ msg.msg_name = &mut msg_name as *mut _ as *mut _;
+ msg.msg_namelen = msg_namelen;
+ msg.msg_iov = bufs.as_ptr() as *mut _;
+ msg.msg_iovlen = bufs.len() as _;
+ msg.msg_controllen = ancillary.length as _;
+ // macOS requires that the control pointer be null when the length is 0.
+ if msg.msg_controllen > 0 {
+ msg.msg_control = ancillary.buffer.as_mut_ptr().cast();
+ }
+
+ ancillary.truncated = false;
+
+ socket.send_msg(&mut msg)
+ }
+}
+
+fn add_to_ancillary_data<T>(
+ buffer: &mut [u8],
+ length: &mut usize,
+ source: &[T],
+ cmsg_level: libc::c_int,
+ cmsg_type: libc::c_int,
+) -> bool {
+ let source_len = if let Some(source_len) = source.len().checked_mul(size_of::<T>()) {
+ if let Ok(source_len) = u32::try_from(source_len) {
+ source_len
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ };
+
+ unsafe {
+ let additional_space = libc::CMSG_SPACE(source_len) as usize;
+
+ let new_length = if let Some(new_length) = additional_space.checked_add(*length) {
+ new_length
+ } else {
+ return false;
+ };
+
+ if new_length > buffer.len() {
+ return false;
+ }
+
+ buffer[*length..new_length].fill(0);
+
+ *length = new_length;
+
+ let mut msg: libc::msghdr = zeroed();
+ msg.msg_control = buffer.as_mut_ptr().cast();
+ msg.msg_controllen = *length as _;
+
+ let mut cmsg = libc::CMSG_FIRSTHDR(&msg);
+ let mut previous_cmsg = cmsg;
+ while !cmsg.is_null() {
+ previous_cmsg = cmsg;
+ cmsg = libc::CMSG_NXTHDR(&msg, cmsg);
+
+ // Most operating systems, but not Linux or emscripten, return the previous pointer
+ // when its length is zero. Therefore, check if the previous pointer is the same as
+ // the current one.
+ if eq(cmsg, previous_cmsg) {
+ break;
+ }
+ }
+
+ if previous_cmsg.is_null() {
+ return false;
+ }
+
+ (*previous_cmsg).cmsg_level = cmsg_level;
+ (*previous_cmsg).cmsg_type = cmsg_type;
+ (*previous_cmsg).cmsg_len = libc::CMSG_LEN(source_len) as _;
+
+ let data = libc::CMSG_DATA(previous_cmsg).cast();
+
+ libc::memcpy(data, source.as_ptr().cast(), source_len as usize);
+ }
+ true
+}
+
+struct AncillaryDataIter<'a, T> {
+ data: &'a [u8],
+ phantom: PhantomData<T>,
+}
+
+impl<'a, T> AncillaryDataIter<'a, T> {
+ /// Creates an `AncillaryDataIter` struct to iterate through the data units in the control message.
+ ///
+ /// # Safety
+ ///
+ /// `data` must contain a valid control message.
+ unsafe fn new(data: &'a [u8]) -> AncillaryDataIter<'a, T> {
+ AncillaryDataIter { data, phantom: PhantomData }
+ }
+}
+
+impl<'a, T> Iterator for AncillaryDataIter<'a, T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ if size_of::<T>() <= self.data.len() {
+ unsafe {
+ let unit = read_unaligned(self.data.as_ptr().cast());
+ self.data = &self.data[size_of::<T>()..];
+ Some(unit)
+ }
+ } else {
+ None
+ }
+ }
+}
+
+#[cfg(all(doc, not(target_os = "android"), not(target_os = "linux"), not(target_os = "netbsd")))]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+#[derive(Clone)]
+pub struct SocketCred(());
+
+/// Unix credential.
+#[cfg(any(target_os = "android", target_os = "linux",))]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+#[derive(Clone)]
+pub struct SocketCred(libc::ucred);
+
+#[cfg(target_os = "netbsd")]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+#[derive(Clone)]
+pub struct SocketCred(libc::sockcred);
+
+#[doc(cfg(any(target_os = "android", target_os = "linux")))]
+#[cfg(any(target_os = "android", target_os = "linux"))]
+impl SocketCred {
+ /// Create a Unix credential struct.
+ ///
+ /// PID, UID and GID are set to 0.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ #[must_use]
+ pub fn new() -> SocketCred {
+ SocketCred(libc::ucred { pid: 0, uid: 0, gid: 0 })
+ }
+
+ /// Set the PID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_pid(&mut self, pid: libc::pid_t) {
+ self.0.pid = pid;
+ }
+
+ /// Get the current PID.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_pid(&self) -> libc::pid_t {
+ self.0.pid
+ }
+
+ /// Set the UID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_uid(&mut self, uid: libc::uid_t) {
+ self.0.uid = uid;
+ }
+
+ /// Get the current UID.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_uid(&self) -> libc::uid_t {
+ self.0.uid
+ }
+
+ /// Set the GID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_gid(&mut self, gid: libc::gid_t) {
+ self.0.gid = gid;
+ }
+
+ /// Get the current GID.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_gid(&self) -> libc::gid_t {
+ self.0.gid
+ }
+}
+
+#[cfg(target_os = "netbsd")]
+impl SocketCred {
+ /// Create a Unix credential struct.
+ ///
+ /// PID, UID and GID are set to 0.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn new() -> SocketCred {
+ SocketCred(libc::sockcred {
+ sc_pid: 0,
+ sc_uid: 0,
+ sc_euid: 0,
+ sc_gid: 0,
+ sc_egid: 0,
+ sc_ngroups: 0,
+ sc_groups: [0u32; 1],
+ })
+ }
+
+ /// Set the PID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_pid(&mut self, pid: libc::pid_t) {
+ self.0.sc_pid = pid;
+ }
+
+ /// Get the current PID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_pid(&self) -> libc::pid_t {
+ self.0.sc_pid
+ }
+
+ /// Set the UID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_uid(&mut self, uid: libc::uid_t) {
+ self.0.sc_uid = uid;
+ }
+
+ /// Get the current UID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_uid(&self) -> libc::uid_t {
+ self.0.sc_uid
+ }
+
+ /// Set the GID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_gid(&mut self, gid: libc::gid_t) {
+ self.0.sc_gid = gid;
+ }
+
+ /// Get the current GID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_gid(&self) -> libc::gid_t {
+ self.0.sc_gid
+ }
+}
+
+/// This control message contains file descriptors.
+///
+/// The level is equal to `SOL_SOCKET` and the type is equal to `SCM_RIGHTS`.
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub struct ScmRights<'a>(AncillaryDataIter<'a, RawFd>);
+
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+impl<'a> Iterator for ScmRights<'a> {
+ type Item = RawFd;
+
+ fn next(&mut self) -> Option<RawFd> {
+ self.0.next()
+ }
+}
+
+#[cfg(all(doc, not(target_os = "android"), not(target_os = "linux"), not(target_os = "netbsd")))]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub struct ScmCredentials<'a>(AncillaryDataIter<'a, ()>);
+
+/// This control message contains Unix credentials.
+///
+/// The level is equal to `SOL_SOCKET` and the type is equal to `SCM_CREDENTIALS` or `SCM_CREDS`.
+#[cfg(any(target_os = "android", target_os = "linux",))]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub struct ScmCredentials<'a>(AncillaryDataIter<'a, libc::ucred>);
+
+#[cfg(target_os = "netbsd")]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub struct ScmCredentials<'a>(AncillaryDataIter<'a, libc::sockcred>);
+
+#[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+impl<'a> Iterator for ScmCredentials<'a> {
+ type Item = SocketCred;
+
+ fn next(&mut self) -> Option<SocketCred> {
+ Some(SocketCred(self.0.next()?))
+ }
+}
+
+/// The error type which is returned when parsing the type of a control message.
+#[non_exhaustive]
+#[derive(Debug)]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub enum AncillaryError {
+ Unknown { cmsg_level: i32, cmsg_type: i32 },
+}
+
+/// This enum represents one control message of variable type.
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub enum AncillaryData<'a> {
+ ScmRights(ScmRights<'a>),
+ #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ ScmCredentials(ScmCredentials<'a>),
+}
+
+impl<'a> AncillaryData<'a> {
+ /// Create an `AncillaryData::ScmRights` variant.
+ ///
+ /// # Safety
+ ///
+ /// `data` must contain a valid control message and the control message must have a
+ /// level of `SOL_SOCKET` and a type of `SCM_RIGHTS`.
+ unsafe fn as_rights(data: &'a [u8]) -> Self {
+ let ancillary_data_iter = AncillaryDataIter::new(data);
+ let scm_rights = ScmRights(ancillary_data_iter);
+ AncillaryData::ScmRights(scm_rights)
+ }
+
+ /// Create an `AncillaryData::ScmCredentials` variant.
+ ///
+ /// # Safety
+ ///
+ /// `data` must contain a valid control message and the control message must have a
+ /// level of `SOL_SOCKET` and a type of `SCM_CREDENTIALS` or `SCM_CREDS`.
+ #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ unsafe fn as_credentials(data: &'a [u8]) -> Self {
+ let ancillary_data_iter = AncillaryDataIter::new(data);
+ let scm_credentials = ScmCredentials(ancillary_data_iter);
+ AncillaryData::ScmCredentials(scm_credentials)
+ }
+
+ fn try_from_cmsghdr(cmsg: &'a libc::cmsghdr) -> Result<Self, AncillaryError> {
+ unsafe {
+ let cmsg_len_zero = libc::CMSG_LEN(0) as usize;
+ let data_len = (*cmsg).cmsg_len as usize - cmsg_len_zero;
+ let data = libc::CMSG_DATA(cmsg).cast();
+ let data = from_raw_parts(data, data_len);
+
+ match (*cmsg).cmsg_level {
+ libc::SOL_SOCKET => match (*cmsg).cmsg_type {
+ libc::SCM_RIGHTS => Ok(AncillaryData::as_rights(data)),
+ #[cfg(any(target_os = "android", target_os = "linux",))]
+ libc::SCM_CREDENTIALS => Ok(AncillaryData::as_credentials(data)),
+ #[cfg(target_os = "netbsd")]
+ libc::SCM_CREDS => Ok(AncillaryData::as_credentials(data)),
+ cmsg_type => {
+ Err(AncillaryError::Unknown { cmsg_level: libc::SOL_SOCKET, cmsg_type })
+ }
+ },
+ cmsg_level => {
+ Err(AncillaryError::Unknown { cmsg_level, cmsg_type: (*cmsg).cmsg_type })
+ }
+ }
+ }
+ }
+}
+
+/// This struct is used to iterate through the control messages.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub struct Messages<'a> {
+ buffer: &'a [u8],
+ current: Option<&'a libc::cmsghdr>,
+}
+
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+impl<'a> Iterator for Messages<'a> {
+ type Item = Result<AncillaryData<'a>, AncillaryError>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ unsafe {
+ let mut msg: libc::msghdr = zeroed();
+ msg.msg_control = self.buffer.as_ptr() as *mut _;
+ msg.msg_controllen = self.buffer.len() as _;
+
+ let cmsg = if let Some(current) = self.current {
+ libc::CMSG_NXTHDR(&msg, current)
+ } else {
+ libc::CMSG_FIRSTHDR(&msg)
+ };
+
+ let cmsg = cmsg.as_ref()?;
+
+ // Most operating systems, but not Linux or emscripten, return the previous pointer
+ // when its length is zero. Therefore, check if the previous pointer is the same as
+ // the current one.
+ if let Some(current) = self.current {
+ if eq(current, cmsg) {
+ return None;
+ }
+ }
+
+ self.current = Some(cmsg);
+ let ancillary_result = AncillaryData::try_from_cmsghdr(cmsg);
+ Some(ancillary_result)
+ }
+ }
+}
+
+/// A Unix socket ancillary data struct.
+///
+/// # Example
+/// ```no_run
+/// #![feature(unix_socket_ancillary_data)]
+/// use std::os::unix::net::{UnixStream, SocketAncillary, AncillaryData};
+/// use std::io::IoSliceMut;
+///
+/// fn main() -> std::io::Result<()> {
+/// let sock = UnixStream::connect("/tmp/sock")?;
+///
+/// let mut fds = [0; 8];
+/// let mut ancillary_buffer = [0; 128];
+/// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+///
+/// let mut buf = [1; 8];
+/// let mut bufs = &mut [IoSliceMut::new(&mut buf[..])][..];
+/// sock.recv_vectored_with_ancillary(bufs, &mut ancillary)?;
+///
+/// for ancillary_result in ancillary.messages() {
+/// if let AncillaryData::ScmRights(scm_rights) = ancillary_result.unwrap() {
+/// for fd in scm_rights {
+/// println!("receive file descriptor: {fd}");
+/// }
+/// }
+/// }
+/// Ok(())
+/// }
+/// ```
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+#[derive(Debug)]
+pub struct SocketAncillary<'a> {
+ buffer: &'a mut [u8],
+ length: usize,
+ truncated: bool,
+}
+
+impl<'a> SocketAncillary<'a> {
+ /// Creates an ancillary data struct with the given buffer.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// # #![allow(unused_mut)]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::SocketAncillary;
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// ```
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn new(buffer: &'a mut [u8]) -> Self {
+ SocketAncillary { buffer, length: 0, truncated: false }
+ }
+
+ /// Returns the capacity of the buffer.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn capacity(&self) -> usize {
+ self.buffer.len()
+ }
+
+ /// Returns `true` if the ancillary data is empty.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Returns the number of used bytes.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns an iterator over the control messages.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn messages(&self) -> Messages<'_> {
+ Messages { buffer: &self.buffer[..self.length], current: None }
+ }
+
+ /// Returns `true` if the ancillary data was truncated during a recv operation.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixStream, SocketAncillary};
+ /// use std::io::IoSliceMut;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixStream::connect("/tmp/sock")?;
+ ///
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ ///
+ /// let mut buf = [1; 8];
+ /// let mut bufs = &mut [IoSliceMut::new(&mut buf[..])][..];
+ /// sock.recv_vectored_with_ancillary(bufs, &mut ancillary)?;
+ ///
+ /// println!("Is truncated: {}", ancillary.truncated());
+ /// Ok(())
+ /// }
+ /// ```
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn truncated(&self) -> bool {
+ self.truncated
+ }
+
+ /// Add file descriptors to the ancillary data.
+ ///
+ /// The function returns `true` if there was enough space in the buffer.
+ /// If there was not enough space then no file descriptors were appended.
+ /// Technically, that means this operation adds a control message with the level `SOL_SOCKET`
+ /// and type `SCM_RIGHTS`.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixStream, SocketAncillary};
+ /// use std::os::unix::io::AsRawFd;
+ /// use std::io::IoSlice;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixStream::connect("/tmp/sock")?;
+ ///
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// ancillary.add_fds(&[sock.as_raw_fd()][..]);
+ ///
+ /// let buf = [1; 8];
+ /// let mut bufs = &mut [IoSlice::new(&buf[..])][..];
+ /// sock.send_vectored_with_ancillary(bufs, &mut ancillary)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn add_fds(&mut self, fds: &[RawFd]) -> bool {
+ self.truncated = false;
+ add_to_ancillary_data(
+ &mut self.buffer,
+ &mut self.length,
+ fds,
+ libc::SOL_SOCKET,
+ libc::SCM_RIGHTS,
+ )
+ }
+
+ /// Add credentials to the ancillary data.
+ ///
+ /// The function returns `true` if there was enough space in the buffer.
+ /// If there was not enough space then no credentials were appended.
+ /// Technically, that means this operation adds a control message with the level `SOL_SOCKET`
+ /// and type `SCM_CREDENTIALS` or `SCM_CREDS`.
+ ///
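+ /// # Example
+ ///
+ /// A minimal sketch, mirroring the `add_fds` example above (the socket path is
+ /// a placeholder):
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::io::IoSlice;
+ /// use std::os::unix::net::{SocketAncillary, SocketCred, UnixStream};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ ///     let sock = UnixStream::connect("/tmp/sock")?;
+ ///
+ ///     // Credentials default to a PID/UID/GID of 0; real code would fill them in.
+ ///     let cred = SocketCred::new();
+ ///
+ ///     let mut ancillary_buffer = [0; 128];
+ ///     let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ ///     ancillary.add_creds(&[cred][..]);
+ ///
+ ///     let buf = [1; 8];
+ ///     let bufs = &[IoSlice::new(&buf[..])][..];
+ ///     sock.send_vectored_with_ancillary(bufs, &mut ancillary)?;
+ ///     Ok(())
+ /// }
+ /// ```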
+ #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn add_creds(&mut self, creds: &[SocketCred]) -> bool {
+ self.truncated = false;
+ add_to_ancillary_data(
+ &mut self.buffer,
+ &mut self.length,
+ creds,
+ libc::SOL_SOCKET,
+ #[cfg(not(target_os = "netbsd"))]
+ libc::SCM_CREDENTIALS,
+ #[cfg(target_os = "netbsd")]
+ libc::SCM_CREDS,
+ )
+ }
+
+ /// Clears the ancillary data, removing all values.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixStream, SocketAncillary, AncillaryData};
+ /// use std::io::IoSliceMut;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixStream::connect("/tmp/sock")?;
+ ///
+ /// let mut fds1 = [0; 8];
+ /// let mut fds2 = [0; 8];
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ ///
+ /// let mut buf = [1; 8];
+ /// let mut bufs = &mut [IoSliceMut::new(&mut buf[..])][..];
+ ///
+ /// sock.recv_vectored_with_ancillary(bufs, &mut ancillary)?;
+ /// for ancillary_result in ancillary.messages() {
+ /// if let AncillaryData::ScmRights(scm_rights) = ancillary_result.unwrap() {
+ /// for fd in scm_rights {
+ /// println!("receive file descriptor: {fd}");
+ /// }
+ /// }
+ /// }
+ ///
+ /// ancillary.clear();
+ ///
+ /// sock.recv_vectored_with_ancillary(bufs, &mut ancillary)?;
+ /// for ancillary_result in ancillary.messages() {
+ /// if let AncillaryData::ScmRights(scm_rights) = ancillary_result.unwrap() {
+ /// for fd in scm_rights {
+ /// println!("receive file descriptor: {fd}");
+ /// }
+ /// }
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn clear(&mut self) {
+ self.length = 0;
+ self.truncated = false;
+ }
+}
diff --git a/library/std/src/os/unix/net/datagram.rs b/library/std/src/os/unix/net/datagram.rs
new file mode 100644
index 000000000..8008acfd1
--- /dev/null
+++ b/library/std/src/os/unix/net/datagram.rs
@@ -0,0 +1,987 @@
+#[cfg(any(doc, target_os = "android", target_os = "linux"))]
+use super::{recv_vectored_with_ancillary_from, send_vectored_with_ancillary_to, SocketAncillary};
+use super::{sockaddr_un, SocketAddr};
+#[cfg(any(doc, target_os = "android", target_os = "linux"))]
+use crate::io::{IoSlice, IoSliceMut};
+use crate::net::Shutdown;
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+use crate::path::Path;
+use crate::sys::cvt;
+use crate::sys::net::Socket;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::time::Duration;
+use crate::{fmt, io};
+
+#[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "netbsd",
+ target_os = "haiku"
+))]
+use libc::MSG_NOSIGNAL;
+#[cfg(not(any(
+ target_os = "linux",
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "netbsd",
+ target_os = "haiku"
+)))]
+const MSG_NOSIGNAL: libc::c_int = 0x0;
+
+/// A Unix datagram socket.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::os::unix::net::UnixDatagram;
+///
+/// fn main() -> std::io::Result<()> {
+/// let socket = UnixDatagram::bind("/path/to/my/socket")?;
+/// socket.send_to(b"hello world", "/path/to/other/socket")?;
+/// let mut buf = [0; 100];
+/// let (count, address) = socket.recv_from(&mut buf)?;
+/// println!("socket {:?} sent {:?}", address, &buf[..count]);
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub struct UnixDatagram(Socket);
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl fmt::Debug for UnixDatagram {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut builder = fmt.debug_struct("UnixDatagram");
+ builder.field("fd", self.0.as_inner());
+ if let Ok(addr) = self.local_addr() {
+ builder.field("local", &addr);
+ }
+ if let Ok(addr) = self.peer_addr() {
+ builder.field("peer", &addr);
+ }
+ builder.finish()
+ }
+}
+
+impl UnixDatagram {
+ /// Creates a Unix datagram socket bound to the given path.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// let sock = match UnixDatagram::bind("/path/to/the/socket") {
+ /// Ok(sock) => sock,
+ /// Err(e) => {
+ /// println!("Couldn't bind: {e:?}");
+ /// return
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> {
+ unsafe {
+ let socket = UnixDatagram::unbound()?;
+ let (addr, len) = sockaddr_un(path.as_ref())?;
+
+ cvt(libc::bind(socket.as_raw_fd(), &addr as *const _ as *const _, len as _))?;
+
+ Ok(socket)
+ }
+ }
+
+ /// Creates a Unix datagram socket bound to an address.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixDatagram};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock1 = UnixDatagram::bind("path/to/socket")?;
+ /// let addr = sock1.local_addr()?;
+ ///
+ /// let sock2 = match UnixDatagram::bind_addr(&addr) {
+ /// Ok(sock) => sock,
+ /// Err(err) => {
+ /// println!("Couldn't bind: {err:?}");
+ /// return Err(err);
+ /// }
+ /// };
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixDatagram> {
+ unsafe {
+ let socket = UnixDatagram::unbound()?;
+ cvt(libc::bind(
+ socket.as_raw_fd(),
+ &socket_addr.addr as *const _ as *const _,
+ socket_addr.len as _,
+ ))?;
+ Ok(socket)
+ }
+ }
+
+ /// Creates a Unix datagram socket which is not bound to any address.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// let sock = match UnixDatagram::unbound() {
+ /// Ok(sock) => sock,
+ /// Err(e) => {
+ /// println!("Couldn't create an unbound socket: {e:?}");
+ /// return
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn unbound() -> io::Result<UnixDatagram> {
+ let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_DGRAM)?;
+ Ok(UnixDatagram(inner))
+ }
+
+ /// Creates an unnamed pair of connected sockets.
+ ///
+ /// Returns two `UnixDatagram`s which are connected to each other.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// let (sock1, sock2) = match UnixDatagram::pair() {
+ /// Ok((sock1, sock2)) => (sock1, sock2),
+ /// Err(e) => {
+ /// println!("Couldn't create a pair of sockets: {e:?}");
+ /// return
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> {
+ let (i1, i2) = Socket::new_pair(libc::AF_UNIX, libc::SOCK_DGRAM)?;
+ Ok((UnixDatagram(i1), UnixDatagram(i2)))
+ }
+
+ /// Connects the socket to the specified path address.
+ ///
+ /// The [`send`] method may be used to send data to the specified address.
+ /// [`recv`] and [`recv_from`] will only receive data from that address.
+ ///
+ /// [`send`]: UnixDatagram::send
+ /// [`recv`]: UnixDatagram::recv
+ /// [`recv_from`]: UnixDatagram::recv_from
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// match sock.connect("/path/to/the/socket") {
+ /// Ok(sock) => sock,
+ /// Err(e) => {
+ /// println!("Couldn't connect: {e:?}");
+ /// return Err(e)
+ /// }
+ /// };
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+ unsafe {
+ let (addr, len) = sockaddr_un(path.as_ref())?;
+
+ cvt(libc::connect(self.as_raw_fd(), &addr as *const _ as *const _, len))?;
+ }
+ Ok(())
+ }
+
+ /// Connects the socket to an address.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixDatagram};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let bound = UnixDatagram::bind("/path/to/socket")?;
+ /// let addr = bound.local_addr()?;
+ ///
+ /// let sock = UnixDatagram::unbound()?;
+ /// match sock.connect_addr(&addr) {
+ /// Ok(sock) => sock,
+ /// Err(e) => {
+ /// println!("Couldn't connect: {e:?}");
+ /// return Err(e)
+ /// }
+ /// };
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ pub fn connect_addr(&self, socket_addr: &SocketAddr) -> io::Result<()> {
+ unsafe {
+ cvt(libc::connect(
+ self.as_raw_fd(),
+ &socket_addr.addr as *const _ as *const _,
+ socket_addr.len,
+ ))?;
+ }
+ Ok(())
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UnixDatagram` is a reference to the same socket that this
+ /// object references. Both handles can be used to send and receive
+ /// datagrams, and options set on one side will affect the other.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::bind("/path/to/the/socket")?;
+ /// let sock_copy = sock.try_clone().expect("try_clone failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn try_clone(&self) -> io::Result<UnixDatagram> {
+ self.0.duplicate().map(UnixDatagram)
+ }
+
+ /// Returns the address of this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::bind("/path/to/the/socket")?;
+ /// let addr = sock.local_addr().expect("Couldn't get local address");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ SocketAddr::new(|addr, len| unsafe { libc::getsockname(self.as_raw_fd(), addr, len) })
+ }
+
+ /// Returns the address of this socket's peer.
+ ///
+ /// The [`connect`] method will connect the socket to a peer.
+ ///
+ /// [`connect`]: UnixDatagram::connect
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.connect("/path/to/the/socket")?;
+ ///
+ /// let addr = sock.peer_addr().expect("Couldn't get peer address");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ SocketAddr::new(|addr, len| unsafe { libc::getpeername(self.as_raw_fd(), addr, len) })
+ }
+
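+ // Shared receive path for `recv_from` and `peek_from`: calls `recvfrom(2)`
+ // with the given flags and returns the byte count together with the sender's
+ // address. The closure maps the `recvfrom` return value onto the success /
+ // failure convention expected by `SocketAddr::new`.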
+ fn recv_from_flags(
+ &self,
+ buf: &mut [u8],
+ flags: libc::c_int,
+ ) -> io::Result<(usize, SocketAddr)> {
+ let mut count = 0;
+ let addr = SocketAddr::new(|addr, len| unsafe {
+ count = libc::recvfrom(
+ self.as_raw_fd(),
+ buf.as_mut_ptr() as *mut _,
+ buf.len(),
+ flags,
+ addr,
+ len,
+ );
+ if count > 0 {
+ 1
+ } else if count == 0 {
+ 0
+ } else {
+ -1
+ }
+ })?;
+
+ Ok((count as usize, addr))
+ }
+
+ /// Receives data from the socket.
+ ///
+ /// On success, returns the number of bytes read and the address from
+ /// which the data came.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// let mut buf = vec![0; 10];
+ /// let (size, sender) = sock.recv_from(buf.as_mut_slice())?;
+ /// println!("received {size} bytes from {sender:?}");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_flags(buf, 0)
+ }
+
+ /// Receives data from the socket.
+ ///
+ /// On success, returns the number of bytes read.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::bind("/path/to/the/socket")?;
+ /// let mut buf = vec![0; 10];
+ /// sock.recv(buf.as_mut_slice()).expect("recv function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+
+ /// Receives data and ancillary data from socket.
+ ///
+ /// On success, returns the number of bytes read, whether the data was truncated, and the address from which the message was sent.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixDatagram, SocketAncillary, AncillaryData};
+ /// use std::io::IoSliceMut;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// let mut buf1 = [1; 8];
+ /// let mut buf2 = [2; 16];
+ /// let mut buf3 = [3; 8];
+ /// let mut bufs = &mut [
+ /// IoSliceMut::new(&mut buf1),
+ /// IoSliceMut::new(&mut buf2),
+ /// IoSliceMut::new(&mut buf3),
+ /// ][..];
+ /// let mut fds = [0; 8];
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// let (size, _truncated, sender) = sock.recv_vectored_with_ancillary_from(bufs, &mut ancillary)?;
+ /// println!("received {size}");
+ /// for ancillary_result in ancillary.messages() {
+ /// if let AncillaryData::ScmRights(scm_rights) = ancillary_result.unwrap() {
+ /// for fd in scm_rights {
+ /// println!("receive file descriptor: {fd}");
+ /// }
+ /// }
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux"))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn recv_vectored_with_ancillary_from(
+ &self,
+ bufs: &mut [IoSliceMut<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+ ) -> io::Result<(usize, bool, SocketAddr)> {
+ let (count, truncated, addr) = recv_vectored_with_ancillary_from(&self.0, bufs, ancillary)?;
+ let addr = addr?;
+
+ Ok((count, truncated, addr))
+ }
+
+ /// Receives data and ancillary data from socket.
+ ///
+ /// On success, returns the number of bytes read and whether the data was truncated.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixDatagram, SocketAncillary, AncillaryData};
+ /// use std::io::IoSliceMut;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// let mut buf1 = [1; 8];
+ /// let mut buf2 = [2; 16];
+ /// let mut buf3 = [3; 8];
+ /// let mut bufs = &mut [
+ /// IoSliceMut::new(&mut buf1),
+ /// IoSliceMut::new(&mut buf2),
+ /// IoSliceMut::new(&mut buf3),
+ /// ][..];
+ /// let mut fds = [0; 8];
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// let (size, _truncated) = sock.recv_vectored_with_ancillary(bufs, &mut ancillary)?;
+ /// println!("received {size}");
+ /// for ancillary_result in ancillary.messages() {
+ /// if let AncillaryData::ScmRights(scm_rights) = ancillary_result.unwrap() {
+ /// for fd in scm_rights {
+ /// println!("receive file descriptor: {fd}");
+ /// }
+ /// }
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux"))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn recv_vectored_with_ancillary(
+ &self,
+ bufs: &mut [IoSliceMut<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+ ) -> io::Result<(usize, bool)> {
+ let (count, truncated, addr) = recv_vectored_with_ancillary_from(&self.0, bufs, ancillary)?;
+ addr?;
+
+ Ok((count, truncated))
+ }
+
+ /// Sends data on the socket to the specified address.
+ ///
+ /// On success, returns the number of bytes written.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.send_to(b"omelette au fromage", "/some/sock").expect("send_to function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> {
+ unsafe {
+ let (addr, len) = sockaddr_un(path.as_ref())?;
+
+ let count = cvt(libc::sendto(
+ self.as_raw_fd(),
+ buf.as_ptr() as *const _,
+ buf.len(),
+ MSG_NOSIGNAL,
+ &addr as *const _ as *const _,
+ len,
+ ))?;
+ Ok(count as usize)
+ }
+ }
+
+ /// Sends data on the socket to the specified [SocketAddr].
+ ///
+ /// On success, returns the number of bytes written.
+ ///
+ /// [SocketAddr]: crate::os::unix::net::SocketAddr
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixDatagram};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let bound = UnixDatagram::bind("/path/to/socket")?;
+ /// let addr = bound.local_addr()?;
+ ///
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.send_to_addr(b"bacon egg and cheese", &addr).expect("send_to_addr function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ pub fn send_to_addr(&self, buf: &[u8], socket_addr: &SocketAddr) -> io::Result<usize> {
+ unsafe {
+ let count = cvt(libc::sendto(
+ self.as_raw_fd(),
+ buf.as_ptr() as *const _,
+ buf.len(),
+ MSG_NOSIGNAL,
+ &socket_addr.addr as *const _ as *const _,
+ socket_addr.len,
+ ))?;
+ Ok(count as usize)
+ }
+ }
+
+ /// Sends data on the socket to the socket's peer.
+ ///
+ /// The peer address may be set by the `connect` method, and this method
+ /// will return an error if the socket has not already been connected.
+ ///
+ /// On success, returns the number of bytes written.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.connect("/some/sock").expect("Couldn't connect");
+ /// sock.send(b"omelette au fromage").expect("send_to function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ /// Sends data and ancillary data on the socket to the specified address.
+ ///
+ /// On success, returns the number of bytes written.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixDatagram, SocketAncillary};
+ /// use std::io::IoSlice;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// let buf1 = [1; 8];
+ /// let buf2 = [2; 16];
+ /// let buf3 = [3; 8];
+ /// let bufs = &[
+ /// IoSlice::new(&buf1),
+ /// IoSlice::new(&buf2),
+ /// IoSlice::new(&buf3),
+ /// ][..];
+ /// let fds = [0, 1, 2];
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// ancillary.add_fds(&fds[..]);
+ /// sock.send_vectored_with_ancillary_to(bufs, &mut ancillary, "/some/sock")
+ /// .expect("send_vectored_with_ancillary_to function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux"))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn send_vectored_with_ancillary_to<P: AsRef<Path>>(
+ &self,
+ bufs: &[IoSlice<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+ path: P,
+ ) -> io::Result<usize> {
+ send_vectored_with_ancillary_to(&self.0, Some(path.as_ref()), bufs, ancillary)
+ }
+
+ /// Sends data and ancillary data on the socket.
+ ///
+ /// On success, returns the number of bytes written.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixDatagram, SocketAncillary};
+ /// use std::io::IoSlice;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// let buf1 = [1; 8];
+ /// let buf2 = [2; 16];
+ /// let buf3 = [3; 8];
+ /// let bufs = &[
+ /// IoSlice::new(&buf1),
+ /// IoSlice::new(&buf2),
+ /// IoSlice::new(&buf3),
+ /// ][..];
+ /// let fds = [0, 1, 2];
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// ancillary.add_fds(&fds[..]);
+ /// sock.send_vectored_with_ancillary(bufs, &mut ancillary)
+ /// .expect("send_vectored_with_ancillary function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux"))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn send_vectored_with_ancillary(
+ &self,
+ bufs: &[IoSlice<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+ ) -> io::Result<usize> {
+ send_vectored_with_ancillary_to(&self.0, None, bufs, ancillary)
+ }
+
+ /// Sets the read timeout for the socket.
+ ///
+ /// If the provided value is [`None`], then [`recv`] and [`recv_from`] calls will
+ /// block indefinitely. An [`Err`] is returned if the zero [`Duration`]
+ /// is passed to this method.
+ ///
+ /// [`recv`]: UnixDatagram::recv
+ /// [`recv_from`]: UnixDatagram::recv_from
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::os::unix::net::UnixDatagram;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_read_timeout(Some(Duration::new(1, 0)))
+ /// .expect("set_read_timeout function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::os::unix::net::UnixDatagram;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixDatagram::unbound()?;
+ /// let result = socket.set_read_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+ self.0.set_timeout(timeout, libc::SO_RCVTIMEO)
+ }
+
+ /// Sets the write timeout for the socket.
+ ///
+ /// If the provided value is [`None`], then [`send`] and [`send_to`] calls will
+ /// block indefinitely. An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method.
+ ///
+ /// [`send`]: UnixDatagram::send
+ /// [`send_to`]: UnixDatagram::send_to
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::os::unix::net::UnixDatagram;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_write_timeout(Some(Duration::new(1, 0)))
+ /// .expect("set_write_timeout function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::os::unix::net::UnixDatagram;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixDatagram::unbound()?;
+ /// let result = socket.set_write_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+ self.0.set_timeout(timeout, libc::SO_SNDTIMEO)
+ }
+
+ /// Returns the read timeout of this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::os::unix::net::UnixDatagram;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_read_timeout(Some(Duration::new(1, 0)))
+ /// .expect("set_read_timeout function failed");
+ /// assert_eq!(sock.read_timeout()?, Some(Duration::new(1, 0)));
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.timeout(libc::SO_RCVTIMEO)
+ }
+
+ /// Returns the write timeout of this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::os::unix::net::UnixDatagram;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_write_timeout(Some(Duration::new(1, 0)))
+ /// .expect("set_write_timeout function failed");
+ /// assert_eq!(sock.write_timeout()?, Some(Duration::new(1, 0)));
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.timeout(libc::SO_SNDTIMEO)
+ }
+
+ /// Moves the socket into or out of nonblocking mode.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_nonblocking(true).expect("set_nonblocking function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.0.set_nonblocking(nonblocking)
+ }
+
+ /// Moves the socket to pass Unix credentials as a control message in [`SocketAncillary`].
+ ///
+ /// Sets the socket option `SO_PASSCRED`.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_passcred(true).expect("set_passcred function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
+ self.0.set_passcred(passcred)
+ }
+
+ /// Gets the current value of the socket option for passing Unix credentials in [`SocketAncillary`].
+ /// This value can be changed by [`set_passcred`].
+ ///
+ /// Gets the socket option `SO_PASSCRED`.
+ ///
+ /// [`set_passcred`]: UnixDatagram::set_passcred
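+ ///
+ /// # Examples
+ ///
+ /// A minimal usage sketch (illustrative only), mirroring the `set_passcred`
+ /// example above:
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.set_passcred(true)?;
+ /// assert_eq!(sock.passcred()?, true);
+ /// Ok(())
+ /// }
+ /// ```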
+ #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn passcred(&self) -> io::Result<bool> {
+ self.0.passcred()
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// if let Ok(Some(err)) = sock.take_error() {
+ /// println!("Got error: {err:?}");
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0.take_error()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of [`Shutdown`]).
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixDatagram;
+ /// use std::net::Shutdown;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let sock = UnixDatagram::unbound()?;
+ /// sock.shutdown(Shutdown::Both).expect("shutdown function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.0.shutdown(how)
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_peek)]
+ ///
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixDatagram::bind("/tmp/sock")?;
+ /// let mut buf = [0; 10];
+ /// let len = socket.peek(&mut buf).expect("peek failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_peek", issue = "76923")]
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.peek(buf)
+ }
+
+ /// Receives a single datagram message on the socket, without removing it from the
+ /// queue. On success, returns the number of bytes read and the origin.
+ ///
+ /// The function must be called with a valid byte array `buf` of sufficient size to
+ /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recvfrom` system call.
+ ///
+ /// Do not use this function to implement busy waiting; instead, use `libc::poll` to
+ /// synchronize I/O events on one or more sockets.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_peek)]
+ ///
+ /// use std::os::unix::net::UnixDatagram;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixDatagram::bind("/tmp/sock")?;
+ /// let mut buf = [0; 10];
+ /// let (len, addr) = socket.peek_from(&mut buf).expect("peek failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_peek", issue = "76923")]
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_flags(buf, libc::MSG_PEEK)
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl AsRawFd for UnixDatagram {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_inner().as_raw_fd()
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl FromRawFd for UnixDatagram {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram {
+ UnixDatagram(Socket::from_inner(FromInner::from_inner(OwnedFd::from_raw_fd(fd))))
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl IntoRawFd for UnixDatagram {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_inner().into_inner().into_raw_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for UnixDatagram {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_inner().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<UnixDatagram> for OwnedFd {
+ #[inline]
+ fn from(unix_datagram: UnixDatagram) -> OwnedFd {
+ unsafe { OwnedFd::from_raw_fd(unix_datagram.into_raw_fd()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for UnixDatagram {
+ #[inline]
+ fn from(owned: OwnedFd) -> Self {
+ unsafe { Self::from_raw_fd(owned.into_raw_fd()) }
+ }
+}
diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs
new file mode 100644
index 000000000..7c0d53950
--- /dev/null
+++ b/library/std/src/os/unix/net/listener.rs
@@ -0,0 +1,385 @@
+use super::{sockaddr_un, SocketAddr, UnixStream};
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+use crate::path::Path;
+use crate::sys::cvt;
+use crate::sys::net::Socket;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::{fmt, io, mem};
+
+/// A structure representing a Unix domain socket server.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::thread;
+/// use std::os::unix::net::{UnixStream, UnixListener};
+///
+/// fn handle_client(stream: UnixStream) {
+/// // ...
+/// }
+///
+/// fn main() -> std::io::Result<()> {
+/// let listener = UnixListener::bind("/path/to/the/socket")?;
+///
+/// // accept connections and process them, spawning a new thread for each one
+/// for stream in listener.incoming() {
+/// match stream {
+/// Ok(stream) => {
+/// /* connection succeeded */
+/// thread::spawn(|| handle_client(stream));
+/// }
+/// Err(err) => {
+/// /* connection failed */
+/// break;
+/// }
+/// }
+/// }
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub struct UnixListener(Socket);
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl fmt::Debug for UnixListener {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut builder = fmt.debug_struct("UnixListener");
+ builder.field("fd", self.0.as_inner());
+ if let Ok(addr) = self.local_addr() {
+ builder.field("local", &addr);
+ }
+ builder.finish()
+ }
+}
+
+impl UnixListener {
+ /// Creates a new `UnixListener` bound to the specified socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ ///
+ /// let listener = match UnixListener::bind("/path/to/the/socket") {
+ /// Ok(sock) => sock,
+ /// Err(e) => {
+ /// println!("Couldn't connect: {e:?}");
+ /// return
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
+ unsafe {
+ let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let (addr, len) = sockaddr_un(path.as_ref())?;
+
+ cvt(libc::bind(inner.as_inner().as_raw_fd(), &addr as *const _ as *const _, len as _))?;
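+ // Allow up to 128 pending connections to queue before they are accepted.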
+ cvt(libc::listen(inner.as_inner().as_raw_fd(), 128))?;
+
+ Ok(UnixListener(inner))
+ }
+ }
+
+ /// Creates a new `UnixListener` bound to the specified [`socket address`].
+ ///
+ /// [`socket address`]: crate::os::unix::net::SocketAddr
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixListener};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener1 = UnixListener::bind("path/to/socket")?;
+ /// let addr = listener1.local_addr()?;
+ ///
+ /// let listener2 = match UnixListener::bind_addr(&addr) {
+ /// Ok(sock) => sock,
+ /// Err(err) => {
+ /// println!("Couldn't bind: {err:?}");
+ /// return Err(err);
+ /// }
+ /// };
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixListener> {
+ unsafe {
+ let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ cvt(libc::bind(
+ inner.as_raw_fd(),
+ &socket_addr.addr as *const _ as *const _,
+ socket_addr.len as _,
+ ))?;
+ cvt(libc::listen(inner.as_raw_fd(), 128))?;
+ Ok(UnixListener(inner))
+ }
+ }
+
+ /// Accepts a new incoming connection to this listener.
+ ///
+ /// This function will block the calling thread until a new Unix connection
+ /// is established. When established, the corresponding [`UnixStream`] and
+ /// the remote peer's address will be returned.
+ ///
+ /// [`UnixStream`]: crate::os::unix::net::UnixStream
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = UnixListener::bind("/path/to/the/socket")?;
+ ///
+ /// match listener.accept() {
+ /// Ok((socket, addr)) => println!("Got a client: {addr:?}"),
+ /// Err(e) => println!("accept function failed: {e:?}"),
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
+ let mut storage: libc::sockaddr_un = unsafe { mem::zeroed() };
+ let mut len = mem::size_of_val(&storage) as libc::socklen_t;
+ let sock = self.0.accept(&mut storage as *mut _ as *mut _, &mut len)?;
+ let addr = SocketAddr::from_parts(storage, len)?;
+ Ok((UnixStream(sock), addr))
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UnixListener` is a reference to the same socket that this
+ /// object references. Both handles can be used to accept incoming
+ /// connections and options set on one listener will affect the other.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = UnixListener::bind("/path/to/the/socket")?;
+ /// let listener_copy = listener.try_clone().expect("try_clone failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn try_clone(&self) -> io::Result<UnixListener> {
+ self.0.duplicate().map(UnixListener)
+ }
+
+ /// Returns the local socket address of this listener.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = UnixListener::bind("/path/to/the/socket")?;
+ /// let addr = listener.local_addr().expect("Couldn't get local address");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ SocketAddr::new(|addr, len| unsafe { libc::getsockname(self.as_raw_fd(), addr, len) })
+ }
+
+ /// Moves the socket into or out of nonblocking mode.
+ ///
+ /// This will result in the `accept` operation becoming nonblocking,
+ /// i.e., immediately returning from its call. If the IO operation is
+ /// successful, `Ok` is returned and no further action is required. If the
+ /// IO operation could not be completed and needs to be retried, an error
+ /// with kind [`io::ErrorKind::WouldBlock`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = UnixListener::bind("/path/to/the/socket")?;
+ /// listener.set_nonblocking(true).expect("Couldn't set non blocking");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.0.set_nonblocking(nonblocking)
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixListener;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = UnixListener::bind("/tmp/sock")?;
+ ///
+ /// if let Ok(Some(err)) = listener.take_error() {
+ /// println!("Got error: {err:?}");
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// # Platform specific
+ /// On Redox this always returns `None`.
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0.take_error()
+ }
+
+ /// Returns an iterator over incoming connections.
+ ///
+ /// The iterator will never return [`None`] and will also not yield the
+ /// peer's [`SocketAddr`] structure.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::thread;
+ /// use std::os::unix::net::{UnixStream, UnixListener};
+ ///
+ /// fn handle_client(stream: UnixStream) {
+ /// // ...
+ /// }
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = UnixListener::bind("/path/to/the/socket")?;
+ ///
+ /// for stream in listener.incoming() {
+ /// match stream {
+ /// Ok(stream) => {
+ /// thread::spawn(|| handle_client(stream));
+ /// }
+ /// Err(err) => {
+ /// break;
+ /// }
+ /// }
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn incoming(&self) -> Incoming<'_> {
+ Incoming { listener: self }
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl AsRawFd for UnixListener {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_inner().as_raw_fd()
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl FromRawFd for UnixListener {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixListener {
+ UnixListener(Socket::from_inner(FromInner::from_inner(OwnedFd::from_raw_fd(fd))))
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl IntoRawFd for UnixListener {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_inner().into_inner().into_raw_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for UnixListener {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_inner().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for UnixListener {
+ #[inline]
+ fn from(fd: OwnedFd) -> UnixListener {
+ UnixListener(Socket::from_inner(FromInner::from_inner(fd)))
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<UnixListener> for OwnedFd {
+ #[inline]
+ fn from(listener: UnixListener) -> OwnedFd {
+ listener.0.into_inner().into_inner()
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl<'a> IntoIterator for &'a UnixListener {
+ type Item = io::Result<UnixStream>;
+ type IntoIter = Incoming<'a>;
+
+ fn into_iter(self) -> Incoming<'a> {
+ self.incoming()
+ }
+}
+
+/// An iterator over incoming connections to a [`UnixListener`].
+///
+/// It will never return [`None`].
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::thread;
+/// use std::os::unix::net::{UnixStream, UnixListener};
+///
+/// fn handle_client(stream: UnixStream) {
+/// // ...
+/// }
+///
+/// fn main() -> std::io::Result<()> {
+/// let listener = UnixListener::bind("/path/to/the/socket")?;
+///
+/// for stream in listener.incoming() {
+/// match stream {
+/// Ok(stream) => {
+/// thread::spawn(|| handle_client(stream));
+/// }
+/// Err(err) => {
+/// break;
+/// }
+/// }
+/// }
+/// Ok(())
+/// }
+/// ```
+#[derive(Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub struct Incoming<'a> {
+ listener: &'a UnixListener,
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl<'a> Iterator for Incoming<'a> {
+ type Item = io::Result<UnixStream>;
+
+ fn next(&mut self) -> Option<io::Result<UnixStream>> {
+ Some(self.listener.accept().map(|s| s.0))
+ }
+
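+ // `accept` can be called indefinitely, so the iterator is effectively
+ // unbounded: report the maximum lower bound and no finite upper bound.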
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::MAX, None)
+ }
+}
diff --git a/library/std/src/os/unix/net/mod.rs b/library/std/src/os/unix/net/mod.rs
new file mode 100644
index 000000000..6da3e350b
--- /dev/null
+++ b/library/std/src/os/unix/net/mod.rs
@@ -0,0 +1,26 @@
+//! Unix-specific networking functionality.
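+//!
+//! # Examples
+//!
+//! A minimal sketch (illustrative) using an unnamed, connected socket pair:
+//!
+//! ```no_run
+//! use std::io::prelude::*;
+//! use std::os::unix::net::UnixStream;
+//!
+//! fn main() -> std::io::Result<()> {
+//! let (mut a, mut b) = UnixStream::pair()?;
+//! a.write_all(b"ping")?;
+//! let mut buf = [0u8; 4];
+//! b.read_exact(&mut buf)?;
+//! assert_eq!(&buf, b"ping");
+//! Ok(())
+//! }
+//! ```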
+
+#![allow(irrefutable_let_patterns)]
+#![stable(feature = "unix_socket", since = "1.10.0")]
+
+mod addr;
+#[doc(cfg(any(target_os = "android", target_os = "linux")))]
+#[cfg(any(doc, target_os = "android", target_os = "linux"))]
+mod ancillary;
+mod datagram;
+mod listener;
+mod stream;
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub use self::addr::*;
+#[cfg(any(doc, target_os = "android", target_os = "linux"))]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub use self::ancillary::*;
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub use self::datagram::*;
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub use self::listener::*;
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub use self::stream::*;
diff --git a/library/std/src/os/unix/net/stream.rs b/library/std/src/os/unix/net/stream.rs
new file mode 100644
index 000000000..cc3a88587
--- /dev/null
+++ b/library/std/src/os/unix/net/stream.rs
@@ -0,0 +1,711 @@
+#[cfg(any(doc, target_os = "android", target_os = "linux"))]
+use super::{recv_vectored_with_ancillary_from, send_vectored_with_ancillary_to, SocketAncillary};
+use super::{sockaddr_un, SocketAddr};
+use crate::fmt;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::net::Shutdown;
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "watchos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+use crate::os::unix::ucred;
+use crate::path::Path;
+use crate::sys::cvt;
+use crate::sys::net::Socket;
+use crate::sys_common::{AsInner, FromInner};
+use crate::time::Duration;
+
+#[unstable(feature = "peer_credentials_unix_socket", issue = "42839", reason = "unstable")]
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "watchos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+pub use ucred::UCred;
+
+/// A Unix stream socket.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::os::unix::net::UnixStream;
+/// use std::io::prelude::*;
+///
+/// fn main() -> std::io::Result<()> {
+/// let mut stream = UnixStream::connect("/path/to/my/socket")?;
+/// stream.write_all(b"hello world")?;
+/// let mut response = String::new();
+/// stream.read_to_string(&mut response)?;
+/// println!("{response}");
+/// Ok(())
+/// }
+/// ```
+#[stable(feature = "unix_socket", since = "1.10.0")]
+pub struct UnixStream(pub(super) Socket);
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl fmt::Debug for UnixStream {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut builder = fmt.debug_struct("UnixStream");
+ builder.field("fd", self.0.as_inner());
+ if let Ok(addr) = self.local_addr() {
+ builder.field("local", &addr);
+ }
+ if let Ok(addr) = self.peer_addr() {
+ builder.field("peer", &addr);
+ }
+ builder.finish()
+ }
+}
+
+impl UnixStream {
+ /// Connects to the socket named by `path`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// let socket = match UnixStream::connect("/tmp/sock") {
+ /// Ok(sock) => sock,
+ /// Err(e) => {
+ /// println!("Couldn't connect: {e:?}");
+ /// return
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> {
+ unsafe {
+ let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ let (addr, len) = sockaddr_un(path.as_ref())?;
+
+ cvt(libc::connect(inner.as_raw_fd(), &addr as *const _ as *const _, len))?;
+ Ok(UnixStream(inner))
+ }
+ }
+
+ /// Connects to the socket specified by [`address`].
+ ///
+ /// [`address`]: crate::os::unix::net::SocketAddr
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixListener, UnixStream};
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let listener = UnixListener::bind("/path/to/the/socket")?;
+ /// let addr = listener.local_addr()?;
+ ///
+ /// let sock = match UnixStream::connect_addr(&addr) {
+ /// Ok(sock) => sock,
+ /// Err(e) => {
+ /// println!("Couldn't connect: {e:?}");
+ /// return Err(e)
+ /// }
+ /// };
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ pub fn connect_addr(socket_addr: &SocketAddr) -> io::Result<UnixStream> {
+ unsafe {
+ let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ cvt(libc::connect(
+ inner.as_raw_fd(),
+ &socket_addr.addr as *const _ as *const _,
+ socket_addr.len,
+ ))?;
+ Ok(UnixStream(inner))
+ }
+ }
+
+ /// Creates an unnamed pair of connected sockets.
+ ///
+ /// Returns two `UnixStream`s which are connected to each other.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// let (sock1, sock2) = match UnixStream::pair() {
+ /// Ok((sock1, sock2)) => (sock1, sock2),
+ /// Err(e) => {
+ /// println!("Couldn't create a pair of sockets: {e:?}");
+ /// return
+ /// }
+ /// };
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn pair() -> io::Result<(UnixStream, UnixStream)> {
+ let (i1, i2) = Socket::new_pair(libc::AF_UNIX, libc::SOCK_STREAM)?;
+ Ok((UnixStream(i1), UnixStream(i2)))
+ }
+
+ /// Creates a new independently owned handle to the underlying socket.
+ ///
+ /// The returned `UnixStream` is a reference to the same stream that this
+ /// object references. Both handles will read and write the same stream of
+ /// data, and options set on one stream will be propagated to the other
+ /// stream.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let sock_copy = socket.try_clone().expect("Couldn't clone socket");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn try_clone(&self) -> io::Result<UnixStream> {
+ self.0.duplicate().map(UnixStream)
+ }
+
+ /// Returns the socket address of the local half of this connection.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let addr = socket.local_addr().expect("Couldn't get local address");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ SocketAddr::new(|addr, len| unsafe { libc::getsockname(self.as_raw_fd(), addr, len) })
+ }
+
+ /// Returns the socket address of the remote half of this connection.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let addr = socket.peer_addr().expect("Couldn't get peer address");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ SocketAddr::new(|addr, len| unsafe { libc::getpeername(self.as_raw_fd(), addr, len) })
+ }
+
+ /// Gets the peer credentials for this Unix domain socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(peer_credentials_unix_socket)]
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let peer_cred = socket.peer_cred().expect("Couldn't get peer credentials");
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "peer_credentials_unix_socket", issue = "42839", reason = "unstable")]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "watchos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ pub fn peer_cred(&self) -> io::Result<UCred> {
+ ucred::peer_cred(self)
+ }
+
+ /// Sets the read timeout for the socket.
+ ///
+ /// If the provided value is [`None`], then [`read`] calls will block
+ /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method.
+ ///
+ /// [`read`]: io::Read::read
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.set_read_timeout(Some(Duration::new(1, 0))).expect("Couldn't set read timeout");
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::os::unix::net::UnixStream;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let result = socket.set_read_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+ self.0.set_timeout(timeout, libc::SO_RCVTIMEO)
+ }
+
+ /// Sets the write timeout for the socket.
+ ///
+ /// If the provided value is [`None`], then [`write`] calls will block
+ /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is
+ /// passed to this method.
+ ///
+ /// [`write`]: io::Write::write
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.set_write_timeout(Some(Duration::new(1, 0)))
+ /// .expect("Couldn't set write timeout");
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// An [`Err`] is returned if the zero [`Duration`] is passed to this
+ /// method:
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::os::unix::net::UnixStream;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let result = socket.set_write_timeout(Some(Duration::new(0, 0)));
+ /// let err = result.unwrap_err();
+ /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+ self.0.set_timeout(timeout, libc::SO_SNDTIMEO)
+ }
+
+ /// Returns the read timeout of this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.set_read_timeout(Some(Duration::new(1, 0))).expect("Couldn't set read timeout");
+ /// assert_eq!(socket.read_timeout()?, Some(Duration::new(1, 0)));
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.timeout(libc::SO_RCVTIMEO)
+ }
+
+ /// Returns the write timeout of this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ /// use std::time::Duration;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.set_write_timeout(Some(Duration::new(1, 0)))
+ /// .expect("Couldn't set write timeout");
+ /// assert_eq!(socket.write_timeout()?, Some(Duration::new(1, 0)));
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0.timeout(libc::SO_SNDTIMEO)
+ }
+
+ /// Moves the socket into or out of nonblocking mode.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.set_nonblocking(true).expect("Couldn't set nonblocking");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.0.set_nonblocking(nonblocking)
+ }
+
+ /// Moves the socket to pass Unix credentials as a control message in [`SocketAncillary`].
+ ///
+ /// Sets the socket option `SO_PASSCRED`.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.set_passcred(true).expect("Couldn't set passcred");
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
+ self.0.set_passcred(passcred)
+ }
+
+ /// Gets the current value of the socket option for passing Unix credentials in [`SocketAncillary`].
+ /// This value can be changed by [`set_passcred`].
+ ///
+ /// Gets the socket option `SO_PASSCRED`.
+ ///
+ /// [`set_passcred`]: UnixStream::set_passcred
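+ ///
+ /// # Examples
+ ///
+ /// A minimal usage sketch (illustrative only), mirroring the `set_passcred`
+ /// example above:
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.set_passcred(true)?;
+ /// assert_eq!(socket.passcred()?, true);
+ /// Ok(())
+ /// }
+ /// ```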
+ #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn passcred(&self) -> io::Result<bool> {
+ self.0.passcred()
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// if let Ok(Some(err)) = socket.take_error() {
+ /// println!("Got error: {err:?}");
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// # Platform specific
+ /// On Redox this always returns `None`.
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0.take_error()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of [`Shutdown`]).
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::os::unix::net::UnixStream;
+ /// use std::net::Shutdown;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// socket.shutdown(Shutdown::Both).expect("shutdown function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "unix_socket", since = "1.10.0")]
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.0.shutdown(how)
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_peek)]
+ ///
+ /// use std::os::unix::net::UnixStream;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let mut buf = [0; 10];
+ /// let len = socket.peek(&mut buf).expect("peek failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[unstable(feature = "unix_socket_peek", issue = "76923")]
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.peek(buf)
+ }
+
+ /// Receives data and ancillary data from socket.
+ ///
+ /// On success, returns the number of bytes read.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixStream, SocketAncillary, AncillaryData};
+ /// use std::io::IoSliceMut;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let mut buf1 = [1; 8];
+ /// let mut buf2 = [2; 16];
+ /// let mut buf3 = [3; 8];
+ /// let mut bufs = &mut [
+ /// IoSliceMut::new(&mut buf1),
+ /// IoSliceMut::new(&mut buf2),
+ /// IoSliceMut::new(&mut buf3),
+ /// ][..];
+ /// let mut fds = [0; 8];
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// let size = socket.recv_vectored_with_ancillary(bufs, &mut ancillary)?;
+ /// println!("received {size}");
+ /// for ancillary_result in ancillary.messages() {
+ /// if let AncillaryData::ScmRights(scm_rights) = ancillary_result.unwrap() {
+ /// for fd in scm_rights {
+ /// println!("receive file descriptor: {fd}");
+ /// }
+ /// }
+ /// }
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux"))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn recv_vectored_with_ancillary(
+ &self,
+ bufs: &mut [IoSliceMut<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+ ) -> io::Result<usize> {
+ let (count, _, _) = recv_vectored_with_ancillary_from(&self.0, bufs, ancillary)?;
+
+ Ok(count)
+ }
+
+ /// Sends data and ancillary data on the socket.
+ ///
+ /// On success, returns the number of bytes written.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
+ #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ /// #![feature(unix_socket_ancillary_data)]
+ /// use std::os::unix::net::{UnixStream, SocketAncillary};
+ /// use std::io::IoSlice;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let socket = UnixStream::connect("/tmp/sock")?;
+ /// let buf1 = [1; 8];
+ /// let buf2 = [2; 16];
+ /// let buf3 = [3; 8];
+ /// let bufs = &[
+ /// IoSlice::new(&buf1),
+ /// IoSlice::new(&buf2),
+ /// IoSlice::new(&buf3),
+ /// ][..];
+ /// let fds = [0, 1, 2];
+ /// let mut ancillary_buffer = [0; 128];
+ /// let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
+ /// ancillary.add_fds(&fds[..]);
+ /// socket.send_vectored_with_ancillary(bufs, &mut ancillary)
+ /// .expect("send_vectored_with_ancillary function failed");
+ /// Ok(())
+ /// }
+ /// ```
+ #[cfg(any(doc, target_os = "android", target_os = "linux"))]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn send_vectored_with_ancillary(
+ &self,
+ bufs: &[IoSlice<'_>],
+ ancillary: &mut SocketAncillary<'_>,
+ ) -> io::Result<usize> {
+ send_vectored_with_ancillary_to(&self.0, None, bufs, ancillary)
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl io::Read for UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ io::Read::read(&mut &*self, buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ io::Read::read_vectored(&mut &*self, bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ io::Read::is_read_vectored(&&*self)
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl<'a> io::Read for &'a UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl io::Write for UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ io::Write::write(&mut &*self, buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ io::Write::write_vectored(&mut &*self, bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ io::Write::is_write_vectored(&&*self)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ io::Write::flush(&mut &*self)
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl<'a> io::Write for &'a UnixStream {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl AsRawFd for UnixStream {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl FromRawFd for UnixStream {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> UnixStream {
+ UnixStream(Socket::from_inner(FromInner::from_inner(OwnedFd::from_raw_fd(fd))))
+ }
+}
+
+#[stable(feature = "unix_socket", since = "1.10.0")]
+impl IntoRawFd for UnixStream {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for UnixStream {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<UnixStream> for OwnedFd {
+ #[inline]
+ fn from(unix_stream: UnixStream) -> OwnedFd {
+ unsafe { OwnedFd::from_raw_fd(unix_stream.into_raw_fd()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for UnixStream {
+ #[inline]
+ fn from(owned: OwnedFd) -> Self {
+ unsafe { Self::from_raw_fd(owned.into_raw_fd()) }
+ }
+}
diff --git a/library/std/src/os/unix/net/tests.rs b/library/std/src/os/unix/net/tests.rs
new file mode 100644
index 000000000..e4499f9b6
--- /dev/null
+++ b/library/std/src/os/unix/net/tests.rs
@@ -0,0 +1,753 @@
+use super::*;
+use crate::io::prelude::*;
+use crate::io::{self, ErrorKind, IoSlice, IoSliceMut};
+#[cfg(any(target_os = "android", target_os = "linux"))]
+use crate::os::unix::io::AsRawFd;
+use crate::sys_common::io::test::tmpdir;
+use crate::thread;
+use crate::time::Duration;
+
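+// Test helper: unwraps a `Result`, panicking with the error's `Display` output
+// so the failing operation is visible in the test report.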
+macro_rules! or_panic {
+ ($e:expr) => {
+ match $e {
+ Ok(e) => e,
+ Err(e) => panic!("{e}"),
+ }
+ };
+}
+
+#[test]
+fn basic() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join("sock");
+ let msg1 = b"hello";
+ let msg2 = b"world!";
+
+ let listener = or_panic!(UnixListener::bind(&socket_path));
+ let thread = thread::spawn(move || {
+ let mut stream = or_panic!(listener.accept()).0;
+ let mut buf = [0; 5];
+ or_panic!(stream.read(&mut buf));
+ assert_eq!(&msg1[..], &buf[..]);
+ or_panic!(stream.write_all(msg2));
+ });
+
+ let mut stream = or_panic!(UnixStream::connect(&socket_path));
+ assert_eq!(Some(&*socket_path), stream.peer_addr().unwrap().as_pathname());
+ or_panic!(stream.write_all(msg1));
+ let mut buf = vec![];
+ or_panic!(stream.read_to_end(&mut buf));
+ assert_eq!(&msg2[..], &buf[..]);
+ drop(stream);
+
+ thread.join().unwrap();
+}
+
+#[test]
+fn vectored() {
+ let (mut s1, mut s2) = or_panic!(UnixStream::pair());
+
+ let len = or_panic!(s1.write_vectored(&[
+ IoSlice::new(b"hello"),
+ IoSlice::new(b" "),
+ IoSlice::new(b"world!")
+ ],));
+ assert_eq!(len, 12);
+
+ let mut buf1 = [0; 6];
+ let mut buf2 = [0; 7];
+ let len =
+ or_panic!(s2.read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],));
+ assert_eq!(len, 12);
+ assert_eq!(&buf1, b"hello ");
+ assert_eq!(&buf2, b"world!\0");
+}
+
+#[test]
+fn pair() {
+ let msg1 = b"hello";
+ let msg2 = b"world!";
+
+ let (mut s1, mut s2) = or_panic!(UnixStream::pair());
+ let thread = thread::spawn(move || {
+ // s1 must be moved in or the test will hang!
+ let mut buf = [0; 5];
+ or_panic!(s1.read(&mut buf));
+ assert_eq!(&msg1[..], &buf[..]);
+ or_panic!(s1.write_all(msg2));
+ });
+
+ or_panic!(s2.write_all(msg1));
+ let mut buf = vec![];
+ or_panic!(s2.read_to_end(&mut buf));
+ assert_eq!(&msg2[..], &buf[..]);
+ drop(s2);
+
+ thread.join().unwrap();
+}
+
+#[test]
+fn try_clone() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join("sock");
+ let msg1 = b"hello";
+ let msg2 = b"world";
+
+ let listener = or_panic!(UnixListener::bind(&socket_path));
+ let thread = thread::spawn(move || {
+ let mut stream = or_panic!(listener.accept()).0;
+ or_panic!(stream.write_all(msg1));
+ or_panic!(stream.write_all(msg2));
+ });
+
+ let mut stream = or_panic!(UnixStream::connect(&socket_path));
+ let mut stream2 = or_panic!(stream.try_clone());
+
+ let mut buf = [0; 5];
+ or_panic!(stream.read(&mut buf));
+ assert_eq!(&msg1[..], &buf[..]);
+ or_panic!(stream2.read(&mut buf));
+ assert_eq!(&msg2[..], &buf[..]);
+
+ thread.join().unwrap();
+}
+
+#[test]
+fn iter() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join("sock");
+
+ let listener = or_panic!(UnixListener::bind(&socket_path));
+ let thread = thread::spawn(move || {
+ for stream in listener.incoming().take(2) {
+ let mut stream = or_panic!(stream);
+ let mut buf = [0];
+ or_panic!(stream.read(&mut buf));
+ }
+ });
+
+ for _ in 0..2 {
+ let mut stream = or_panic!(UnixStream::connect(&socket_path));
+ or_panic!(stream.write_all(&[0]));
+ }
+
+ thread.join().unwrap();
+}
+
+#[test]
+fn long_path() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join(
+ "asdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfa\
+ sasdfasdfasdasdfasdfasdfadfasdfasdfasdfasdfasdf",
+ );
+ match UnixStream::connect(&socket_path) {
+ Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {}
+ Err(e) => panic!("unexpected error {e}"),
+ Ok(_) => panic!("unexpected success"),
+ }
+
+ match UnixListener::bind(&socket_path) {
+ Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {}
+ Err(e) => panic!("unexpected error {e}"),
+ Ok(_) => panic!("unexpected success"),
+ }
+
+ match UnixDatagram::bind(&socket_path) {
+ Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {}
+ Err(e) => panic!("unexpected error {e}"),
+ Ok(_) => panic!("unexpected success"),
+ }
+}
+
+#[test]
+fn timeouts() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join("sock");
+
+ let _listener = or_panic!(UnixListener::bind(&socket_path));
+
+ let stream = or_panic!(UnixStream::connect(&socket_path));
+ let dur = Duration::new(15410, 0);
+
+ assert_eq!(None, or_panic!(stream.read_timeout()));
+
+ or_panic!(stream.set_read_timeout(Some(dur)));
+ assert_eq!(Some(dur), or_panic!(stream.read_timeout()));
+
+ assert_eq!(None, or_panic!(stream.write_timeout()));
+
+ or_panic!(stream.set_write_timeout(Some(dur)));
+ assert_eq!(Some(dur), or_panic!(stream.write_timeout()));
+
+ or_panic!(stream.set_read_timeout(None));
+ assert_eq!(None, or_panic!(stream.read_timeout()));
+
+ or_panic!(stream.set_write_timeout(None));
+ assert_eq!(None, or_panic!(stream.write_timeout()));
+}
+
+#[test]
+fn test_read_timeout() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join("sock");
+
+ let _listener = or_panic!(UnixListener::bind(&socket_path));
+
+ let mut stream = or_panic!(UnixStream::connect(&socket_path));
+ or_panic!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+ let mut buf = [0; 10];
+ let kind = stream.read_exact(&mut buf).err().expect("expected error").kind();
+ assert!(
+ kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut,
+ "unexpected_error: {:?}",
+ kind
+ );
+}
+
+#[test]
+fn test_read_with_timeout() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join("sock");
+
+ let listener = or_panic!(UnixListener::bind(&socket_path));
+
+ let mut stream = or_panic!(UnixStream::connect(&socket_path));
+ or_panic!(stream.set_read_timeout(Some(Duration::from_millis(1000))));
+
+ let mut other_end = or_panic!(listener.accept()).0;
+ or_panic!(other_end.write_all(b"hello world"));
+
+ let mut buf = [0; 11];
+ or_panic!(stream.read(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+
+ let kind = stream.read_exact(&mut buf).err().expect("expected error").kind();
+ assert!(
+ kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut,
+ "unexpected_error: {:?}",
+ kind
+ );
+}
+
+// Ensure the `set_read_timeout` and `set_write_timeout` calls return errors
+// when passed zero Durations
+#[test]
+fn test_unix_stream_timeout_zero_duration() {
+ let dir = tmpdir();
+ let socket_path = dir.path().join("sock");
+
+ let listener = or_panic!(UnixListener::bind(&socket_path));
+ let stream = or_panic!(UnixStream::connect(&socket_path));
+
+ let result = stream.set_write_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+
+ let result = stream.set_read_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+
+ drop(listener);
+}
+
+#[test]
+fn test_unix_datagram() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock1");
+ let path2 = dir.path().join("sock2");
+
+ let sock1 = or_panic!(UnixDatagram::bind(&path1));
+ let sock2 = or_panic!(UnixDatagram::bind(&path2));
+
+ let msg = b"hello world";
+ or_panic!(sock1.send_to(msg, &path2));
+ let mut buf = [0; 11];
+ or_panic!(sock2.recv_from(&mut buf));
+ assert_eq!(msg, &buf[..]);
+}
+
+#[test]
+fn test_unnamed_unix_datagram() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock1");
+
+ let sock1 = or_panic!(UnixDatagram::bind(&path1));
+ let sock2 = or_panic!(UnixDatagram::unbound());
+
+ let msg = b"hello world";
+ or_panic!(sock2.send_to(msg, &path1));
+ let mut buf = [0; 11];
+ let (size, addr) = or_panic!(sock1.recv_from(&mut buf));
+ assert_eq!(size, 11);
+ assert!(addr.is_unnamed());
+ assert_eq!(msg, &buf[..]);
+}
+
+#[test]
+fn test_unix_datagram_connect_to_recv_addr() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock1");
+ let path2 = dir.path().join("sock2");
+
+ let sock1 = or_panic!(UnixDatagram::bind(&path1));
+ let sock2 = or_panic!(UnixDatagram::bind(&path2));
+
+ let msg = b"hello world";
+ let sock1_addr = or_panic!(sock1.local_addr());
+ or_panic!(sock2.send_to_addr(msg, &sock1_addr));
+ let mut buf = [0; 11];
+ let (_, addr) = or_panic!(sock1.recv_from(&mut buf));
+
+ let new_msg = b"hello back";
+ let mut new_buf = [0; 10];
+ or_panic!(sock2.connect_addr(&addr));
+ or_panic!(sock2.send(new_msg)); // sends to the default destination set by connect_addr
+ let size = or_panic!(sock2.recv(&mut new_buf));
+ assert_eq!(size, 10);
+ assert_eq!(new_msg, &new_buf[..]);
+}
+
+#[test]
+fn test_connect_unix_datagram() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock1");
+ let path2 = dir.path().join("sock2");
+
+ let bsock1 = or_panic!(UnixDatagram::bind(&path1));
+ let bsock2 = or_panic!(UnixDatagram::bind(&path2));
+ let sock = or_panic!(UnixDatagram::unbound());
+ or_panic!(sock.connect(&path1));
+
+ // Check send()
+ let msg = b"hello there";
+ or_panic!(sock.send(msg));
+ let mut buf = [0; 11];
+ let (size, addr) = or_panic!(bsock1.recv_from(&mut buf));
+ assert_eq!(size, 11);
+ assert!(addr.is_unnamed());
+ assert_eq!(msg, &buf[..]);
+
+ // Changing default socket works too
+ or_panic!(sock.connect(&path2));
+ or_panic!(sock.send(msg));
+ or_panic!(bsock2.recv_from(&mut buf));
+}
+
+#[test]
+fn test_unix_datagram_recv() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock1");
+
+ let sock1 = or_panic!(UnixDatagram::bind(&path1));
+ let sock2 = or_panic!(UnixDatagram::unbound());
+ or_panic!(sock2.connect(&path1));
+
+ let msg = b"hello world";
+ or_panic!(sock2.send(msg));
+ let mut buf = [0; 11];
+ let size = or_panic!(sock1.recv(&mut buf));
+ assert_eq!(size, 11);
+ assert_eq!(msg, &buf[..]);
+}
+
+#[test]
+fn datagram_pair() {
+ let msg1 = b"hello";
+ let msg2 = b"world!";
+
+ let (s1, s2) = or_panic!(UnixDatagram::pair());
+ let thread = thread::spawn(move || {
+ // s1 must be moved in or the test will hang!
+ let mut buf = [0; 5];
+ or_panic!(s1.recv(&mut buf));
+ assert_eq!(&msg1[..], &buf[..]);
+ or_panic!(s1.send(msg2));
+ });
+
+ or_panic!(s2.send(msg1));
+ let mut buf = [0; 6];
+ or_panic!(s2.recv(&mut buf));
+ assert_eq!(&msg2[..], &buf[..]);
+ drop(s2);
+
+ thread.join().unwrap();
+}
+
+// Ensure the `set_read_timeout` and `set_write_timeout` calls return errors
+// when passed zero Durations
+#[test]
+fn test_unix_datagram_timeout_zero_duration() {
+ let dir = tmpdir();
+ let path = dir.path().join("sock");
+
+ let datagram = or_panic!(UnixDatagram::bind(&path));
+
+ let result = datagram.set_write_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+
+ let result = datagram.set_read_timeout(Some(Duration::new(0, 0)));
+ let err = result.unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::InvalidInput);
+}
+
+#[test]
+fn abstract_namespace_not_allowed_connect() {
+ assert!(UnixStream::connect("\0asdf").is_err());
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_abstract_stream_connect() {
+ let msg1 = b"hello";
+ let msg2 = b"world";
+
+ let socket_addr = or_panic!(SocketAddr::from_abstract_namespace(b"namespace"));
+ let listener = or_panic!(UnixListener::bind_addr(&socket_addr));
+
+ let thread = thread::spawn(move || {
+ let mut stream = or_panic!(listener.accept()).0;
+ let mut buf = [0; 5];
+ or_panic!(stream.read(&mut buf));
+ assert_eq!(&msg1[..], &buf[..]);
+ or_panic!(stream.write_all(msg2));
+ });
+
+ let mut stream = or_panic!(UnixStream::connect_addr(&socket_addr));
+
+ let peer = or_panic!(stream.peer_addr());
+ assert_eq!(peer.as_abstract_namespace().unwrap(), b"namespace");
+
+ or_panic!(stream.write_all(msg1));
+ let mut buf = vec![];
+ or_panic!(stream.read_to_end(&mut buf));
+ assert_eq!(&msg2[..], &buf[..]);
+ drop(stream);
+
+ thread.join().unwrap();
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_abstract_stream_iter() {
+ let addr = or_panic!(SocketAddr::from_abstract_namespace(b"hidden"));
+ let listener = or_panic!(UnixListener::bind_addr(&addr));
+
+ let thread = thread::spawn(move || {
+ for stream in listener.incoming().take(2) {
+ let mut stream = or_panic!(stream);
+ let mut buf = [0];
+ or_panic!(stream.read(&mut buf));
+ }
+ });
+
+ for _ in 0..2 {
+ let mut stream = or_panic!(UnixStream::connect_addr(&addr));
+ or_panic!(stream.write_all(&[0]));
+ }
+
+ thread.join().unwrap();
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_abstract_datagram_bind_send_to_addr() {
+ let addr1 = or_panic!(SocketAddr::from_abstract_namespace(b"ns1"));
+ let sock1 = or_panic!(UnixDatagram::bind_addr(&addr1));
+
+ let local = or_panic!(sock1.local_addr());
+ assert_eq!(local.as_abstract_namespace().unwrap(), b"ns1");
+
+ let addr2 = or_panic!(SocketAddr::from_abstract_namespace(b"ns2"));
+ let sock2 = or_panic!(UnixDatagram::bind_addr(&addr2));
+
+ let msg = b"hello world";
+ or_panic!(sock1.send_to_addr(msg, &addr2));
+ let mut buf = [0; 11];
+ let (len, addr) = or_panic!(sock2.recv_from(&mut buf));
+ assert_eq!(msg, &buf[..]);
+ assert_eq!(len, 11);
+ assert_eq!(addr.as_abstract_namespace().unwrap(), b"ns1");
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_abstract_datagram_connect_addr() {
+ let addr1 = or_panic!(SocketAddr::from_abstract_namespace(b"ns3"));
+ let bsock1 = or_panic!(UnixDatagram::bind_addr(&addr1));
+
+ let sock = or_panic!(UnixDatagram::unbound());
+ or_panic!(sock.connect_addr(&addr1));
+
+ let msg = b"hello world";
+ or_panic!(sock.send(msg));
+ let mut buf = [0; 11];
+ let (len, addr) = or_panic!(bsock1.recv_from(&mut buf));
+ assert_eq!(len, 11);
+ assert_eq!(addr.is_unnamed(), true);
+ assert_eq!(msg, &buf[..]);
+
+ let addr2 = or_panic!(SocketAddr::from_abstract_namespace(b"ns4"));
+ let bsock2 = or_panic!(UnixDatagram::bind_addr(&addr2));
+
+ or_panic!(sock.connect_addr(&addr2));
+ or_panic!(sock.send(msg));
+ or_panic!(bsock2.recv_from(&mut buf));
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_abstract_namespace_too_long() {
+ match SocketAddr::from_abstract_namespace(
+ b"abcdefghijklmnopqrstuvwxyzabcdefghijklmn\
+ opqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghi\
+ jklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz",
+ ) {
+ Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {}
+ Err(e) => panic!("unexpected error {e}"),
+ Ok(_) => panic!("unexpected success"),
+ }
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_abstract_namespace_no_pathname_and_not_unnamed() {
+ let namespace = b"local";
+ let addr = or_panic!(SocketAddr::from_abstract_namespace(&namespace[..]));
+ assert_eq!(addr.as_pathname(), None);
+ assert_eq!(addr.as_abstract_namespace(), Some(&namespace[..]));
+ assert_eq!(addr.is_unnamed(), false);
+}
+
+#[test]
+fn test_unix_stream_peek() {
+ let (txdone, rxdone) = crate::sync::mpsc::channel();
+
+ let dir = tmpdir();
+ let path = dir.path().join("sock");
+
+ let listener = or_panic!(UnixListener::bind(&path));
+ let thread = thread::spawn(move || {
+ let mut stream = or_panic!(listener.accept()).0;
+ or_panic!(stream.write_all(&[1, 3, 3, 7]));
+ or_panic!(rxdone.recv());
+ });
+
+ let mut stream = or_panic!(UnixStream::connect(&path));
+ let mut buf = [0; 10];
+ for _ in 0..2 {
+ assert_eq!(or_panic!(stream.peek(&mut buf)), 4);
+ }
+ assert_eq!(or_panic!(stream.read(&mut buf)), 4);
+
+ or_panic!(stream.set_nonblocking(true));
+ match stream.peek(&mut buf) {
+ Ok(_) => panic!("expected error"),
+ Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
+ Err(e) => panic!("unexpected error: {e}"),
+ }
+
+ or_panic!(txdone.send(()));
+ thread.join().unwrap();
+}
+
+#[test]
+fn test_unix_datagram_peek() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock");
+
+ let sock1 = or_panic!(UnixDatagram::bind(&path1));
+ let sock2 = or_panic!(UnixDatagram::unbound());
+ or_panic!(sock2.connect(&path1));
+
+ let msg = b"hello world";
+ or_panic!(sock2.send(msg));
+ for _ in 0..2 {
+ let mut buf = [0; 11];
+ let size = or_panic!(sock1.peek(&mut buf));
+ assert_eq!(size, 11);
+ assert_eq!(msg, &buf[..]);
+ }
+
+ let mut buf = [0; 11];
+ let size = or_panic!(sock1.recv(&mut buf));
+ assert_eq!(size, 11);
+ assert_eq!(msg, &buf[..]);
+}
+
+#[test]
+fn test_unix_datagram_peek_from() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock");
+
+ let sock1 = or_panic!(UnixDatagram::bind(&path1));
+ let sock2 = or_panic!(UnixDatagram::unbound());
+ or_panic!(sock2.connect(&path1));
+
+ let msg = b"hello world";
+ or_panic!(sock2.send(msg));
+ for _ in 0..2 {
+ let mut buf = [0; 11];
+ let (size, _) = or_panic!(sock1.peek_from(&mut buf));
+ assert_eq!(size, 11);
+ assert_eq!(msg, &buf[..]);
+ }
+
+ let mut buf = [0; 11];
+ let size = or_panic!(sock1.recv(&mut buf));
+ assert_eq!(size, 11);
+ assert_eq!(msg, &buf[..]);
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_send_vectored_fds_unix_stream() {
+ let (s1, s2) = or_panic!(UnixStream::pair());
+
+ let buf1 = [1; 8];
+ let bufs_send = &[IoSlice::new(&buf1[..])][..];
+
+ let mut ancillary1_buffer = [0; 128];
+ let mut ancillary1 = SocketAncillary::new(&mut ancillary1_buffer[..]);
+ assert!(ancillary1.add_fds(&[s1.as_raw_fd()][..]));
+
+ let size = or_panic!(s1.send_vectored_with_ancillary(&bufs_send, &mut ancillary1));
+ assert_eq!(size, 8);
+
+ let mut buf2 = [0; 8];
+ let mut bufs_recv = &mut [IoSliceMut::new(&mut buf2[..])][..];
+
+ let mut ancillary2_buffer = [0; 128];
+ let mut ancillary2 = SocketAncillary::new(&mut ancillary2_buffer[..]);
+
+ let size = or_panic!(s2.recv_vectored_with_ancillary(&mut bufs_recv, &mut ancillary2));
+ assert_eq!(size, 8);
+ assert_eq!(buf1, buf2);
+
+ let mut ancillary_data_vec = Vec::from_iter(ancillary2.messages());
+ assert_eq!(ancillary_data_vec.len(), 1);
+ if let AncillaryData::ScmRights(scm_rights) = ancillary_data_vec.pop().unwrap().unwrap() {
+ let fd_vec = Vec::from_iter(scm_rights);
+ assert_eq!(fd_vec.len(), 1);
+ unsafe {
+ libc::close(fd_vec[0]);
+ }
+ } else {
+ unreachable!("must be ScmRights");
+ }
+}
+
+#[cfg(any(target_os = "android", target_os = "linux",))]
+#[test]
+fn test_send_vectored_with_ancillary_to_unix_datagram() {
+ fn getpid() -> libc::pid_t {
+ unsafe { libc::getpid() }
+ }
+
+ fn getuid() -> libc::uid_t {
+ unsafe { libc::getuid() }
+ }
+
+ fn getgid() -> libc::gid_t {
+ unsafe { libc::getgid() }
+ }
+
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock1");
+ let path2 = dir.path().join("sock2");
+
+ let bsock1 = or_panic!(UnixDatagram::bind(&path1));
+ let bsock2 = or_panic!(UnixDatagram::bind(&path2));
+
+ or_panic!(bsock2.set_passcred(true));
+
+ let buf1 = [1; 8];
+ let bufs_send = &[IoSlice::new(&buf1[..])][..];
+
+ let mut ancillary1_buffer = [0; 128];
+ let mut ancillary1 = SocketAncillary::new(&mut ancillary1_buffer[..]);
+ let mut cred1 = SocketCred::new();
+ cred1.set_pid(getpid());
+ cred1.set_uid(getuid());
+ cred1.set_gid(getgid());
+ assert!(ancillary1.add_creds(&[cred1.clone()][..]));
+
+ let size =
+ or_panic!(bsock1.send_vectored_with_ancillary_to(&bufs_send, &mut ancillary1, &path2));
+ assert_eq!(size, 8);
+
+ let mut buf2 = [0; 8];
+ let mut bufs_recv = &mut [IoSliceMut::new(&mut buf2[..])][..];
+
+ let mut ancillary2_buffer = [0; 128];
+ let mut ancillary2 = SocketAncillary::new(&mut ancillary2_buffer[..]);
+
+ let (size, truncated, _addr) =
+ or_panic!(bsock2.recv_vectored_with_ancillary_from(&mut bufs_recv, &mut ancillary2));
+ assert_eq!(ancillary2.truncated(), false);
+ assert_eq!(size, 8);
+ assert_eq!(truncated, false);
+ assert_eq!(buf1, buf2);
+
+ let mut ancillary_data_vec = Vec::from_iter(ancillary2.messages());
+ assert_eq!(ancillary_data_vec.len(), 1);
+ if let AncillaryData::ScmCredentials(scm_credentials) =
+ ancillary_data_vec.pop().unwrap().unwrap()
+ {
+ let cred_vec = Vec::from_iter(scm_credentials);
+ assert_eq!(cred_vec.len(), 1);
+ assert_eq!(cred1.get_pid(), cred_vec[0].get_pid());
+ assert_eq!(cred1.get_uid(), cred_vec[0].get_uid());
+ assert_eq!(cred1.get_gid(), cred_vec[0].get_gid());
+ } else {
+ unreachable!("must be ScmCredentials");
+ }
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+#[test]
+fn test_send_vectored_with_ancillary_unix_datagram() {
+ let dir = tmpdir();
+ let path1 = dir.path().join("sock1");
+ let path2 = dir.path().join("sock2");
+
+ let bsock1 = or_panic!(UnixDatagram::bind(&path1));
+ let bsock2 = or_panic!(UnixDatagram::bind(&path2));
+
+ let buf1 = [1; 8];
+ let bufs_send = &[IoSlice::new(&buf1[..])][..];
+
+ let mut ancillary1_buffer = [0; 128];
+ let mut ancillary1 = SocketAncillary::new(&mut ancillary1_buffer[..]);
+ assert!(ancillary1.add_fds(&[bsock1.as_raw_fd()][..]));
+
+ or_panic!(bsock1.connect(&path2));
+ let size = or_panic!(bsock1.send_vectored_with_ancillary(&bufs_send, &mut ancillary1));
+ assert_eq!(size, 8);
+
+ let mut buf2 = [0; 8];
+ let mut bufs_recv = &mut [IoSliceMut::new(&mut buf2[..])][..];
+
+ let mut ancillary2_buffer = [0; 128];
+ let mut ancillary2 = SocketAncillary::new(&mut ancillary2_buffer[..]);
+
+ let (size, truncated) =
+ or_panic!(bsock2.recv_vectored_with_ancillary(&mut bufs_recv, &mut ancillary2));
+ assert_eq!(size, 8);
+ assert_eq!(truncated, false);
+ assert_eq!(buf1, buf2);
+
+ let mut ancillary_data_vec = Vec::from_iter(ancillary2.messages());
+ assert_eq!(ancillary_data_vec.len(), 1);
+ if let AncillaryData::ScmRights(scm_rights) = ancillary_data_vec.pop().unwrap().unwrap() {
+ let fd_vec = Vec::from_iter(scm_rights);
+ assert_eq!(fd_vec.len(), 1);
+ unsafe {
+ libc::close(fd_vec[0]);
+ }
+ } else {
+ unreachable!("must be ScmRights");
+ }
+}
diff --git a/library/std/src/os/unix/process.rs b/library/std/src/os/unix/process.rs
new file mode 100644
index 000000000..09b2bfe39
--- /dev/null
+++ b/library/std/src/os/unix/process.rs
@@ -0,0 +1,466 @@
+//! Unix-specific extensions to primitives in the [`std::process`] module.
+//!
+//! [`std::process`]: crate::process
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::ffi::OsStr;
+use crate::io;
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+use crate::process;
+use crate::sealed::Sealed;
+use crate::sys;
+use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
+
+#[cfg(not(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon")))]
+type UserId = u32;
+#[cfg(not(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon")))]
+type GroupId = u32;
+
+#[cfg(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon"))]
+type UserId = u16;
+#[cfg(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon"))]
+type GroupId = u16;
+
+/// Unix-specific extensions to the [`process::Command`] builder.
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait CommandExt: Sealed {
+ /// Sets the child process's user ID. This translates to a
+ /// `setuid` call in the child process. Failure in the `setuid`
+ /// call will cause the spawn to fail.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn uid(&mut self, id: UserId) -> &mut process::Command;
+
+ /// Similar to `uid`, but sets the group ID of the child process. This has
+ /// the same semantics as the `uid` field.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn gid(&mut self, id: GroupId) -> &mut process::Command;
+
+ /// Sets the supplementary group IDs of the child process. This translates to
+ /// a `setgroups` call in the child process.
+ #[unstable(feature = "setgroups", issue = "90747")]
+ fn groups(&mut self, groups: &[GroupId]) -> &mut process::Command;
+
+ /// Schedules a closure to be run just before the `exec` function is
+ /// invoked.
+ ///
+ /// The closure is allowed to return an I/O error whose OS error code will
+ /// be communicated back to the parent and returned as an error from when
+ /// the spawn was requested.
+ ///
+ /// Multiple closures can be registered and they will be called in order of
+ /// their registration. If a closure returns `Err` then no further closures
+ /// will be called and the spawn operation will immediately return with a
+ /// failure.
+ ///
+ /// # Notes and Safety
+ ///
+ /// This closure will be run in the context of the child process after a
+ /// `fork`. This primarily means that any modifications made to memory on
+ /// behalf of this closure will **not** be visible to the parent process.
+ /// This is often a very constrained environment where normal operations
+ /// like `malloc`, accessing environment variables through [`std::env`]
+ /// or acquiring a mutex are not guaranteed to work (due to
+ /// other threads perhaps still running when the `fork` was run).
+ ///
+ /// For further details refer to the [POSIX fork() specification]
+ /// and the equivalent documentation for any targeted
+ /// platform, especially the requirements around *async-signal-safety*.
+ ///
+ /// This also means that all resources such as file descriptors and
+ /// memory-mapped regions are duplicated. It is your responsibility to make
+ /// sure that the closure does not violate library invariants by making
+ /// invalid use of these duplicates.
+ ///
+ /// Panicking in the closure is safe only if all the format arguments for the
+ /// panic message can be safely formatted; this is because although
+ /// `Command` calls [`std::panic::always_abort`](crate::panic::always_abort)
+ /// before calling the pre_exec hook, panic will still try to format the
+ /// panic message.
+ ///
+ /// When this closure is run, aspects such as the stdio file descriptors and
+ /// working directory have successfully been changed, so output to these
+ /// locations might not appear where intended.
+ ///
+ /// [POSIX fork() specification]:
+ /// https://pubs.opengroup.org/onlinepubs/9699919799/functions/fork.html
+ /// [`std::env`]: mod@crate::env
+ #[stable(feature = "process_pre_exec", since = "1.34.0")]
+ unsafe fn pre_exec<F>(&mut self, f: F) -> &mut process::Command
+ where
+ F: FnMut() -> io::Result<()> + Send + Sync + 'static;
+
+ /// Schedules a closure to be run just before the `exec` function is
+ /// invoked.
+ ///
+ /// This method is stable and usable, but it should be unsafe. To address
+ /// that, it has been deprecated in favor of the unsafe [`pre_exec`].
+ ///
+ /// [`pre_exec`]: CommandExt::pre_exec
+ #[stable(feature = "process_exec", since = "1.15.0")]
+ #[deprecated(since = "1.37.0", note = "should be unsafe, use `pre_exec` instead")]
+ fn before_exec<F>(&mut self, f: F) -> &mut process::Command
+ where
+ F: FnMut() -> io::Result<()> + Send + Sync + 'static,
+ {
+ unsafe { self.pre_exec(f) }
+ }
+
+ /// Performs all the setup required by this `Command`, followed by calling
+ /// the `execvp` syscall.
+ ///
+ /// On success this function will not return, and otherwise it will return
+ /// an error indicating why the exec (or another part of the setup of the
+ /// `Command`) failed.
+ ///
+ /// `exec` not returning has the same implications as calling
+ /// [`process::exit`] – no destructors on the current stack or any other
+ /// thread’s stack will be run. Therefore, it is recommended to only call
+ /// `exec` at a point where it is fine to not run any destructors. Note
+ /// that the `execvp` syscall independently guarantees that all memory is
+ /// freed and all file descriptors with the `CLOEXEC` option (set by default
+ /// on all file descriptors opened by the standard library) are closed.
+ ///
+ /// This function, unlike `spawn`, will **not** `fork` the process to create
+ /// a new child. Like spawn, however, the default behavior for the stdio
+ /// descriptors will be to inherit them from the current process.
+ ///
+ /// # Notes
+ ///
+ /// The process may be in a "broken state" if this function returns in
+ /// error. For example the working directory, environment variables, signal
+ /// handling settings, various user/group information, or aspects of stdio
+ /// file descriptors may have changed. If a "transactional spawn" is
+ /// required to gracefully handle errors it is recommended to use the
+ /// cross-platform `spawn` instead.
+ #[stable(feature = "process_exec2", since = "1.9.0")]
+ fn exec(&mut self) -> io::Error;
+
+ /// Set executable argument
+ ///
+ /// Set the first process argument, `argv[0]`, to something other than the
+ /// default executable path.
+ #[stable(feature = "process_set_argv0", since = "1.45.0")]
+ fn arg0<S>(&mut self, arg: S) -> &mut process::Command
+ where
+ S: AsRef<OsStr>;
+
+ /// Sets the process group ID (PGID) of the child process. Equivalent to a
+ /// `setpgid` call in the child process, but may be more efficient.
+ ///
+ /// Process groups determine which processes receive signals.
+ ///
+ /// # Examples
+ ///
+ /// Pressing Ctrl-C in a terminal will send SIGINT to all processes in
+ /// the current foreground process group. By spawning the `sleep`
+ /// subprocess in a new process group, it will not receive SIGINT from the
+ /// terminal.
+ ///
+ /// The parent process could install a signal handler and manage the
+ /// subprocess on its own terms.
+ ///
+ /// A process group ID of 0 will use the process ID as the PGID.
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ /// use std::os::unix::process::CommandExt;
+ ///
+ /// Command::new("sleep")
+ /// .arg("10")
+ /// .process_group(0)
+ /// .spawn()?
+ /// .wait()?;
+ /// #
+ /// # Ok::<_, Box<dyn std::error::Error>>(())
+ /// ```
+ #[stable(feature = "process_set_process_group", since = "1.64.0")]
+ fn process_group(&mut self, pgroup: i32) -> &mut process::Command;
+}
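+
+// A minimal sketch of `pre_exec` usage (the command name and closure body are
+// placeholders): the closure runs in the forked child, so it should restrict
+// itself to async-signal-safe operations.
+//
+//     use std::os::unix::process::CommandExt;
+//     use std::process::Command;
+//
+//     let mut cmd = Command::new("true");
+//     unsafe {
+//         cmd.pre_exec(|| {
+//             // Only async-signal-safe work belongs here.
+//             Ok(())
+//         });
+//     }
+//     let status = cmd.status().unwrap();
+//     assert!(status.success());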
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl CommandExt for process::Command {
+ fn uid(&mut self, id: UserId) -> &mut process::Command {
+ self.as_inner_mut().uid(id);
+ self
+ }
+
+ fn gid(&mut self, id: GroupId) -> &mut process::Command {
+ self.as_inner_mut().gid(id);
+ self
+ }
+
+ fn groups(&mut self, groups: &[GroupId]) -> &mut process::Command {
+ self.as_inner_mut().groups(groups);
+ self
+ }
+
+ unsafe fn pre_exec<F>(&mut self, f: F) -> &mut process::Command
+ where
+ F: FnMut() -> io::Result<()> + Send + Sync + 'static,
+ {
+ self.as_inner_mut().pre_exec(Box::new(f));
+ self
+ }
+
+ fn exec(&mut self) -> io::Error {
+ // NOTE: This may *not* be safe to call after `libc::fork`, because it
+ // may allocate. That may be worth fixing at some point in the future.
+ self.as_inner_mut().exec(sys::process::Stdio::Inherit)
+ }
+
+ fn arg0<S>(&mut self, arg: S) -> &mut process::Command
+ where
+ S: AsRef<OsStr>,
+ {
+ self.as_inner_mut().set_arg_0(arg.as_ref());
+ self
+ }
+
+ fn process_group(&mut self, pgroup: i32) -> &mut process::Command {
+ self.as_inner_mut().pgroup(pgroup);
+ self
+ }
+}
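+
+// A minimal sketch of `exec` (the command is a placeholder): on success the call
+// never returns because the current process image is replaced, so the only value
+// ever observed is the error.
+//
+//     use std::os::unix::process::CommandExt;
+//     use std::process::Command;
+//
+//     let err = Command::new("ls").arg("-l").exec();
+//     eprintln!("exec failed: {err}");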
+
+/// Unix-specific extensions to [`process::ExitStatus`] and
+/// [`ExitStatusError`](process::ExitStatusError).
+///
+/// On Unix, `ExitStatus` **does not necessarily represent an exit status**, as
+/// passed to the `_exit` system call or returned by
+/// [`ExitStatus::code()`](crate::process::ExitStatus::code). It represents **any wait status**
+/// as returned by one of the `wait` family of system
+/// calls.
+///
+/// A Unix wait status (a Rust `ExitStatus`) can represent a Unix exit status, but can also
+/// represent other kinds of process event.
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ExitStatusExt: Sealed {
+ /// Creates a new `ExitStatus` or `ExitStatusError` from the raw underlying integer status
+ /// value from `wait`
+ ///
+ /// The value should be a **wait status, not an exit status**.
+ ///
+ /// # Panics
+ ///
+ /// Panics on an attempt to make an `ExitStatusError` from a wait status of `0`.
+ ///
+ /// Making an `ExitStatus` always succeeds and never panics.
+ #[stable(feature = "exit_status_from", since = "1.12.0")]
+ fn from_raw(raw: i32) -> Self;
+
+ /// If the process was terminated by a signal, returns that signal.
+ ///
+ /// In other words, if `WIFSIGNALED`, this returns `WTERMSIG`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn signal(&self) -> Option<i32>;
+
+ /// If the process was terminated by a signal, says whether it dumped core.
+ #[stable(feature = "unix_process_wait_more", since = "1.58.0")]
+ fn core_dumped(&self) -> bool;
+
+ /// If the process was stopped by a signal, returns that signal.
+ ///
+ /// In other words, if `WIFSTOPPED`, this returns `WSTOPSIG`. This is only possible if the status came from
+ /// a `wait` system call which was passed `WUNTRACED`, and was then converted into an `ExitStatus`.
+ #[stable(feature = "unix_process_wait_more", since = "1.58.0")]
+ fn stopped_signal(&self) -> Option<i32>;
+
+ /// Whether the process was continued from a stopped status.
+ ///
+ /// I.e., `WIFCONTINUED`. This is only possible if the status came from a `wait` system call
+ /// which was passed `WCONTINUED`, and was then converted into an `ExitStatus`.
+ #[stable(feature = "unix_process_wait_more", since = "1.58.0")]
+ fn continued(&self) -> bool;
+
+ /// Returns the underlying raw `wait` status.
+ ///
+ /// The returned integer is a **wait status, not an exit status**.
+ #[stable(feature = "unix_process_wait_more", since = "1.58.0")]
+ fn into_raw(self) -> i32;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ExitStatusExt for process::ExitStatus {
+ fn from_raw(raw: i32) -> Self {
+ process::ExitStatus::from_inner(From::from(raw))
+ }
+
+ fn signal(&self) -> Option<i32> {
+ self.as_inner().signal()
+ }
+
+ fn core_dumped(&self) -> bool {
+ self.as_inner().core_dumped()
+ }
+
+ fn stopped_signal(&self) -> Option<i32> {
+ self.as_inner().stopped_signal()
+ }
+
+ fn continued(&self) -> bool {
+ self.as_inner().continued()
+ }
+
+ fn into_raw(self) -> i32 {
+ self.as_inner().into_raw().into()
+ }
+}
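+
+// A minimal sketch of interpreting a raw wait status. The value 9 is a
+// placeholder and assumes the conventional Unix encoding, where low bits of 9
+// mean "terminated by signal 9", not "exited with code 9".
+//
+//     use std::os::unix::process::ExitStatusExt;
+//     use std::process::ExitStatus;
+//
+//     let status = ExitStatus::from_raw(9);
+//     assert_eq!(status.signal(), Some(9));
+//     assert_eq!(status.code(), None);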
+
+#[unstable(feature = "exit_status_error", issue = "84908")]
+impl ExitStatusExt for process::ExitStatusError {
+ fn from_raw(raw: i32) -> Self {
+ process::ExitStatus::from_raw(raw)
+ .exit_ok()
+ .expect_err("<ExitStatusError as ExitStatusExt>::from_raw(0) but zero is not an error")
+ }
+
+ fn signal(&self) -> Option<i32> {
+ self.into_status().signal()
+ }
+
+ fn core_dumped(&self) -> bool {
+ self.into_status().core_dumped()
+ }
+
+ fn stopped_signal(&self) -> Option<i32> {
+ self.into_status().stopped_signal()
+ }
+
+ fn continued(&self) -> bool {
+ self.into_status().continued()
+ }
+
+ fn into_raw(self) -> i32 {
+ self.into_status().into_raw()
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl FromRawFd for process::Stdio {
+ #[inline]
+ unsafe fn from_raw_fd(fd: RawFd) -> process::Stdio {
+ let fd = sys::fd::FileDesc::from_raw_fd(fd);
+ let io = sys::process::Stdio::Fd(fd);
+ process::Stdio::from_inner(io)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedFd> for process::Stdio {
+ #[inline]
+ fn from(fd: OwnedFd) -> process::Stdio {
+ let fd = sys::fd::FileDesc::from_inner(fd);
+ let io = sys::process::Stdio::Fd(fd);
+ process::Stdio::from_inner(io)
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl AsRawFd for process::ChildStdin {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.as_inner().as_raw_fd()
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl AsRawFd for process::ChildStdout {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.as_inner().as_raw_fd()
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl AsRawFd for process::ChildStderr {
+ #[inline]
+ fn as_raw_fd(&self) -> RawFd {
+ self.as_inner().as_raw_fd()
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawFd for process::ChildStdin {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_inner().into_raw_fd()
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawFd for process::ChildStdout {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_inner().into_raw_fd()
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawFd for process::ChildStderr {
+ #[inline]
+ fn into_raw_fd(self) -> RawFd {
+ self.into_inner().into_inner().into_raw_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for crate::process::ChildStdin {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::process::ChildStdin> for OwnedFd {
+ #[inline]
+ fn from(child_stdin: crate::process::ChildStdin) -> OwnedFd {
+ child_stdin.into_inner().into_inner().into_inner()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for crate::process::ChildStdout {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::process::ChildStdout> for OwnedFd {
+ #[inline]
+ fn from(child_stdout: crate::process::ChildStdout) -> OwnedFd {
+ child_stdout.into_inner().into_inner().into_inner()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for crate::process::ChildStderr {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.as_inner().as_fd()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::process::ChildStderr> for OwnedFd {
+ #[inline]
+ fn from(child_stderr: crate::process::ChildStderr) -> OwnedFd {
+ child_stderr.into_inner().into_inner().into_inner()
+ }
+}
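+
+// A minimal sketch of wiring one child's output into another child's input via
+// the conversions above (the commands are placeholders).
+//
+//     use std::os::unix::io::OwnedFd;
+//     use std::process::{Command, Stdio};
+//
+//     let ls = Command::new("ls").stdout(Stdio::piped()).spawn().unwrap();
+//     let ls_out = ls.stdout.expect("stdout was piped");
+//     // ChildStdout -> OwnedFd -> Stdio
+//     let grep = Command::new("grep")
+//         .arg("txt")
+//         .stdin(Stdio::from(OwnedFd::from(ls_out)))
+//         .spawn()
+//         .unwrap();
+//     let _ = grep.wait_with_output().unwrap();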
+
+/// Returns the OS-assigned process identifier associated with this process's parent.
+#[must_use]
+#[stable(feature = "unix_ppid", since = "1.27.0")]
+pub fn parent_id() -> u32 {
+ crate::sys::os::getppid()
+}
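+
+// Illustrative: querying the parent's PID from user code.
+//
+//     let ppid = std::os::unix::process::parent_id();
+//     println!("parent pid: {ppid}");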
diff --git a/library/std/src/os/unix/raw.rs b/library/std/src/os/unix/raw.rs
new file mode 100644
index 000000000..fe761627b
--- /dev/null
+++ b/library/std/src/os/unix/raw.rs
@@ -0,0 +1,33 @@
+//! Unix-specific primitives available on all unix platforms.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![deprecated(
+ since = "1.8.0",
+ note = "these type aliases are no longer supported by \
+ the standard library; the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+#[allow(non_camel_case_types)]
+pub type uid_t = u32;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+#[allow(non_camel_case_types)]
+pub type gid_t = u32;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+#[allow(non_camel_case_types)]
+pub type pid_t = i32;
+
+#[doc(inline)]
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub use super::platform::raw::pthread_t;
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use super::platform::raw::{blkcnt_t, time_t};
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use super::platform::raw::{blksize_t, dev_t, ino_t, mode_t, nlink_t, off_t};
diff --git a/library/std/src/os/unix/thread.rs b/library/std/src/os/unix/thread.rs
new file mode 100644
index 000000000..03dcc3a4f
--- /dev/null
+++ b/library/std/src/os/unix/thread.rs
@@ -0,0 +1,41 @@
+//! Unix-specific extensions to primitives in the [`std::thread`] module.
+//!
+//! [`std::thread`]: crate::thread
+
+#![stable(feature = "thread_extensions", since = "1.9.0")]
+
+#[allow(deprecated)]
+use crate::os::unix::raw::pthread_t;
+use crate::sys_common::{AsInner, IntoInner};
+use crate::thread::JoinHandle;
+
+#[stable(feature = "thread_extensions", since = "1.9.0")]
+#[allow(deprecated)]
+pub type RawPthread = pthread_t;
+
+/// Unix-specific extensions to [`JoinHandle`].
+#[stable(feature = "thread_extensions", since = "1.9.0")]
+pub trait JoinHandleExt {
+ /// Extracts the raw pthread_t without taking ownership
+ #[stable(feature = "thread_extensions", since = "1.9.0")]
+ fn as_pthread_t(&self) -> RawPthread;
+
+ /// Consumes the thread, returning the raw pthread_t
+ ///
+ /// This function **transfers ownership** of the underlying pthread_t to
+ /// the caller. Callers are then the unique owners of the pthread_t and
+ /// must either detach or join the pthread_t once it's no longer needed.
+ #[stable(feature = "thread_extensions", since = "1.9.0")]
+ fn into_pthread_t(self) -> RawPthread;
+}
+
+#[stable(feature = "thread_extensions", since = "1.9.0")]
+impl<T> JoinHandleExt for JoinHandle<T> {
+ fn as_pthread_t(&self) -> RawPthread {
+ self.as_inner().id() as RawPthread
+ }
+
+ fn into_pthread_t(self) -> RawPthread {
+ self.into_inner().into_id() as RawPthread
+ }
+}
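+
+// A minimal sketch of transferring ownership of the underlying pthread. It
+// assumes the `libc` crate in user code; the spawned closure is a placeholder.
+//
+//     use std::os::unix::thread::JoinHandleExt;
+//     use std::thread;
+//
+//     let handle = thread::spawn(|| ());
+//     // After `into_pthread_t` *we* are responsible for joining or detaching.
+//     let raw = handle.into_pthread_t();
+//     unsafe {
+//         libc::pthread_join(raw, std::ptr::null_mut());
+//     }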
diff --git a/library/std/src/os/unix/ucred.rs b/library/std/src/os/unix/ucred.rs
new file mode 100644
index 000000000..ae4faf27b
--- /dev/null
+++ b/library/std/src/os/unix/ucred.rs
@@ -0,0 +1,136 @@
+//! Unix peer credentials.
+
+// NOTE: Code in this file is heavily based on work done in PR 13 from the tokio-uds repository on
+// GitHub.
+//
+// For reference, the link is here: https://github.com/tokio-rs/tokio-uds/pull/13
+// Credit to Martin Habovštiak (GitHub username Kixunil) and contributors for this work.
+
+use libc::{gid_t, pid_t, uid_t};
+
+/// Credentials of a UNIX process, used for credential passing.
+#[unstable(feature = "peer_credentials_unix_socket", issue = "42839", reason = "unstable")]
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct UCred {
+ /// The UID part of the peer credential. This is the effective UID of the process at the domain
+ /// socket's endpoint.
+ pub uid: uid_t,
+ /// The GID part of the peer credential. This is the effective GID of the process at the domain
+ /// socket's endpoint.
+ pub gid: gid_t,
+ /// The PID part of the peer credential. This field is optional because the PID part of the
+ /// peer credentials is not supported on every platform. On platforms where the mechanism to
+ /// discover the PID exists, this field will be populated to the PID of the process at the
+ /// domain socket's endpoint. Otherwise, it will be set to None.
+ pub pid: Option<pid_t>,
+}
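+
+// A minimal sketch of retrieving peer credentials (requires the unstable
+// `peer_credentials_unix_socket` feature; the socket pair is a placeholder).
+//
+//     use std::os::unix::net::UnixStream;
+//
+//     let (a, _b) = UnixStream::pair().unwrap();
+//     let cred = a.peer_cred().unwrap();
+//     println!("peer uid={} gid={} pid={:?}", cred.uid, cred.gid, cred.pid);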
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+pub use self::impl_linux::peer_cred;
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "netbsd"
+))]
+pub use self::impl_bsd::peer_cred;
+
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+pub use self::impl_mac::peer_cred;
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub mod impl_linux {
+ use super::UCred;
+ use crate::os::unix::io::AsRawFd;
+ use crate::os::unix::net::UnixStream;
+ use crate::{io, mem};
+ use libc::{c_void, getsockopt, socklen_t, ucred, SOL_SOCKET, SO_PEERCRED};
+
+ pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
+ let ucred_size = mem::size_of::<ucred>();
+
+ // Trivial sanity checks.
+ assert!(mem::size_of::<u32>() <= mem::size_of::<usize>());
+ assert!(ucred_size <= u32::MAX as usize);
+
+ let mut ucred_size = ucred_size as socklen_t;
+ let mut ucred: ucred = ucred { pid: 1, uid: 1, gid: 1 };
+
+ unsafe {
+ let ret = getsockopt(
+ socket.as_raw_fd(),
+ SOL_SOCKET,
+ SO_PEERCRED,
+ &mut ucred as *mut ucred as *mut c_void,
+ &mut ucred_size,
+ );
+
+ if ret == 0 && ucred_size as usize == mem::size_of::<ucred>() {
+ Ok(UCred { uid: ucred.uid, gid: ucred.gid, pid: Some(ucred.pid) })
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+}
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "netbsd"
+))]
+pub mod impl_bsd {
+ use super::UCred;
+ use crate::io;
+ use crate::os::unix::io::AsRawFd;
+ use crate::os::unix::net::UnixStream;
+
+ pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
+ let mut cred = UCred { uid: 1, gid: 1, pid: None };
+ unsafe {
+ let ret = libc::getpeereid(socket.as_raw_fd(), &mut cred.uid, &mut cred.gid);
+
+ if ret == 0 { Ok(cred) } else { Err(io::Error::last_os_error()) }
+ }
+ }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+pub mod impl_mac {
+ use super::UCred;
+ use crate::os::unix::io::AsRawFd;
+ use crate::os::unix::net::UnixStream;
+ use crate::{io, mem};
+ use libc::{c_void, getpeereid, getsockopt, pid_t, socklen_t, LOCAL_PEERPID, SOL_LOCAL};
+
+ pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
+ let mut cred = UCred { uid: 1, gid: 1, pid: None };
+ unsafe {
+ let ret = getpeereid(socket.as_raw_fd(), &mut cred.uid, &mut cred.gid);
+
+ if ret != 0 {
+ return Err(io::Error::last_os_error());
+ }
+
+ let mut pid: pid_t = 1;
+ let mut pid_size = mem::size_of::<pid_t>() as socklen_t;
+
+ let ret = getsockopt(
+ socket.as_raw_fd(),
+ SOL_LOCAL,
+ LOCAL_PEERPID,
+ &mut pid as *mut pid_t as *mut c_void,
+ &mut pid_size,
+ );
+
+ if ret == 0 && pid_size as usize == mem::size_of::<pid_t>() {
+ cred.pid = Some(pid);
+ Ok(cred)
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+}
diff --git a/library/std/src/os/unix/ucred/tests.rs b/library/std/src/os/unix/ucred/tests.rs
new file mode 100644
index 000000000..e63a2fc24
--- /dev/null
+++ b/library/std/src/os/unix/ucred/tests.rs
@@ -0,0 +1,39 @@
+use crate::os::unix::net::UnixStream;
+use libc::{getegid, geteuid, getpid};
+
+#[test]
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "watchos",
+ target_os = "openbsd"
+))]
+fn test_socket_pair() {
+ // Create two connected sockets and get their peer credentials. They should be equal.
+ let (sock_a, sock_b) = UnixStream::pair().unwrap();
+ let (cred_a, cred_b) = (sock_a.peer_cred().unwrap(), sock_b.peer_cred().unwrap());
+ assert_eq!(cred_a, cred_b);
+
+ // Check that the UID and GIDs match up.
+ let uid = unsafe { geteuid() };
+ let gid = unsafe { getegid() };
+ assert_eq!(cred_a.uid, uid);
+ assert_eq!(cred_a.gid, gid);
+}
+
+#[test]
+#[cfg(any(target_os = "linux", target_os = "ios", target_os = "macos", target_os = "watchos"))]
+fn test_socket_pair_pids() {
+ // Create two connected sockets and get their peer credentials.
+ let (sock_a, sock_b) = UnixStream::pair().unwrap();
+ let (cred_a, cred_b) = (sock_a.peer_cred().unwrap(), sock_b.peer_cred().unwrap());
+
+ // On supported platforms (see the cfg above), the credentials should always include the PID.
+ let pid = unsafe { getpid() };
+ assert_eq!(cred_a.pid, Some(pid));
+ assert_eq!(cred_b.pid, Some(pid));
+}
diff --git a/library/std/src/os/vxworks/fs.rs b/library/std/src/os/vxworks/fs.rs
new file mode 100644
index 000000000..77e6238ca
--- /dev/null
+++ b/library/std/src/os/vxworks/fs.rs
@@ -0,0 +1,99 @@
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_attrib(&self) -> u8;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ 0
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ 0
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ 0
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+ fn st_attrib(&self) -> u8 {
+ self.as_inner().as_inner().st_attrib as u8
+ }
+}
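+
+// A minimal sketch of reading the raw stat fields (VxWorks targets only; the
+// path is a placeholder).
+//
+//     use std::fs;
+//     use std::os::vxworks::fs::MetadataExt;
+//
+//     let meta = fs::metadata("/tmp/some_file").unwrap();
+//     println!("dev={} ino={} mode={:o}", meta.st_dev(), meta.st_ino(), meta.st_mode());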
diff --git a/library/std/src/os/vxworks/mod.rs b/library/std/src/os/vxworks/mod.rs
new file mode 100644
index 000000000..0a7ac641d
--- /dev/null
+++ b/library/std/src/os/vxworks/mod.rs
@@ -0,0 +1,6 @@
+//! VxWorks-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod fs;
+pub mod raw;
diff --git a/library/std/src/os/vxworks/raw.rs b/library/std/src/os/vxworks/raw.rs
new file mode 100644
index 000000000..cb41ddfe2
--- /dev/null
+++ b/library/std/src/os/vxworks/raw.rs
@@ -0,0 +1,10 @@
+//! VxWorks-specific raw type definitions
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::os::raw::c_ulong;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_ulong;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use libc::{blkcnt_t, blksize_t, dev_t, ino_t, mode_t, nlink_t, off_t, time_t};
diff --git a/library/std/src/os/wasi/ffi.rs b/library/std/src/os/wasi/ffi.rs
new file mode 100644
index 000000000..41dd8702e
--- /dev/null
+++ b/library/std/src/os/wasi/ffi.rs
@@ -0,0 +1,11 @@
+//! WASI-specific extensions to primitives in the [`std::ffi`] module
+//!
+//! [`std::ffi`]: crate::ffi
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[path = "../unix/ffi/os_str.rs"]
+mod os_str;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::os_str::{OsStrExt, OsStringExt};
diff --git a/library/std/src/os/wasi/fs.rs b/library/std/src/os/wasi/fs.rs
new file mode 100644
index 000000000..160c8f1ec
--- /dev/null
+++ b/library/std/src/os/wasi/fs.rs
@@ -0,0 +1,558 @@
+//! WASI-specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![deny(unsafe_op_in_unsafe_fn)]
+#![unstable(feature = "wasi_ext", issue = "71213")]
+
+use crate::ffi::OsStr;
+use crate::fs::{self, File, Metadata, OpenOptions};
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::path::{Path, PathBuf};
+use crate::sys_common::{AsInner, AsInnerMut, FromInner};
+// Used for `File::read` on intra-doc links
+#[allow(unused_imports)]
+use io::{Read, Write};
+
+/// WASI-specific extensions to [`File`].
+pub trait FileExt {
+ /// Reads a number of bytes starting from a given offset.
+ ///
+ /// Returns the number of bytes read.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// Note that similar to [`File::read`], it is not an error to return with a
+ /// short read.
+ fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
+ let bufs = &mut [IoSliceMut::new(buf)];
+ self.read_vectored_at(bufs, offset)
+ }
+
+ /// Reads a number of bytes starting from a given offset.
+ ///
+ /// Returns the number of bytes read.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// Note that similar to [`File::read_vectored`], it is not an error to
+ /// return with a short read.
+ fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize>;
+
+ /// Reads the exact number of bytes required to fill `buf` from the given offset.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// Similar to [`Read::read_exact`] but uses [`read_at`] instead of `read`.
+ ///
+ /// [`read_at`]: FileExt::read_at
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters an error of the kind
+ /// [`io::ErrorKind::Interrupted`] then the error is ignored and the operation
+ /// will continue.
+ ///
+ /// If this function encounters an "end of file" before completely filling
+ /// the buffer, it returns an error of the kind [`io::ErrorKind::UnexpectedEof`].
+ /// The contents of `buf` are unspecified in this case.
+ ///
+ /// If any other read error is encountered then this function immediately
+ /// returns. The contents of `buf` are unspecified in this case.
+ ///
+ /// If this function returns an error, it is unspecified how many bytes it
+ /// has read, but it will never read more than would be necessary to
+ /// completely fill the buffer.
+ #[stable(feature = "rw_exact_all_at", since = "1.33.0")]
+ fn read_exact_at(&self, mut buf: &mut [u8], mut offset: u64) -> io::Result<()> {
+ while !buf.is_empty() {
+ match self.read_at(buf, offset) {
+ Ok(0) => break,
+ Ok(n) => {
+ let tmp = buf;
+ buf = &mut tmp[n..];
+ offset += n as u64;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ if !buf.is_empty() {
+ Err(io::const_io_error!(io::ErrorKind::UnexpectedEof, "failed to fill whole buffer"))
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Writes a number of bytes starting from a given offset.
+ ///
+ /// Returns the number of bytes written.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// When writing beyond the end of the file, the file is appropriately
+ /// extended and the intermediate bytes are initialized with the value 0.
+ ///
+ /// Note that similar to [`File::write`], it is not an error to return a
+ /// short write.
+ fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ let bufs = &[IoSlice::new(buf)];
+ self.write_vectored_at(bufs, offset)
+ }
+
+ /// Writes a number of bytes starting from a given offset.
+ ///
+ /// Returns the number of bytes written.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// When writing beyond the end of the file, the file is appropriately
+ /// extended and the intermediate bytes are initialized with the value 0.
+ ///
+ /// Note that similar to [`File::write_vectored`], it is not an error to return a
+ /// short write.
+ fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize>;
+
+ /// Attempts to write an entire buffer starting from a given offset.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor.
+ ///
+ /// The current file cursor is not affected by this function.
+ ///
+ /// This method will continuously call [`write_at`] until there is no more data
+ /// to be written or an error of non-[`io::ErrorKind::Interrupted`] kind is
+ /// returned. This method will not return until the entire buffer has been
+ /// successfully written or such an error occurs. The first error that is
+ /// not of [`io::ErrorKind::Interrupted`] kind generated from this method will be
+ /// returned.
+ ///
+ /// # Errors
+ ///
+ /// This function will return the first error of
+ /// non-[`io::ErrorKind::Interrupted`] kind that [`write_at`] returns.
+ ///
+ /// [`write_at`]: FileExt::write_at
+ #[stable(feature = "rw_exact_all_at", since = "1.33.0")]
+ fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> io::Result<()> {
+ while !buf.is_empty() {
+ match self.write_at(buf, offset) {
+ Ok(0) => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ ));
+ }
+ Ok(n) => {
+ buf = &buf[n..];
+ offset += n as u64
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => return Err(e),
+ }
+ }
+ Ok(())
+ }
+
+ /// Returns the current position within the file.
+ ///
+ /// This corresponds to the `fd_tell` syscall and is similar to
+ /// `seek` where you offset 0 bytes from the current position.
+ fn tell(&self) -> io::Result<u64>;
+
+ /// Adjust the flags associated with this file.
+ ///
+ /// This corresponds to the `fd_fdstat_set_flags` syscall.
+ fn fdstat_set_flags(&self, flags: u16) -> io::Result<()>;
+
+ /// Adjust the rights associated with this file.
+ ///
+ /// This corresponds to the `fd_fdstat_set_rights` syscall.
+ fn fdstat_set_rights(&self, rights: u64, inheriting: u64) -> io::Result<()>;
+
+ /// Provide file advisory information on a file descriptor.
+ ///
+ /// This corresponds to the `fd_advise` syscall.
+ fn advise(&self, offset: u64, len: u64, advice: u8) -> io::Result<()>;
+
+ /// Force the allocation of space in a file.
+ ///
+ /// This corresponds to the `fd_allocate` syscall.
+ fn allocate(&self, offset: u64, len: u64) -> io::Result<()>;
+
+ /// Create a directory.
+ ///
+ /// This corresponds to the `path_create_directory` syscall.
+ fn create_directory<P: AsRef<Path>>(&self, dir: P) -> io::Result<()>;
+
+ /// Read the contents of a symbolic link.
+ ///
+ /// This corresponds to the `path_readlink` syscall.
+ fn read_link<P: AsRef<Path>>(&self, path: P) -> io::Result<PathBuf>;
+
+ /// Return the attributes of a file or directory.
+ ///
+ /// This corresponds to the `path_filestat_get` syscall.
+ fn metadata_at<P: AsRef<Path>>(&self, lookup_flags: u32, path: P) -> io::Result<Metadata>;
+
+ /// Unlink a file.
+ ///
+ /// This corresponds to the `path_unlink_file` syscall.
+ fn remove_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()>;
+
+ /// Remove a directory.
+ ///
+ /// This corresponds to the `path_remove_directory` syscall.
+ fn remove_directory<P: AsRef<Path>>(&self, path: P) -> io::Result<()>;
+}
+
+// FIXME: bind fd_fdstat_get - need to define a custom return type
+// FIXME: bind fd_readdir - can't return `ReadDir` since we only have entry name
+// FIXME: bind fd_filestat_set_times maybe? - on crates.io for unix
+// FIXME: bind path_filestat_set_times maybe? - on crates.io for unix
+// FIXME: bind poll_oneoff maybe? - probably should wait for I/O to settle
+// FIXME: bind random_get maybe? - on crates.io for unix
+
+impl FileExt for fs::File {
+ fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> {
+ self.as_inner().as_inner().pread(bufs, offset)
+ }
+
+ fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> {
+ self.as_inner().as_inner().pwrite(bufs, offset)
+ }
+
+ fn tell(&self) -> io::Result<u64> {
+ self.as_inner().as_inner().tell()
+ }
+
+ fn fdstat_set_flags(&self, flags: u16) -> io::Result<()> {
+ self.as_inner().as_inner().set_flags(flags)
+ }
+
+ fn fdstat_set_rights(&self, rights: u64, inheriting: u64) -> io::Result<()> {
+ self.as_inner().as_inner().set_rights(rights, inheriting)
+ }
+
+ fn advise(&self, offset: u64, len: u64, advice: u8) -> io::Result<()> {
+ let advice = match advice {
+ a if a == wasi::ADVICE_NORMAL.raw() => wasi::ADVICE_NORMAL,
+ a if a == wasi::ADVICE_SEQUENTIAL.raw() => wasi::ADVICE_SEQUENTIAL,
+ a if a == wasi::ADVICE_RANDOM.raw() => wasi::ADVICE_RANDOM,
+ a if a == wasi::ADVICE_WILLNEED.raw() => wasi::ADVICE_WILLNEED,
+ a if a == wasi::ADVICE_DONTNEED.raw() => wasi::ADVICE_DONTNEED,
+ a if a == wasi::ADVICE_NOREUSE.raw() => wasi::ADVICE_NOREUSE,
+ _ => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "invalid parameter 'advice'",
+ ));
+ }
+ };
+
+ self.as_inner().as_inner().advise(offset, len, advice)
+ }
+
+ fn allocate(&self, offset: u64, len: u64) -> io::Result<()> {
+ self.as_inner().as_inner().allocate(offset, len)
+ }
+
+ fn create_directory<P: AsRef<Path>>(&self, dir: P) -> io::Result<()> {
+ self.as_inner().as_inner().create_directory(osstr2str(dir.as_ref().as_ref())?)
+ }
+
+ fn read_link<P: AsRef<Path>>(&self, path: P) -> io::Result<PathBuf> {
+ self.as_inner().read_link(path.as_ref())
+ }
+
+ fn metadata_at<P: AsRef<Path>>(&self, lookup_flags: u32, path: P) -> io::Result<Metadata> {
+ let m = self.as_inner().metadata_at(lookup_flags, path.as_ref())?;
+ Ok(FromInner::from_inner(m))
+ }
+
+ fn remove_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+ self.as_inner().as_inner().unlink_file(osstr2str(path.as_ref().as_ref())?)
+ }
+
+ fn remove_directory<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+ self.as_inner().as_inner().remove_directory(osstr2str(path.as_ref().as_ref())?)
+ }
+}
+
+/// WASI-specific extensions to [`fs::OpenOptions`].
+pub trait OpenOptionsExt {
+ /// Pass custom `dirflags` argument to `path_open`.
+ ///
+ /// This option configures the `dirflags` argument to the
+ /// `path_open` syscall which `OpenOptions` will eventually call. The
+ /// `dirflags` argument configures how the file is looked up, currently
+ /// primarily affecting whether symlinks are followed or not.
+ ///
+    /// By default this value is `__WASI_LOOKUP_SYMLINK_FOLLOW`, meaning that
+    /// symlinks are followed. You can call this method with 0 to disable
+    /// following symlinks.
+ fn lookup_flags(&mut self, flags: u32) -> &mut Self;
+
+ /// Indicates whether `OpenOptions` must open a directory or not.
+ ///
+ /// This method will configure whether the `__WASI_O_DIRECTORY` flag is
+ /// passed when opening a file. When passed it will require that the opened
+ /// path is a directory.
+ ///
+    /// By default this option is `false`.
+ fn directory(&mut self, dir: bool) -> &mut Self;
+
+ /// Indicates whether `__WASI_FDFLAG_DSYNC` is passed in the `fs_flags`
+ /// field of `path_open`.
+ ///
+    /// By default this option is `false`.
+ fn dsync(&mut self, dsync: bool) -> &mut Self;
+
+ /// Indicates whether `__WASI_FDFLAG_NONBLOCK` is passed in the `fs_flags`
+ /// field of `path_open`.
+ ///
+    /// By default this option is `false`.
+ fn nonblock(&mut self, nonblock: bool) -> &mut Self;
+
+ /// Indicates whether `__WASI_FDFLAG_RSYNC` is passed in the `fs_flags`
+ /// field of `path_open`.
+ ///
+    /// By default this option is `false`.
+ fn rsync(&mut self, rsync: bool) -> &mut Self;
+
+ /// Indicates whether `__WASI_FDFLAG_SYNC` is passed in the `fs_flags`
+ /// field of `path_open`.
+ ///
+    /// By default this option is `false`.
+ fn sync(&mut self, sync: bool) -> &mut Self;
+
+ /// Indicates the value that should be passed in for the `fs_rights_base`
+ /// parameter of `path_open`.
+ ///
+ /// This option defaults based on the `read` and `write` configuration of
+ /// this `OpenOptions` builder. If this method is called, however, the
+ /// exact mask passed in will be used instead.
+ fn fs_rights_base(&mut self, rights: u64) -> &mut Self;
+
+ /// Indicates the value that should be passed in for the
+ /// `fs_rights_inheriting` parameter of `path_open`.
+ ///
+ /// The default for this option is the same value as what will be passed
+ /// for the `fs_rights_base` parameter but if this method is called then
+ /// the specified value will be used instead.
+ fn fs_rights_inheriting(&mut self, rights: u64) -> &mut Self;
+
+ /// Open a file or directory.
+ ///
+ /// This corresponds to the `path_open` syscall.
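+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (assuming `dir` is an already-opened directory, such
+    /// as a preopen provided by the WASI runtime; the file name is
+    /// illustrative):
+    ///
+    /// ```no_run
+    /// use std::fs::{File, OpenOptions};
+    /// use std::os::wasi::fs::OpenOptionsExt;
+    ///
+    /// fn open_config(dir: &File) -> std::io::Result<File> {
+    ///     OpenOptions::new()
+    ///         .read(true)
+    ///         // Do not follow symlinks while resolving the path.
+    ///         .lookup_flags(0)
+    ///         .open_at(dir, "config.toml")
+    /// }
+    /// ```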
+ fn open_at<P: AsRef<Path>>(&self, file: &File, path: P) -> io::Result<File>;
+}
+
+impl OpenOptionsExt for OpenOptions {
+ fn lookup_flags(&mut self, flags: u32) -> &mut OpenOptions {
+ self.as_inner_mut().lookup_flags(flags);
+ self
+ }
+
+ fn directory(&mut self, dir: bool) -> &mut OpenOptions {
+ self.as_inner_mut().directory(dir);
+ self
+ }
+
+ fn dsync(&mut self, enabled: bool) -> &mut OpenOptions {
+ self.as_inner_mut().dsync(enabled);
+ self
+ }
+
+ fn nonblock(&mut self, enabled: bool) -> &mut OpenOptions {
+ self.as_inner_mut().nonblock(enabled);
+ self
+ }
+
+ fn rsync(&mut self, enabled: bool) -> &mut OpenOptions {
+ self.as_inner_mut().rsync(enabled);
+ self
+ }
+
+ fn sync(&mut self, enabled: bool) -> &mut OpenOptions {
+ self.as_inner_mut().sync(enabled);
+ self
+ }
+
+ fn fs_rights_base(&mut self, rights: u64) -> &mut OpenOptions {
+ self.as_inner_mut().fs_rights_base(rights);
+ self
+ }
+
+ fn fs_rights_inheriting(&mut self, rights: u64) -> &mut OpenOptions {
+ self.as_inner_mut().fs_rights_inheriting(rights);
+ self
+ }
+
+ fn open_at<P: AsRef<Path>>(&self, file: &File, path: P) -> io::Result<File> {
+ let inner = file.as_inner().open_at(path.as_ref(), self.as_inner())?;
+ Ok(File::from_inner(inner))
+ }
+}
+
+/// WASI-specific extensions to [`fs::Metadata`].
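+///
+/// # Examples
+///
+/// A minimal sketch (assuming a WASI target with the `wasi_ext` feature
+/// enabled):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::os::wasi::fs::MetadataExt;
+///
+/// fn main() -> std::io::Result<()> {
+///     let meta = fs::metadata("foo.txt")?;
+///     // Fields of the underlying `filestat_t`.
+///     println!("inode: {}, size: {}", meta.ino(), meta.size());
+///     Ok(())
+/// }
+/// ```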
+pub trait MetadataExt {
+ /// Returns the `st_dev` field of the internal `filestat_t`
+ fn dev(&self) -> u64;
+ /// Returns the `st_ino` field of the internal `filestat_t`
+ fn ino(&self) -> u64;
+ /// Returns the `st_nlink` field of the internal `filestat_t`
+ fn nlink(&self) -> u64;
+ /// Returns the `st_size` field of the internal `filestat_t`
+ fn size(&self) -> u64;
+ /// Returns the `st_atim` field of the internal `filestat_t`
+ fn atim(&self) -> u64;
+ /// Returns the `st_mtim` field of the internal `filestat_t`
+ fn mtim(&self) -> u64;
+ /// Returns the `st_ctim` field of the internal `filestat_t`
+ fn ctim(&self) -> u64;
+}
+
+impl MetadataExt for fs::Metadata {
+ fn dev(&self) -> u64 {
+ self.as_inner().as_wasi().dev
+ }
+ fn ino(&self) -> u64 {
+ self.as_inner().as_wasi().ino
+ }
+ fn nlink(&self) -> u64 {
+ self.as_inner().as_wasi().nlink
+ }
+ fn size(&self) -> u64 {
+ self.as_inner().as_wasi().size
+ }
+ fn atim(&self) -> u64 {
+ self.as_inner().as_wasi().atim
+ }
+ fn mtim(&self) -> u64 {
+ self.as_inner().as_wasi().mtim
+ }
+ fn ctim(&self) -> u64 {
+ self.as_inner().as_wasi().ctim
+ }
+}
+
+/// WASI-specific extensions for [`fs::FileType`].
+///
+/// Adds support for special WASI file types such as block/character devices,
+/// pipes, and sockets.
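+///
+/// # Examples
+///
+/// A minimal sketch (assuming a WASI target with the `wasi_ext` feature
+/// enabled; the path is illustrative):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::os::wasi::fs::FileTypeExt;
+///
+/// fn main() -> std::io::Result<()> {
+///     let file_type = fs::metadata("some-path")?.file_type();
+///     println!("is a socket: {}", file_type.is_socket());
+///     Ok(())
+/// }
+/// ```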
+pub trait FileTypeExt {
+ /// Returns `true` if this file type is a block device.
+ fn is_block_device(&self) -> bool;
+ /// Returns `true` if this file type is a character device.
+ fn is_char_device(&self) -> bool;
+ /// Returns `true` if this file type is a socket datagram.
+ fn is_socket_dgram(&self) -> bool;
+ /// Returns `true` if this file type is a socket stream.
+ fn is_socket_stream(&self) -> bool;
+ /// Returns `true` if this file type is any type of socket.
+ fn is_socket(&self) -> bool {
+ self.is_socket_stream() || self.is_socket_dgram()
+ }
+}
+
+impl FileTypeExt for fs::FileType {
+ fn is_block_device(&self) -> bool {
+ self.as_inner().bits() == wasi::FILETYPE_BLOCK_DEVICE
+ }
+ fn is_char_device(&self) -> bool {
+ self.as_inner().bits() == wasi::FILETYPE_CHARACTER_DEVICE
+ }
+ fn is_socket_dgram(&self) -> bool {
+ self.as_inner().bits() == wasi::FILETYPE_SOCKET_DGRAM
+ }
+ fn is_socket_stream(&self) -> bool {
+ self.as_inner().bits() == wasi::FILETYPE_SOCKET_STREAM
+ }
+}
+
+/// WASI-specific extension methods for [`fs::DirEntry`].
+pub trait DirEntryExt {
+ /// Returns the underlying `d_ino` field of the `dirent_t`
+ fn ino(&self) -> u64;
+}
+
+impl DirEntryExt for fs::DirEntry {
+ fn ino(&self) -> u64 {
+ self.as_inner().ino()
+ }
+}
+
+/// Create a hard link.
+///
+/// This corresponds to the `path_link` syscall.
+pub fn link<P: AsRef<Path>, U: AsRef<Path>>(
+ old_fd: &File,
+ old_flags: u32,
+ old_path: P,
+ new_fd: &File,
+ new_path: U,
+) -> io::Result<()> {
+ old_fd.as_inner().as_inner().link(
+ old_flags,
+ osstr2str(old_path.as_ref().as_ref())?,
+ new_fd.as_inner().as_inner(),
+ osstr2str(new_path.as_ref().as_ref())?,
+ )
+}
+
+/// Rename a file or directory.
+///
+/// This corresponds to the `path_rename` syscall.
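+///
+/// # Examples
+///
+/// A minimal sketch (assuming `dir` is an already-opened directory, such as a
+/// preopen provided by the WASI runtime):
+///
+/// ```no_run
+/// use std::fs::File;
+/// use std::os::wasi::fs::rename;
+///
+/// fn rename_in_place(dir: &File) -> std::io::Result<()> {
+///     // Both paths are resolved relative to `dir`.
+///     rename(dir, "old.txt", dir, "new.txt")
+/// }
+/// ```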
+pub fn rename<P: AsRef<Path>, U: AsRef<Path>>(
+ old_fd: &File,
+ old_path: P,
+ new_fd: &File,
+ new_path: U,
+) -> io::Result<()> {
+ old_fd.as_inner().as_inner().rename(
+ osstr2str(old_path.as_ref().as_ref())?,
+ new_fd.as_inner().as_inner(),
+ osstr2str(new_path.as_ref().as_ref())?,
+ )
+}
+
+/// Create a symbolic link.
+///
+/// This corresponds to the `path_symlink` syscall.
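+///
+/// # Examples
+///
+/// A minimal sketch (assuming `dir` is an already-opened directory, such as a
+/// preopen provided by the WASI runtime):
+///
+/// ```no_run
+/// use std::fs::File;
+/// use std::os::wasi::fs::symlink;
+///
+/// fn add_link(dir: &File) -> std::io::Result<()> {
+///     // Creates `link.txt` inside `dir`, pointing at `target.txt`.
+///     symlink("target.txt", dir, "link.txt")
+/// }
+/// ```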
+pub fn symlink<P: AsRef<Path>, U: AsRef<Path>>(
+ old_path: P,
+ fd: &File,
+ new_path: U,
+) -> io::Result<()> {
+ fd.as_inner()
+ .as_inner()
+ .symlink(osstr2str(old_path.as_ref().as_ref())?, osstr2str(new_path.as_ref().as_ref())?)
+}
+
+/// Create a symbolic link.
+///
+/// This is a convenience API similar to `std::os::unix::fs::symlink`,
+/// `std::os::windows::fs::symlink_file`, and `std::os::windows::fs::symlink_dir`.
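+///
+/// # Examples
+///
+/// A minimal sketch (assuming a WASI target with the `wasi_ext` feature
+/// enabled):
+///
+/// ```no_run
+/// use std::os::wasi::fs::symlink_path;
+///
+/// fn main() -> std::io::Result<()> {
+///     symlink_path("a.txt", "b.txt")?;
+///     Ok(())
+/// }
+/// ```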
+pub fn symlink_path<P: AsRef<Path>, U: AsRef<Path>>(old_path: P, new_path: U) -> io::Result<()> {
+ crate::sys::fs::symlink(old_path.as_ref(), new_path.as_ref())
+}
+
+fn osstr2str(f: &OsStr) -> io::Result<&str> {
+ f.to_str()
+ .ok_or_else(|| io::const_io_error!(io::ErrorKind::Uncategorized, "input must be utf-8"))
+}
diff --git a/library/std/src/os/wasi/io/fd.rs b/library/std/src/os/wasi/io/fd.rs
new file mode 100644
index 000000000..930aca887
--- /dev/null
+++ b/library/std/src/os/wasi/io/fd.rs
@@ -0,0 +1,9 @@
+//! Owned and borrowed file descriptors.
+
+#![unstable(feature = "wasi_ext", issue = "71213")]
+
+// Tests for this module
+#[cfg(test)]
+mod tests;
+
+pub use crate::os::fd::owned::*;
diff --git a/library/std/src/os/wasi/io/fd/tests.rs b/library/std/src/os/wasi/io/fd/tests.rs
new file mode 100644
index 000000000..418274752
--- /dev/null
+++ b/library/std/src/os/wasi/io/fd/tests.rs
@@ -0,0 +1,11 @@
+use crate::mem::size_of;
+use crate::os::wasi::io::RawFd;
+
+#[test]
+fn test_raw_fd_layout() {
+ // `OwnedFd` and `BorrowedFd` use `rustc_layout_scalar_valid_range_start`
+ // and `rustc_layout_scalar_valid_range_end`, with values that depend on
+ // the bit width of `RawFd`. If this ever changes, those values will need
+ // to be updated.
+ assert_eq!(size_of::<RawFd>(), 4);
+}
diff --git a/library/std/src/os/wasi/io/mod.rs b/library/std/src/os/wasi/io/mod.rs
new file mode 100644
index 000000000..6c884e2ea
--- /dev/null
+++ b/library/std/src/os/wasi/io/mod.rs
@@ -0,0 +1,12 @@
+//! WASI-specific extensions to general I/O primitives.
+
+#![deny(unsafe_op_in_unsafe_fn)]
+#![unstable(feature = "wasi_ext", issue = "71213")]
+
+mod fd;
+mod raw;
+
+#[unstable(feature = "wasi_ext", issue = "71213")]
+pub use fd::*;
+#[unstable(feature = "wasi_ext", issue = "71213")]
+pub use raw::*;
diff --git a/library/std/src/os/wasi/io/raw.rs b/library/std/src/os/wasi/io/raw.rs
new file mode 100644
index 000000000..da3b36ada
--- /dev/null
+++ b/library/std/src/os/wasi/io/raw.rs
@@ -0,0 +1,20 @@
+//! WASI-specific extensions to general I/O primitives.
+
+#![unstable(feature = "wasi_ext", issue = "71213")]
+
+// NOTE: despite the fact that this module is unstable,
+// stable Rust was able to access the stable
+// re-exported items from os::fd::raw through this
+// unstable module.
+// In PR #95956 the stability checker was changed to check
+// all path segments of an item rather than just the last,
+// which caused the aforementioned stable usage to regress
+// (see issue #99502).
+// As a result, the items in os::fd::raw were given the
+// rustc_allowed_through_unstable_modules attribute.
+// No regression tests were added to ensure this property,
+// as CI is not configured to test wasm32-wasi.
+// If this module is stabilized,
+// you may want to remove those attributes
+// (assuming no other unstable modules need them).
+pub use crate::os::fd::raw::*;
diff --git a/library/std/src/os/wasi/mod.rs b/library/std/src/os/wasi/mod.rs
new file mode 100644
index 000000000..bbaf328f4
--- /dev/null
+++ b/library/std/src/os/wasi/mod.rs
@@ -0,0 +1,57 @@
+//! Platform-specific extensions to `std` for the WebAssembly System Interface (WASI).
+//!
+//! Provides access to platform-level information on WASI, and exposes
+//! WASI-specific functions that would otherwise be inappropriate as
+//! part of the core `std` library.
+//!
+//! It exposes more ways to deal with platform-specific strings ([`OsStr`],
+//! [`OsString`]), allows setting permissions more granularly, provides access
+//! to low-level file descriptors from files and sockets, and has
+//! platform-specific helpers for spawning processes.
+//!
+//! # Examples
+//!
+//! ```no_run
+//! use std::fs::File;
+//! use std::os::wasi::prelude::*;
+//!
+//! fn main() -> std::io::Result<()> {
+//! let f = File::create("foo.txt")?;
+//! let fd = f.as_raw_fd();
+//!
+//! // use fd with native WASI bindings
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! [`OsStr`]: crate::ffi::OsStr
+//! [`OsString`]: crate::ffi::OsString
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
+#![doc(cfg(target_os = "wasi"))]
+
+pub mod ffi;
+pub mod fs;
+pub mod io;
+pub mod net;
+
+/// A prelude for conveniently writing platform-specific code.
+///
+/// Includes all extension traits, and some important type definitions.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod prelude {
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::ffi::{OsStrExt, OsStringExt};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::fs::FileTypeExt;
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::fs::{DirEntryExt, FileExt, MetadataExt, OpenOptionsExt};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+}
diff --git a/library/std/src/os/wasi/net/mod.rs b/library/std/src/os/wasi/net/mod.rs
new file mode 100644
index 000000000..73c097d4a
--- /dev/null
+++ b/library/std/src/os/wasi/net/mod.rs
@@ -0,0 +1,23 @@
+//! WASI-specific networking functionality
+
+#![unstable(feature = "wasi_ext", issue = "71213")]
+
+use crate::io;
+use crate::net;
+use crate::sys_common::AsInner;
+
+/// WASI-specific extensions to [`std::net::TcpListener`].
+///
+/// [`std::net::TcpListener`]: crate::net::TcpListener
+pub trait TcpListenerExt {
+ /// Accept a socket.
+ ///
+ /// This corresponds to the `sock_accept` syscall.
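+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (assuming `listener` wraps an already-listening
+    /// socket, typically one pre-opened by the WASI runtime; `0` requests no
+    /// additional fdflags):
+    ///
+    /// ```no_run
+    /// use std::net::TcpListener;
+    /// use std::os::wasi::net::TcpListenerExt;
+    ///
+    /// fn accept_one(listener: &TcpListener) -> std::io::Result<u32> {
+    ///     // Returns the raw file descriptor of the accepted connection.
+    ///     listener.sock_accept(0)
+    /// }
+    /// ```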
+ fn sock_accept(&self, flags: u16) -> io::Result<u32>;
+}
+
+impl TcpListenerExt for net::TcpListener {
+ fn sock_accept(&self, flags: u16) -> io::Result<u32> {
+ self.as_inner().as_inner().as_inner().sock_accept(flags)
+ }
+}
diff --git a/library/std/src/os/windows/ffi.rs b/library/std/src/os/windows/ffi.rs
new file mode 100644
index 000000000..96bab59d3
--- /dev/null
+++ b/library/std/src/os/windows/ffi.rs
@@ -0,0 +1,136 @@
+//! Windows-specific extensions to primitives in the [`std::ffi`] module.
+//!
+//! # Overview
+//!
+//! For historical reasons, the Windows API uses a form of potentially
+//! ill-formed UTF-16 encoding for strings. Specifically, the 16-bit
+//! code units in Windows strings may contain [isolated surrogate code
+//! points which are not paired together][ill-formed-utf-16]. The
+//! Unicode standard requires that surrogate code points (those in the
+//! range U+D800 to U+DFFF) always be *paired*, because in the UTF-16
+//! encoding a *surrogate code unit pair* is used to encode a single
+//! character. For compatibility with code that does not enforce
+//! these pairings, Windows does not enforce them, either.
+//!
+//! While it is not always possible to convert such a string losslessly into
+//! a valid UTF-16 string (or even UTF-8), it is often desirable to be
+//! able to round-trip such a string from and to Windows APIs
+//! losslessly. For example, some Rust code may be "bridging" some
+//! Windows APIs together, just passing `WCHAR` strings among those
+//! APIs without ever really looking into the strings.
+//!
+//! If Rust code *does* need to look into those strings, it can
+//! convert them to valid UTF-8, possibly lossily, by substituting
+//! invalid sequences with [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], as is
+//! conventionally done in other Rust APIs that deal with string
+//! encodings.
+//!
+//! # `OsStringExt` and `OsStrExt`
+//!
+//! [`OsString`] is the Rust wrapper for owned strings in the
+//! preferred representation of the operating system. On Windows,
+//! this struct gets augmented with an implementation of the
+//! [`OsStringExt`] trait, which has an [`OsStringExt::from_wide`] method. This
+//! lets you create an [`OsString`] from a `&[u16]` slice; presumably
+//! you get such a slice out of a `WCHAR` Windows API.
+//!
+//! Similarly, [`OsStr`] is the Rust wrapper for borrowed strings in the
+//! preferred representation of the operating system. On Windows, the
+//! [`OsStrExt`] trait provides the [`OsStrExt::encode_wide`] method, which
+//! outputs an [`EncodeWide`] iterator. You can [`collect`] this
+//! iterator, for example, to obtain a `Vec<u16>`; you can later get a
+//! pointer to this vector's contents and feed it to Windows APIs.
+//!
+//! These traits, along with [`OsString`] and [`OsStr`], work in
+//! conjunction so that it is possible to **round-trip** strings from
+//! Windows and back, with no loss of data, even if the strings are
+//! ill-formed UTF-16.
+//!
+//! [ill-formed-utf-16]: https://simonsapin.github.io/wtf-8/#ill-formed-utf-16
+//! [`collect`]: crate::iter::Iterator::collect
+//! [U+FFFD]: crate::char::REPLACEMENT_CHARACTER
+//! [`std::ffi`]: crate::ffi
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::ffi::{OsStr, OsString};
+use crate::sealed::Sealed;
+use crate::sys::os_str::Buf;
+use crate::sys_common::wtf8::Wtf8Buf;
+use crate::sys_common::{AsInner, FromInner};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use crate::sys_common::wtf8::EncodeWide;
+
+/// Windows-specific extensions to [`OsString`].
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait OsStringExt: Sealed {
+ /// Creates an `OsString` from a potentially ill-formed UTF-16 slice of
+ /// 16-bit code units.
+ ///
+ /// This is lossless: calling [`OsStrExt::encode_wide`] on the resulting string
+ /// will always return the original code units.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// // UTF-16 encoding for "Unicode".
+ /// let source = [0x0055, 0x006E, 0x0069, 0x0063, 0x006F, 0x0064, 0x0065];
+ ///
+ /// let string = OsString::from_wide(&source[..]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn from_wide(wide: &[u16]) -> Self;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl OsStringExt for OsString {
+ fn from_wide(wide: &[u16]) -> OsString {
+ FromInner::from_inner(Buf { inner: Wtf8Buf::from_wide(wide) })
+ }
+}
+
+/// Windows-specific extensions to [`OsStr`].
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait OsStrExt: Sealed {
+ /// Re-encodes an `OsStr` as a wide character sequence, i.e., potentially
+ /// ill-formed UTF-16.
+ ///
+ /// This is lossless: calling [`OsStringExt::from_wide`] and then
+ /// `encode_wide` on the result will yield the original code units.
+ /// Note that the encoding does not add a final null terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// // UTF-16 encoding for "Unicode".
+ /// let source = [0x0055, 0x006E, 0x0069, 0x0063, 0x006F, 0x0064, 0x0065];
+ ///
+ /// let string = OsString::from_wide(&source[..]);
+ ///
+ /// let result: Vec<u16> = string.encode_wide().collect();
+ /// assert_eq!(&source[..], &result[..]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn encode_wide(&self) -> EncodeWide<'_>;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl OsStrExt for OsStr {
+ #[inline]
+ fn encode_wide(&self) -> EncodeWide<'_> {
+ self.as_inner().inner.encode_wide()
+ }
+}
diff --git a/library/std/src/os/windows/fs.rs b/library/std/src/os/windows/fs.rs
new file mode 100644
index 000000000..a091f06dd
--- /dev/null
+++ b/library/std/src/os/windows/fs.rs
@@ -0,0 +1,605 @@
+//! Windows-specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fs::{self, Metadata, OpenOptions};
+use crate::io;
+use crate::path::Path;
+use crate::sealed::Sealed;
+use crate::sys;
+use crate::sys_common::{AsInner, AsInnerMut};
+
+/// Windows-specific extensions to [`fs::File`].
+#[stable(feature = "file_offset", since = "1.15.0")]
+pub trait FileExt {
+ /// Seeks to a given position and reads a number of bytes.
+ ///
+ /// Returns the number of bytes read.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor. The current cursor **is** affected by this
+    /// function; it is set to the end of the read.
+ ///
+ /// Reading beyond the end of the file will always return with a length of
+ /// 0\.
+ ///
+ /// Note that similar to `File::read`, it is not an error to return with a
+ /// short read. When returning from such a short read, the file pointer is
+ /// still updated.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs::File;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ /// let mut buffer = [0; 10];
+ ///
+ /// // Read 10 bytes, starting 72 bytes from the
+ /// // start of the file.
+ /// file.seek_read(&mut buffer[..], 72)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_offset", since = "1.15.0")]
+ fn seek_read(&self, buf: &mut [u8], offset: u64) -> io::Result<usize>;
+
+ /// Seeks to a given position and writes a number of bytes.
+ ///
+ /// Returns the number of bytes written.
+ ///
+ /// The offset is relative to the start of the file and thus independent
+ /// from the current cursor. The current cursor **is** affected by this
+    /// function; it is set to the end of the write.
+ ///
+ /// When writing beyond the end of the file, the file is appropriately
+ /// extended and the intermediate bytes are left uninitialized.
+ ///
+ /// Note that similar to `File::write`, it is not an error to return a
+ /// short write. When returning from such a short write, the file pointer
+ /// is still updated.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let mut buffer = File::create("foo.txt")?;
+ ///
+ /// // Write a byte string starting 72 bytes from
+ /// // the start of the file.
+ /// buffer.seek_write(b"some bytes", 72)?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "file_offset", since = "1.15.0")]
+ fn seek_write(&self, buf: &[u8], offset: u64) -> io::Result<usize>;
+}
+
+#[stable(feature = "file_offset", since = "1.15.0")]
+impl FileExt for fs::File {
+ fn seek_read(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
+ self.as_inner().read_at(buf, offset)
+ }
+
+ fn seek_write(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ self.as_inner().write_at(buf, offset)
+ }
+}
+
+/// Windows-specific extensions to [`fs::OpenOptions`].
+#[stable(feature = "open_options_ext", since = "1.10.0")]
+pub trait OpenOptionsExt {
+ /// Overrides the `dwDesiredAccess` argument to the call to [`CreateFile`]
+ /// with the specified value.
+ ///
+ /// This will override the `read`, `write`, and `append` flags on the
+ /// `OpenOptions` structure. This method provides fine-grained control over
+ /// the permissions to read, write and append data, attributes (like hidden
+ /// and system), and extended attributes.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// // Open without read and write permission, for example if you only need
+ /// // to call `stat` on the file
+ /// let file = OpenOptions::new().access_mode(0).open("foo.txt");
+ /// ```
+ ///
+ /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
+ #[stable(feature = "open_options_ext", since = "1.10.0")]
+ fn access_mode(&mut self, access: u32) -> &mut Self;
+
+ /// Overrides the `dwShareMode` argument to the call to [`CreateFile`] with
+ /// the specified value.
+ ///
+ /// By default `share_mode` is set to
+ /// `FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE`. This allows
+ /// other processes to read, write, and delete/rename the same file
+ /// while it is open. Removing any of the flags will prevent other
+ /// processes from performing the corresponding operation until the file
+ /// handle is closed.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs::OpenOptions;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// // Do not allow others to read or modify this file while we have it open
+ /// // for writing.
+ /// let file = OpenOptions::new()
+ /// .write(true)
+ /// .share_mode(0)
+ /// .open("foo.txt");
+ /// ```
+ ///
+ /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
+ #[stable(feature = "open_options_ext", since = "1.10.0")]
+ fn share_mode(&mut self, val: u32) -> &mut Self;
+
+ /// Sets extra flags for the `dwFileFlags` argument to the call to
+ /// [`CreateFile2`] to the specified value (or combines it with
+ /// `attributes` and `security_qos_flags` to set the `dwFlagsAndAttributes`
+ /// for [`CreateFile`]).
+ ///
+ /// Custom flags can only set flags, not remove flags set by Rust's options.
+ /// This option overwrites any previously set custom flags.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # #![allow(unexpected_cfgs)]
+ /// # #[cfg(for_demonstration_only)]
+ /// extern crate winapi;
+ /// # mod winapi { pub const FILE_FLAG_DELETE_ON_CLOSE: u32 = 0x04000000; }
+ ///
+ /// use std::fs::OpenOptions;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// let file = OpenOptions::new()
+ /// .create(true)
+ /// .write(true)
+ /// .custom_flags(winapi::FILE_FLAG_DELETE_ON_CLOSE)
+ /// .open("foo.txt");
+ /// ```
+ ///
+ /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
+ /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
+ #[stable(feature = "open_options_ext", since = "1.10.0")]
+ fn custom_flags(&mut self, flags: u32) -> &mut Self;
+
+ /// Sets the `dwFileAttributes` argument to the call to [`CreateFile2`] to
+ /// the specified value (or combines it with `custom_flags` and
+ /// `security_qos_flags` to set the `dwFlagsAndAttributes` for
+ /// [`CreateFile`]).
+ ///
+ /// If a _new_ file is created because it does not yet exist and
+ /// `.create(true)` or `.create_new(true)` are specified, the new file is
+ /// given the attributes declared with `.attributes()`.
+ ///
+ /// If an _existing_ file is opened with `.create(true).truncate(true)`, its
+ /// existing attributes are preserved and combined with the ones declared
+ /// with `.attributes()`.
+ ///
+ /// In all other cases the attributes get ignored.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # #![allow(unexpected_cfgs)]
+ /// # #[cfg(for_demonstration_only)]
+ /// extern crate winapi;
+ /// # mod winapi { pub const FILE_ATTRIBUTE_HIDDEN: u32 = 2; }
+ ///
+ /// use std::fs::OpenOptions;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// let file = OpenOptions::new()
+ /// .write(true)
+ /// .create(true)
+ /// .attributes(winapi::FILE_ATTRIBUTE_HIDDEN)
+ /// .open("foo.txt");
+ /// ```
+ ///
+ /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
+ /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
+ #[stable(feature = "open_options_ext", since = "1.10.0")]
+ fn attributes(&mut self, val: u32) -> &mut Self;
+
+ /// Sets the `dwSecurityQosFlags` argument to the call to [`CreateFile2`] to
+ /// the specified value (or combines it with `custom_flags` and `attributes`
+ /// to set the `dwFlagsAndAttributes` for [`CreateFile`]).
+ ///
+ /// By default `security_qos_flags` is not set. It should be specified when
+ /// opening a named pipe, to control to which degree a server process can
+ /// act on behalf of a client process (security impersonation level).
+ ///
+ /// When `security_qos_flags` is not set, a malicious program can gain the
+ /// elevated privileges of a privileged Rust process when it allows opening
+ /// user-specified paths, by tricking it into opening a named pipe. So
+ /// arguably `security_qos_flags` should also be set when opening arbitrary
+ /// paths. However the bits can then conflict with other flags, specifically
+ /// `FILE_FLAG_OPEN_NO_RECALL`.
+ ///
+ /// For information about possible values, see [Impersonation Levels] on the
+ /// Windows Dev Center site. The `SECURITY_SQOS_PRESENT` flag is set
+ /// automatically when using this method.
+    ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// # #![allow(unexpected_cfgs)]
+ /// # #[cfg(for_demonstration_only)]
+ /// extern crate winapi;
+ /// # mod winapi { pub const SECURITY_IDENTIFICATION: u32 = 0; }
+ /// use std::fs::OpenOptions;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// let file = OpenOptions::new()
+ /// .write(true)
+ /// .create(true)
+ ///
+ /// // Sets the flag value to `SecurityIdentification`.
+ /// .security_qos_flags(winapi::SECURITY_IDENTIFICATION)
+ ///
+ /// .open(r"\\.\pipe\MyPipe");
+ /// ```
+ ///
+ /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
+ /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
+ /// [Impersonation Levels]:
+ /// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
+ #[stable(feature = "open_options_ext", since = "1.10.0")]
+ fn security_qos_flags(&mut self, flags: u32) -> &mut Self;
+}
+
+#[stable(feature = "open_options_ext", since = "1.10.0")]
+impl OpenOptionsExt for OpenOptions {
+ fn access_mode(&mut self, access: u32) -> &mut OpenOptions {
+ self.as_inner_mut().access_mode(access);
+ self
+ }
+
+ fn share_mode(&mut self, share: u32) -> &mut OpenOptions {
+ self.as_inner_mut().share_mode(share);
+ self
+ }
+
+ fn custom_flags(&mut self, flags: u32) -> &mut OpenOptions {
+ self.as_inner_mut().custom_flags(flags);
+ self
+ }
+
+ fn attributes(&mut self, attributes: u32) -> &mut OpenOptions {
+ self.as_inner_mut().attributes(attributes);
+ self
+ }
+
+ fn security_qos_flags(&mut self, flags: u32) -> &mut OpenOptions {
+ self.as_inner_mut().security_qos_flags(flags);
+ self
+ }
+}
+
+/// Windows-specific extensions to [`fs::Metadata`].
+///
+/// The data members that this trait exposes correspond to the members
+/// of the [`BY_HANDLE_FILE_INFORMATION`] structure.
+///
+/// [`BY_HANDLE_FILE_INFORMATION`]:
+/// https://docs.microsoft.com/en-us/windows/win32/api/fileapi/ns-fileapi-by_handle_file_information
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Returns the value of the `dwFileAttributes` field of this metadata.
+ ///
+ /// This field contains the file system attribute information for a file
+ /// or directory. For possible values and their descriptions, see
+ /// [File Attribute Constants] in the Windows Dev Center.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ /// let attributes = metadata.file_attributes();
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [File Attribute Constants]:
+ /// https://docs.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn file_attributes(&self) -> u32;
+
+ /// Returns the value of the `ftCreationTime` field of this metadata.
+ ///
+ /// The returned 64-bit value is equivalent to a [`FILETIME`] struct,
+ /// which represents the number of 100-nanosecond intervals since
+ /// January 1, 1601 (UTC). The struct is automatically
+ /// converted to a `u64` value, as that is the recommended way
+ /// to use it.
+ ///
+ /// If the underlying filesystem does not support creation time, the
+ /// returned value is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ /// let creation_time = metadata.creation_time();
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`FILETIME`]: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn creation_time(&self) -> u64;
+
+ /// Returns the value of the `ftLastAccessTime` field of this metadata.
+ ///
+ /// The returned 64-bit value is equivalent to a [`FILETIME`] struct,
+ /// which represents the number of 100-nanosecond intervals since
+ /// January 1, 1601 (UTC). The struct is automatically
+ /// converted to a `u64` value, as that is the recommended way
+ /// to use it.
+ ///
+ /// For a file, the value specifies the last time that a file was read
+ /// from or written to. For a directory, the value specifies when
+ /// the directory was created. For both files and directories, the
+ /// specified date is correct, but the time of day is always set to
+ /// midnight.
+ ///
+ /// If the underlying filesystem does not support last access time, the
+ /// returned value is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ /// let last_access_time = metadata.last_access_time();
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`FILETIME`]: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn last_access_time(&self) -> u64;
+
+ /// Returns the value of the `ftLastWriteTime` field of this metadata.
+ ///
+ /// The returned 64-bit value is equivalent to a [`FILETIME`] struct,
+ /// which represents the number of 100-nanosecond intervals since
+ /// January 1, 1601 (UTC). The struct is automatically
+ /// converted to a `u64` value, as that is the recommended way
+ /// to use it.
+ ///
+ /// For a file, the value specifies the last time that a file was written
+ /// to. For a directory, the structure specifies when the directory was
+ /// created.
+ ///
+ /// If the underlying filesystem does not support the last write time,
+ /// the returned value is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ /// let last_write_time = metadata.last_write_time();
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`FILETIME`]: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn last_write_time(&self) -> u64;
+
+ /// Returns the value of the `nFileSize{High,Low}` fields of this
+ /// metadata.
+ ///
+ /// The returned value does not have meaning for directories.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::io;
+ /// use std::fs;
+ /// use std::os::windows::prelude::*;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let metadata = fs::metadata("foo.txt")?;
+ /// let file_size = metadata.file_size();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ fn file_size(&self) -> u64;
+
+ /// Returns the value of the `dwVolumeSerialNumber` field of this
+ /// metadata.
+ ///
+ /// This will return `None` if the `Metadata` instance was created from a
+ /// call to `DirEntry::metadata`. If this `Metadata` was created by using
+ /// `fs::metadata` or `File::metadata`, then this will return `Some`.
+ #[unstable(feature = "windows_by_handle", issue = "63010")]
+ fn volume_serial_number(&self) -> Option<u32>;
+
+ /// Returns the value of the `nNumberOfLinks` field of this
+ /// metadata.
+ ///
+ /// This will return `None` if the `Metadata` instance was created from a
+ /// call to `DirEntry::metadata`. If this `Metadata` was created by using
+ /// `fs::metadata` or `File::metadata`, then this will return `Some`.
+ #[unstable(feature = "windows_by_handle", issue = "63010")]
+ fn number_of_links(&self) -> Option<u32>;
+
+ /// Returns the value of the `nFileIndex{Low,High}` fields of this
+ /// metadata.
+ ///
+ /// This will return `None` if the `Metadata` instance was created from a
+ /// call to `DirEntry::metadata`. If this `Metadata` was created by using
+ /// `fs::metadata` or `File::metadata`, then this will return `Some`.
+ #[unstable(feature = "windows_by_handle", issue = "63010")]
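+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (requires the unstable `windows_by_handle` feature):
+    ///
+    /// ```no_run
+    /// #![feature(windows_by_handle)]
+    /// use std::fs;
+    /// use std::os::windows::fs::MetadataExt;
+    ///
+    /// fn main() -> std::io::Result<()> {
+    ///     // Obtained via `fs::metadata`, so the index is available.
+    ///     let metadata = fs::metadata("foo.txt")?;
+    ///     if let Some(index) = metadata.file_index() {
+    ///         println!("file index: {index}");
+    ///     }
+    ///     Ok(())
+    /// }
+    /// ```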
+ fn file_index(&self) -> Option<u64>;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ fn file_attributes(&self) -> u32 {
+ self.as_inner().attrs()
+ }
+ fn creation_time(&self) -> u64 {
+ self.as_inner().created_u64()
+ }
+ fn last_access_time(&self) -> u64 {
+ self.as_inner().accessed_u64()
+ }
+ fn last_write_time(&self) -> u64 {
+ self.as_inner().modified_u64()
+ }
+ fn file_size(&self) -> u64 {
+ self.as_inner().size()
+ }
+ fn volume_serial_number(&self) -> Option<u32> {
+ self.as_inner().volume_serial_number()
+ }
+ fn number_of_links(&self) -> Option<u32> {
+ self.as_inner().number_of_links()
+ }
+ fn file_index(&self) -> Option<u64> {
+ self.as_inner().file_index()
+ }
+}
+
+/// Windows-specific extensions to [`fs::FileType`].
+///
+/// On Windows, a symbolic link knows whether it is a file or directory.
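+///
+/// # Examples
+///
+/// A minimal sketch (the path is illustrative):
+///
+/// ```no_run
+/// use std::fs;
+/// use std::os::windows::fs::FileTypeExt;
+///
+/// fn main() -> std::io::Result<()> {
+///     // Use `symlink_metadata` so the link itself is inspected, not its target.
+///     let file_type = fs::symlink_metadata("link")?.file_type();
+///     println!("directory symlink: {}", file_type.is_symlink_dir());
+///     Ok(())
+/// }
+/// ```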
+#[stable(feature = "windows_file_type_ext", since = "1.64.0")]
+pub trait FileTypeExt: Sealed {
+ /// Returns `true` if this file type is a symbolic link that is also a directory.
+ #[stable(feature = "windows_file_type_ext", since = "1.64.0")]
+ fn is_symlink_dir(&self) -> bool;
+ /// Returns `true` if this file type is a symbolic link that is also a file.
+ #[stable(feature = "windows_file_type_ext", since = "1.64.0")]
+ fn is_symlink_file(&self) -> bool;
+}
+
+#[stable(feature = "windows_file_type_ext", since = "1.64.0")]
+impl Sealed for fs::FileType {}
+
+#[stable(feature = "windows_file_type_ext", since = "1.64.0")]
+impl FileTypeExt for fs::FileType {
+ fn is_symlink_dir(&self) -> bool {
+ self.as_inner().is_symlink_dir()
+ }
+ fn is_symlink_file(&self) -> bool {
+ self.as_inner().is_symlink_file()
+ }
+}
+
+/// Creates a new symlink to a non-directory file on the filesystem.
+///
+/// The `link` path will be a file symbolic link pointing to the `original`
+/// path.
+///
+/// The `original` path should not be a directory or a symlink to a directory,
+/// otherwise the symlink will be broken. Use [`symlink_dir`] for directories.
+///
+/// This function currently corresponds to [`CreateSymbolicLinkW`][CreateSymbolicLinkW].
+/// Note that this [may change in the future][changes].
+///
+/// [CreateSymbolicLinkW]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createsymboliclinkw
+/// [changes]: io#platform-specific-behavior
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::os::windows::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::symlink_file("a.txt", "b.txt")?;
+/// Ok(())
+/// }
+/// ```
+///
+/// # Limitations
+///
+/// Windows treats symlink creation as a [privileged action][symlink-security],
+/// therefore this function is likely to fail unless the user makes changes to
+/// their system to permit symlink creation. Users can try enabling Developer
+/// Mode, granting the `SeCreateSymbolicLinkPrivilege` privilege, or running
+/// the process as an administrator.
+///
+/// [symlink-security]: https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/create-symbolic-links
+#[stable(feature = "symlink", since = "1.1.0")]
+pub fn symlink_file<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> {
+ sys::fs::symlink_inner(original.as_ref(), link.as_ref(), false)
+}
+
+/// Creates a new symlink to a directory on the filesystem.
+///
+/// The `link` path will be a directory symbolic link pointing to the `original`
+/// path.
+///
+/// The `original` path must be a directory or a symlink to a directory,
+/// otherwise the symlink will be broken. Use [`symlink_file`] for other files.
+///
+/// This function currently corresponds to [`CreateSymbolicLinkW`][CreateSymbolicLinkW].
+/// Note that this [may change in the future][changes].
+///
+/// [CreateSymbolicLinkW]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createsymboliclinkw
+/// [changes]: io#platform-specific-behavior
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::os::windows::fs;
+///
+/// fn main() -> std::io::Result<()> {
+/// fs::symlink_dir("a", "b")?;
+/// Ok(())
+/// }
+/// ```
+///
+/// # Limitations
+///
+/// Windows treats symlink creation as a [privileged action][symlink-security],
+/// therefore this function is likely to fail unless the user makes changes to
+/// their system to permit symlink creation. Users can try enabling Developer
+/// Mode, granting the `SeCreateSymbolicLinkPrivilege` privilege, or running
+/// the process as an administrator.
+///
+/// [symlink-security]: https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/create-symbolic-links
+#[stable(feature = "symlink", since = "1.1.0")]
+pub fn symlink_dir<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> {
+ sys::fs::symlink_inner(original.as_ref(), link.as_ref(), true)
+}
diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs
new file mode 100644
index 000000000..16cc8fa27
--- /dev/null
+++ b/library/std/src/os/windows/io/handle.rs
@@ -0,0 +1,576 @@
+//! Owned and borrowed OS handles.
+
+#![stable(feature = "io_safety", since = "1.63.0")]
+
+use super::raw::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
+use crate::fmt;
+use crate::fs;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::mem::forget;
+use crate::ptr;
+use crate::sys::c;
+use crate::sys::cvt;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+/// A borrowed handle.
+///
+/// This has a lifetime parameter to tie it to the lifetime of something that
+/// owns the handle.
+///
+/// This uses `repr(transparent)` and has the representation of a host handle,
+/// so it can be used in FFI in places where a handle is passed as an argument;
+/// it is not captured or consumed.
+///
+/// Note that it *may* have the value `-1`, which in `BorrowedHandle` always
+/// represents a valid handle value, such as [the current process handle], and
+/// not `INVALID_HANDLE_VALUE`, despite the two having the same value. See
+/// [here] for the full story.
+///
+/// And, it *may* have the value `NULL` (0), which can occur when consoles are
+/// detached from processes, or when `windows_subsystem` is used.
+///
+/// This type's `.to_owned()` implementation returns another `BorrowedHandle`
+/// rather than an `OwnedHandle`. It just makes a trivial copy of the raw
+/// handle, which is then borrowed under the same lifetime.
+///
+/// [here]: https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
+/// [the current process handle]: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getcurrentprocess#remarks
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub struct BorrowedHandle<'handle> {
+ handle: RawHandle,
+ _phantom: PhantomData<&'handle OwnedHandle>,
+}
+
+/// An owned handle.
+///
+/// This closes the handle on drop.
+///
+/// Note that it *may* have the value `-1`, which in `OwnedHandle` always
+/// represents a valid handle value, such as [the current process handle], and
+/// not `INVALID_HANDLE_VALUE`, despite the two having the same value. See
+/// [here] for the full story.
+///
+/// And, it *may* have the value `NULL` (0), which can occur when consoles are
+/// detached from processes, or when `windows_subsystem` is used.
+///
+/// `OwnedHandle` uses [`CloseHandle`] to close its handle on drop. As such,
+/// it must not be used with handles to open registry keys which need to be
+/// closed with [`RegCloseKey`] instead.
+///
+/// [`CloseHandle`]: https://docs.microsoft.com/en-us/windows/win32/api/handleapi/nf-handleapi-closehandle
+/// [`RegCloseKey`]: https://docs.microsoft.com/en-us/windows/win32/api/winreg/nf-winreg-regclosekey
+///
+/// [here]: https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
+/// [the current process handle]: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getcurrentprocess#remarks
+#[repr(transparent)]
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub struct OwnedHandle {
+ handle: RawHandle,
+}
+
+/// FFI type for handles in return values or out parameters, where `NULL` is used
+/// as a sentinel value to indicate errors, such as in the return value of `CreateThread`. This uses
+/// `repr(transparent)` and has the representation of a host handle, so that it can be used in such
+/// FFI declarations.
+///
+/// The only thing you can usefully do with a `HandleOrNull` is to convert it into an
+/// `OwnedHandle` using its [`TryFrom`] implementation; this conversion takes care of the check for
+/// `NULL`. This ensures that such FFI calls cannot start using the handle without
+/// checking for `NULL` first.
+///
+/// This type may hold any handle value that [`OwnedHandle`] may hold. As with `OwnedHandle`, when
+/// it holds `-1`, that value is interpreted as a valid handle value, such as
+/// [the current process handle], and not `INVALID_HANDLE_VALUE`.
+///
+/// If this holds a non-null handle, it will close the handle on drop.
+///
+/// [the current process handle]: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getcurrentprocess#remarks
+#[repr(transparent)]
+#[stable(feature = "io_safety", since = "1.63.0")]
+#[derive(Debug)]
+pub struct HandleOrNull(OwnedHandle);
+
+/// FFI type for handles in return values or out parameters, where `INVALID_HANDLE_VALUE` is used
+/// as a sentinel value to indicate errors, such as in the return value of `CreateFileW`. This uses
+/// `repr(transparent)` and has the representation of a host handle, so that it can be used in such
+/// FFI declarations.
+///
+/// The only thing you can usefully do with a `HandleOrInvalid` is to convert it into an
+/// `OwnedHandle` using its [`TryFrom`] implementation; this conversion takes care of the check for
+/// `INVALID_HANDLE_VALUE`. This ensures that such FFI calls cannot start using the handle without
+/// checking for `INVALID_HANDLE_VALUE` first.
+///
+/// This type may hold any handle value that [`OwnedHandle`] may hold, except that when it holds
+/// `-1`, that value is interpreted to mean `INVALID_HANDLE_VALUE`.
+///
+/// If this holds a handle other than `INVALID_HANDLE_VALUE`, it will close the handle on drop.
+#[repr(transparent)]
+#[stable(feature = "io_safety", since = "1.63.0")]
+#[derive(Debug)]
+pub struct HandleOrInvalid(OwnedHandle);
+
+// The Windows [`HANDLE`] type may be transferred across and shared between
+// thread boundaries (despite containing a `*mut void`, which in general isn't
+// `Send` or `Sync`).
+//
+// [`HANDLE`]: std::os::windows::raw::HANDLE
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Send for OwnedHandle {}
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Send for HandleOrNull {}
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Send for HandleOrInvalid {}
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Send for BorrowedHandle<'_> {}
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Sync for OwnedHandle {}
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Sync for HandleOrNull {}
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Sync for HandleOrInvalid {}
+#[stable(feature = "io_safety", since = "1.63.0")]
+unsafe impl Sync for BorrowedHandle<'_> {}
+
+impl BorrowedHandle<'_> {
+ /// Return a `BorrowedHandle` holding the given raw handle.
+ ///
+ /// # Safety
+ ///
+    /// The resource pointed to by `handle` must be a valid open handle, and it
+ /// must remain open for the duration of the returned `BorrowedHandle`.
+ ///
+ /// Note that it *may* have the value `INVALID_HANDLE_VALUE` (-1), which is
+ /// sometimes a valid handle value. See [here] for the full story.
+ ///
+ /// And, it *may* have the value `NULL` (0), which can occur when consoles are
+ /// detached from processes, or when `windows_subsystem` is used.
+ ///
+ /// [here]: https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
+ #[inline]
+ #[rustc_const_stable(feature = "io_safety", since = "1.63.0")]
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub const unsafe fn borrow_raw(handle: RawHandle) -> Self {
+ Self { handle, _phantom: PhantomData }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl TryFrom<HandleOrNull> for OwnedHandle {
+ type Error = NullHandleError;
+
+ #[inline]
+ fn try_from(handle_or_null: HandleOrNull) -> Result<Self, NullHandleError> {
+ let owned_handle = handle_or_null.0;
+ if owned_handle.handle.is_null() {
+ // Don't call `CloseHandle`; it'd be harmless, except that it could
+ // overwrite the `GetLastError` error.
+ forget(owned_handle);
+
+ Err(NullHandleError(()))
+ } else {
+ Ok(owned_handle)
+ }
+ }
+}
+
+impl OwnedHandle {
+ /// Creates a new `OwnedHandle` instance that shares the same underlying
+ /// object as the existing `OwnedHandle` instance.
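+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch:
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    /// use std::os::windows::io::OwnedHandle;
+    ///
+    /// fn main() -> std::io::Result<()> {
+    ///     let handle: OwnedHandle = File::open("foo.txt")?.into();
+    ///     // Both values now own separate handles to the same file object.
+    ///     let _cloned = handle.try_clone()?;
+    ///     Ok(())
+    /// }
+    /// ```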
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub fn try_clone(&self) -> crate::io::Result<Self> {
+ self.as_handle().try_clone_to_owned()
+ }
+}
+
+impl BorrowedHandle<'_> {
+ /// Creates a new `OwnedHandle` instance that shares the same underlying
+ /// object as the existing `BorrowedHandle` instance.
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub fn try_clone_to_owned(&self) -> crate::io::Result<OwnedHandle> {
+ self.duplicate(0, false, c::DUPLICATE_SAME_ACCESS)
+ }
+
+ pub(crate) fn duplicate(
+ &self,
+ access: c::DWORD,
+ inherit: bool,
+ options: c::DWORD,
+ ) -> io::Result<OwnedHandle> {
+ let handle = self.as_raw_handle();
+
+ // `Stdin`, `Stdout`, and `Stderr` can all hold null handles, such as
+ // in a process with a detached console. `DuplicateHandle` would fail
+ // if we passed it a null handle, but we can treat null as a valid
+ // handle which doesn't do any I/O, and allow it to be duplicated.
+ if handle.is_null() {
+ return unsafe { Ok(OwnedHandle::from_raw_handle(handle)) };
+ }
+
+ let mut ret = ptr::null_mut();
+ cvt(unsafe {
+ let cur_proc = c::GetCurrentProcess();
+ c::DuplicateHandle(
+ cur_proc,
+ handle,
+ cur_proc,
+ &mut ret,
+ access,
+ inherit as c::BOOL,
+ options,
+ )
+ })?;
+ unsafe { Ok(OwnedHandle::from_raw_handle(ret)) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl TryFrom<HandleOrInvalid> for OwnedHandle {
+ type Error = InvalidHandleError;
+
+ #[inline]
+ fn try_from(handle_or_invalid: HandleOrInvalid) -> Result<Self, InvalidHandleError> {
+ let owned_handle = handle_or_invalid.0;
+ if owned_handle.handle == c::INVALID_HANDLE_VALUE {
+ // Don't call `CloseHandle`; it'd be harmless, except that it could
+ // overwrite the `GetLastError` error.
+ forget(owned_handle);
+
+ Err(InvalidHandleError(()))
+ } else {
+ Ok(owned_handle)
+ }
+ }
+}
+
+/// This is the error type used by [`HandleOrNull`] when attempting to convert
+/// into a handle, to indicate that the value is null.
+// The empty field prevents constructing this, and allows extending it in the future.
+#[stable(feature = "io_safety", since = "1.63.0")]
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct NullHandleError(());
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Display for NullHandleError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "A HandleOrNull could not be converted to a handle because it was null".fmt(fmt)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl crate::error::Error for NullHandleError {}
+
+/// This is the error type used by [`HandleOrInvalid`] when attempting to
+/// convert into a handle, to indicate that the value is
+/// `INVALID_HANDLE_VALUE`.
+// The empty field prevents constructing this, and allows extending it in the future.
+#[stable(feature = "io_safety", since = "1.63.0")]
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct InvalidHandleError(());
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Display for InvalidHandleError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "A HandleOrInvalid could not be converted to a handle because it was INVALID_HANDLE_VALUE"
+ .fmt(fmt)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl crate::error::Error for InvalidHandleError {}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsRawHandle for BorrowedHandle<'_> {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.handle
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsRawHandle for OwnedHandle {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.handle
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl IntoRawHandle for OwnedHandle {
+ #[inline]
+ fn into_raw_handle(self) -> RawHandle {
+ let handle = self.handle;
+ forget(self);
+ handle
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl FromRawHandle for OwnedHandle {
+ #[inline]
+ unsafe fn from_raw_handle(handle: RawHandle) -> Self {
+ Self { handle }
+ }
+}
+
+impl HandleOrNull {
+ /// Constructs a new instance of `Self` from the given `RawHandle` returned
+ /// from a Windows API that uses null to indicate failure, such as
+ /// `CreateThread`.
+ ///
+ /// Use `HandleOrInvalid` instead of `HandleOrNull` for APIs that
+ /// use `INVALID_HANDLE_VALUE` to indicate failure.
+ ///
+ /// # Safety
+ ///
+ /// The passed `handle` value must either satisfy the safety requirements
+ /// of [`FromRawHandle::from_raw_handle`], or be null. Note that not all
+ /// Windows APIs use null for errors; see [here] for the full story.
+ ///
+ /// [here]: https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ #[inline]
+ pub unsafe fn from_raw_handle(handle: RawHandle) -> Self {
+ Self(OwnedHandle::from_raw_handle(handle))
+ }
+}
+
+impl HandleOrInvalid {
+ /// Constructs a new instance of `Self` from the given `RawHandle` returned
+ /// from a Windows API that uses `INVALID_HANDLE_VALUE` to indicate
+ /// failure, such as `CreateFileW`.
+ ///
+ /// Use `HandleOrNull` instead of `HandleOrInvalid` for APIs that
+ /// use null to indicate failure.
+ ///
+ /// # Safety
+ ///
+ /// The passed `handle` value must either satisfy the safety requirements
+ /// of [`FromRawHandle::from_raw_handle`], or be
+ /// `INVALID_HANDLE_VALUE` (-1). Note that not all Windows APIs use
+ /// `INVALID_HANDLE_VALUE` for errors; see [here] for the full story.
+ ///
+ /// [here]: https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
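+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of the intended pattern; `create_file_w` stands in for a
+ /// hypothetical FFI wrapper around `CreateFileW` and is not a real API:
+ ///
+ /// ```rust,no_run
+ /// use std::convert::TryFrom;
+ /// use std::os::windows::io::{HandleOrInvalid, OwnedHandle, RawHandle};
+ ///
+ /// # fn create_file_w() -> RawHandle { unimplemented!() }
+ /// let raw: RawHandle = create_file_w();
+ /// // SAFETY: `raw` is either a valid open handle or `INVALID_HANDLE_VALUE`,
+ /// // which is exactly what `CreateFileW` is documented to return.
+ /// let handle = unsafe { HandleOrInvalid::from_raw_handle(raw) };
+ /// match OwnedHandle::try_from(handle) {
+ ///     Ok(_owned) => { /* the call succeeded; the handle is now owned */ }
+ ///     Err(_) => { /* the call failed; consult `GetLastError` */ }
+ /// }
+ /// ```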
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ #[inline]
+ pub unsafe fn from_raw_handle(handle: RawHandle) -> Self {
+ Self(OwnedHandle::from_raw_handle(handle))
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl Drop for OwnedHandle {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ let _ = c::CloseHandle(self.handle);
+ }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Debug for BorrowedHandle<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BorrowedHandle").field("handle", &self.handle).finish()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Debug for OwnedHandle {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OwnedHandle").field("handle", &self.handle).finish()
+ }
+}
+
+/// A trait to borrow the handle from an underlying object.
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub trait AsHandle {
+ /// Borrows the handle.
+ ///
+ /// # Example
+ ///
+ /// ```rust,no_run
+ /// use std::fs::File;
+ /// # use std::io;
+ /// use std::os::windows::io::{AsHandle, BorrowedHandle};
+ ///
+ /// let mut f = File::open("foo.txt")?;
+ /// let borrowed_handle: BorrowedHandle<'_> = f.as_handle();
+ /// # Ok::<(), io::Error>(())
+ /// ```
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ fn as_handle(&self) -> BorrowedHandle<'_>;
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T: AsHandle> AsHandle for &T {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ T::as_handle(self)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T: AsHandle> AsHandle for &mut T {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ T::as_handle(self)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for BorrowedHandle<'_> {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ *self
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for OwnedHandle {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ // Safety: `OwnedHandle` and `BorrowedHandle` have the same validity
+ // invariants, and the `BorrowedHandle` is bounded by the lifetime
+ // of `&self`.
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for fs::File {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ self.as_inner().as_handle()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<fs::File> for OwnedHandle {
+ #[inline]
+ fn from(file: fs::File) -> OwnedHandle {
+ file.into_inner().into_inner().into_inner().into()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedHandle> for fs::File {
+ #[inline]
+ fn from(owned: OwnedHandle) -> Self {
+ Self::from_inner(FromInner::from_inner(FromInner::from_inner(owned)))
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for crate::io::Stdin {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsHandle for crate::io::StdinLock<'a> {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for crate::io::Stdout {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsHandle for crate::io::StdoutLock<'a> {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for crate::io::Stderr {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsHandle for crate::io::StderrLock<'a> {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for crate::process::ChildStdin {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::process::ChildStdin> for OwnedHandle {
+ #[inline]
+ fn from(child_stdin: crate::process::ChildStdin) -> OwnedHandle {
+ unsafe { OwnedHandle::from_raw_handle(child_stdin.into_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for crate::process::ChildStdout {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::process::ChildStdout> for OwnedHandle {
+ #[inline]
+ fn from(child_stdout: crate::process::ChildStdout) -> OwnedHandle {
+ unsafe { OwnedHandle::from_raw_handle(child_stdout.into_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for crate::process::ChildStderr {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::process::ChildStderr> for OwnedHandle {
+ #[inline]
+ fn from(child_stderr: crate::process::ChildStderr) -> OwnedHandle {
+ unsafe { OwnedHandle::from_raw_handle(child_stderr.into_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T> AsHandle for crate::thread::JoinHandle<T> {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T> From<crate::thread::JoinHandle<T>> for OwnedHandle {
+ #[inline]
+ fn from(join_handle: crate::thread::JoinHandle<T>) -> OwnedHandle {
+ join_handle.into_inner().into_handle().into_inner()
+ }
+}
diff --git a/library/std/src/os/windows/io/mod.rs b/library/std/src/os/windows/io/mod.rs
new file mode 100644
index 000000000..e2a401fb6
--- /dev/null
+++ b/library/std/src/os/windows/io/mod.rs
@@ -0,0 +1,65 @@
+//! Windows-specific extensions to general I/O primitives.
+//!
+//! Just like raw pointers, raw Windows handles and sockets point to resources
+//! with dynamic lifetimes, and they can dangle if they outlive their resources
+//! or be forged if they're created from invalid values.
+//!
+//! This module provides three types for representing raw handles and sockets
+//! with different ownership properties: raw, borrowed, and owned, which are
+//! analogous to types used for representing pointers:
+//!
+//! | Type | Analogous to |
+//! | ---------------------- | ------------ |
+//! | [`RawHandle`] | `*const _` |
+//! | [`RawSocket`] | `*const _` |
+//! | | |
+//! | [`BorrowedHandle<'a>`] | `&'a _` |
+//! | [`BorrowedSocket<'a>`] | `&'a _` |
+//! | | |
+//! | [`OwnedHandle`] | `Box<_>` |
+//! | [`OwnedSocket`] | `Box<_>` |
+//!
+//! Like raw pointers, `RawHandle` and `RawSocket` values are primitive values.
+//! And in new code, they should be considered unsafe to do I/O on (analogous
+//! to dereferencing them). Rust did not always provide this guidance, so
+//! existing code in the Rust ecosystem often doesn't mark `RawHandle` and
+//! `RawSocket` usage as unsafe. Once the `io_safety` feature is stable,
+//! libraries will be encouraged to migrate, either by adding `unsafe` to APIs
+//! that dereference `RawHandle` and `RawSocket` values, or by using
+//! `BorrowedHandle`, `BorrowedSocket`, `OwnedHandle`, or `OwnedSocket`.
+//!
+//! Like references, `BorrowedHandle` and `BorrowedSocket` values are tied to a
+//! lifetime, to ensure that they don't outlive the resource they point to.
+//! These are safe to use. `BorrowedHandle` and `BorrowedSocket` values may be
+//! used in APIs which provide safe access to any system call except for
+//! `CloseHandle`, `closesocket`, or any other call that would end the
+//! dynamic lifetime of the resource without ending the lifetime of the
+//! handle or socket.
+//!
+//! `BorrowedHandle` and `BorrowedSocket` values may be used in APIs which
+//! provide safe access to `DuplicateHandle` and `WSADuplicateSocketW` and
+//! related functions, so types implementing `AsHandle`, `AsSocket`,
+//! `From<OwnedHandle>`, or `From<OwnedSocket>` should not assume they always
+//! have exclusive access to the underlying object.
+//!
+//! Like boxes, `OwnedHandle` and `OwnedSocket` values conceptually own the
+//! resource they point to, and free (close) it when they are dropped.
+//!
+//! [`BorrowedHandle<'a>`]: crate::os::windows::io::BorrowedHandle
+//! [`BorrowedSocket<'a>`]: crate::os::windows::io::BorrowedSocket
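+//!
+//! As a brief, minimal sketch (it assumes a file named `foo.txt` exists), a
+//! handle can be borrowed from a `File` without giving up ownership, and then
+//! ownership can be transferred out of the `File` entirely:
+//!
+//! ```rust,no_run
+//! use std::fs::File;
+//! # use std::io;
+//! use std::os::windows::io::{AsHandle, BorrowedHandle, OwnedHandle};
+//!
+//! let file = File::open("foo.txt")?;
+//! // Borrow the handle; `file` still owns it and will close it on drop.
+//! let borrowed: BorrowedHandle<'_> = file.as_handle();
+//! println!("{:?}", borrowed);
+//! // Transfer ownership; the handle is now closed when `_owned` is dropped.
+//! let _owned: OwnedHandle = file.into();
+//! # Ok::<(), io::Error>(())
+//! ```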
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+mod handle;
+mod raw;
+mod socket;
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub use handle::*;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use raw::*;
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub use socket::*;
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/os/windows/io/raw.rs b/library/std/src/os/windows/io/raw.rs
new file mode 100644
index 000000000..49e4f304f
--- /dev/null
+++ b/library/std/src/os/windows/io/raw.rs
@@ -0,0 +1,305 @@
+//! Windows-specific extensions to general I/O primitives.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fs;
+use crate::io;
+use crate::net;
+#[cfg(doc)]
+use crate::os::windows::io::{AsHandle, AsSocket};
+use crate::os::windows::io::{OwnedHandle, OwnedSocket};
+use crate::os::windows::raw;
+use crate::ptr;
+use crate::sys;
+use crate::sys::c;
+use crate::sys_common::{self, AsInner, FromInner, IntoInner};
+
+/// Raw HANDLEs.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type RawHandle = raw::HANDLE;
+
+/// Raw SOCKETs.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type RawSocket = raw::SOCKET;
+
+/// Extracts raw handles.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait AsRawHandle {
+ /// Extracts the raw handle.
+ ///
+ /// This function is typically used to **borrow** an owned handle.
+ /// When used in this way, this method does **not** pass ownership of the
+ /// raw handle to the caller, and the handle is only guaranteed
+ /// to be valid while the original object has not yet been destroyed.
+ ///
+ /// This function may return null, such as when called on [`Stdin`],
+ /// [`Stdout`], or [`Stderr`] when the console is detached.
+ ///
+ /// However, borrowing is not strictly required. See [`AsHandle::as_handle`]
+ /// for an API which strictly borrows a handle.
+ ///
+ /// [`Stdin`]: io::Stdin
+ /// [`Stdout`]: io::Stdout
+ /// [`Stderr`]: io::Stderr
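+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (it assumes a file named `foo.txt` exists):
+ ///
+ /// ```rust,no_run
+ /// use std::fs::File;
+ /// # use std::io;
+ /// use std::os::windows::io::AsRawHandle;
+ ///
+ /// let f = File::open("foo.txt")?;
+ /// let raw_handle = f.as_raw_handle();
+ /// // `raw_handle` is only guaranteed to be valid while `f` is alive.
+ /// # let _ = raw_handle;
+ /// # Ok::<(), io::Error>(())
+ /// ```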
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_raw_handle(&self) -> RawHandle;
+}
+
+/// Construct I/O objects from raw handles.
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+pub trait FromRawHandle {
+ /// Constructs a new I/O object from the specified raw handle.
+ ///
+ /// This function is typically used to **consume ownership** of the handle
+ /// given, passing responsibility for closing the handle to the returned
+ /// object. When used in this way, the returned object
+ /// will take responsibility for closing it when the object goes out of
+ /// scope.
+ ///
+ /// However, consuming ownership is not strictly required. Use a
+ /// `From<OwnedHandle>::from` implementation for an API which strictly
+ /// consumes ownership.
+ ///
+ /// # Safety
+ ///
+ /// The `handle` passed in must:
+ /// - be a valid and open handle,
+ /// - be a handle for a resource that may be freed via [`CloseHandle`]
+ /// (as opposed to `RegCloseKey` or other close functions).
+ ///
+ /// Note that the handle *may* have the value `INVALID_HANDLE_VALUE` (-1),
+ /// which is sometimes a valid handle value. See [here] for the full story.
+ ///
+ /// [`CloseHandle`]: https://docs.microsoft.com/en-us/windows/win32/api/handleapi/nf-handleapi-closehandle
+ /// [here]: https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
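+ ///
+ /// # Example
+ ///
+ /// A sketch of a round trip through a raw handle (it assumes a file named
+ /// `foo.txt` exists): ownership is released by `into_raw_handle` and
+ /// reclaimed by `from_raw_handle`:
+ ///
+ /// ```rust,no_run
+ /// use std::fs::File;
+ /// # use std::io;
+ /// use std::os::windows::io::{FromRawHandle, IntoRawHandle};
+ ///
+ /// let f = File::open("foo.txt")?;
+ /// let raw_handle = f.into_raw_handle();
+ /// // SAFETY: `raw_handle` came from `into_raw_handle`, so it is open, owned
+ /// // by no one else, and closeable with `CloseHandle`.
+ /// let f = unsafe { File::from_raw_handle(raw_handle) };
+ /// # drop(f);
+ /// # Ok::<(), io::Error>(())
+ /// ```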
+ #[stable(feature = "from_raw_os", since = "1.1.0")]
+ unsafe fn from_raw_handle(handle: RawHandle) -> Self;
+}
+
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw `HANDLE`.
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+pub trait IntoRawHandle {
+ /// Consumes this object, returning the raw underlying handle.
+ ///
+ /// This function is typically used to **transfer ownership** of the underlying
+ /// handle to the caller. When used in this way, callers are then the unique
+ /// owners of the handle and must close it once it's no longer needed.
+ ///
+ /// However, transferring ownership is not strictly required. Use a
+ /// `Into<OwnedHandle>::into` implementation for an API which strictly
+ /// transfers ownership.
+ #[stable(feature = "into_raw_os", since = "1.4.0")]
+ fn into_raw_handle(self) -> RawHandle;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRawHandle for fs::File {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.as_inner().as_raw_handle() as RawHandle
+ }
+}
+
+#[stable(feature = "asraw_stdio", since = "1.21.0")]
+impl AsRawHandle for io::Stdin {
+ fn as_raw_handle(&self) -> RawHandle {
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle })
+ }
+}
+
+#[stable(feature = "asraw_stdio", since = "1.21.0")]
+impl AsRawHandle for io::Stdout {
+ fn as_raw_handle(&self) -> RawHandle {
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle })
+ }
+}
+
+#[stable(feature = "asraw_stdio", since = "1.21.0")]
+impl AsRawHandle for io::Stderr {
+ fn as_raw_handle(&self) -> RawHandle {
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle })
+ }
+}
+
+#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
+impl<'a> AsRawHandle for io::StdinLock<'a> {
+ fn as_raw_handle(&self) -> RawHandle {
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle })
+ }
+}
+
+#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
+impl<'a> AsRawHandle for io::StdoutLock<'a> {
+ fn as_raw_handle(&self) -> RawHandle {
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle })
+ }
+}
+
+#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
+impl<'a> AsRawHandle for io::StderrLock<'a> {
+ fn as_raw_handle(&self) -> RawHandle {
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle })
+ }
+}
+
+// Translate a handle returned from `GetStdHandle` into a handle to return to
+// the user.
+fn stdio_handle(raw: RawHandle) -> RawHandle {
+ // `GetStdHandle` isn't expected to actually fail, so when it returns
+ // `INVALID_HANDLE_VALUE`, it means we were launched from a parent which
+ // didn't provide us with stdio handles, such as a parent with a detached
+ // console. In that case, return null to the user, which is consistent
+ // with what they'd get in the parent, and which avoids the problem that
+ // `INVALID_HANDLE_VALUE` aliases the current process handle.
+ if raw == c::INVALID_HANDLE_VALUE { ptr::null_mut() } else { raw }
+}
+
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+impl FromRawHandle for fs::File {
+ #[inline]
+ unsafe fn from_raw_handle(handle: RawHandle) -> fs::File {
+ let handle = handle as c::HANDLE;
+ fs::File::from_inner(sys::fs::File::from_inner(FromInner::from_inner(
+ OwnedHandle::from_raw_handle(handle),
+ )))
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawHandle for fs::File {
+ #[inline]
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_raw_handle() as *mut _
+ }
+}
+
+/// Extracts raw sockets.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait AsRawSocket {
+ /// Extracts the raw socket.
+ ///
+ /// This function is typically used to **borrow** an owned socket.
+ /// When used in this way, this method does **not** pass ownership of the
+ /// raw socket to the caller, and the socket is only guaranteed
+ /// to be valid while the original object has not yet been destroyed.
+ ///
+ /// However, borrowing is not strictly required. See [`AsSocket::as_socket`]
+ /// for an API which strictly borrows a socket.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_raw_socket(&self) -> RawSocket;
+}
+
+/// Creates I/O objects from raw sockets.
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+pub trait FromRawSocket {
+ /// Constructs a new I/O object from the specified raw socket.
+ ///
+ /// This function is typically used to **consume ownership** of the socket
+ /// given, passing responsibility for closing the socket to the returned
+ /// object. When used in this way, the returned object
+ /// will take responsibility for closing it when the object goes out of
+ /// scope.
+ ///
+ /// However, consuming ownership is not strictly required. Use a
+ /// `From<OwnedSocket>::from` implementation for an API which strictly
+ /// consumes ownership.
+ ///
+ /// # Safety
+ ///
+ /// The `socket` passed in must:
+ /// - be a valid and open socket,
+ /// - be a socket that may be freed via [`closesocket`].
+ ///
+ /// [`closesocket`]: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-closesocket
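+ ///
+ /// # Example
+ ///
+ /// A sketch of a round trip through a raw socket, using a listener bound to
+ /// an ephemeral local port:
+ ///
+ /// ```rust,no_run
+ /// use std::net::TcpListener;
+ /// # use std::io;
+ /// use std::os::windows::io::{FromRawSocket, IntoRawSocket};
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:0")?;
+ /// let raw_socket = listener.into_raw_socket();
+ /// // SAFETY: `raw_socket` came from `into_raw_socket`, so it is open, owned
+ /// // by no one else, and closeable with `closesocket`.
+ /// let listener = unsafe { TcpListener::from_raw_socket(raw_socket) };
+ /// # drop(listener);
+ /// # Ok::<(), io::Error>(())
+ /// ```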
+ #[stable(feature = "from_raw_os", since = "1.1.0")]
+ unsafe fn from_raw_socket(sock: RawSocket) -> Self;
+}
+
+/// A trait to express the ability to consume an object and acquire ownership of
+/// its raw `SOCKET`.
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+pub trait IntoRawSocket {
+ /// Consumes this object, returning the raw underlying socket.
+ ///
+ /// This function is typically used to **transfer ownership** of the underlying
+ /// socket to the caller. When used in this way, callers are then the unique
+ /// owners of the socket and must close it once it's no longer needed.
+ ///
+ /// However, transferring ownership is not strictly required. Use a
+ /// `Into<OwnedSocket>::into` implementation for an API which strictly
+ /// transfers ownership.
+ #[stable(feature = "into_raw_os", since = "1.4.0")]
+ fn into_raw_socket(self) -> RawSocket;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRawSocket for net::TcpStream {
+ #[inline]
+ fn as_raw_socket(&self) -> RawSocket {
+ self.as_inner().socket().as_raw_socket()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRawSocket for net::TcpListener {
+ #[inline]
+ fn as_raw_socket(&self) -> RawSocket {
+ self.as_inner().socket().as_raw_socket()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRawSocket for net::UdpSocket {
+ #[inline]
+ fn as_raw_socket(&self) -> RawSocket {
+ self.as_inner().socket().as_raw_socket()
+ }
+}
+
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+impl FromRawSocket for net::TcpStream {
+ #[inline]
+ unsafe fn from_raw_socket(sock: RawSocket) -> net::TcpStream {
+ let sock = sys::net::Socket::from_inner(OwnedSocket::from_raw_socket(sock));
+ net::TcpStream::from_inner(sys_common::net::TcpStream::from_inner(sock))
+ }
+}
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+impl FromRawSocket for net::TcpListener {
+ #[inline]
+ unsafe fn from_raw_socket(sock: RawSocket) -> net::TcpListener {
+ let sock = sys::net::Socket::from_inner(OwnedSocket::from_raw_socket(sock));
+ net::TcpListener::from_inner(sys_common::net::TcpListener::from_inner(sock))
+ }
+}
+#[stable(feature = "from_raw_os", since = "1.1.0")]
+impl FromRawSocket for net::UdpSocket {
+ #[inline]
+ unsafe fn from_raw_socket(sock: RawSocket) -> net::UdpSocket {
+ let sock = sys::net::Socket::from_inner(OwnedSocket::from_raw_socket(sock));
+ net::UdpSocket::from_inner(sys_common::net::UdpSocket::from_inner(sock))
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawSocket for net::TcpStream {
+ #[inline]
+ fn into_raw_socket(self) -> RawSocket {
+ self.into_inner().into_socket().into_inner().into_raw_socket()
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawSocket for net::TcpListener {
+ #[inline]
+ fn into_raw_socket(self) -> RawSocket {
+ self.into_inner().into_socket().into_inner().into_raw_socket()
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawSocket for net::UdpSocket {
+ #[inline]
+ fn into_raw_socket(self) -> RawSocket {
+ self.into_inner().into_socket().into_inner().into_raw_socket()
+ }
+}
diff --git a/library/std/src/os/windows/io/socket.rs b/library/std/src/os/windows/io/socket.rs
new file mode 100644
index 000000000..72cb3406d
--- /dev/null
+++ b/library/std/src/os/windows/io/socket.rs
@@ -0,0 +1,338 @@
+//! Owned and borrowed OS sockets.
+
+#![stable(feature = "io_safety", since = "1.63.0")]
+
+use super::raw::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::mem;
+use crate::mem::forget;
+use crate::sys;
+use crate::sys::c;
+#[cfg(not(target_vendor = "uwp"))]
+use crate::sys::cvt;
+
+/// A borrowed socket.
+///
+/// This has a lifetime parameter to tie it to the lifetime of something that
+/// owns the socket.
+///
+/// This uses `repr(transparent)` and has the representation of a host socket,
+/// so it can be used in FFI in places where a socket is passed as an argument,
+/// it is not captured or consumed, and it never has the value
+/// `INVALID_SOCKET`.
+///
+/// This type's `.to_owned()` implementation returns another `BorrowedSocket`
+/// rather than an `OwnedSocket`. It just makes a trivial copy of the raw
+/// socket, which is then borrowed under the same lifetime.
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// This is -2, in two's complement. -1 is `INVALID_SOCKET`.
+#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE))]
+#[cfg_attr(
+ target_pointer_width = "64",
+ rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FF_FF_FF_FF_FE)
+)]
+#[rustc_nonnull_optimization_guaranteed]
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub struct BorrowedSocket<'socket> {
+ socket: RawSocket,
+ _phantom: PhantomData<&'socket OwnedSocket>,
+}
+
+/// An owned socket.
+///
+/// This closes the socket on drop.
+///
+/// This uses `repr(transparent)` and has the representation of a host socket,
+/// so it can be used in FFI in places where a socket is passed as a consumed
+/// argument or returned as an owned value, and it never has the value
+/// `INVALID_SOCKET`.
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// This is -2, in two's complement. -1 is `INVALID_SOCKET`.
+#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE))]
+#[cfg_attr(
+ target_pointer_width = "64",
+ rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FF_FF_FF_FF_FE)
+)]
+#[rustc_nonnull_optimization_guaranteed]
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub struct OwnedSocket {
+ socket: RawSocket,
+}
+
+impl BorrowedSocket<'_> {
+ /// Return a `BorrowedSocket` holding the given raw socket.
+ ///
+ /// # Safety
+ ///
+ /// The resource pointed to by `socket` must remain open for the duration of
+ /// the returned `BorrowedSocket`, and it must not have the value
+ /// `INVALID_SOCKET`.
+ #[inline]
+ #[rustc_const_stable(feature = "io_safety", since = "1.63.0")]
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub const unsafe fn borrow_raw(socket: RawSocket) -> Self {
+ assert!(socket != c::INVALID_SOCKET as RawSocket);
+ Self { socket, _phantom: PhantomData }
+ }
+}
+
+impl OwnedSocket {
+ /// Creates a new `OwnedSocket` instance that shares the same underlying
+ /// object as the existing `OwnedSocket` instance.
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub fn try_clone(&self) -> io::Result<Self> {
+ self.as_socket().try_clone_to_owned()
+ }
+
+ // FIXME(strict_provenance_magic): we defined RawSocket to be a u64 ;-;
+ #[cfg(not(target_vendor = "uwp"))]
+ pub(crate) fn set_no_inherit(&self) -> io::Result<()> {
+ cvt(unsafe {
+ c::SetHandleInformation(self.as_raw_socket() as c::HANDLE, c::HANDLE_FLAG_INHERIT, 0)
+ })
+ .map(drop)
+ }
+
+ #[cfg(target_vendor = "uwp")]
+ pub(crate) fn set_no_inherit(&self) -> io::Result<()> {
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "Unavailable on UWP"))
+ }
+}
+
+impl BorrowedSocket<'_> {
+ /// Creates a new `OwnedSocket` instance that shares the same underlying
+ /// object as the existing `BorrowedSocket` instance.
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ pub fn try_clone_to_owned(&self) -> io::Result<OwnedSocket> {
+ let mut info = unsafe { mem::zeroed::<c::WSAPROTOCOL_INFO>() };
+ let result = unsafe {
+ c::WSADuplicateSocketW(self.as_raw_socket(), c::GetCurrentProcessId(), &mut info)
+ };
+ sys::net::cvt(result)?;
+ let socket = unsafe {
+ c::WSASocketW(
+ info.iAddressFamily,
+ info.iSocketType,
+ info.iProtocol,
+ &mut info,
+ 0,
+ c::WSA_FLAG_OVERLAPPED | c::WSA_FLAG_NO_HANDLE_INHERIT,
+ )
+ };
+
+ if socket != c::INVALID_SOCKET {
+ unsafe { Ok(OwnedSocket::from_raw_socket(socket)) }
+ } else {
+ let error = unsafe { c::WSAGetLastError() };
+
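+ // `WSA_FLAG_NO_HANDLE_INHERIT` may be unsupported (for example on older
+ // Windows versions or with certain layered service providers), which
+ // surfaces as `WSAEPROTOTYPE` or `WSAEINVAL`; in that case, retry below
+ // without the flag and clear inheritance via `set_no_inherit`. Any other
+ // error is returned as-is.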
+ if error != c::WSAEPROTOTYPE && error != c::WSAEINVAL {
+ return Err(io::Error::from_raw_os_error(error));
+ }
+
+ let socket = unsafe {
+ c::WSASocketW(
+ info.iAddressFamily,
+ info.iSocketType,
+ info.iProtocol,
+ &mut info,
+ 0,
+ c::WSA_FLAG_OVERLAPPED,
+ )
+ };
+
+ if socket == c::INVALID_SOCKET {
+ return Err(last_error());
+ }
+
+ unsafe {
+ let socket = OwnedSocket::from_raw_socket(socket);
+ socket.set_no_inherit()?;
+ Ok(socket)
+ }
+ }
+ }
+}
+
+/// Returns the last error from the Windows socket interface.
+fn last_error() -> io::Error {
+ io::Error::from_raw_os_error(unsafe { c::WSAGetLastError() })
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsRawSocket for BorrowedSocket<'_> {
+ #[inline]
+ fn as_raw_socket(&self) -> RawSocket {
+ self.socket
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsRawSocket for OwnedSocket {
+ #[inline]
+ fn as_raw_socket(&self) -> RawSocket {
+ self.socket
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl IntoRawSocket for OwnedSocket {
+ #[inline]
+ fn into_raw_socket(self) -> RawSocket {
+ let socket = self.socket;
+ forget(self);
+ socket
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl FromRawSocket for OwnedSocket {
+ #[inline]
+ unsafe fn from_raw_socket(socket: RawSocket) -> Self {
+ debug_assert_ne!(socket, c::INVALID_SOCKET as RawSocket);
+ Self { socket }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl Drop for OwnedSocket {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ let _ = c::closesocket(self.socket);
+ }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Debug for BorrowedSocket<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BorrowedSocket").field("socket", &self.socket).finish()
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl fmt::Debug for OwnedSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OwnedSocket").field("socket", &self.socket).finish()
+ }
+}
+
+/// A trait to borrow the socket from an underlying object.
+#[stable(feature = "io_safety", since = "1.63.0")]
+pub trait AsSocket {
+ /// Borrows the socket.
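+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, mirroring `AsHandle::as_handle` but for sockets:
+ ///
+ /// ```rust,no_run
+ /// use std::net::TcpListener;
+ /// # use std::io;
+ /// use std::os::windows::io::{AsSocket, BorrowedSocket};
+ ///
+ /// let listener = TcpListener::bind("127.0.0.1:0")?;
+ /// let borrowed_socket: BorrowedSocket<'_> = listener.as_socket();
+ /// # let _ = borrowed_socket;
+ /// # Ok::<(), io::Error>(())
+ /// ```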
+ #[stable(feature = "io_safety", since = "1.63.0")]
+ fn as_socket(&self) -> BorrowedSocket<'_>;
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T: AsSocket> AsSocket for &T {
+ #[inline]
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ T::as_socket(self)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<T: AsSocket> AsSocket for &mut T {
+ #[inline]
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ T::as_socket(self)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsSocket for BorrowedSocket<'_> {
+ #[inline]
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ *self
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsSocket for OwnedSocket {
+ #[inline]
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ // Safety: `OwnedSocket` and `BorrowedSocket` have the same validity
+ // invariants, and the `BorrowedSocket` is bounded by the lifetime
+ // of `&self`.
+ unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsSocket for crate::net::TcpStream {
+ #[inline]
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::net::TcpStream> for OwnedSocket {
+ #[inline]
+ fn from(tcp_stream: crate::net::TcpStream) -> OwnedSocket {
+ unsafe { OwnedSocket::from_raw_socket(tcp_stream.into_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedSocket> for crate::net::TcpStream {
+ #[inline]
+ fn from(owned: OwnedSocket) -> Self {
+ unsafe { Self::from_raw_socket(owned.into_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsSocket for crate::net::TcpListener {
+ #[inline]
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::net::TcpListener> for OwnedSocket {
+ #[inline]
+ fn from(tcp_listener: crate::net::TcpListener) -> OwnedSocket {
+ unsafe { OwnedSocket::from_raw_socket(tcp_listener.into_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedSocket> for crate::net::TcpListener {
+ #[inline]
+ fn from(owned: OwnedSocket) -> Self {
+ unsafe { Self::from_raw_socket(owned.into_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsSocket for crate::net::UdpSocket {
+ #[inline]
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<crate::net::UdpSocket> for OwnedSocket {
+ #[inline]
+ fn from(udp_socket: crate::net::UdpSocket) -> OwnedSocket {
+ unsafe { OwnedSocket::from_raw_socket(udp_socket.into_raw_socket()) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedSocket> for crate::net::UdpSocket {
+ #[inline]
+ fn from(owned: OwnedSocket) -> Self {
+ unsafe { Self::from_raw_socket(owned.into_raw_socket()) }
+ }
+}
diff --git a/library/std/src/os/windows/io/tests.rs b/library/std/src/os/windows/io/tests.rs
new file mode 100644
index 000000000..41734e52e
--- /dev/null
+++ b/library/std/src/os/windows/io/tests.rs
@@ -0,0 +1,21 @@
+#[test]
+fn test_niche_optimizations_socket() {
+ use crate::mem::size_of;
+ use crate::os::windows::io::{
+ BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket,
+ };
+
+ assert_eq!(size_of::<Option<OwnedSocket>>(), size_of::<RawSocket>());
+ assert_eq!(size_of::<Option<BorrowedSocket<'static>>>(), size_of::<RawSocket>(),);
+ unsafe {
+ #[cfg(target_pointer_width = "32")]
+ let (min, max) = (i32::MIN as u32, i32::MAX as u32);
+ #[cfg(target_pointer_width = "64")]
+ let (min, max) = (i64::MIN as u64, i64::MAX as u64);
+
+ assert_eq!(OwnedSocket::from_raw_socket(min).into_raw_socket(), min);
+ assert_eq!(OwnedSocket::from_raw_socket(max).into_raw_socket(), max);
+ assert_eq!(Some(OwnedSocket::from_raw_socket(min)).unwrap().into_raw_socket(), min);
+ assert_eq!(Some(OwnedSocket::from_raw_socket(max)).unwrap().into_raw_socket(), max);
+ }
+}
diff --git a/library/std/src/os/windows/mod.rs b/library/std/src/os/windows/mod.rs
new file mode 100644
index 000000000..52eb3b7c0
--- /dev/null
+++ b/library/std/src/os/windows/mod.rs
@@ -0,0 +1,58 @@
+//! Platform-specific extensions to `std` for Windows.
+//!
+//! Provides access to platform-level information for Windows, and exposes
+//! Windows-specific idioms that would otherwise be inappropriate as part of
+//! the core `std` library. These extensions allow developers to use
+//! `std` types and idioms with Windows in a way that the normal
+//! platform-agnostic idioms would not otherwise support.
+//!
+//! # Examples
+//!
+//! ```no_run
+//! use std::fs::File;
+//! use std::os::windows::prelude::*;
+//!
+//! fn main() -> std::io::Result<()> {
+//! let f = File::create("foo.txt")?;
+//! let handle = f.as_raw_handle();
+//!
+//! // use handle with native windows bindings
+//!
+//! Ok(())
+//! }
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![doc(cfg(windows))]
+
+pub mod ffi;
+pub mod fs;
+pub mod io;
+pub mod process;
+pub mod raw;
+pub mod thread;
+
+/// A prelude for conveniently writing platform-specific code.
+///
+/// Includes all extension traits, and some important type definitions.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod prelude {
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::ffi::{OsStrExt, OsStringExt};
+ #[doc(no_inline)]
+ #[stable(feature = "file_offset", since = "1.15.0")]
+ pub use super::fs::FileExt;
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::fs::{MetadataExt, OpenOptionsExt};
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::io::{
+ AsHandle, AsSocket, BorrowedHandle, BorrowedSocket, FromRawHandle, FromRawSocket,
+ HandleOrInvalid, IntoRawHandle, IntoRawSocket, OwnedHandle, OwnedSocket,
+ };
+ #[doc(no_inline)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::io::{AsRawHandle, AsRawSocket, RawHandle, RawSocket};
+}
diff --git a/library/std/src/os/windows/process.rs b/library/std/src/os/windows/process.rs
new file mode 100644
index 000000000..073168cf2
--- /dev/null
+++ b/library/std/src/os/windows/process.rs
@@ -0,0 +1,259 @@
+//! Windows-specific extensions to primitives in the [`std::process`] module.
+//!
+//! [`std::process`]: crate::process
+
+#![stable(feature = "process_extensions", since = "1.2.0")]
+
+use crate::ffi::OsStr;
+use crate::os::windows::io::{
+ AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle,
+};
+use crate::process;
+use crate::sealed::Sealed;
+use crate::sys;
+use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl FromRawHandle for process::Stdio {
+ unsafe fn from_raw_handle(handle: RawHandle) -> process::Stdio {
+ let handle = sys::handle::Handle::from_raw_handle(handle as *mut _);
+ let io = sys::process::Stdio::Handle(handle);
+ process::Stdio::from_inner(io)
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<OwnedHandle> for process::Stdio {
+ fn from(handle: OwnedHandle) -> process::Stdio {
+ let handle = sys::handle::Handle::from_inner(handle);
+ let io = sys::process::Stdio::Handle(handle);
+ process::Stdio::from_inner(io)
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl AsRawHandle for process::Child {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.as_inner().handle().as_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsHandle for process::Child {
+ #[inline]
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ self.as_inner().handle().as_handle()
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawHandle for process::Child {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl From<process::Child> for OwnedHandle {
+ fn from(child: process::Child) -> OwnedHandle {
+ child.into_inner().into_handle().into_inner()
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl AsRawHandle for process::ChildStdin {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.as_inner().handle().as_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl AsRawHandle for process::ChildStdout {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.as_inner().handle().as_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "process_extensions", since = "1.2.0")]
+impl AsRawHandle for process::ChildStderr {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.as_inner().handle().as_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawHandle for process::ChildStdin {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawHandle for process::ChildStdout {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "into_raw_os", since = "1.4.0")]
+impl IntoRawHandle for process::ChildStderr {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw_handle() as *mut _
+ }
+}
+
+/// Windows-specific extensions to [`process::ExitStatus`].
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "exit_status_from", since = "1.12.0")]
+pub trait ExitStatusExt: Sealed {
+ /// Creates a new `ExitStatus` from the raw underlying `u32` return value of
+ /// a process.
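+ ///
+ /// # Example
+ ///
+ /// A minimal sketch: a raw value of `0` corresponds to a successful exit.
+ ///
+ /// ```
+ /// use std::os::windows::process::ExitStatusExt;
+ /// use std::process::ExitStatus;
+ ///
+ /// let status = ExitStatus::from_raw(0);
+ /// assert!(status.success());
+ /// ```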
+ #[stable(feature = "exit_status_from", since = "1.12.0")]
+ fn from_raw(raw: u32) -> Self;
+}
+
+#[stable(feature = "exit_status_from", since = "1.12.0")]
+impl ExitStatusExt for process::ExitStatus {
+ fn from_raw(raw: u32) -> Self {
+ process::ExitStatus::from_inner(From::from(raw))
+ }
+}
+
+/// Windows-specific extensions to the [`process::Command`] builder.
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[stable(feature = "windows_process_extensions", since = "1.16.0")]
+pub trait CommandExt: Sealed {
+ /// Sets the [process creation flags][1] to be passed to `CreateProcess`.
+ ///
+ /// These will always be ORed with `CREATE_UNICODE_ENVIRONMENT`.
+ ///
+ /// [1]: https://docs.microsoft.com/en-us/windows/win32/procthread/process-creation-flags
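+ ///
+ /// # Example
+ ///
+ /// A sketch that hides the console window of a spawned `cmd.exe`;
+ /// `CREATE_NO_WINDOW` (`0x08000000`) is taken from the process creation
+ /// flags linked above:
+ ///
+ /// ```rust,no_run
+ /// use std::os::windows::process::CommandExt;
+ /// use std::process::Command;
+ ///
+ /// const CREATE_NO_WINDOW: u32 = 0x0800_0000;
+ ///
+ /// let _status = Command::new("cmd")
+ ///     .args(["/C", "echo hello"])
+ ///     .creation_flags(CREATE_NO_WINDOW)
+ ///     .status();
+ /// ```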
+ #[stable(feature = "windows_process_extensions", since = "1.16.0")]
+ fn creation_flags(&mut self, flags: u32) -> &mut process::Command;
+
+ /// Forces all arguments to be wrapped in quote (`"`) characters.
+ ///
+ /// This is useful for passing arguments to [MSYS2/Cygwin][1] based
+ /// executables: these programs will expand unquoted arguments containing
+ /// wildcard characters (`?` and `*`) by searching for any file paths
+ /// matching the wildcard pattern.
+ ///
+ /// Adding quotes has no effect when passing arguments to programs
+ /// that use [msvcrt][2]. This includes programs built with both
+ /// MinGW and MSVC.
+ ///
+ /// [1]: <https://github.com/msys2/MSYS2-packages/issues/2176>
+ /// [2]: <https://msdn.microsoft.com/en-us/library/17w5ykft.aspx>
+ #[unstable(feature = "windows_process_extensions_force_quotes", issue = "82227")]
+ fn force_quotes(&mut self, enabled: bool) -> &mut process::Command;
+
+ /// Append literal text to the command line without any quoting or escaping.
+ ///
+ /// This is useful for passing arguments to `cmd.exe /c`, which doesn't follow
+ /// `CommandLineToArgvW` escaping rules.
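+ ///
+ /// # Example
+ ///
+ /// A sketch of handing a command line to `cmd.exe /c` verbatim:
+ ///
+ /// ```rust,no_run
+ /// use std::os::windows::process::CommandExt;
+ /// use std::process::Command;
+ ///
+ /// let _status = Command::new("cmd")
+ ///     .arg("/c")
+ ///     .raw_arg("echo hello & echo world")
+ ///     .status();
+ /// ```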
+ #[stable(feature = "windows_process_extensions_raw_arg", since = "1.62.0")]
+ fn raw_arg<S: AsRef<OsStr>>(&mut self, text_to_append_as_is: S) -> &mut process::Command;
+
+ /// When [`process::Command`] creates pipes, request that our side is always async.
+ ///
+ /// By default [`process::Command`] may choose to use pipes where both ends
+ /// are opened for synchronous read or write operations. By using
+ /// `async_pipes(true)`, this behavior is overridden so that our side is
+ /// always async.
+ ///
+ /// This is important because when doing async I/O, a pipe or a file has to
+ /// be opened for async access.
+ ///
+ /// The end of the pipe sent to the child process will always be synchronous
+ /// regardless of this option.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(windows_process_extensions_async_pipes)]
+ /// use std::os::windows::process::CommandExt;
+ /// use std::process::{Command, Stdio};
+ ///
+ /// # let program = "";
+ ///
+ /// Command::new(program)
+ /// .async_pipes(true)
+ /// .stdin(Stdio::piped())
+ /// .stdout(Stdio::piped())
+ /// .stderr(Stdio::piped());
+ /// ```
+ #[unstable(feature = "windows_process_extensions_async_pipes", issue = "98289")]
+ fn async_pipes(&mut self, always_async: bool) -> &mut process::Command;
+}
+
+#[stable(feature = "windows_process_extensions", since = "1.16.0")]
+impl CommandExt for process::Command {
+ fn creation_flags(&mut self, flags: u32) -> &mut process::Command {
+ self.as_inner_mut().creation_flags(flags);
+ self
+ }
+
+ fn force_quotes(&mut self, enabled: bool) -> &mut process::Command {
+ self.as_inner_mut().force_quotes(enabled);
+ self
+ }
+
+ fn raw_arg<S: AsRef<OsStr>>(&mut self, raw_text: S) -> &mut process::Command {
+ self.as_inner_mut().raw_arg(raw_text.as_ref());
+ self
+ }
+
+ fn async_pipes(&mut self, always_async: bool) -> &mut process::Command {
+ // FIXME: This currently has an intentional no-op implementation.
+ // For the time being our side of the pipes will always be async.
+ // Once the ecosystem has adjusted, we may then be able to start making
+ // use of synchronous pipes within the standard library.
+ let _ = always_async;
+ self
+ }
+}
+
+#[unstable(feature = "windows_process_extensions_main_thread_handle", issue = "96723")]
+pub trait ChildExt: Sealed {
+ /// Extracts the main thread's raw handle, without taking ownership.
+ #[unstable(feature = "windows_process_extensions_main_thread_handle", issue = "96723")]
+ fn main_thread_handle(&self) -> BorrowedHandle<'_>;
+}
+
+#[unstable(feature = "windows_process_extensions_main_thread_handle", issue = "96723")]
+impl ChildExt for process::Child {
+ fn main_thread_handle(&self) -> BorrowedHandle<'_> {
+ self.handle.main_thread_handle()
+ }
+}
+
+/// Windows-specific extensions to [`process::ExitCode`].
+///
+/// This trait is sealed: it cannot be implemented outside the standard library.
+/// This is so that future additional methods are not breaking changes.
+#[unstable(feature = "windows_process_exit_code_from", issue = "none")]
+pub trait ExitCodeExt: Sealed {
+ /// Creates a new `ExitCode` from the raw underlying `u32` return value of
+ /// a process.
+ ///
+ /// The exit code should not be 259, as this value conflicts with
+ /// `STILL_ACTIVE`, which `GetExitCodeProcess` returns to signal that the
+ /// process has yet to run to completion.
+ #[unstable(feature = "windows_process_exit_code_from", issue = "none")]
+ fn from_raw(raw: u32) -> Self;
+}
+
+#[unstable(feature = "windows_process_exit_code_from", issue = "none")]
+impl ExitCodeExt for process::ExitCode {
+ fn from_raw(raw: u32) -> Self {
+ process::ExitCode::from_inner(From::from(raw))
+ }
+}
diff --git a/library/std/src/os/windows/raw.rs b/library/std/src/os/windows/raw.rs
new file mode 100644
index 000000000..0ef3adade
--- /dev/null
+++ b/library/std/src/os/windows/raw.rs
@@ -0,0 +1,16 @@
+//! Windows-specific primitives.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+use crate::os::raw::c_void;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type HANDLE = *mut c_void;
+#[cfg(target_pointer_width = "32")]
+#[doc(cfg(all()))]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type SOCKET = u32;
+#[cfg(target_pointer_width = "64")]
+#[doc(cfg(all()))]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type SOCKET = u64;
diff --git a/library/std/src/os/windows/thread.rs b/library/std/src/os/windows/thread.rs
new file mode 100644
index 000000000..d81d6d0ac
--- /dev/null
+++ b/library/std/src/os/windows/thread.rs
@@ -0,0 +1,25 @@
+//! Windows-specific extensions to primitives in the [`std::thread`] module.
+//!
+//! [`std::thread`]: crate::thread
+
+#![stable(feature = "thread_extensions", since = "1.9.0")]
+
+use crate::os::windows::io::{AsRawHandle, IntoRawHandle, RawHandle};
+use crate::sys_common::{AsInner, IntoInner};
+use crate::thread;
+
+#[stable(feature = "thread_extensions", since = "1.9.0")]
+impl<T> AsRawHandle for thread::JoinHandle<T> {
+ #[inline]
+ fn as_raw_handle(&self) -> RawHandle {
+ self.as_inner().handle().as_raw_handle() as *mut _
+ }
+}
+
+#[stable(feature = "thread_extensions", since = "1.9.0")]
+impl<T> IntoRawHandle for thread::JoinHandle<T> {
+ #[inline]
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_handle().into_raw_handle() as *mut _
+ }
+}
diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
new file mode 100644
index 000000000..45bc56efb
--- /dev/null
+++ b/library/std/src/panic.rs
@@ -0,0 +1,320 @@
+//! Panic support in the standard library.
+
+#![stable(feature = "std_panic", since = "1.9.0")]
+
+use crate::any::Any;
+use crate::collections;
+use crate::panicking;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::{Mutex, RwLock};
+use crate::thread::Result;
+
+#[doc(hidden)]
+#[unstable(feature = "edition_panic", issue = "none", reason = "use panic!() instead")]
+#[allow_internal_unstable(libstd_sys_internals, const_format_args, core_panic, rt)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "std_panic_2015_macro")]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro panic_2015 {
+ () => ({
+ $crate::rt::begin_panic("explicit panic")
+ }),
+ ($msg:expr $(,)?) => ({
+ $crate::rt::begin_panic($msg)
+ }),
+ // Special-case the single-argument case for const_panic.
+ ("{}", $arg:expr $(,)?) => ({
+ $crate::rt::panic_display(&$arg)
+ }),
+ ($fmt:expr, $($arg:tt)+) => ({
+ $crate::rt::panic_fmt($crate::const_format_args!($fmt, $($arg)+))
+ }),
+}
+
+#[doc(hidden)]
+#[unstable(feature = "edition_panic", issue = "none", reason = "use panic!() instead")]
+pub use core::panic::panic_2021;
+
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub use crate::panicking::{set_hook, take_hook};
+
+#[unstable(feature = "panic_update_hook", issue = "92649")]
+pub use crate::panicking::update_hook;
+
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub use core::panic::{Location, PanicInfo};
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+pub use core::panic::{AssertUnwindSafe, RefUnwindSafe, UnwindSafe};
+
+/// Panics the current thread with the given message as the panic payload.
+///
+/// The message can be of any (`Any + Send`) type, not just strings.
+///
+/// The message is wrapped in a `Box<dyn Any + Send + 'static>`, which can be
+/// accessed later using [`PanicInfo::payload`].
+///
+/// See the [`panic!`] macro for more information about panicking.
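+///
+/// # Example
+///
+/// A small sketch: the payload can be any `'static + Send` value, and it can
+/// be recovered by downcasting on the other side of [`catch_unwind`]:
+///
+/// ```
+/// use std::panic;
+///
+/// let result = panic::catch_unwind(|| {
+///     panic::panic_any(42_i32);
+/// });
+/// let payload = result.unwrap_err();
+/// assert_eq!(*payload.downcast::<i32>().unwrap(), 42);
+/// ```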
+#[stable(feature = "panic_any", since = "1.51.0")]
+#[inline]
+#[track_caller]
+pub fn panic_any<M: 'static + Any + Send>(msg: M) -> ! {
+ crate::panicking::begin_panic(msg);
+}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: ?Sized> UnwindSafe for Mutex<T> {}
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: ?Sized> UnwindSafe for RwLock<T> {}
+
+#[stable(feature = "unwind_safe_lock_refs", since = "1.12.0")]
+impl<T: ?Sized> RefUnwindSafe for Mutex<T> {}
+#[stable(feature = "unwind_safe_lock_refs", since = "1.12.0")]
+impl<T: ?Sized> RefUnwindSafe for RwLock<T> {}
+
+// https://github.com/rust-lang/rust/issues/62301
+#[stable(feature = "hashbrown", since = "1.36.0")]
+impl<K, V, S> UnwindSafe for collections::HashMap<K, V, S>
+where
+ K: UnwindSafe,
+ V: UnwindSafe,
+ S: UnwindSafe,
+{
+}
+
+/// Invokes a closure, capturing the cause of an unwinding panic if one occurs.
+///
+/// This function will return `Ok` with the closure's result if the closure
+/// does not panic, and will return `Err(cause)` if the closure panics. The
+/// `cause` returned is the object with which panic was originally invoked.
+///
+/// It is currently undefined behavior to unwind from Rust code into foreign
+/// code, so this function is particularly useful when Rust is called from
+/// another language (normally C). This can run arbitrary Rust code, capturing a
+/// panic and allowing a graceful handling of the error.
+///
+/// It is **not** recommended to use this function for a general try/catch
+/// mechanism. The [`Result`] type is more appropriate to use for functions that
+/// can fail on a regular basis. Additionally, this function is not guaranteed
+/// to catch all panics, see the "Notes" section below.
+///
+/// The closure provided is required to adhere to the [`UnwindSafe`] trait to ensure
+/// that all captured variables are safe to cross this boundary. The purpose of
+/// this bound is to encode the concept of [exception safety][rfc] in the type
+/// system. Most usage of this function should not need to worry about this
+/// bound as programs are naturally unwind safe without `unsafe` code. If it
+/// becomes a problem the [`AssertUnwindSafe`] wrapper struct can be used to quickly
+/// assert that the usage here is indeed unwind safe.
+///
+/// [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/1236-stabilize-catch-panic.md
+///
+/// # Notes
+///
+/// Note that this function **might not catch all panics** in Rust. A panic in
+/// Rust is not always implemented via unwinding, but can be implemented by
+/// aborting the process as well. This function *only* catches unwinding panics,
+/// not those that abort the process.
+///
+/// Also note that unwinding into Rust code with a foreign exception (e.g.
+/// an exception thrown from C++ code) is undefined behavior.
+///
+/// # Examples
+///
+/// ```
+/// use std::panic;
+///
+/// let result = panic::catch_unwind(|| {
+/// println!("hello!");
+/// });
+/// assert!(result.is_ok());
+///
+/// let result = panic::catch_unwind(|| {
+/// panic!("oh no!");
+/// });
+/// assert!(result.is_err());
+/// ```
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+pub fn catch_unwind<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
+ unsafe { panicking::r#try(f) }
+}
+
+/// Triggers a panic without invoking the panic hook.
+///
+/// This is designed to be used in conjunction with [`catch_unwind`] to, for
+/// example, carry a panic across a layer of C code.
+///
+/// # Notes
+///
+/// Note that panics in Rust are not always implemented via unwinding, but they
+/// may be implemented by aborting the process. If this function is called when
+/// panics are implemented this way then this function will abort the process,
+/// not trigger an unwind.
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// let result = panic::catch_unwind(|| {
+/// panic!("oh no!");
+/// });
+///
+/// if let Err(err) = result {
+/// panic::resume_unwind(err);
+/// }
+/// ```
+#[stable(feature = "resume_unwind", since = "1.9.0")]
+pub fn resume_unwind(payload: Box<dyn Any + Send>) -> ! {
+ panicking::rust_panic_without_hook(payload)
+}
+
+/// Make all future panics abort directly without running the panic hook or unwinding.
+///
+/// There is no way to undo this; the effect lasts until the process exits or
+/// execs (or the equivalent).
+///
+/// # Use after fork
+///
+/// This function is particularly useful for calling after `libc::fork`. After `fork`, in a
+/// multithreaded program it is (on many platforms) not safe to call the allocator. It is also
+/// generally highly undesirable for an unwind to unwind past the `fork`, because that results in
+/// the unwind propagating to code that was only ever expecting to run in the parent.
+///
+/// `panic::always_abort()` helps avoid both of these. It directly avoids any further unwinding,
+/// and if there is a panic, the abort will occur without allocating, provided that the arguments to
+/// panic can be formatted without allocating.
+///
+/// # Examples
+///
+/// ```no_run
+/// #![feature(panic_always_abort)]
+/// use std::panic;
+///
+/// panic::always_abort();
+///
+/// let _ = panic::catch_unwind(|| {
+/// panic!("inside the catch");
+/// });
+///
+/// // We will have aborted already, due to the panic.
+/// unreachable!();
+/// ```
+#[unstable(feature = "panic_always_abort", issue = "84438")]
+pub fn always_abort() {
+ crate::panicking::panic_count::set_always_abort();
+}
+
+/// The configuration for whether and how the default panic hook will capture
+/// and display the backtrace.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[unstable(feature = "panic_backtrace_config", issue = "93346")]
+#[non_exhaustive]
+pub enum BacktraceStyle {
+ /// Prints a terser backtrace which ideally only contains relevant
+ /// information.
+ Short,
+ /// Prints a backtrace with all possible information.
+ Full,
+ /// Disable collecting and displaying backtraces.
+ Off,
+}
+
+impl BacktraceStyle {
+ pub(crate) fn full() -> Option<Self> {
+ if cfg!(feature = "backtrace") { Some(BacktraceStyle::Full) } else { None }
+ }
+
+ fn as_usize(self) -> usize {
+ match self {
+ BacktraceStyle::Short => 1,
+ BacktraceStyle::Full => 2,
+ BacktraceStyle::Off => 3,
+ }
+ }
+
+ fn from_usize(s: usize) -> Option<Self> {
+ Some(match s {
+ 0 => return None,
+ 1 => BacktraceStyle::Short,
+ 2 => BacktraceStyle::Full,
+ 3 => BacktraceStyle::Off,
+ _ => unreachable!(),
+ })
+ }
+}
+
+// Tracks whether we should/can capture a backtrace, and how we should display
+// that backtrace.
+//
+// Internally stores the equivalent of an `Option<BacktraceStyle>`.
+static SHOULD_CAPTURE: AtomicUsize = AtomicUsize::new(0);
+
+/// Configure whether the default panic hook will capture and display a
+/// backtrace.
+///
+/// The default value for this setting may be set by the `RUST_BACKTRACE`
+/// environment variable; see the details in [`get_backtrace_style`].
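+///
+/// # Example
+///
+/// A sketch (nightly-only, since this API is unstable) of turning backtrace
+/// capture off for the default hook:
+///
+/// ```
+/// #![feature(panic_backtrace_config)]
+/// use std::panic::{self, BacktraceStyle};
+///
+/// panic::set_backtrace_style(BacktraceStyle::Off);
+/// // The default panic hook will no longer capture or print a backtrace.
+/// ```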
+#[unstable(feature = "panic_backtrace_config", issue = "93346")]
+pub fn set_backtrace_style(style: BacktraceStyle) {
+ if !cfg!(feature = "backtrace") {
+ // If the `backtrace` feature of this crate isn't enabled, skip setting.
+ return;
+ }
+ SHOULD_CAPTURE.store(style.as_usize(), Ordering::Release);
+}
+
+/// Checks whether the standard library's panic hook will capture and print a
+/// backtrace.
+///
+/// This function will, if a backtrace style has not been set via
+/// [`set_backtrace_style`], read the environment variable `RUST_BACKTRACE` to
+/// determine a default value for the backtrace formatting:
+///
+/// The first call to `get_backtrace_style` may read the `RUST_BACKTRACE`
+/// environment variable if `set_backtrace_style` has not been called to
+/// override the default value. After a call to `set_backtrace_style` or
+/// `get_backtrace_style`, any changes to `RUST_BACKTRACE` will have no effect.
+///
+/// `RUST_BACKTRACE` is read according to these rules:
+///
+/// * `0` for `BacktraceStyle::Off`
+/// * `full` for `BacktraceStyle::Full`
+/// * `1` for `BacktraceStyle::Short`
+/// * Other values are currently `BacktraceStyle::Short`, but this may change in
+/// the future
+///
+/// Returns `None` if backtraces aren't currently supported.
+#[unstable(feature = "panic_backtrace_config", issue = "93346")]
+pub fn get_backtrace_style() -> Option<BacktraceStyle> {
+ if !cfg!(feature = "backtrace") {
+ // If the `backtrace` feature of this crate isn't enabled quickly return
+ // `Unsupported` so this can be constant propagated all over the place
+ // to optimize away callers.
+ return None;
+ }
+ if let Some(style) = BacktraceStyle::from_usize(SHOULD_CAPTURE.load(Ordering::Acquire)) {
+ return Some(style);
+ }
+
+ // Setting environment variables for Fuchsia components isn't a standard
+ // or easily supported workflow. For now, display backtraces by default.
+ let format = if cfg!(target_os = "fuchsia") {
+ BacktraceStyle::Full
+ } else {
+ crate::env::var_os("RUST_BACKTRACE")
+ .map(|x| {
+ if &x == "0" {
+ BacktraceStyle::Off
+ } else if &x == "full" {
+ BacktraceStyle::Full
+ } else {
+ BacktraceStyle::Short
+ }
+ })
+ .unwrap_or(BacktraceStyle::Off)
+ };
+ set_backtrace_style(format);
+ Some(format)
+}
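+
+// Illustrative only (not part of this patch): a nightly program using the
+// unstable `panic_backtrace_config` feature could pick the style up front
+// instead of relying on the RUST_BACKTRACE environment variable, e.g.
+//
+//     #![feature(panic_backtrace_config)]
+//     use std::panic::{set_backtrace_style, BacktraceStyle};
+//
+//     fn main() {
+//         // Takes precedence over any later read of RUST_BACKTRACE by the
+//         // default panic hook.
+//         set_backtrace_style(BacktraceStyle::Full);
+//     }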
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/panic/tests.rs b/library/std/src/panic/tests.rs
new file mode 100644
index 000000000..b37d74011
--- /dev/null
+++ b/library/std/src/panic/tests.rs
@@ -0,0 +1,56 @@
+#![allow(dead_code)]
+
+use crate::cell::RefCell;
+use crate::panic::{AssertUnwindSafe, UnwindSafe};
+use crate::rc::Rc;
+use crate::sync::{Arc, Mutex, RwLock};
+
+struct Foo {
+ a: i32,
+}
+
+fn assert<T: UnwindSafe + ?Sized>() {}
+
+#[test]
+fn panic_safety_traits() {
+ assert::<i32>();
+ assert::<&i32>();
+ assert::<*mut i32>();
+ assert::<*const i32>();
+ assert::<usize>();
+ assert::<str>();
+ assert::<&str>();
+ assert::<Foo>();
+ assert::<&Foo>();
+ assert::<Vec<i32>>();
+ assert::<String>();
+ assert::<RefCell<i32>>();
+ assert::<Box<i32>>();
+ assert::<Mutex<i32>>();
+ assert::<RwLock<i32>>();
+ assert::<&Mutex<i32>>();
+ assert::<&RwLock<i32>>();
+ assert::<Rc<i32>>();
+ assert::<Arc<i32>>();
+ assert::<Box<[u8]>>();
+
+ {
+ trait Trait: UnwindSafe {}
+ assert::<Box<dyn Trait>>();
+ }
+
+ fn bar<T>() {
+ assert::<Mutex<T>>();
+ assert::<RwLock<T>>();
+ }
+
+ fn baz<T: UnwindSafe>() {
+ assert::<Box<T>>();
+ assert::<Vec<T>>();
+ assert::<RefCell<T>>();
+ assert::<AssertUnwindSafe<T>>();
+ assert::<&AssertUnwindSafe<T>>();
+ assert::<Rc<AssertUnwindSafe<T>>>();
+ assert::<Arc<AssertUnwindSafe<T>>>();
+ }
+}
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
new file mode 100644
index 000000000..25c9201f2
--- /dev/null
+++ b/library/std/src/panicking.rs
@@ -0,0 +1,749 @@
+//! Implementation of various bits and pieces of the `panic!` macro and
+//! associated runtime pieces.
+//!
+//! Specifically, this module contains the implementation of:
+//!
+//! * Panic hooks
+//! * Executing a panic up to doing the actual implementation
+//! * Shims around "try"
+
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::panic::BacktraceStyle;
+use core::panic::{BoxMeUp, Location, PanicInfo};
+
+use crate::any::Any;
+use crate::fmt;
+use crate::intrinsics;
+use crate::mem::{self, ManuallyDrop};
+use crate::process;
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sys::stdio::panic_output;
+use crate::sys_common::backtrace;
+use crate::sys_common::rwlock::StaticRwLock;
+use crate::sys_common::thread_info;
+use crate::thread;
+
+#[cfg(not(test))]
+use crate::io::set_output_capture;
+// make sure to use the stderr output configured
+// by libtest in the real copy of std
+#[cfg(test)]
+use realstd::io::set_output_capture;
+
+// Binary interface to the panic runtime that the standard library depends on.
+//
+// The standard library is tagged with `#![needs_panic_runtime]` (introduced in
+// RFC 1513) to indicate that it requires some other crate tagged with
+// `#![panic_runtime]` to exist somewhere. Each panic runtime is intended to
+// implement these symbols (with the same signatures) so we can get matched up
+// to them.
+//
+// One day this may look a little less ad-hoc with the compiler helping out to
+// hook up these functions, but it is not this day!
+#[allow(improper_ctypes)]
+extern "C" {
+ fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any + Send + 'static);
+}
+
+#[allow(improper_ctypes)]
+extern "Rust" {
+ /// `payload` is passed through another layer of raw pointers as `&mut dyn Trait` is not
+ /// FFI-safe. `BoxMeUp` lazily performs allocation only when needed (this avoids allocations
+ /// when using the "abort" panic runtime).
+ fn __rust_start_panic(payload: *mut &mut dyn BoxMeUp) -> u32;
+}
+
+/// This function is called by the panic runtime if FFI code catches a Rust
+/// panic but doesn't rethrow it. We don't support this case since it messes
+/// with our panic count.
+#[cfg(not(test))]
+#[rustc_std_internal_symbol]
+extern "C" fn __rust_drop_panic() -> ! {
+ rtabort!("Rust panics must be rethrown");
+}
+
+/// This function is called by the panic runtime if it catches an exception
+/// object which does not correspond to a Rust panic.
+#[cfg(not(test))]
+#[rustc_std_internal_symbol]
+extern "C" fn __rust_foreign_exception() -> ! {
+ rtabort!("Rust cannot catch foreign exceptions");
+}
+
+#[derive(Copy, Clone)]
+enum Hook {
+ Default,
+ Custom(*mut (dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send)),
+}
+
+impl Hook {
+ fn custom(f: impl Fn(&PanicInfo<'_>) + 'static + Sync + Send) -> Self {
+ Self::Custom(Box::into_raw(Box::new(f)))
+ }
+}
+
+static HOOK_LOCK: StaticRwLock = StaticRwLock::new();
+static mut HOOK: Hook = Hook::Default;
+
+/// Registers a custom panic hook, replacing any that was previously registered.
+///
+/// The panic hook is invoked when a thread panics, but before the panic runtime
+/// is invoked. As such, the hook will run with both the aborting and unwinding
+/// runtimes. The default hook prints a message to standard error and generates
+/// a backtrace if requested, but this behavior can be customized with the
+/// `set_hook` and [`take_hook`] functions.
+///
+/// [`take_hook`]: ./fn.take_hook.html
+///
+/// The hook is provided with a `PanicInfo` struct which contains information
+/// about the origin of the panic, including the payload passed to `panic!` and
+/// the source code location from which the panic originated.
+///
+/// The panic hook is a global resource.
+///
+/// # Panics
+///
+/// Panics if called from a panicking thread.
+///
+/// # Examples
+///
+/// The following will print "Custom panic hook":
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|_| {
+/// println!("Custom panic hook");
+/// }));
+///
+/// panic!("Normal panic");
+/// ```
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) {
+ if thread::panicking() {
+ panic!("cannot modify the panic hook from a panicking thread");
+ }
+
+ // SAFETY:
+ //
+ // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
+ // - The argument of `Box::from_raw` is always a valid pointer that was created using
+ // `Box::into_raw`.
+ unsafe {
+ let guard = HOOK_LOCK.write();
+ let old_hook = HOOK;
+ HOOK = Hook::Custom(Box::into_raw(hook));
+ drop(guard);
+
+ if let Hook::Custom(ptr) = old_hook {
+ #[allow(unused_must_use)]
+ {
+ Box::from_raw(ptr);
+ }
+ }
+ }
+}
+
+/// Unregisters the current panic hook, returning it.
+///
+/// *See also the function [`set_hook`].*
+///
+/// [`set_hook`]: ./fn.set_hook.html
+///
+/// If no custom hook is registered, the default hook will be returned.
+///
+/// # Panics
+///
+/// Panics if called from a panicking thread.
+///
+/// # Examples
+///
+/// The following will print "Normal panic":
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|_| {
+/// println!("Custom panic hook");
+/// }));
+///
+/// let _ = panic::take_hook();
+///
+/// panic!("Normal panic");
+/// ```
+#[must_use]
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub fn take_hook() -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
+ if thread::panicking() {
+ panic!("cannot modify the panic hook from a panicking thread");
+ }
+
+ // SAFETY:
+ //
+ // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
+ // - The argument of `Box::from_raw` is always a valid pointer that was created using
+ // `Box::into_raw`.
+ unsafe {
+ let guard = HOOK_LOCK.write();
+ let hook = HOOK;
+ HOOK = Hook::Default;
+ drop(guard);
+
+ match hook {
+ Hook::Default => Box::new(default_hook),
+ Hook::Custom(ptr) => Box::from_raw(ptr),
+ }
+ }
+}
+
+/// Atomic combination of [`take_hook`] and [`set_hook`]. Use this to replace the panic hook with
+/// a new hook that does something and then executes the old hook.
+///
+/// [`take_hook`]: ./fn.take_hook.html
+/// [`set_hook`]: ./fn.set_hook.html
+///
+/// # Panics
+///
+/// Panics if called from a panicking thread.
+///
+/// # Examples
+///
+/// The following will print the custom message, and then the normal panic output.
+///
+/// ```should_panic
+/// #![feature(panic_update_hook)]
+/// use std::panic;
+///
+/// // Equivalent to
+/// // let prev = panic::take_hook();
+/// // panic::set_hook(move |info| {
+/// // println!("...");
+/// // prev(info);
+/// // });
+/// panic::update_hook(move |prev, info| {
+/// println!("Print custom message and execute panic handler as usual");
+/// prev(info);
+/// });
+///
+/// panic!("Custom and then normal");
+/// ```
+#[unstable(feature = "panic_update_hook", issue = "92649")]
+pub fn update_hook<F>(hook_fn: F)
+where
+ F: Fn(&(dyn Fn(&PanicInfo<'_>) + Send + Sync + 'static), &PanicInfo<'_>)
+ + Sync
+ + Send
+ + 'static,
+{
+ if thread::panicking() {
+ panic!("cannot modify the panic hook from a panicking thread");
+ }
+
+ // SAFETY:
+ //
+ // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
+ // - The argument of `Box::from_raw` is always a valid pointer that was created using
+ // `Box::into_raw`.
+ unsafe {
+ let guard = HOOK_LOCK.write();
+ let old_hook = HOOK;
+ HOOK = Hook::Default;
+
+ let prev = match old_hook {
+ Hook::Default => Box::new(default_hook),
+ Hook::Custom(ptr) => Box::from_raw(ptr),
+ };
+
+ HOOK = Hook::custom(move |info| hook_fn(&prev, info));
+ drop(guard);
+ }
+}
+
+fn default_hook(info: &PanicInfo<'_>) {
+ // If this is a double panic, make sure that we print a backtrace
+ // for this panic. Otherwise only print it if logging is enabled.
+ let backtrace = if panic_count::get_count() >= 2 {
+ BacktraceStyle::full()
+ } else {
+ crate::panic::get_backtrace_style()
+ };
+
+ // The current implementation always returns `Some`.
+ let location = info.location().unwrap();
+
+ let msg = match info.payload().downcast_ref::<&'static str>() {
+ Some(s) => *s,
+ None => match info.payload().downcast_ref::<String>() {
+ Some(s) => &s[..],
+ None => "Box<dyn Any>",
+ },
+ };
+ let thread = thread_info::current_thread();
+ let name = thread.as_ref().and_then(|t| t.name()).unwrap_or("<unnamed>");
+
+ let write = |err: &mut dyn crate::io::Write| {
+ let _ = writeln!(err, "thread '{name}' panicked at '{msg}', {location}");
+
+ static FIRST_PANIC: AtomicBool = AtomicBool::new(true);
+
+ match backtrace {
+ Some(BacktraceStyle::Short) => {
+ drop(backtrace::print(err, crate::backtrace_rs::PrintFmt::Short))
+ }
+ Some(BacktraceStyle::Full) => {
+ drop(backtrace::print(err, crate::backtrace_rs::PrintFmt::Full))
+ }
+ Some(BacktraceStyle::Off) => {
+ if FIRST_PANIC.swap(false, Ordering::SeqCst) {
+ let _ = writeln!(
+ err,
+ "note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace"
+ );
+ }
+ }
+ // If backtraces aren't supported, do nothing.
+ None => {}
+ }
+ };
+
+ if let Some(local) = set_output_capture(None) {
+ write(&mut *local.lock().unwrap_or_else(|e| e.into_inner()));
+ set_output_capture(Some(local));
+ } else if let Some(mut out) = panic_output() {
+ write(&mut out);
+ }
+}
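+
+// Illustrative only (not part of this patch): a user-installed hook can recover
+// the panic message with the same downcasts the default hook performs above,
+// e.g.
+//
+//     std::panic::set_hook(Box::new(|info| {
+//         let msg = if let Some(s) = info.payload().downcast_ref::<&'static str>() {
+//             *s
+//         } else if let Some(s) = info.payload().downcast_ref::<String>() {
+//             s.as_str()
+//         } else {
+//             "Box<dyn Any>"
+//         };
+//         eprintln!("panic payload: {msg}");
+//     }));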
+
+#[cfg(not(test))]
+#[doc(hidden)]
+#[unstable(feature = "update_panic_count", issue = "none")]
+pub mod panic_count {
+ use crate::cell::Cell;
+ use crate::sync::atomic::{AtomicUsize, Ordering};
+
+ pub const ALWAYS_ABORT_FLAG: usize = 1 << (usize::BITS - 1);
+
+ // Panic count for the current thread.
+ thread_local! { static LOCAL_PANIC_COUNT: Cell<usize> = const { Cell::new(0) } }
+
+ // Sum of panic counts from all threads. The purpose of this is to have
+ // a fast path in `count_is_zero` (which is used by `panicking`). In any particular
+ // thread, if that thread currently views `GLOBAL_PANIC_COUNT` as being zero,
+ // then `LOCAL_PANIC_COUNT` in that thread is zero. This invariant holds before
+ // and after increase and decrease, but not necessarily during their execution.
+ //
+ // Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG)
+ // records whether panic::always_abort() has been called. This can only be
+ // set, never cleared.
+ //
+ // This could be viewed as a struct containing a single bit and an n-1-bit
+ // value, but if we wrote it like that it would be more than a single word,
+ // and even a newtype around usize would be clumsy because we need atomics.
+ // But we use such a tuple for the return type of increase().
+ //
+ // Stealing a bit is fine because it just amounts to assuming that each
+ // panicking thread consumes at least 2 bytes of address space.
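+ //
+ // Illustrative only (not part of this patch): once `std::panic::always_abort()`
+ // (itself behind an unstable feature gate) has set the flag, `increase()`
+ // reports `(true, _)` for every later panic and `rust_panic_with_hook` aborts
+ // the process before running any hook:
+ //
+ //     std::panic::always_abort();
+ //     // from this point on, any panic in this process aborts immediately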
+ static GLOBAL_PANIC_COUNT: AtomicUsize = AtomicUsize::new(0);
+
+ pub fn increase() -> (bool, usize) {
+ (
+ GLOBAL_PANIC_COUNT.fetch_add(1, Ordering::Relaxed) & ALWAYS_ABORT_FLAG != 0,
+ LOCAL_PANIC_COUNT.with(|c| {
+ let next = c.get() + 1;
+ c.set(next);
+ next
+ }),
+ )
+ }
+
+ pub fn decrease() {
+ GLOBAL_PANIC_COUNT.fetch_sub(1, Ordering::Relaxed);
+ LOCAL_PANIC_COUNT.with(|c| {
+ let next = c.get() - 1;
+ c.set(next);
+ next
+ });
+ }
+
+ pub fn set_always_abort() {
+ GLOBAL_PANIC_COUNT.fetch_or(ALWAYS_ABORT_FLAG, Ordering::Relaxed);
+ }
+
+ // Disregards ALWAYS_ABORT_FLAG
+ #[must_use]
+ pub fn get_count() -> usize {
+ LOCAL_PANIC_COUNT.with(|c| c.get())
+ }
+
+ // Disregards ALWAYS_ABORT_FLAG
+ #[must_use]
+ #[inline]
+ pub fn count_is_zero() -> bool {
+ if GLOBAL_PANIC_COUNT.load(Ordering::Relaxed) & !ALWAYS_ABORT_FLAG == 0 {
+ // Fast path: if `GLOBAL_PANIC_COUNT` is zero, all threads
+ // (including the current one) will have `LOCAL_PANIC_COUNT`
+ // equal to zero, so TLS access can be avoided.
+ //
+ // In terms of performance, a relaxed atomic load is similar to a normal
+ // aligned memory read (e.g., a mov instruction in x86), but with some
+ // compiler optimization restrictions. On the other hand, a TLS access
+ // might require calling a non-inlinable function (such as `__tls_get_addr`
+ // when using the GD TLS model).
+ true
+ } else {
+ is_zero_slow_path()
+ }
+ }
+
+ // Slow path is in a separate function to reduce the amount of code
+ // inlined from `count_is_zero`.
+ #[inline(never)]
+ #[cold]
+ fn is_zero_slow_path() -> bool {
+ LOCAL_PANIC_COUNT.with(|c| c.get() == 0)
+ }
+}
+
+#[cfg(test)]
+pub use realstd::rt::panic_count;
+
+/// Invoke a closure, capturing the cause of an unwinding panic if one occurs.
+pub unsafe fn r#try<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>> {
+ union Data<F, R> {
+ f: ManuallyDrop<F>,
+ r: ManuallyDrop<R>,
+ p: ManuallyDrop<Box<dyn Any + Send>>,
+ }
+
+ // We do some sketchy operations with ownership here for the sake of
+ // performance. We can only pass pointers down to `do_call` (can't pass
+ // objects by value), so we do all the ownership tracking here manually
+ // using a union.
+ //
+ // We go through a transition where:
+ //
+ // * First, we set the data field `f` to be the argumentless closure that we're going to call.
+ // * When we make the function call, the `do_call` function below, we take
+ // ownership of the function pointer. At this point the `data` union is
+ // entirely uninitialized.
+ // * If the closure successfully returns, we write the return value into the
+ // data's return slot (field `r`).
+ // * If the closure panics (`do_catch` below), we write the panic payload into field `p`.
+ // * Finally, when we come back out of the `try` intrinsic we're
+ // in one of two states:
+ //
+ // 1. The closure didn't panic, in which case the return value was
+ // filled in. We move it out of `data.r` and return it.
+ // 2. The closure panicked, in which case the panic payload was
+ // filled in. We move it out of `data.p` and return it.
+ //
+ // Once we stack all that together we should have the "most efficient"
+ // method of calling a catch panic whilst juggling ownership.
+ let mut data = Data { f: ManuallyDrop::new(f) };
+
+ let data_ptr = &mut data as *mut _ as *mut u8;
+ // SAFETY:
+ //
+ // Access to the union's fields: this is `std` and we know that the `r#try`
+ // intrinsic fills in the `r` or `p` union field based on its return value.
+ //
+ // The call to `intrinsics::r#try` is made safe by:
+ // - `do_call`, the first argument, can be called with the initial `data_ptr`.
+ // - `do_catch`, the second argument, can be called with the `data_ptr` as well.
+ // See their safety preconditions for more information
+ unsafe {
+ return if intrinsics::r#try(do_call::<F, R>, data_ptr, do_catch::<F, R>) == 0 {
+ Ok(ManuallyDrop::into_inner(data.r))
+ } else {
+ Err(ManuallyDrop::into_inner(data.p))
+ };
+ }
+
+ // We consider unwinding to be rare, so mark this function as cold. However,
+ // do not mark it no-inline -- that decision is best to leave to the
+ // optimizer (in most cases this function is not inlined even as a normal,
+ // non-cold function, though, as of the writing of this comment).
+ #[cold]
+ unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send + 'static> {
+ // SAFETY: The whole unsafe block hinges on a correct implementation of
+ // the panic handler `__rust_panic_cleanup`. As such we can only
+ // assume it returns the correct thing for `Box::from_raw` to work
+ // without undefined behavior.
+ let obj = unsafe { Box::from_raw(__rust_panic_cleanup(payload)) };
+ panic_count::decrease();
+ obj
+ }
+
+ // SAFETY:
+ // data must be non-null, correctly aligned, and a pointer to a `Data<F, R>`.
+ // It must contain a valid `f` (of type `F`) value that can be used to fill
+ // `data.r`.
+ //
+ // This function cannot be marked as `unsafe` because `intrinsics::r#try`
+ // expects normal function pointers.
+ #[inline]
+ fn do_call<F: FnOnce() -> R, R>(data: *mut u8) {
+ // SAFETY: this is the responsibility of the caller, see above.
+ unsafe {
+ let data = data as *mut Data<F, R>;
+ let data = &mut (*data);
+ let f = ManuallyDrop::take(&mut data.f);
+ data.r = ManuallyDrop::new(f());
+ }
+ }
+
+ // We *do* want this part of the catch to be inlined: this allows the
+ // compiler to properly track accesses to the Data union and optimize it
+ // away most of the time.
+ //
+ // SAFETY:
+ // data must be non-null, correctly aligned, and a pointer to a `Data<F, R>`.
+ // Since this uses `cleanup` it also hinges on a correct implementation of
+ // `__rust_panic_cleanup`.
+ //
+ // This function cannot be marked as `unsafe` because `intrinsics::r#try`
+ // expects normal function pointers.
+ #[inline]
+ fn do_catch<F: FnOnce() -> R, R>(data: *mut u8, payload: *mut u8) {
+ // SAFETY: this is the responsibility of the caller, see above.
+ //
+ // When `__rust_panic_cleanup` is correctly implemented we can rely
+ // on `obj` being the correct thing to pass to `data.p` (after wrapping
+ // in `ManuallyDrop`).
+ unsafe {
+ let data = data as *mut Data<F, R>;
+ let data = &mut (*data);
+ let obj = cleanup(payload);
+ data.p = ManuallyDrop::new(obj);
+ }
+ }
+}
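+
+// Illustrative only (not part of this patch): `r#try` is the machinery
+// underneath the public `std::panic::catch_unwind`, e.g.
+//
+//     let result = std::panic::catch_unwind(|| {
+//         panic!("boom");
+//     });
+//     assert!(result.is_err());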
+
+/// Determines whether the current thread is unwinding because of a panic.
+#[inline]
+pub fn panicking() -> bool {
+ !panic_count::count_is_zero()
+}
+
+/// Entry point of panics from the libcore crate (`panic_impl` lang item).
+#[cfg(not(test))]
+#[panic_handler]
+pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
+ struct PanicPayload<'a> {
+ inner: &'a fmt::Arguments<'a>,
+ string: Option<String>,
+ }
+
+ impl<'a> PanicPayload<'a> {
+ fn new(inner: &'a fmt::Arguments<'a>) -> PanicPayload<'a> {
+ PanicPayload { inner, string: None }
+ }
+
+ fn fill(&mut self) -> &mut String {
+ use crate::fmt::Write;
+
+ let inner = self.inner;
+ // Lazily, the first time this gets called, run the actual string formatting.
+ self.string.get_or_insert_with(|| {
+ let mut s = String::new();
+ drop(s.write_fmt(*inner));
+ s
+ })
+ }
+ }
+
+ unsafe impl<'a> BoxMeUp for PanicPayload<'a> {
+ fn take_box(&mut self) -> *mut (dyn Any + Send) {
+ // We do two allocations here, unfortunately. But (a) they're required with the current
+ // scheme, and (b) we don't handle panic + OOM properly anyway (see comment in
+ // begin_panic below).
+ let contents = mem::take(self.fill());
+ Box::into_raw(Box::new(contents))
+ }
+
+ fn get(&mut self) -> &(dyn Any + Send) {
+ self.fill()
+ }
+ }
+
+ struct StrPanicPayload(&'static str);
+
+ unsafe impl BoxMeUp for StrPanicPayload {
+ fn take_box(&mut self) -> *mut (dyn Any + Send) {
+ Box::into_raw(Box::new(self.0))
+ }
+
+ fn get(&mut self) -> &(dyn Any + Send) {
+ &self.0
+ }
+ }
+
+ let loc = info.location().unwrap(); // The current implementation always returns Some
+ let msg = info.message().unwrap(); // The current implementation always returns Some
+ crate::sys_common::backtrace::__rust_end_short_backtrace(move || {
+ if let Some(msg) = msg.as_str() {
+ rust_panic_with_hook(&mut StrPanicPayload(msg), info.message(), loc, info.can_unwind());
+ } else {
+ rust_panic_with_hook(
+ &mut PanicPayload::new(msg),
+ info.message(),
+ loc,
+ info.can_unwind(),
+ );
+ }
+ })
+}
+
+/// This is the entry point of panicking for the non-format-string variants of
+/// panic!() and assert!(). In particular, this is the only entry point that supports
+/// arbitrary payloads, not just format strings.
+#[unstable(feature = "libstd_sys_internals", reason = "used by the panic! macro", issue = "none")]
+#[cfg_attr(not(test), lang = "begin_panic")]
+// lang item for CTFE panic support
+// never inline unless panic_immediate_abort is enabled, to avoid code
+// bloat at the call sites as much as possible
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cold]
+#[track_caller]
+#[rustc_do_not_const_check] // hooked by const-eval
+pub const fn begin_panic<M: Any + Send>(msg: M) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ intrinsics::abort()
+ }
+
+ let loc = Location::caller();
+ return crate::sys_common::backtrace::__rust_end_short_backtrace(move || {
+ rust_panic_with_hook(&mut PanicPayload::new(msg), None, loc, true)
+ });
+
+ struct PanicPayload<A> {
+ inner: Option<A>,
+ }
+
+ impl<A: Send + 'static> PanicPayload<A> {
+ fn new(inner: A) -> PanicPayload<A> {
+ PanicPayload { inner: Some(inner) }
+ }
+ }
+
+ unsafe impl<A: Send + 'static> BoxMeUp for PanicPayload<A> {
+ fn take_box(&mut self) -> *mut (dyn Any + Send) {
+ // Note that this should be the only allocation performed in this code path. Currently
+ // this means that panic!() on OOM will invoke this code path, but then again we're not
+ // really ready for panic on OOM anyway. If we do start doing this, then we should
+ // propagate this allocation to be performed in the parent of this thread instead of the
+ // thread that's panicking.
+ let data = match self.inner.take() {
+ Some(a) => Box::new(a) as Box<dyn Any + Send>,
+ None => process::abort(),
+ };
+ Box::into_raw(data)
+ }
+
+ fn get(&mut self) -> &(dyn Any + Send) {
+ match self.inner {
+ Some(ref a) => a,
+ None => process::abort(),
+ }
+ }
+ }
+}
+
+/// Central point for dispatching panics.
+///
+/// Executes the primary logic for a panic, including checking for recursive
+/// panics, panic hooks, and finally dispatching to the panic runtime to either
+/// abort or unwind.
+fn rust_panic_with_hook(
+ payload: &mut dyn BoxMeUp,
+ message: Option<&fmt::Arguments<'_>>,
+ location: &Location<'_>,
+ can_unwind: bool,
+) -> ! {
+ let (must_abort, panics) = panic_count::increase();
+
+ // If this is the third nested call (i.e., `panics >= 3`, since `increase`
+ // returns the count including the current panic), the panic hook probably
+ // triggered the last panic, otherwise the double-panic check would have
+ // aborted the process. In this case abort the process as quickly as possible:
+ // we don't want to try calling the hook again, as it'll probably just panic
+ // again.
+ if must_abort || panics > 2 {
+ if panics > 2 {
+ // Don't try to print the message in this case
+ // - perhaps that is causing the recursive panics.
+ rtprintpanic!("thread panicked while processing panic. aborting.\n");
+ } else {
+ // Unfortunately, this does not print a backtrace, because creating
+ // a `Backtrace` will allocate, which we must avoid here.
+ let panicinfo = PanicInfo::internal_constructor(message, location, can_unwind);
+ rtprintpanic!("{panicinfo}\npanicked after panic::always_abort(), aborting.\n");
+ }
+ crate::sys::abort_internal();
+ }
+
+ unsafe {
+ let mut info = PanicInfo::internal_constructor(message, location, can_unwind);
+ let _guard = HOOK_LOCK.read();
+ match HOOK {
+ // Some platforms (like wasm) know that printing to stderr won't ever actually
+ // print anything, and if that's the case we can skip the default
+ // hook. Since string formatting happens lazily when calling `payload`
+ // methods, this means we avoid formatting the string at all!
+ // (The panic runtime might still call `payload.take_box()` though and trigger
+ // formatting.)
+ Hook::Default if panic_output().is_none() => {}
+ Hook::Default => {
+ info.set_payload(payload.get());
+ default_hook(&info);
+ }
+ Hook::Custom(ptr) => {
+ info.set_payload(payload.get());
+ (*ptr)(&info);
+ }
+ };
+ }
+
+ if panics > 1 || !can_unwind {
+ // If a thread panics while it's already unwinding then we
+ // have limited options. Currently our preference is to
+ // just abort. In the future we may consider resuming
+ // unwinding or otherwise exiting the thread cleanly.
+ rtprintpanic!("thread panicked while panicking. aborting.\n");
+ crate::sys::abort_internal();
+ }
+
+ rust_panic(payload)
+}
+
+/// This is the entry point for `resume_unwind`.
+/// It just forwards the payload to the panic runtime.
+pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! {
+ panic_count::increase();
+
+ struct RewrapBox(Box<dyn Any + Send>);
+
+ unsafe impl BoxMeUp for RewrapBox {
+ fn take_box(&mut self) -> *mut (dyn Any + Send) {
+ Box::into_raw(mem::replace(&mut self.0, Box::new(())))
+ }
+
+ fn get(&mut self) -> &(dyn Any + Send) {
+ &*self.0
+ }
+ }
+
+ rust_panic(&mut RewrapBox(payload))
+}
+
+/// An unmangled function (through `rustc_std_internal_symbol`) on which to slap
+/// yer breakpoints.
+#[inline(never)]
+#[cfg_attr(not(test), rustc_std_internal_symbol)]
+fn rust_panic(mut msg: &mut dyn BoxMeUp) -> ! {
+ let code = unsafe {
+ let obj = &mut msg as *mut &mut dyn BoxMeUp;
+ __rust_start_panic(obj)
+ };
+ rtabort!("failed to initiate panic, error {code}")
+}
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
new file mode 100644
index 000000000..5dfeb517a
--- /dev/null
+++ b/library/std/src/path.rs
@@ -0,0 +1,3259 @@
+//! Cross-platform path manipulation.
+//!
+//! This module provides two types, [`PathBuf`] and [`Path`] (akin to [`String`]
+//! and [`str`]), for working with paths abstractly. These types are thin wrappers
+//! around [`OsString`] and [`OsStr`] respectively, meaning that they work directly
+//! on strings according to the local platform's path syntax.
+//!
+//! Paths can be parsed into [`Component`]s by iterating over the structure
+//! returned by the [`components`] method on [`Path`]. [`Component`]s roughly
+//! correspond to the substrings between path separators (`/` or `\`). You can
+//! reconstruct an equivalent path from components with the [`push`] method on
+//! [`PathBuf`]; note that the paths may differ syntactically by the
+//! normalization described in the documentation for the [`components`] method.
+//!
+//! ## Case sensitivity
+//!
+//! Unless otherwise indicated, path methods that do not access the filesystem,
+//! such as [`Path::starts_with`] and [`Path::ends_with`], are case sensitive no
+//! matter the platform or filesystem. An exception to this is made for Windows
+//! drive letters.
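+//!
+//! For example (an illustrative sketch, not part of the original module docs):
+//!
+//! ```
+//! use std::path::Path;
+//!
+//! // Comparison is case sensitive even on case-insensitive filesystems.
+//! assert!(Path::new("foo/Bar.txt").ends_with("Bar.txt"));
+//! assert!(!Path::new("foo/Bar.txt").ends_with("bar.txt"));
+//! ```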
+//!
+//! ## Simple usage
+//!
+//! Path manipulation includes both parsing components from slices and building
+//! new owned paths.
+//!
+//! To parse a path, you can create a [`Path`] slice from a [`str`]
+//! slice and start asking questions:
+//!
+//! ```
+//! use std::path::Path;
+//! use std::ffi::OsStr;
+//!
+//! let path = Path::new("/tmp/foo/bar.txt");
+//!
+//! let parent = path.parent();
+//! assert_eq!(parent, Some(Path::new("/tmp/foo")));
+//!
+//! let file_stem = path.file_stem();
+//! assert_eq!(file_stem, Some(OsStr::new("bar")));
+//!
+//! let extension = path.extension();
+//! assert_eq!(extension, Some(OsStr::new("txt")));
+//! ```
+//!
+//! To build or modify paths, use [`PathBuf`]:
+//!
+//! ```
+//! use std::path::PathBuf;
+//!
+//! // This way works...
+//! let mut path = PathBuf::from("c:\\");
+//!
+//! path.push("windows");
+//! path.push("system32");
+//!
+//! path.set_extension("dll");
+//!
+//! // ... but push is best used if you don't know everything up
+//! // front. If you do, this way is better:
+//! let path: PathBuf = ["c:\\", "windows", "system32.dll"].iter().collect();
+//! ```
+//!
+//! [`components`]: Path::components
+//! [`push`]: PathBuf::push
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+#[cfg(test)]
+mod tests;
+
+use crate::borrow::{Borrow, Cow};
+use crate::cmp;
+use crate::collections::TryReserveError;
+use crate::error::Error;
+use crate::fmt;
+use crate::fs;
+use crate::hash::{Hash, Hasher};
+use crate::io;
+use crate::iter::{self, FusedIterator};
+use crate::ops::{self, Deref};
+use crate::rc::Rc;
+use crate::str::FromStr;
+use crate::sync::Arc;
+
+use crate::ffi::{OsStr, OsString};
+use crate::sys;
+use crate::sys::path::{is_sep_byte, is_verbatim_sep, parse_prefix, MAIN_SEP_STR};
+
+////////////////////////////////////////////////////////////////////////////////
+// GENERAL NOTES
+////////////////////////////////////////////////////////////////////////////////
+//
+// Parsing in this module is done by directly transmuting OsStr to [u8] slices,
+// taking advantage of the fact that OsStr always encodes ASCII characters
+// as-is. Eventually, this transmutation should be replaced by direct uses of
+// OsStr APIs for parsing, but it will take a while for those to become
+// available.
+
+////////////////////////////////////////////////////////////////////////////////
+// Windows Prefixes
+////////////////////////////////////////////////////////////////////////////////
+
+/// Windows path prefixes, e.g., `C:` or `\\server\share`.
+///
+/// Windows uses a variety of path prefix styles, including references to drive
+/// volumes (like `C:`), network shared folders (like `\\server\share`), and
+/// others. In addition, some path prefixes are "verbatim" (i.e., prefixed with
+/// `\\?\`), in which case `/` is *not* treated as a separator and essentially
+/// no normalization is performed.
+///
+/// # Examples
+///
+/// ```
+/// use std::path::{Component, Path, Prefix};
+/// use std::path::Prefix::*;
+/// use std::ffi::OsStr;
+///
+/// fn get_path_prefix(s: &str) -> Prefix {
+/// let path = Path::new(s);
+/// match path.components().next().unwrap() {
+/// Component::Prefix(prefix_component) => prefix_component.kind(),
+/// _ => panic!(),
+/// }
+/// }
+///
+/// # if cfg!(windows) {
+/// assert_eq!(Verbatim(OsStr::new("pictures")),
+/// get_path_prefix(r"\\?\pictures\kittens"));
+/// assert_eq!(VerbatimUNC(OsStr::new("server"), OsStr::new("share")),
+/// get_path_prefix(r"\\?\UNC\server\share"));
+/// assert_eq!(VerbatimDisk(b'C'), get_path_prefix(r"\\?\c:\"));
+/// assert_eq!(DeviceNS(OsStr::new("BrainInterface")),
+/// get_path_prefix(r"\\.\BrainInterface"));
+/// assert_eq!(UNC(OsStr::new("server"), OsStr::new("share")),
+/// get_path_prefix(r"\\server\share"));
+/// assert_eq!(Disk(b'C'), get_path_prefix(r"C:\Users\Rust\Pictures\Ferris"));
+/// # }
+/// ```
+#[derive(Copy, Clone, Debug, Hash, PartialOrd, Ord, PartialEq, Eq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum Prefix<'a> {
+ /// Verbatim prefix, e.g., `\\?\cat_pics`.
+ ///
+ /// Verbatim prefixes consist of `\\?\` immediately followed by the given
+ /// component.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Verbatim(#[stable(feature = "rust1", since = "1.0.0")] &'a OsStr),
+
+ /// Verbatim prefix using Windows' _**U**niform **N**aming **C**onvention_,
+ /// e.g., `\\?\UNC\server\share`.
+ ///
+ /// Verbatim UNC prefixes consist of `\\?\UNC\` immediately followed by the
+ /// server's hostname and a share name.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ VerbatimUNC(
+ #[stable(feature = "rust1", since = "1.0.0")] &'a OsStr,
+ #[stable(feature = "rust1", since = "1.0.0")] &'a OsStr,
+ ),
+
+ /// Verbatim disk prefix, e.g., `\\?\C:`.
+ ///
+ /// Verbatim disk prefixes consist of `\\?\` immediately followed by the
+ /// drive letter and `:`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ VerbatimDisk(#[stable(feature = "rust1", since = "1.0.0")] u8),
+
+ /// Device namespace prefix, e.g., `\\.\COM42`.
+ ///
+ /// Device namespace prefixes consist of `\\.\` (possibly using `/`
+ /// instead of `\`), immediately followed by the device name.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ DeviceNS(#[stable(feature = "rust1", since = "1.0.0")] &'a OsStr),
+
+ /// Prefix using Windows' _**U**niform **N**aming **C**onvention_, e.g.
+ /// `\\server\share`.
+ ///
+ /// UNC prefixes consist of the server's hostname and a share name.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ UNC(
+ #[stable(feature = "rust1", since = "1.0.0")] &'a OsStr,
+ #[stable(feature = "rust1", since = "1.0.0")] &'a OsStr,
+ ),
+
+ /// Prefix `C:` for the given disk drive.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Disk(#[stable(feature = "rust1", since = "1.0.0")] u8),
+}
+
+impl<'a> Prefix<'a> {
+ #[inline]
+ fn len(&self) -> usize {
+ use self::Prefix::*;
+ fn os_str_len(s: &OsStr) -> usize {
+ s.bytes().len()
+ }
+ match *self {
+ Verbatim(x) => 4 + os_str_len(x),
+ VerbatimUNC(x, y) => {
+ 8 + os_str_len(x) + if os_str_len(y) > 0 { 1 + os_str_len(y) } else { 0 }
+ }
+ VerbatimDisk(_) => 6,
+ UNC(x, y) => 2 + os_str_len(x) + if os_str_len(y) > 0 { 1 + os_str_len(y) } else { 0 },
+ DeviceNS(x) => 4 + os_str_len(x),
+ Disk(_) => 2,
+ }
+ }
+
+ /// Determines if the prefix is verbatim, i.e., begins with `\\?\`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Prefix::*;
+ /// use std::ffi::OsStr;
+ ///
+ /// assert!(Verbatim(OsStr::new("pictures")).is_verbatim());
+ /// assert!(VerbatimUNC(OsStr::new("server"), OsStr::new("share")).is_verbatim());
+ /// assert!(VerbatimDisk(b'C').is_verbatim());
+ /// assert!(!DeviceNS(OsStr::new("BrainInterface")).is_verbatim());
+ /// assert!(!UNC(OsStr::new("server"), OsStr::new("share")).is_verbatim());
+ /// assert!(!Disk(b'C').is_verbatim());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_verbatim(&self) -> bool {
+ use self::Prefix::*;
+ matches!(*self, Verbatim(_) | VerbatimDisk(_) | VerbatimUNC(..))
+ }
+
+ #[inline]
+ fn is_drive(&self) -> bool {
+ matches!(*self, Prefix::Disk(_))
+ }
+
+ #[inline]
+ fn has_implicit_root(&self) -> bool {
+ !self.is_drive()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Exposed parsing helpers
+////////////////////////////////////////////////////////////////////////////////
+
+/// Determines whether the character is one of the permitted path
+/// separators for the current platform.
+///
+/// # Examples
+///
+/// ```
+/// use std::path;
+///
+/// assert!(path::is_separator('/')); // '/' works for both Unix and Windows
+/// assert!(!path::is_separator('❤'));
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn is_separator(c: char) -> bool {
+ c.is_ascii() && is_sep_byte(c as u8)
+}
+
+/// The primary separator of path components for the current platform.
+///
+/// For example, `/` on Unix and `\` on Windows.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MAIN_SEPARATOR: char = crate::sys::path::MAIN_SEP;
+
+/// The primary separator of path components for the current platform.
+///
+/// For example, `/` on Unix and `\` on Windows.
+#[unstable(feature = "main_separator_str", issue = "94071")]
+pub const MAIN_SEPARATOR_STR: &str = crate::sys::path::MAIN_SEP_STR;
+
+////////////////////////////////////////////////////////////////////////////////
+// Misc helpers
+////////////////////////////////////////////////////////////////////////////////
+
+// Iterate through `iter` while it matches `prefix`; return `None` if `prefix`
+// is not a prefix of `iter`, otherwise return `Some(iter_after_prefix)` giving
+// `iter` after having exhausted `prefix`.
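+//
+// Illustrative only (not part of this patch): with the components of "a/b/c"
+// as `iter` and the components of "a/b" as `prefix`, the result is `Some` of
+// an iterator positioned at "c"; with the components of "a/x" as `prefix`, the
+// result is `None`. `Path::starts_with` is built on this behavior.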
+fn iter_after<'a, 'b, I, J>(mut iter: I, mut prefix: J) -> Option<I>
+where
+ I: Iterator<Item = Component<'a>> + Clone,
+ J: Iterator<Item = Component<'b>>,
+{
+ loop {
+ let mut iter_next = iter.clone();
+ match (iter_next.next(), prefix.next()) {
+ (Some(ref x), Some(ref y)) if x == y => (),
+ (Some(_), Some(_)) => return None,
+ (Some(_), None) => return Some(iter),
+ (None, None) => return Some(iter),
+ (None, Some(_)) => return None,
+ }
+ iter = iter_next;
+ }
+}
+
+unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
+ // SAFETY: See note at the top of this module to understand why this and
+ // `OsStr::bytes` are used:
+ //
+ // This cast is safe because OsStr is internally a wrapper around [u8] on all
+ // platforms.
+ //
+ // Note that currently this relies on the special knowledge that libstd has;
+ // these types are single-element structs but are not marked
+ // repr(transparent) or repr(C) which would make these casts not allowable
+ // outside std.
+ unsafe { &*(s as *const [u8] as *const OsStr) }
+}
+
+// Detect scheme on Redox
+fn has_redox_scheme(s: &[u8]) -> bool {
+ cfg!(target_os = "redox") && s.contains(&b':')
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Cross-platform, iterator-independent parsing
+////////////////////////////////////////////////////////////////////////////////
+
+/// Says whether the first byte after the prefix is a separator.
+fn has_physical_root(s: &[u8], prefix: Option<Prefix<'_>>) -> bool {
+ let path = if let Some(p) = prefix { &s[p.len()..] } else { s };
+ !path.is_empty() && is_sep_byte(path[0])
+}
+
+// basic workhorse for splitting stem and extension
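+//
+// Illustrative only (not part of this patch), in terms of the underlying bytes:
+//
+//     "foo.tar.gz" -> (Some("foo.tar"), Some("gz"))
+//     ".gitignore" -> (Some(".gitignore"), None)   // a leading dot does not start an extension
+//     ".."         -> (Some(".."), None)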
+fn rsplit_file_at_dot(file: &OsStr) -> (Option<&OsStr>, Option<&OsStr>) {
+ if file.bytes() == b".." {
+ return (Some(file), None);
+ }
+
+ // The unsafety here stems from converting between &OsStr and &[u8]
+ // and back. This is safe to do because (1) we only look at ASCII
+ // contents of the encoding and (2) new &OsStr values are produced
+ // only from ASCII-bounded slices of existing &OsStr values.
+ let mut iter = file.bytes().rsplitn(2, |b| *b == b'.');
+ let after = iter.next();
+ let before = iter.next();
+ if before == Some(b"") {
+ (Some(file), None)
+ } else {
+ unsafe { (before.map(|s| u8_slice_as_os_str(s)), after.map(|s| u8_slice_as_os_str(s))) }
+ }
+}
+
+fn split_file_at_dot(file: &OsStr) -> (&OsStr, Option<&OsStr>) {
+ let slice = file.bytes();
+ if slice == b".." {
+ return (file, None);
+ }
+
+ // The unsafety here stems from converting between &OsStr and &[u8]
+ // and back. This is safe to do because (1) we only look at ASCII
+ // contents of the encoding and (2) new &OsStr values are produced
+ // only from ASCII-bounded slices of existing &OsStr values.
+ let i = match slice[1..].iter().position(|b| *b == b'.') {
+ Some(i) => i + 1,
+ None => return (file, None),
+ };
+ let before = &slice[..i];
+ let after = &slice[i + 1..];
+ unsafe { (u8_slice_as_os_str(before), Some(u8_slice_as_os_str(after))) }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// The core iterators
+////////////////////////////////////////////////////////////////////////////////
+
+/// Component parsing works by a double-ended state machine; the cursors at the
+/// front and back of the path each keep track of what parts of the path have
+/// been consumed so far.
+///
+/// Going front to back, a path is made up of a prefix, a starting
+/// directory component, and a body (of normal components).
+#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)]
+enum State {
+ Prefix = 0, // c:
+ StartDir = 1, // / or . or nothing
+ Body = 2, // foo/bar/baz
+ Done = 3,
+}
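+
+// Illustrative only (not part of this patch): the normalization this state
+// machine performs is visible through the public `components()` iterator, e.g.
+//
+//     use std::path::{Component, Path};
+//
+//     let comps: Vec<_> = Path::new("/tmp//foo/./bar.txt").components().collect();
+//     assert_eq!(&comps, &[
+//         Component::RootDir,
+//         Component::Normal("tmp".as_ref()),
+//         Component::Normal("foo".as_ref()),
+//         Component::Normal("bar.txt".as_ref()),
+//     ]);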
+
+/// A structure wrapping a Windows path prefix as well as its unparsed string
+/// representation.
+///
+/// In addition to the parsed [`Prefix`] information returned by [`kind`],
+/// `PrefixComponent` also holds the raw and unparsed [`OsStr`] slice,
+/// returned by [`as_os_str`].
+///
+/// Instances of this `struct` can be obtained by matching against the
+/// [`Prefix` variant] on [`Component`].
+///
+/// Does not occur on Unix.
+///
+/// # Examples
+///
+/// ```
+/// # if cfg!(windows) {
+/// use std::path::{Component, Path, Prefix};
+/// use std::ffi::OsStr;
+///
+/// let path = Path::new(r"c:\you\later\");
+/// match path.components().next().unwrap() {
+/// Component::Prefix(prefix_component) => {
+/// assert_eq!(Prefix::Disk(b'C'), prefix_component.kind());
+/// assert_eq!(OsStr::new("c:"), prefix_component.as_os_str());
+/// }
+/// _ => unreachable!(),
+/// }
+/// # }
+/// ```
+///
+/// [`as_os_str`]: PrefixComponent::as_os_str
+/// [`kind`]: PrefixComponent::kind
+/// [`Prefix` variant]: Component::Prefix
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Copy, Clone, Eq, Debug)]
+pub struct PrefixComponent<'a> {
+ /// The prefix as an unparsed `OsStr` slice.
+ raw: &'a OsStr,
+
+ /// The parsed prefix data.
+ parsed: Prefix<'a>,
+}
+
+impl<'a> PrefixComponent<'a> {
+ /// Returns the parsed prefix data.
+ ///
+ /// See [`Prefix`]'s documentation for more information on the different
+ /// kinds of prefixes.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn kind(&self) -> Prefix<'a> {
+ self.parsed
+ }
+
+ /// Returns the raw [`OsStr`] slice for this prefix.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_os_str(&self) -> &'a OsStr {
+ self.raw
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> cmp::PartialEq for PrefixComponent<'a> {
+ #[inline]
+ fn eq(&self, other: &PrefixComponent<'a>) -> bool {
+ cmp::PartialEq::eq(&self.parsed, &other.parsed)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> cmp::PartialOrd for PrefixComponent<'a> {
+ #[inline]
+ fn partial_cmp(&self, other: &PrefixComponent<'a>) -> Option<cmp::Ordering> {
+ cmp::PartialOrd::partial_cmp(&self.parsed, &other.parsed)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::Ord for PrefixComponent<'_> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ cmp::Ord::cmp(&self.parsed, &other.parsed)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Hash for PrefixComponent<'_> {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ self.parsed.hash(h);
+ }
+}
+
+/// A single component of a path.
+///
+/// A `Component` roughly corresponds to a substring between path separators
+/// (`/` or `\`).
+///
+/// This `enum` is created by iterating over [`Components`], which in turn is
+/// created by the [`components`](Path::components) method on [`Path`].
+///
+/// # Examples
+///
+/// ```rust
+/// use std::path::{Component, Path};
+///
+/// let path = Path::new("/tmp/foo/bar.txt");
+/// let components = path.components().collect::<Vec<_>>();
+/// assert_eq!(&components, &[
+/// Component::RootDir,
+/// Component::Normal("tmp".as_ref()),
+/// Component::Normal("foo".as_ref()),
+/// Component::Normal("bar.txt".as_ref()),
+/// ]);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum Component<'a> {
+ /// A Windows path prefix, e.g., `C:` or `\\server\share`.
+ ///
+ /// There is a large variety of prefix types, see [`Prefix`]'s documentation
+ /// for more.
+ ///
+ /// Does not occur on Unix.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Prefix(#[stable(feature = "rust1", since = "1.0.0")] PrefixComponent<'a>),
+
+ /// The root directory component, appears after any prefix and before anything else.
+ ///
+ /// It represents a separator that designates that a path starts from root.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ RootDir,
+
+ /// A reference to the current directory, i.e., `.`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ CurDir,
+
+ /// A reference to the parent directory, i.e., `..`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ ParentDir,
+
+ /// A normal component, e.g., `a` and `b` in `a/b`.
+ ///
+ /// This variant is the most common one, it represents references to files
+ /// or directories.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Normal(#[stable(feature = "rust1", since = "1.0.0")] &'a OsStr),
+}
+
+impl<'a> Component<'a> {
+ /// Extracts the underlying [`OsStr`] slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("./tmp/foo/bar.txt");
+ /// let components: Vec<_> = path.components().map(|comp| comp.as_os_str()).collect();
+ /// assert_eq!(&components, &[".", "tmp", "foo", "bar.txt"]);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_os_str(self) -> &'a OsStr {
+ match self {
+ Component::Prefix(p) => p.as_os_str(),
+ Component::RootDir => OsStr::new(MAIN_SEP_STR),
+ Component::CurDir => OsStr::new("."),
+ Component::ParentDir => OsStr::new(".."),
+ Component::Normal(path) => path,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for Component<'_> {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ self.as_os_str()
+ }
+}
+
+#[stable(feature = "path_component_asref", since = "1.25.0")]
+impl AsRef<Path> for Component<'_> {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ self.as_os_str().as_ref()
+ }
+}
+
+/// An iterator over the [`Component`]s of a [`Path`].
+///
+/// This `struct` is created by the [`components`] method on [`Path`].
+/// See its documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::path::Path;
+///
+/// let path = Path::new("/tmp/foo/bar.txt");
+///
+/// for component in path.components() {
+/// println!("{component:?}");
+/// }
+/// ```
+///
+/// [`components`]: Path::components
+#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Components<'a> {
+ // The path left to parse components from
+ path: &'a [u8],
+
+ // The prefix as it was originally parsed, if any
+ prefix: Option<Prefix<'a>>,
+
+ // true if path *physically* has a root separator; for most Windows
+ // prefixes, it may have a "logical" root separator for the purposes of
+ // normalization, e.g., \\server\share == \\server\share\.
+ has_physical_root: bool,
+
+ // The iterator is double-ended, and these two states keep track of what has
+ // been produced from either end
+ front: State,
+ back: State,
+}
+
+/// An iterator over the [`Component`]s of a [`Path`], as [`OsStr`] slices.
+///
+/// This `struct` is created by the [`iter`] method on [`Path`].
+/// See its documentation for more.
+///
+/// [`iter`]: Path::iter
+#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a> {
+ inner: Components<'a>,
+}
+
+#[stable(feature = "path_components_debug", since = "1.13.0")]
+impl fmt::Debug for Components<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ struct DebugHelper<'a>(&'a Path);
+
+ impl fmt::Debug for DebugHelper<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.0.components()).finish()
+ }
+ }
+
+ f.debug_tuple("Components").field(&DebugHelper(self.as_path())).finish()
+ }
+}
+
+impl<'a> Components<'a> {
+ // how long is the prefix, if any?
+ #[inline]
+ fn prefix_len(&self) -> usize {
+ self.prefix.as_ref().map(Prefix::len).unwrap_or(0)
+ }
+
+ #[inline]
+ fn prefix_verbatim(&self) -> bool {
+ self.prefix.as_ref().map(Prefix::is_verbatim).unwrap_or(false)
+ }
+
+ /// how much of the prefix is left from the point of view of iteration?
+ #[inline]
+ fn prefix_remaining(&self) -> usize {
+ if self.front == State::Prefix { self.prefix_len() } else { 0 }
+ }
+
+ // Given the iteration so far, how much of the pre-State::Body path is left?
+ #[inline]
+ fn len_before_body(&self) -> usize {
+ let root = if self.front <= State::StartDir && self.has_physical_root { 1 } else { 0 };
+ let cur_dir = if self.front <= State::StartDir && self.include_cur_dir() { 1 } else { 0 };
+ self.prefix_remaining() + root + cur_dir
+ }
+
+ // is the iteration complete?
+ #[inline]
+ fn finished(&self) -> bool {
+ self.front == State::Done || self.back == State::Done || self.front > self.back
+ }
+
+ #[inline]
+ fn is_sep_byte(&self, b: u8) -> bool {
+ if self.prefix_verbatim() { is_verbatim_sep(b) } else { is_sep_byte(b) }
+ }
+
+ /// Extracts a slice corresponding to the portion of the path remaining for iteration.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let mut components = Path::new("/tmp/foo/bar.txt").components();
+ /// components.next();
+ /// components.next();
+ ///
+ /// assert_eq!(Path::new("foo/bar.txt"), components.as_path());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_path(&self) -> &'a Path {
+ let mut comps = self.clone();
+ if comps.front == State::Body {
+ comps.trim_left();
+ }
+ if comps.back == State::Body {
+ comps.trim_right();
+ }
+ unsafe { Path::from_u8_slice(comps.path) }
+ }
+
+ /// Is the *original* path rooted?
+ fn has_root(&self) -> bool {
+ if self.has_physical_root {
+ return true;
+ }
+ if let Some(p) = self.prefix {
+ if p.has_implicit_root() {
+ return true;
+ }
+ }
+ false
+ }
+
+ /// Should the normalized path include a leading . ?
+ fn include_cur_dir(&self) -> bool {
+ if self.has_root() {
+ return false;
+ }
+ let mut iter = self.path[self.prefix_remaining()..].iter();
+ match (iter.next(), iter.next()) {
+ (Some(&b'.'), None) => true,
+ (Some(&b'.'), Some(&b)) => self.is_sep_byte(b),
+ _ => false,
+ }
+ }
+
+ // parse a given byte sequence into the corresponding path component
+ fn parse_single_component<'b>(&self, comp: &'b [u8]) -> Option<Component<'b>> {
+ match comp {
+ b"." if self.prefix_verbatim() => Some(Component::CurDir),
+ b"." => None, // . components are normalized away, except at
+ // the beginning of a path, which is treated
+ // separately via `include_cur_dir`
+ b".." => Some(Component::ParentDir),
+ b"" => None,
+ _ => Some(Component::Normal(unsafe { u8_slice_as_os_str(comp) })),
+ }
+ }
+
+ // parse a component from the left, saying how many bytes to consume to
+ // remove the component
+ fn parse_next_component(&self) -> (usize, Option<Component<'a>>) {
+ debug_assert!(self.front == State::Body);
+ let (extra, comp) = match self.path.iter().position(|b| self.is_sep_byte(*b)) {
+ None => (0, self.path),
+ Some(i) => (1, &self.path[..i]),
+ };
+ (comp.len() + extra, self.parse_single_component(comp))
+ }
+
+ // parse a component from the right, saying how many bytes to consume to
+ // remove the component
+ fn parse_next_component_back(&self) -> (usize, Option<Component<'a>>) {
+ debug_assert!(self.back == State::Body);
+ let start = self.len_before_body();
+ let (extra, comp) = match self.path[start..].iter().rposition(|b| self.is_sep_byte(*b)) {
+ None => (0, &self.path[start..]),
+ Some(i) => (1, &self.path[start + i + 1..]),
+ };
+ (comp.len() + extra, self.parse_single_component(comp))
+ }
+
+ // trim away repeated separators (i.e., empty components) on the left
+ fn trim_left(&mut self) {
+ while !self.path.is_empty() {
+ let (size, comp) = self.parse_next_component();
+ if comp.is_some() {
+ return;
+ } else {
+ self.path = &self.path[size..];
+ }
+ }
+ }
+
+ // trim away repeated separators (i.e., empty components) on the right
+ fn trim_right(&mut self) {
+ while self.path.len() > self.len_before_body() {
+ let (size, comp) = self.parse_next_component_back();
+ if comp.is_some() {
+ return;
+ } else {
+ self.path = &self.path[..self.path.len() - size];
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for Components<'_> {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ self.as_path()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for Components<'_> {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ self.as_path().as_os_str()
+ }
+}
+
+#[stable(feature = "path_iter_debug", since = "1.13.0")]
+impl fmt::Debug for Iter<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ struct DebugHelper<'a>(&'a Path);
+
+ impl fmt::Debug for DebugHelper<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.0.iter()).finish()
+ }
+ }
+
+ f.debug_tuple("Iter").field(&DebugHelper(self.as_path())).finish()
+ }
+}
+
+impl<'a> Iter<'a> {
+ /// Extracts a slice corresponding to the portion of the path remaining for iteration.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let mut iter = Path::new("/tmp/foo/bar.txt").iter();
+ /// iter.next();
+ /// iter.next();
+ ///
+ /// assert_eq!(Path::new("foo/bar.txt"), iter.as_path());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_path(&self) -> &'a Path {
+ self.inner.as_path()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for Iter<'_> {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ self.as_path()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for Iter<'_> {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ self.as_path().as_os_str()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for Iter<'a> {
+ type Item = &'a OsStr;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a OsStr> {
+ self.inner.next().map(Component::as_os_str)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> DoubleEndedIterator for Iter<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a OsStr> {
+ self.inner.next_back().map(Component::as_os_str)
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Iter<'_> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for Components<'a> {
+ type Item = Component<'a>;
+
+ fn next(&mut self) -> Option<Component<'a>> {
+ while !self.finished() {
+ match self.front {
+ State::Prefix if self.prefix_len() > 0 => {
+ self.front = State::StartDir;
+ debug_assert!(self.prefix_len() <= self.path.len());
+ let raw = &self.path[..self.prefix_len()];
+ self.path = &self.path[self.prefix_len()..];
+ return Some(Component::Prefix(PrefixComponent {
+ raw: unsafe { u8_slice_as_os_str(raw) },
+ parsed: self.prefix.unwrap(),
+ }));
+ }
+ State::Prefix => {
+ self.front = State::StartDir;
+ }
+ State::StartDir => {
+ self.front = State::Body;
+ if self.has_physical_root {
+ debug_assert!(!self.path.is_empty());
+ self.path = &self.path[1..];
+ return Some(Component::RootDir);
+ } else if let Some(p) = self.prefix {
+ if p.has_implicit_root() && !p.is_verbatim() {
+ return Some(Component::RootDir);
+ }
+ } else if self.include_cur_dir() {
+ debug_assert!(!self.path.is_empty());
+ self.path = &self.path[1..];
+ return Some(Component::CurDir);
+ }
+ }
+ State::Body if !self.path.is_empty() => {
+ let (size, comp) = self.parse_next_component();
+ self.path = &self.path[size..];
+ if comp.is_some() {
+ return comp;
+ }
+ }
+ State::Body => {
+ self.front = State::Done;
+ }
+ State::Done => unreachable!(),
+ }
+ }
+ None
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> DoubleEndedIterator for Components<'a> {
+ fn next_back(&mut self) -> Option<Component<'a>> {
+ while !self.finished() {
+ match self.back {
+ State::Body if self.path.len() > self.len_before_body() => {
+ let (size, comp) = self.parse_next_component_back();
+ self.path = &self.path[..self.path.len() - size];
+ if comp.is_some() {
+ return comp;
+ }
+ }
+ State::Body => {
+ self.back = State::StartDir;
+ }
+ State::StartDir => {
+ self.back = State::Prefix;
+ if self.has_physical_root {
+ self.path = &self.path[..self.path.len() - 1];
+ return Some(Component::RootDir);
+ } else if let Some(p) = self.prefix {
+ if p.has_implicit_root() && !p.is_verbatim() {
+ return Some(Component::RootDir);
+ }
+ } else if self.include_cur_dir() {
+ self.path = &self.path[..self.path.len() - 1];
+ return Some(Component::CurDir);
+ }
+ }
+ State::Prefix if self.prefix_len() > 0 => {
+ self.back = State::Done;
+ return Some(Component::Prefix(PrefixComponent {
+ raw: unsafe { u8_slice_as_os_str(self.path) },
+ parsed: self.prefix.unwrap(),
+ }));
+ }
+ State::Prefix => {
+ self.back = State::Done;
+ return None;
+ }
+ State::Done => unreachable!(),
+ }
+ }
+ None
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Components<'_> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> cmp::PartialEq for Components<'a> {
+ #[inline]
+ fn eq(&self, other: &Components<'a>) -> bool {
+ let Components { path: _, front: _, back: _, has_physical_root: _, prefix: _ } = self;
+
+ // Fast path for exact matches, e.g. for hashmap lookups.
+ // Don't explicitly compare the prefix or has_physical_root fields since they'll
+ // either be covered by the `path` buffer or are only relevant for `prefix_verbatim()`.
+ if self.path.len() == other.path.len()
+ && self.front == other.front
+ && self.back == State::Body
+ && other.back == State::Body
+ && self.prefix_verbatim() == other.prefix_verbatim()
+ {
+ // possible future improvement: this could bail out earlier if there were a
+ // reverse memcmp/bcmp comparing back to front
+ if self.path == other.path {
+ return true;
+ }
+ }
+
+ // compare back to front since absolute paths often share long prefixes
+ Iterator::eq(self.clone().rev(), other.clone().rev())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::Eq for Components<'_> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> cmp::PartialOrd for Components<'a> {
+ #[inline]
+ fn partial_cmp(&self, other: &Components<'a>) -> Option<cmp::Ordering> {
+ Some(compare_components(self.clone(), other.clone()))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::Ord for Components<'_> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ compare_components(self.clone(), other.clone())
+ }
+}
+
+fn compare_components(mut left: Components<'_>, mut right: Components<'_>) -> cmp::Ordering {
+ // Fast path for long shared prefixes
+ //
+ // - compare the raw bytes to find the first mismatch
+ // - backtrack to the separator before the mismatch, to avoid ambiguous parses of '.' or '..' components
+ // - if such a separator is found, update the state to do a component-wise comparison only on
+ // the remainder; otherwise do it on the full path
+ //
+ // The fast path isn't taken for paths with a PrefixComponent to avoid backtracking into
+ // the middle of one
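+ //
+ // For example, comparing `/usr/lib/foo` with `/usr/lib/bar` finds the first
+ // mismatching byte inside the last component; after backtracking to the `/`
+ // before it, only `foo` and `bar` need a component-wise comparison.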
+ if left.prefix.is_none() && right.prefix.is_none() && left.front == right.front {
+ // possible future improvement: a [u8]::first_mismatch simd implementation
+ let first_difference = match left.path.iter().zip(right.path).position(|(&a, &b)| a != b) {
+ None if left.path.len() == right.path.len() => return cmp::Ordering::Equal,
+ None => left.path.len().min(right.path.len()),
+ Some(diff) => diff,
+ };
+
+ if let Some(previous_sep) =
+ left.path[..first_difference].iter().rposition(|&b| left.is_sep_byte(b))
+ {
+ let mismatched_component_start = previous_sep + 1;
+ left.path = &left.path[mismatched_component_start..];
+ left.front = State::Body;
+ right.path = &right.path[mismatched_component_start..];
+ right.front = State::Body;
+ }
+ }
+
+ Iterator::cmp(left, right)
+}
+
+/// An iterator over [`Path`] and its ancestors.
+///
+/// This `struct` is created by the [`ancestors`] method on [`Path`].
+/// See its documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::path::Path;
+///
+/// let path = Path::new("/foo/bar");
+///
+/// for ancestor in path.ancestors() {
+/// println!("{}", ancestor.display());
+/// }
+/// ```
+///
+/// [`ancestors`]: Path::ancestors
+#[derive(Copy, Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "path_ancestors", since = "1.28.0")]
+pub struct Ancestors<'a> {
+ next: Option<&'a Path>,
+}
+
+#[stable(feature = "path_ancestors", since = "1.28.0")]
+impl<'a> Iterator for Ancestors<'a> {
+ type Item = &'a Path;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ let next = self.next;
+ self.next = next.and_then(Path::parent);
+ next
+ }
+}
+
+#[stable(feature = "path_ancestors", since = "1.28.0")]
+impl FusedIterator for Ancestors<'_> {}
+
+////////////////////////////////////////////////////////////////////////////////
+// Basic types and traits
+////////////////////////////////////////////////////////////////////////////////
+
+/// An owned, mutable path (akin to [`String`]).
+///
+/// This type provides methods like [`push`] and [`set_extension`] that mutate
+/// the path in place. It also implements [`Deref`] to [`Path`], meaning that
+/// all methods on [`Path`] slices are available on `PathBuf` values as well.
+///
+/// [`push`]: PathBuf::push
+/// [`set_extension`]: PathBuf::set_extension
+///
+/// More details about the overall approach can be found in
+/// the [module documentation](self).
+///
+/// # Examples
+///
+/// You can use [`push`] to build up a `PathBuf` from
+/// components:
+///
+/// ```
+/// use std::path::PathBuf;
+///
+/// let mut path = PathBuf::new();
+///
+/// path.push(r"C:\");
+/// path.push("windows");
+/// path.push("system32");
+///
+/// path.set_extension("dll");
+/// ```
+///
+/// However, [`push`] is best used for dynamic situations. When you know all of
+/// the components ahead of time, this is a better way:
+///
+/// ```
+/// use std::path::PathBuf;
+///
+/// let path: PathBuf = [r"C:\", "windows", "system32.dll"].iter().collect();
+/// ```
+///
+/// We can still do better than this! Since these are all strings, we can use
+/// `From::from`:
+///
+/// ```
+/// use std::path::PathBuf;
+///
+/// let path = PathBuf::from(r"C:\windows\system32.dll");
+/// ```
+///
+/// Which method works best depends on what kind of situation you're in.
+#[cfg_attr(not(test), rustc_diagnostic_item = "PathBuf")]
+#[stable(feature = "rust1", since = "1.0.0")]
+// FIXME:
+// `PathBuf::as_mut_vec` current implementation relies
+// on `PathBuf` being layout-compatible with `Vec<u8>`.
+// When attribute privacy is implemented, `PathBuf` should be annotated as `#[repr(transparent)]`.
+// In any case, `PathBuf`'s representation and layout are considered an implementation detail;
+// they are not documented and must not be relied upon.
+pub struct PathBuf {
+ inner: OsString,
+}
+
+impl PathBuf {
+ #[inline]
+ fn as_mut_vec(&mut self) -> &mut Vec<u8> {
+ unsafe { &mut *(self as *mut PathBuf as *mut Vec<u8>) }
+ }
+
+ /// Allocates an empty `PathBuf`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::PathBuf;
+ ///
+ /// let path = PathBuf::new();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn new() -> PathBuf {
+ PathBuf { inner: OsString::new() }
+ }
+
+ /// Creates a new `PathBuf` with a given capacity used to create the
+ /// internal [`OsString`]. See [`with_capacity`] defined on [`OsString`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::PathBuf;
+ ///
+ /// let mut path = PathBuf::with_capacity(10);
+ /// let capacity = path.capacity();
+ ///
+ /// // This push is done without reallocating
+ /// path.push(r"C:\");
+ ///
+ /// assert_eq!(capacity, path.capacity());
+ /// ```
+ ///
+ /// [`with_capacity`]: OsString::with_capacity
+ #[stable(feature = "path_buf_capacity", since = "1.44.0")]
+ #[must_use]
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> PathBuf {
+ PathBuf { inner: OsString::with_capacity(capacity) }
+ }
+
+ /// Coerces to a [`Path`] slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let p = PathBuf::from("/test");
+ /// assert_eq!(Path::new("/test"), p.as_path());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_path(&self) -> &Path {
+ self
+ }
+
+ /// Extends `self` with `path`.
+ ///
+ /// If `path` is absolute, it replaces the current path.
+ ///
+ /// On Windows:
+ ///
+ /// * if `path` has a root but no prefix (e.g., `\windows`), it
+ /// replaces everything except for the prefix (if any) of `self`.
+ /// * if `path` has a prefix but no root, it replaces `self`.
+ /// * if `self` has a verbatim prefix (e.g. `\\?\C:\windows`)
+ /// and `path` is not empty, the new path is normalized: all references
+ /// to `.` and `..` are removed.
+ ///
+ /// # Examples
+ ///
+ /// Pushing a relative path extends the existing path:
+ ///
+ /// ```
+ /// use std::path::PathBuf;
+ ///
+ /// let mut path = PathBuf::from("/tmp");
+ /// path.push("file.bk");
+ /// assert_eq!(path, PathBuf::from("/tmp/file.bk"));
+ /// ```
+ ///
+ /// Pushing an absolute path replaces the existing path:
+ ///
+ /// ```
+ /// use std::path::PathBuf;
+ ///
+ /// let mut path = PathBuf::from("/tmp");
+ /// path.push("/etc");
+ /// assert_eq!(path, PathBuf::from("/etc"));
+ /// ```
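+ ///
+ /// On Windows, pushing a path that has a root but no prefix keeps the prefix of
+ /// `self`, as described above (an illustrative sketch; the assertion only holds
+ /// on Windows, so the example is not run):
+ ///
+ /// ```no_run
+ /// use std::path::PathBuf;
+ ///
+ /// let mut path = PathBuf::from(r"C:\Users");
+ /// path.push(r"\windows");
+ /// assert_eq!(path, PathBuf::from(r"C:\windows"));
+ /// ```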
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push<P: AsRef<Path>>(&mut self, path: P) {
+ self._push(path.as_ref())
+ }
+
+ fn _push(&mut self, path: &Path) {
+ // in general, a separator is needed if the rightmost byte is not a separator
+ let mut need_sep = self.as_mut_vec().last().map(|c| !is_sep_byte(*c)).unwrap_or(false);
+
+ // in the special case of `C:` on Windows, do *not* add a separator
+ let comps = self.components();
+
+ if comps.prefix_len() > 0
+ && comps.prefix_len() == comps.path.len()
+ && comps.prefix.unwrap().is_drive()
+ {
+ need_sep = false
+ }
+
+ // absolute `path` replaces `self`
+ if path.is_absolute() || path.prefix().is_some() {
+ self.as_mut_vec().truncate(0);
+
+ // verbatim paths need . and .. removed
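+ // (e.g., pushing `..\x` onto `\\?\C:\foo\bar` yields `\\?\C:\foo\x`)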
+ } else if comps.prefix_verbatim() && !path.inner.is_empty() {
+ let mut buf: Vec<_> = comps.collect();
+ for c in path.components() {
+ match c {
+ Component::RootDir => {
+ buf.truncate(1);
+ buf.push(c);
+ }
+ Component::CurDir => (),
+ Component::ParentDir => {
+ if let Some(Component::Normal(_)) = buf.last() {
+ buf.pop();
+ }
+ }
+ _ => buf.push(c),
+ }
+ }
+
+ let mut res = OsString::new();
+ let mut need_sep = false;
+
+ for c in buf {
+ if need_sep && c != Component::RootDir {
+ res.push(MAIN_SEP_STR);
+ }
+ res.push(c.as_os_str());
+
+ need_sep = match c {
+ Component::RootDir => false,
+ Component::Prefix(prefix) => {
+ !prefix.parsed.is_drive() && prefix.parsed.len() > 0
+ }
+ _ => true,
+ }
+ }
+
+ self.inner = res;
+ return;
+
+ // `path` has a root but no prefix, e.g., `\windows` (Windows only)
+ } else if path.has_root() {
+ let prefix_len = self.components().prefix_remaining();
+ self.as_mut_vec().truncate(prefix_len);
+
+ // `path` is a pure relative path
+ } else if need_sep {
+ self.inner.push(MAIN_SEP_STR);
+ }
+
+ self.inner.push(path);
+ }
+
+ /// Truncates `self` to [`self.parent`].
+ ///
+ /// Returns `false` and does nothing if [`self.parent`] is [`None`].
+ /// Otherwise, returns `true`.
+ ///
+ /// [`self.parent`]: Path::parent
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let mut p = PathBuf::from("/spirited/away.rs");
+ ///
+ /// p.pop();
+ /// assert_eq!(Path::new("/spirited"), p);
+ /// p.pop();
+ /// assert_eq!(Path::new("/"), p);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> bool {
+ match self.parent().map(|p| p.as_u8_slice().len()) {
+ Some(len) => {
+ self.as_mut_vec().truncate(len);
+ true
+ }
+ None => false,
+ }
+ }
+
+ /// Updates [`self.file_name`] to `file_name`.
+ ///
+ /// If [`self.file_name`] was [`None`], this is equivalent to pushing
+ /// `file_name`.
+ ///
+ /// Otherwise it is equivalent to calling [`pop`] and then pushing
+ /// `file_name`. The new path will be a sibling of the original path.
+ /// (That is, it will have the same parent.)
+ ///
+ /// [`self.file_name`]: Path::file_name
+ /// [`pop`]: PathBuf::pop
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::PathBuf;
+ ///
+ /// let mut buf = PathBuf::from("/");
+ /// assert!(buf.file_name() == None);
+ /// buf.set_file_name("bar");
+ /// assert!(buf == PathBuf::from("/bar"));
+ /// assert!(buf.file_name().is_some());
+ /// buf.set_file_name("baz.txt");
+ /// assert!(buf == PathBuf::from("/baz.txt"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn set_file_name<S: AsRef<OsStr>>(&mut self, file_name: S) {
+ self._set_file_name(file_name.as_ref())
+ }
+
+ fn _set_file_name(&mut self, file_name: &OsStr) {
+ if self.file_name().is_some() {
+ let popped = self.pop();
+ debug_assert!(popped);
+ }
+ self.push(file_name);
+ }
+
+ /// Updates [`self.extension`] to `extension`.
+ ///
+ /// Returns `false` and does nothing if [`self.file_name`] is [`None`];
+ /// otherwise returns `true` and updates the extension.
+ ///
+ /// If [`self.extension`] is [`None`], the extension is added; otherwise
+ /// it is replaced.
+ ///
+ /// [`self.file_name`]: Path::file_name
+ /// [`self.extension`]: Path::extension
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let mut p = PathBuf::from("/feel/the");
+ ///
+ /// p.set_extension("force");
+ /// assert_eq!(Path::new("/feel/the.force"), p.as_path());
+ ///
+ /// p.set_extension("dark_side");
+ /// assert_eq!(Path::new("/feel/the.dark_side"), p.as_path());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn set_extension<S: AsRef<OsStr>>(&mut self, extension: S) -> bool {
+ self._set_extension(extension.as_ref())
+ }
+
+ fn _set_extension(&mut self, extension: &OsStr) -> bool {
+ let file_stem = match self.file_stem() {
+ None => return false,
+ Some(f) => f.bytes(),
+ };
+
+ // truncate until right after the file stem
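+ // (`file_stem` borrows from `self.inner`, so subtracting the buffer's start
+ // address from the address just past the stem gives the byte length to keep)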
+ let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr();
+ let start = self.inner.bytes().as_ptr().addr();
+ let v = self.as_mut_vec();
+ v.truncate(end_file_stem.wrapping_sub(start));
+
+ // add the new extension, if any
+ let new = extension.bytes();
+ if !new.is_empty() {
+ v.reserve_exact(new.len() + 1);
+ v.push(b'.');
+ v.extend_from_slice(new);
+ }
+
+ true
+ }
+
+ /// Consumes the `PathBuf`, yielding its internal [`OsString`] storage.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::PathBuf;
+ ///
+ /// let p = PathBuf::from("/the/head");
+ /// let os_str = p.into_os_string();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_os_string(self) -> OsString {
+ self.inner
+ }
+
+ /// Converts this `PathBuf` into a [boxed](Box) [`Path`].
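+ ///
+ /// # Examples
+ ///
+ /// A simple conversion and comparison (illustrative):
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let boxed: Box<Path> = PathBuf::from("/tmp/foo.txt").into_boxed_path();
+ /// assert_eq!(&*boxed, Path::new("/tmp/foo.txt"));
+ /// ```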
+ #[stable(feature = "into_boxed_path", since = "1.20.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_boxed_path(self) -> Box<Path> {
+ let rw = Box::into_raw(self.inner.into_boxed_os_str()) as *mut Path;
+ unsafe { Box::from_raw(rw) }
+ }
+
+ /// Invokes [`capacity`] on the underlying instance of [`OsString`].
+ ///
+ /// [`capacity`]: OsString::capacity
+ #[stable(feature = "path_buf_capacity", since = "1.44.0")]
+ #[must_use]
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ /// Invokes [`clear`] on the underlying instance of [`OsString`].
+ ///
+ /// [`clear`]: OsString::clear
+ #[stable(feature = "path_buf_capacity", since = "1.44.0")]
+ #[inline]
+ pub fn clear(&mut self) {
+ self.inner.clear()
+ }
+
+ /// Invokes [`reserve`] on the underlying instance of [`OsString`].
+ ///
+ /// [`reserve`]: OsString::reserve
+ #[stable(feature = "path_buf_capacity", since = "1.44.0")]
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ self.inner.reserve(additional)
+ }
+
+ /// Invokes [`try_reserve`] on the underlying instance of [`OsString`].
+ ///
+ /// [`try_reserve`]: OsString::try_reserve
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ #[inline]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve(additional)
+ }
+
+ /// Invokes [`reserve_exact`] on the underlying instance of [`OsString`].
+ ///
+ /// [`reserve_exact`]: OsString::reserve_exact
+ #[stable(feature = "path_buf_capacity", since = "1.44.0")]
+ #[inline]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.inner.reserve_exact(additional)
+ }
+
+ /// Invokes [`try_reserve_exact`] on the underlying instance of [`OsString`].
+ ///
+ /// [`try_reserve_exact`]: OsString::try_reserve_exact
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ #[inline]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve_exact(additional)
+ }
+
+ /// Invokes [`shrink_to_fit`] on the underlying instance of [`OsString`].
+ ///
+ /// [`shrink_to_fit`]: OsString::shrink_to_fit
+ #[stable(feature = "path_buf_capacity", since = "1.44.0")]
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.inner.shrink_to_fit()
+ }
+
+ /// Invokes [`shrink_to`] on the underlying instance of [`OsString`].
+ ///
+ /// [`shrink_to`]: OsString::shrink_to
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ #[inline]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.inner.shrink_to(min_capacity)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Clone for PathBuf {
+ #[inline]
+ fn clone(&self) -> Self {
+ PathBuf { inner: self.inner.clone() }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ self.inner.clone_from(&source.inner)
+ }
+}
+
+#[stable(feature = "box_from_path", since = "1.17.0")]
+impl From<&Path> for Box<Path> {
+ /// Creates a boxed [`Path`] from a reference.
+ ///
+ /// This will allocate and clone `path` to it.
+ fn from(path: &Path) -> Box<Path> {
+ let boxed: Box<OsStr> = path.inner.into();
+ let rw = Box::into_raw(boxed) as *mut Path;
+ unsafe { Box::from_raw(rw) }
+ }
+}
+
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, Path>> for Box<Path> {
+ /// Creates a boxed [`Path`] from a clone-on-write pointer.
+ ///
+ /// Converting from a `Cow::Owned` does not clone or allocate.
+ #[inline]
+ fn from(cow: Cow<'_, Path>) -> Box<Path> {
+ match cow {
+ Cow::Borrowed(path) => Box::from(path),
+ Cow::Owned(path) => Box::from(path),
+ }
+ }
+}
+
+#[stable(feature = "path_buf_from_box", since = "1.18.0")]
+impl From<Box<Path>> for PathBuf {
+ /// Converts a <code>[Box]&lt;[Path]&gt;</code> into a [`PathBuf`].
+ ///
+ /// This conversion does not allocate or copy memory.
+ #[inline]
+ fn from(boxed: Box<Path>) -> PathBuf {
+ boxed.into_path_buf()
+ }
+}
+
+#[stable(feature = "box_from_path_buf", since = "1.20.0")]
+impl From<PathBuf> for Box<Path> {
+ /// Converts a [`PathBuf`] into a <code>[Box]&lt;[Path]&gt;</code>.
+ ///
+ /// This conversion currently should not allocate memory,
+ /// but this behavior is not guaranteed on all platforms or in all future versions.
+ #[inline]
+ fn from(p: PathBuf) -> Box<Path> {
+ p.into_boxed_path()
+ }
+}
+
+#[stable(feature = "more_box_slice_clone", since = "1.29.0")]
+impl Clone for Box<Path> {
+ #[inline]
+ fn clone(&self) -> Self {
+ self.to_path_buf().into_boxed_path()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + AsRef<OsStr>> From<&T> for PathBuf {
+ /// Converts a borrowed [`OsStr`] to a [`PathBuf`].
+ ///
+ /// Allocates a [`PathBuf`] and copies the data into it.
+ #[inline]
+ fn from(s: &T) -> PathBuf {
+ PathBuf::from(s.as_ref().to_os_string())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<OsString> for PathBuf {
+ /// Converts an [`OsString`] into a [`PathBuf`]
+ ///
+ /// This conversion does not allocate or copy memory.
+ #[inline]
+ fn from(s: OsString) -> PathBuf {
+ PathBuf { inner: s }
+ }
+}
+
+#[stable(feature = "from_path_buf_for_os_string", since = "1.14.0")]
+impl From<PathBuf> for OsString {
+ /// Converts a [`PathBuf`] into an [`OsString`]
+ ///
+ /// This conversion does not allocate or copy memory.
+ #[inline]
+ fn from(path_buf: PathBuf) -> OsString {
+ path_buf.inner
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<String> for PathBuf {
+ /// Converts a [`String`] into a [`PathBuf`]
+ ///
+ /// This conversion does not allocate or copy memory.
+ #[inline]
+ fn from(s: String) -> PathBuf {
+ PathBuf::from(OsString::from(s))
+ }
+}
+
+#[stable(feature = "path_from_str", since = "1.32.0")]
+impl FromStr for PathBuf {
+ type Err = core::convert::Infallible;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ Ok(PathBuf::from(s))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<P: AsRef<Path>> iter::FromIterator<P> for PathBuf {
+ fn from_iter<I: IntoIterator<Item = P>>(iter: I) -> PathBuf {
+ let mut buf = PathBuf::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<P: AsRef<Path>> iter::Extend<P> for PathBuf {
+ fn extend<I: IntoIterator<Item = P>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |p| self.push(p.as_ref()));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, p: P) {
+ self.push(p.as_ref());
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for PathBuf {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, formatter)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Deref for PathBuf {
+ type Target = Path;
+ #[inline]
+ fn deref(&self) -> &Path {
+ Path::new(&self.inner)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Borrow<Path> for PathBuf {
+ #[inline]
+ fn borrow(&self) -> &Path {
+ self.deref()
+ }
+}
+
+#[stable(feature = "default_for_pathbuf", since = "1.17.0")]
+impl Default for PathBuf {
+ #[inline]
+ fn default() -> Self {
+ PathBuf::new()
+ }
+}
+
+#[stable(feature = "cow_from_path", since = "1.6.0")]
+impl<'a> From<&'a Path> for Cow<'a, Path> {
+ /// Creates a clone-on-write pointer from a reference to
+ /// [`Path`].
+ ///
+ /// This conversion does not clone or allocate.
+ #[inline]
+ fn from(s: &'a Path) -> Cow<'a, Path> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[stable(feature = "cow_from_path", since = "1.6.0")]
+impl<'a> From<PathBuf> for Cow<'a, Path> {
+ /// Creates a clone-on-write pointer from an owned
+ /// instance of [`PathBuf`].
+ ///
+ /// This conversion does not clone or allocate.
+ #[inline]
+ fn from(s: PathBuf) -> Cow<'a, Path> {
+ Cow::Owned(s)
+ }
+}
+
+#[stable(feature = "cow_from_pathbuf_ref", since = "1.28.0")]
+impl<'a> From<&'a PathBuf> for Cow<'a, Path> {
+ /// Creates a clone-on-write pointer from a reference to
+ /// [`PathBuf`].
+ ///
+ /// This conversion does not clone or allocate.
+ #[inline]
+ fn from(p: &'a PathBuf) -> Cow<'a, Path> {
+ Cow::Borrowed(p.as_path())
+ }
+}
+
+#[stable(feature = "pathbuf_from_cow_path", since = "1.28.0")]
+impl<'a> From<Cow<'a, Path>> for PathBuf {
+ /// Converts a clone-on-write pointer to an owned path.
+ ///
+ /// Converting from a `Cow::Owned` does not clone or allocate.
+ #[inline]
+ fn from(p: Cow<'a, Path>) -> Self {
+ p.into_owned()
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<PathBuf> for Arc<Path> {
+ /// Converts a [`PathBuf`] into an <code>[Arc]<[Path]></code> by moving the [`PathBuf`] data
+ /// into a new [`Arc`] buffer.
+ #[inline]
+ fn from(s: PathBuf) -> Arc<Path> {
+ let arc: Arc<OsStr> = Arc::from(s.into_os_string());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const Path) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&Path> for Arc<Path> {
+ /// Converts a [`Path`] into an [`Arc`] by copying the [`Path`] data into a new [`Arc`] buffer.
+ #[inline]
+ fn from(s: &Path) -> Arc<Path> {
+ let arc: Arc<OsStr> = Arc::from(s.as_os_str());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const Path) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<PathBuf> for Rc<Path> {
+ /// Converts a [`PathBuf`] into an <code>[Rc]<[Path]></code> by moving the [`PathBuf`] data into
+ /// a new [`Rc`] buffer.
+ #[inline]
+ fn from(s: PathBuf) -> Rc<Path> {
+ let rc: Rc<OsStr> = Rc::from(s.into_os_string());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const Path) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&Path> for Rc<Path> {
+ /// Converts a [`Path`] into an [`Rc`] by copying the [`Path`] data into a new [`Rc`] buffer.
+ #[inline]
+ fn from(s: &Path) -> Rc<Path> {
+ let rc: Rc<OsStr> = Rc::from(s.as_os_str());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const Path) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToOwned for Path {
+ type Owned = PathBuf;
+ #[inline]
+ fn to_owned(&self) -> PathBuf {
+ self.to_path_buf()
+ }
+ #[inline]
+ fn clone_into(&self, target: &mut PathBuf) {
+ self.inner.clone_into(&mut target.inner);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::PartialEq for PathBuf {
+ #[inline]
+ fn eq(&self, other: &PathBuf) -> bool {
+ self.components() == other.components()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Hash for PathBuf {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ self.as_path().hash(h)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::Eq for PathBuf {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::PartialOrd for PathBuf {
+ #[inline]
+ fn partial_cmp(&self, other: &PathBuf) -> Option<cmp::Ordering> {
+ Some(compare_components(self.components(), other.components()))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::Ord for PathBuf {
+ #[inline]
+ fn cmp(&self, other: &PathBuf) -> cmp::Ordering {
+ compare_components(self.components(), other.components())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for PathBuf {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ &self.inner[..]
+ }
+}
+
+/// A slice of a path (akin to [`str`]).
+///
+/// This type supports a number of operations for inspecting a path, including
+/// breaking the path into its components (separated by `/` on Unix and by either
+/// `/` or `\` on Windows), extracting the file name, determining whether the path
+/// is absolute, and so on.
+///
+/// This is an *unsized* type, meaning that it must always be used behind a
+/// pointer like `&` or [`Box`]. For an owned version of this type,
+/// see [`PathBuf`].
+///
+/// More details about the overall approach can be found in
+/// the [module documentation](self).
+///
+/// # Examples
+///
+/// ```
+/// use std::path::Path;
+/// use std::ffi::OsStr;
+///
+/// // Note: this example also works on Windows
+/// let path = Path::new("./foo/bar.txt");
+///
+/// let parent = path.parent();
+/// assert_eq!(parent, Some(Path::new("./foo")));
+///
+/// let file_stem = path.file_stem();
+/// assert_eq!(file_stem, Some(OsStr::new("bar")));
+///
+/// let extension = path.extension();
+/// assert_eq!(extension, Some(OsStr::new("txt")));
+/// ```
+#[cfg_attr(not(test), rustc_diagnostic_item = "Path")]
+#[stable(feature = "rust1", since = "1.0.0")]
+// FIXME:
+// `Path::new` current implementation relies
+// on `Path` being layout-compatible with `OsStr`.
+// When attribute privacy is implemented, `Path` should be annotated as `#[repr(transparent)]`.
+// In any case, `Path`'s representation and layout are considered an implementation detail;
+// they are not documented and must not be relied upon.
+pub struct Path {
+ inner: OsStr,
+}
+
+/// An error returned from [`Path::strip_prefix`] if the prefix was not found.
+///
+/// This `struct` is created by the [`strip_prefix`] method on [`Path`].
+/// See its documentation for more.
+///
+/// [`strip_prefix`]: Path::strip_prefix
+#[derive(Debug, Clone, PartialEq, Eq)]
+#[stable(since = "1.7.0", feature = "strip_prefix")]
+pub struct StripPrefixError(());
+
+impl Path {
+ // The following (private!) function allows construction of a path from a u8
+ // slice, which is only safe when it is known to follow the OsStr encoding.
+ unsafe fn from_u8_slice(s: &[u8]) -> &Path {
+ unsafe { Path::new(u8_slice_as_os_str(s)) }
+ }
+ // The following (private!) function reveals the byte encoding used for OsStr.
+ fn as_u8_slice(&self) -> &[u8] {
+ self.inner.bytes()
+ }
+
+ /// Directly wraps a string slice as a `Path` slice.
+ ///
+ /// This is a cost-free conversion.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// Path::new("foo.txt");
+ /// ```
+ ///
+ /// You can create `Path`s from `String`s, or even other `Path`s:
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let string = String::from("foo.txt");
+ /// let from_string = Path::new(&string);
+ /// let from_path = Path::new(&from_string);
+ /// assert_eq!(from_string, from_path);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new<S: AsRef<OsStr> + ?Sized>(s: &S) -> &Path {
+ unsafe { &*(s.as_ref() as *const OsStr as *const Path) }
+ }
+
+ /// Yields the underlying [`OsStr`] slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let os_str = Path::new("foo.txt").as_os_str();
+ /// assert_eq!(os_str, std::ffi::OsStr::new("foo.txt"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn as_os_str(&self) -> &OsStr {
+ &self.inner
+ }
+
+ /// Yields a [`&str`] slice if the `Path` is valid unicode.
+ ///
+ /// This conversion may entail doing a check for UTF-8 validity.
+ /// Note that validation is performed because non-UTF-8 strings are
+ /// perfectly valid on some operating systems.
+ ///
+ /// [`&str`]: str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("foo.txt");
+ /// assert_eq!(path.to_str(), Some("foo.txt"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub fn to_str(&self) -> Option<&str> {
+ self.inner.to_str()
+ }
+
+ /// Converts a `Path` to a [`Cow<str>`].
+ ///
+ /// Any non-Unicode sequences are replaced with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD].
+ ///
+ /// [U+FFFD]: super::char::REPLACEMENT_CHARACTER
+ ///
+ /// # Examples
+ ///
+ /// Calling `to_string_lossy` on a `Path` with valid unicode:
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("foo.txt");
+ /// assert_eq!(path.to_string_lossy(), "foo.txt");
+ /// ```
+ ///
+ /// Had `path` contained invalid unicode, the `to_string_lossy` call might
+ /// have returned `"fo�.txt"`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub fn to_string_lossy(&self) -> Cow<'_, str> {
+ self.inner.to_string_lossy()
+ }
+
+ /// Converts a `Path` to an owned [`PathBuf`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path_buf = Path::new("foo.txt").to_path_buf();
+ /// assert_eq!(path_buf, std::path::PathBuf::from("foo.txt"));
+ /// ```
+ #[rustc_conversion_suggestion]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn to_path_buf(&self) -> PathBuf {
+ PathBuf::from(self.inner.to_os_string())
+ }
+
+ /// Returns `true` if the `Path` is absolute, i.e., if it is independent of
+ /// the current directory.
+ ///
+ /// * On Unix, a path is absolute if it starts with the root, so
+ /// `is_absolute` and [`has_root`] are equivalent.
+ ///
+ /// * On Windows, a path is absolute if it has a prefix and starts with the
+ /// root: `c:\windows` is absolute, while `c:temp` and `\temp` are not.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// assert!(!Path::new("foo.txt").is_absolute());
+ /// ```
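+ ///
+ /// On Windows (an illustrative sketch; not run as a doctest because the results
+ /// are platform-specific):
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ ///
+ /// assert!(Path::new(r"c:\windows").is_absolute());
+ /// assert!(!Path::new(r"c:temp").is_absolute());
+ /// assert!(!Path::new(r"\temp").is_absolute());
+ /// ```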
+ ///
+ /// [`has_root`]: Path::has_root
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[allow(deprecated)]
+ pub fn is_absolute(&self) -> bool {
+ if cfg!(target_os = "redox") {
+ // FIXME: Allow Redox prefixes
+ self.has_root() || has_redox_scheme(self.as_u8_slice())
+ } else {
+ self.has_root() && (cfg!(any(unix, target_os = "wasi")) || self.prefix().is_some())
+ }
+ }
+
+ /// Returns `true` if the `Path` is relative, i.e., not absolute.
+ ///
+ /// See [`is_absolute`]'s documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// assert!(Path::new("foo.txt").is_relative());
+ /// ```
+ ///
+ /// [`is_absolute`]: Path::is_absolute
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn is_relative(&self) -> bool {
+ !self.is_absolute()
+ }
+
+ fn prefix(&self) -> Option<Prefix<'_>> {
+ self.components().prefix
+ }
+
+ /// Returns `true` if the `Path` has a root.
+ ///
+ /// * On Unix, a path has a root if it begins with `/`.
+ ///
+ /// * On Windows, a path has a root if it:
+ /// * has no prefix and begins with a separator, e.g., `\windows`
+ /// * has a prefix followed by a separator, e.g., `c:\windows` but not `c:windows`
+ /// * has any non-disk prefix, e.g., `\\server\share`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// assert!(Path::new("/etc/passwd").has_root());
+ /// ```
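+ ///
+ /// The Windows cases listed above, as a sketch (not run, since the results are
+ /// platform-specific):
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ ///
+ /// assert!(Path::new(r"\windows").has_root());
+ /// assert!(Path::new(r"c:\windows").has_root());
+ /// assert!(!Path::new(r"c:windows").has_root());
+ /// assert!(Path::new(r"\\server\share").has_root());
+ /// ```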
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ #[inline]
+ pub fn has_root(&self) -> bool {
+ self.components().has_root()
+ }
+
+ /// Returns the `Path` without its final component, if there is one.
+ ///
+ /// Returns [`None`] if the path terminates in a root or prefix.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/foo/bar");
+ /// let parent = path.parent().unwrap();
+ /// assert_eq!(parent, Path::new("/foo"));
+ ///
+ /// let grand_parent = parent.parent().unwrap();
+ /// assert_eq!(grand_parent, Path::new("/"));
+ /// assert_eq!(grand_parent.parent(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn parent(&self) -> Option<&Path> {
+ let mut comps = self.components();
+ let comp = comps.next_back();
+ comp.and_then(|p| match p {
+ Component::Normal(_) | Component::CurDir | Component::ParentDir => {
+ Some(comps.as_path())
+ }
+ _ => None,
+ })
+ }
+
+ /// Produces an iterator over `Path` and its ancestors.
+ ///
+ /// The iterator yields the `Path`s obtained by calling [`parent`] zero or more times; that is,
+ /// it will yield `&self`, `&self.parent().unwrap()`,
+ /// `&self.parent().unwrap().parent().unwrap()`, and so on. Once [`parent`] returns [`None`],
+ /// the iterator stops. It will always yield at least one value, namely `&self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let mut ancestors = Path::new("/foo/bar").ancestors();
+ /// assert_eq!(ancestors.next(), Some(Path::new("/foo/bar")));
+ /// assert_eq!(ancestors.next(), Some(Path::new("/foo")));
+ /// assert_eq!(ancestors.next(), Some(Path::new("/")));
+ /// assert_eq!(ancestors.next(), None);
+ ///
+ /// let mut ancestors = Path::new("../foo/bar").ancestors();
+ /// assert_eq!(ancestors.next(), Some(Path::new("../foo/bar")));
+ /// assert_eq!(ancestors.next(), Some(Path::new("../foo")));
+ /// assert_eq!(ancestors.next(), Some(Path::new("..")));
+ /// assert_eq!(ancestors.next(), Some(Path::new("")));
+ /// assert_eq!(ancestors.next(), None);
+ /// ```
+ ///
+ /// [`parent`]: Path::parent
+ #[stable(feature = "path_ancestors", since = "1.28.0")]
+ #[inline]
+ pub fn ancestors(&self) -> Ancestors<'_> {
+ Ancestors { next: Some(&self) }
+ }
+
+ /// Returns the final component of the `Path`, if there is one.
+ ///
+ /// If the path is a normal file, this is the file name. If it's the path of a directory, this
+ /// is the directory name.
+ ///
+ /// Returns [`None`] if the path terminates in `..`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ /// use std::ffi::OsStr;
+ ///
+ /// assert_eq!(Some(OsStr::new("bin")), Path::new("/usr/bin/").file_name());
+ /// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("tmp/foo.txt").file_name());
+ /// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt/.").file_name());
+ /// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt/.//").file_name());
+ /// assert_eq!(None, Path::new("foo.txt/..").file_name());
+ /// assert_eq!(None, Path::new("/").file_name());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn file_name(&self) -> Option<&OsStr> {
+ self.components().next_back().and_then(|p| match p {
+ Component::Normal(p) => Some(p),
+ _ => None,
+ })
+ }
+
+ /// Returns a path that, when joined onto `base`, yields `self`.
+ ///
+ /// # Errors
+ ///
+ /// If `base` is not a prefix of `self` (i.e., [`starts_with`]
+ /// returns `false`), returns [`Err`].
+ ///
+ /// [`starts_with`]: Path::starts_with
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let path = Path::new("/test/haha/foo.txt");
+ ///
+ /// assert_eq!(path.strip_prefix("/"), Ok(Path::new("test/haha/foo.txt")));
+ /// assert_eq!(path.strip_prefix("/test"), Ok(Path::new("haha/foo.txt")));
+ /// assert_eq!(path.strip_prefix("/test/"), Ok(Path::new("haha/foo.txt")));
+ /// assert_eq!(path.strip_prefix("/test/haha/foo.txt"), Ok(Path::new("")));
+ /// assert_eq!(path.strip_prefix("/test/haha/foo.txt/"), Ok(Path::new("")));
+ ///
+ /// assert!(path.strip_prefix("test").is_err());
+ /// assert!(path.strip_prefix("/haha").is_err());
+ ///
+ /// let prefix = PathBuf::from("/test/");
+ /// assert_eq!(path.strip_prefix(prefix), Ok(Path::new("haha/foo.txt")));
+ /// ```
+ #[stable(since = "1.7.0", feature = "path_strip_prefix")]
+ pub fn strip_prefix<P>(&self, base: P) -> Result<&Path, StripPrefixError>
+ where
+ P: AsRef<Path>,
+ {
+ self._strip_prefix(base.as_ref())
+ }
+
+ fn _strip_prefix(&self, base: &Path) -> Result<&Path, StripPrefixError> {
+ iter_after(self.components(), base.components())
+ .map(|c| c.as_path())
+ .ok_or(StripPrefixError(()))
+ }
+
+ /// Determines whether `base` is a prefix of `self`.
+ ///
+ /// Only considers whole path components to match.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/etc/passwd");
+ ///
+ /// assert!(path.starts_with("/etc"));
+ /// assert!(path.starts_with("/etc/"));
+ /// assert!(path.starts_with("/etc/passwd"));
+ /// assert!(path.starts_with("/etc/passwd/")); // extra slash is okay
+ /// assert!(path.starts_with("/etc/passwd///")); // multiple extra slashes are okay
+ ///
+ /// assert!(!path.starts_with("/e"));
+ /// assert!(!path.starts_with("/etc/passwd.txt"));
+ ///
+ /// assert!(!Path::new("/etc/foo.rs").starts_with("/etc/foo"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn starts_with<P: AsRef<Path>>(&self, base: P) -> bool {
+ self._starts_with(base.as_ref())
+ }
+
+ fn _starts_with(&self, base: &Path) -> bool {
+ iter_after(self.components(), base.components()).is_some()
+ }
+
+ /// Determines whether `child` is a suffix of `self`.
+ ///
+ /// Only considers whole path components to match.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/etc/resolv.conf");
+ ///
+ /// assert!(path.ends_with("resolv.conf"));
+ /// assert!(path.ends_with("etc/resolv.conf"));
+ /// assert!(path.ends_with("/etc/resolv.conf"));
+ ///
+ /// assert!(!path.ends_with("/resolv.conf"));
+ /// assert!(!path.ends_with("conf")); // use .extension() instead
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn ends_with<P: AsRef<Path>>(&self, child: P) -> bool {
+ self._ends_with(child.as_ref())
+ }
+
+ fn _ends_with(&self, child: &Path) -> bool {
+ iter_after(self.components().rev(), child.components().rev()).is_some()
+ }
+
+ /// Extracts the stem (non-extension) portion of [`self.file_name`].
+ ///
+ /// [`self.file_name`]: Path::file_name
+ ///
+ /// The stem is:
+ ///
+ /// * [`None`], if there is no file name;
+ /// * The entire file name if there is no embedded `.`;
+ /// * The entire file name if the file name begins with `.` and has no other `.`s within;
+ /// * Otherwise, the portion of the file name before the final `.`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// assert_eq!("foo", Path::new("foo.rs").file_stem().unwrap());
+ /// assert_eq!("foo.tar", Path::new("foo.tar.gz").file_stem().unwrap());
+ /// ```
+ ///
+ /// # See Also
+ /// This method is similar to [`Path::file_prefix`], which extracts the portion of the file name
+ /// before the *first* `.`
+ ///
+ /// [`Path::file_prefix`]: Path::file_prefix
+ ///
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn file_stem(&self) -> Option<&OsStr> {
+ self.file_name().map(rsplit_file_at_dot).and_then(|(before, after)| before.or(after))
+ }
+
+ /// Extracts the prefix of [`self.file_name`].
+ ///
+ /// The prefix is:
+ ///
+ /// * [`None`], if there is no file name;
+ /// * The entire file name if there is no embedded `.`;
+ /// * The portion of the file name before the first non-beginning `.`;
+ /// * The entire file name if the file name begins with `.` and has no other `.`s within;
+ /// * The portion of the file name before the second `.` if the file name begins with `.`
+ ///
+ /// [`self.file_name`]: Path::file_name
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(path_file_prefix)]
+ /// use std::path::Path;
+ ///
+ /// assert_eq!("foo", Path::new("foo.rs").file_prefix().unwrap());
+ /// assert_eq!("foo", Path::new("foo.tar.gz").file_prefix().unwrap());
+ /// ```
+ ///
+ /// # See Also
+ /// This method is similar to [`Path::file_stem`], which extracts the portion of the file name
+ /// before the *last* `.`
+ ///
+ /// [`Path::file_stem`]: Path::file_stem
+ ///
+ #[unstable(feature = "path_file_prefix", issue = "86319")]
+ #[must_use]
+ pub fn file_prefix(&self) -> Option<&OsStr> {
+ self.file_name().map(split_file_at_dot).and_then(|(before, _after)| Some(before))
+ }
+
+ /// Extracts the extension of [`self.file_name`], if possible.
+ ///
+ /// The extension is:
+ ///
+ /// * [`None`], if there is no file name;
+ /// * [`None`], if there is no embedded `.`;
+ /// * [`None`], if the file name begins with `.` and has no other `.`s within;
+ /// * Otherwise, the portion of the file name after the final `.`
+ ///
+ /// [`self.file_name`]: Path::file_name
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// assert_eq!("rs", Path::new("foo.rs").extension().unwrap());
+ /// assert_eq!("gz", Path::new("foo.tar.gz").extension().unwrap());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn extension(&self) -> Option<&OsStr> {
+ self.file_name().map(rsplit_file_at_dot).and_then(|(before, after)| before.and(after))
+ }
+
+ /// Creates an owned [`PathBuf`] with `path` adjoined to `self`.
+ ///
+ /// See [`PathBuf::push`] for more details on what it means to adjoin a path.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// assert_eq!(Path::new("/etc").join("passwd"), PathBuf::from("/etc/passwd"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn join<P: AsRef<Path>>(&self, path: P) -> PathBuf {
+ self._join(path.as_ref())
+ }
+
+ fn _join(&self, path: &Path) -> PathBuf {
+ let mut buf = self.to_path_buf();
+ buf.push(path);
+ buf
+ }
+
+ /// Creates an owned [`PathBuf`] like `self` but with the given file name.
+ ///
+ /// See [`PathBuf::set_file_name`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let path = Path::new("/tmp/foo.txt");
+ /// assert_eq!(path.with_file_name("bar.txt"), PathBuf::from("/tmp/bar.txt"));
+ ///
+ /// let path = Path::new("/tmp");
+ /// assert_eq!(path.with_file_name("var"), PathBuf::from("/var"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_file_name<S: AsRef<OsStr>>(&self, file_name: S) -> PathBuf {
+ self._with_file_name(file_name.as_ref())
+ }
+
+ fn _with_file_name(&self, file_name: &OsStr) -> PathBuf {
+ let mut buf = self.to_path_buf();
+ buf.set_file_name(file_name);
+ buf
+ }
+
+ /// Creates an owned [`PathBuf`] like `self` but with the given extension.
+ ///
+ /// See [`PathBuf::set_extension`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let path = Path::new("foo.rs");
+ /// assert_eq!(path.with_extension("txt"), PathBuf::from("foo.txt"));
+ ///
+ /// let path = Path::new("foo.tar.gz");
+ /// assert_eq!(path.with_extension(""), PathBuf::from("foo.tar"));
+ /// assert_eq!(path.with_extension("xz"), PathBuf::from("foo.tar.xz"));
+ /// assert_eq!(path.with_extension("").with_extension("txt"), PathBuf::from("foo.txt"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with_extension<S: AsRef<OsStr>>(&self, extension: S) -> PathBuf {
+ self._with_extension(extension.as_ref())
+ }
+
+ fn _with_extension(&self, extension: &OsStr) -> PathBuf {
+ let mut buf = self.to_path_buf();
+ buf.set_extension(extension);
+ buf
+ }
+
+ /// Produces an iterator over the [`Component`]s of the path.
+ ///
+ /// When parsing the path, there is a small amount of normalization:
+ ///
+ /// * Repeated separators are ignored, so `a/b` and `a//b` both have
+ /// `a` and `b` as components.
+ ///
+ /// * Occurrences of `.` are normalized away, except if they are at the
+ /// beginning of the path. For example, `a/./b`, `a/b/`, `a/b/.` and
+ /// `a/b` all have `a` and `b` as components, but `./a/b` starts with
+ /// an additional [`CurDir`] component.
+ ///
+ /// * A trailing slash is normalized away, `/a/b` and `/a/b/` are equivalent.
+ ///
+ /// Note that no other normalization takes place; in particular, `a/c`
+ /// and `a/b/../c` are distinct, to account for the possibility that `b`
+ /// is a symbolic link (so its parent isn't `a`).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{Path, Component};
+ /// use std::ffi::OsStr;
+ ///
+ /// let mut components = Path::new("/tmp/foo.txt").components();
+ ///
+ /// assert_eq!(components.next(), Some(Component::RootDir));
+ /// assert_eq!(components.next(), Some(Component::Normal(OsStr::new("tmp"))));
+ /// assert_eq!(components.next(), Some(Component::Normal(OsStr::new("foo.txt"))));
+ /// assert_eq!(components.next(), None)
+ /// ```
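+ ///
+ /// The normalization described above means that differently written paths can
+ /// produce identical components:
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let a: Vec<_> = Path::new("./a//b/").components().collect();
+ /// let b: Vec<_> = Path::new("./a/b").components().collect();
+ /// assert_eq!(a, b);
+ /// ```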
+ ///
+ /// [`CurDir`]: Component::CurDir
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn components(&self) -> Components<'_> {
+ let prefix = parse_prefix(self.as_os_str());
+ Components {
+ path: self.as_u8_slice(),
+ prefix,
+ has_physical_root: has_physical_root(self.as_u8_slice(), prefix)
+ || has_redox_scheme(self.as_u8_slice()),
+ front: State::Prefix,
+ back: State::Body,
+ }
+ }
+
+ /// Produces an iterator over the path's components viewed as [`OsStr`]
+ /// slices.
+ ///
+ /// For more information about the particulars of how the path is separated
+ /// into components, see [`components`].
+ ///
+ /// [`components`]: Path::components
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::{self, Path};
+ /// use std::ffi::OsStr;
+ ///
+ /// let mut it = Path::new("/tmp/foo.txt").iter();
+ /// assert_eq!(it.next(), Some(OsStr::new(&path::MAIN_SEPARATOR.to_string())));
+ /// assert_eq!(it.next(), Some(OsStr::new("tmp")));
+ /// assert_eq!(it.next(), Some(OsStr::new("foo.txt")));
+ /// assert_eq!(it.next(), None)
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn iter(&self) -> Iter<'_> {
+ Iter { inner: self.components() }
+ }
+
+ /// Returns an object that implements [`Display`] for safely printing paths
+ /// that may contain non-Unicode data. This may perform lossy conversion,
+ /// depending on the platform. If you would like an implementation which
+ /// escapes the path, please use [`Debug`] instead.
+ ///
+ /// [`Display`]: fmt::Display
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/tmp/foo.rs");
+ ///
+ /// println!("{}", path.display());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this does not display the path, \
+ it returns an object that can be displayed"]
+ #[inline]
+ pub fn display(&self) -> Display<'_> {
+ Display { path: self }
+ }
+
+ /// Queries the file system to get information about a file, directory, etc.
+ ///
+ /// This function will traverse symbolic links to query information about the
+ /// destination file.
+ ///
+ /// This is an alias to [`fs::metadata`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/Minas/tirith");
+ /// let metadata = path.metadata().expect("metadata call failed");
+ /// println!("{:?}", metadata.file_type());
+ /// ```
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[inline]
+ pub fn metadata(&self) -> io::Result<fs::Metadata> {
+ fs::metadata(self)
+ }
+
+ /// Queries the metadata about a file without following symlinks.
+ ///
+ /// This is an alias to [`fs::symlink_metadata`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/Minas/tirith");
+ /// let metadata = path.symlink_metadata().expect("symlink_metadata call failed");
+ /// println!("{:?}", metadata.file_type());
+ /// ```
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[inline]
+ pub fn symlink_metadata(&self) -> io::Result<fs::Metadata> {
+ fs::symlink_metadata(self)
+ }
+
+ /// Returns the canonical, absolute form of the path with all intermediate
+ /// components normalized and symbolic links resolved.
+ ///
+ /// This is an alias to [`fs::canonicalize`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let path = Path::new("/foo/test/../test/bar.rs");
+ /// assert_eq!(path.canonicalize().unwrap(), PathBuf::from("/foo/test/bar.rs"));
+ /// ```
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[inline]
+ pub fn canonicalize(&self) -> io::Result<PathBuf> {
+ fs::canonicalize(self)
+ }
+
+ /// Reads a symbolic link, returning the file that the link points to.
+ ///
+ /// This is an alias to [`fs::read_link`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/laputa/sky_castle.rs");
+ /// let path_link = path.read_link().expect("read_link call failed");
+ /// ```
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[inline]
+ pub fn read_link(&self) -> io::Result<PathBuf> {
+ fs::read_link(self)
+ }
+
+ /// Returns an iterator over the entries within a directory.
+ ///
+ /// The iterator will yield instances of <code>[io::Result]<[fs::DirEntry]></code>. New
+ /// errors may be encountered after an iterator is initially constructed.
+ ///
+ /// This is an alias to [`fs::read_dir`].
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ ///
+ /// let path = Path::new("/laputa");
+ /// for entry in path.read_dir().expect("read_dir call failed") {
+ /// if let Ok(entry) = entry {
+ /// println!("{:?}", entry.path());
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[inline]
+ pub fn read_dir(&self) -> io::Result<fs::ReadDir> {
+ fs::read_dir(self)
+ }
+
+ /// Returns `true` if the path points at an existing entity.
+ ///
+ /// Warning: this method may be error-prone; consider using [`try_exists()`] instead!
+ /// It also has a risk of introducing time-of-check to time-of-use (TOCTOU) bugs.
+ ///
+ /// This function will traverse symbolic links to query information about the
+ /// destination file.
+ ///
+ /// If you cannot access the metadata of the file, e.g. because of a
+ /// permission error or broken symbolic links, this will return `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ /// assert!(!Path::new("does_not_exist.txt").exists());
+ /// ```
+ ///
+ /// # See Also
+ ///
+ /// This is a convenience function that coerces errors to false. If you want to
+ /// check errors, call [`Path::try_exists`].
+ ///
+ /// [`try_exists()`]: Self::try_exists
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[must_use]
+ #[inline]
+ pub fn exists(&self) -> bool {
+ fs::metadata(self).is_ok()
+ }
+
+ /// Returns `Ok(true)` if the path points at an existing entity.
+ ///
+ /// This function will traverse symbolic links to query information about the
+ /// destination file. In case of broken symbolic links this will return `Ok(false)`.
+ ///
+ /// As opposed to the [`exists()`] method, this one doesn't silently ignore errors
+ /// unrelated to the path not existing. (E.g. it will return `Err(_)` in case of permission
+ /// denied on some of the parent directories.)
+ ///
+ /// Note that while this avoids some pitfalls of the `exists()` method, it still cannot
+ /// prevent time-of-check to time-of-use (TOCTOU) bugs. You should only use it in scenarios
+ /// where those bugs are not an issue.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ /// assert!(!Path::new("does_not_exist.txt").try_exists().expect("Can't check existence of file does_not_exist.txt"));
+ /// assert!(Path::new("/root/secret_file.txt").try_exists().is_err());
+ /// ```
+ ///
+ /// [`exists()`]: Self::exists
+ #[stable(feature = "path_try_exists", since = "1.63.0")]
+ #[inline]
+ pub fn try_exists(&self) -> io::Result<bool> {
+ fs::try_exists(self)
+ }
+
+ /// Returns `true` if the path exists on disk and is pointing at a regular file.
+ ///
+ /// This function will traverse symbolic links to query information about the
+ /// destination file.
+ ///
+ /// If you cannot access the metadata of the file, e.g. because of a
+ /// permission error or broken symbolic links, this will return `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ /// assert_eq!(Path::new("./is_a_directory/").is_file(), false);
+ /// assert_eq!(Path::new("a_file.txt").is_file(), true);
+ /// ```
+ ///
+ /// # See Also
+ ///
+ /// This is a convenience function that coerces errors to false. If you want to
+ /// check errors, call [`fs::metadata`] and handle its [`Result`]. Then call
+ /// [`fs::Metadata::is_file`] if it was [`Ok`].
+ ///
+ /// When the goal is simply to read from (or write to) the source, the most
+ /// reliable way to test whether the source can be read (or written to) is to
+ /// open it. Relying only on `is_file` can break workflows like `diff <( prog_a )`
+ /// on a Unix-like system, for example. See [`fs::File::open`] or
+ /// [`fs::OpenOptions::open`] for more information.
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[must_use]
+ pub fn is_file(&self) -> bool {
+ fs::metadata(self).map(|m| m.is_file()).unwrap_or(false)
+ }
+
+ /// Returns `true` if the path exists on disk and is pointing at a directory.
+ ///
+ /// This function will traverse symbolic links to query information about the
+ /// destination file.
+ ///
+ /// If you cannot access the metadata of the file, e.g. because of a
+ /// permission error or broken symbolic links, this will return `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::path::Path;
+ /// assert_eq!(Path::new("./is_a_directory/").is_dir(), true);
+ /// assert_eq!(Path::new("a_file.txt").is_dir(), false);
+ /// ```
+ ///
+ /// # See Also
+ ///
+ /// This is a convenience function that coerces errors to false. If you want to
+ /// check errors, call [`fs::metadata`] and handle its [`Result`]. Then call
+ /// [`fs::Metadata::is_dir`] if it was [`Ok`].
+ #[stable(feature = "path_ext", since = "1.5.0")]
+ #[must_use]
+ pub fn is_dir(&self) -> bool {
+ fs::metadata(self).map(|m| m.is_dir()).unwrap_or(false)
+ }
+
+ /// Returns `true` if the path exists on disk and is pointing at a symbolic link.
+ ///
+ /// This function will not traverse symbolic links.
+ /// In the case of a broken symbolic link this will also return `true`.
+ ///
+ /// If you cannot access the directory containing the file, e.g., because of a
+ /// permission error, this will return `false`.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(unix, doc = "```no_run")]
+ #[cfg_attr(not(unix), doc = "```ignore")]
+ /// use std::path::Path;
+ /// use std::os::unix::fs::symlink;
+ ///
+ /// let link_path = Path::new("link");
+ /// symlink("/origin_does_not_exist/", link_path).unwrap();
+ /// assert_eq!(link_path.is_symlink(), true);
+ /// assert_eq!(link_path.exists(), false);
+ /// ```
+ ///
+ /// # See Also
+ ///
+ /// This is a convenience function that coerces errors to false. If you want to
+ /// check errors, call [`fs::symlink_metadata`] and handle its [`Result`]. Then call
+ /// [`fs::Metadata::is_symlink`] if it was [`Ok`].
+ #[must_use]
+ #[stable(feature = "is_symlink", since = "1.58.0")]
+ pub fn is_symlink(&self) -> bool {
+ fs::symlink_metadata(self).map(|m| m.is_symlink()).unwrap_or(false)
+ }
+
+ /// Converts a [`Box<Path>`](Box) into a [`PathBuf`] without copying or
+ /// allocating.
+ #[stable(feature = "into_boxed_path", since = "1.20.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ pub fn into_path_buf(self: Box<Path>) -> PathBuf {
+ let rw = Box::into_raw(self) as *mut OsStr;
+ let inner = unsafe { Box::from_raw(rw) };
+ PathBuf { inner: OsString::from(inner) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<OsStr> for Path {
+ #[inline]
+ fn as_ref(&self) -> &OsStr {
+ &self.inner
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Path {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, formatter)
+ }
+}
+
+/// Helper struct for safely printing paths with [`format!`] and `{}`.
+///
+/// A [`Path`] might contain non-Unicode data. This `struct` implements the
+/// [`Display`] trait in a way that mitigates that. It is created by the
+/// [`display`](Path::display) method on [`Path`]. This may perform lossy
+/// conversion, depending on the platform. If you would like an implementation
+/// which escapes the path, please use [`Debug`] instead.
+///
+/// # Examples
+///
+/// ```
+/// use std::path::Path;
+///
+/// let path = Path::new("/tmp/foo.rs");
+///
+/// println!("{}", path.display());
+/// ```
+///
+/// [`Display`]: fmt::Display
+/// [`format!`]: crate::format
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Display<'a> {
+ path: &'a Path,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Display<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.path, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Display<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.path.inner.display(f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::PartialEq for Path {
+ #[inline]
+ fn eq(&self, other: &Path) -> bool {
+ self.components() == other.components()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Hash for Path {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ let bytes = self.as_u8_slice();
+ let (prefix_len, verbatim) = match parse_prefix(&self.inner) {
+ Some(prefix) => {
+ prefix.hash(h);
+ (prefix.len(), prefix.is_verbatim())
+ }
+ None => (0, false),
+ };
+ let bytes = &bytes[prefix_len..];
+
+ let mut component_start = 0;
+ let mut bytes_hashed = 0;
+
+ for i in 0..bytes.len() {
+ let is_sep = if verbatim { is_verbatim_sep(bytes[i]) } else { is_sep_byte(bytes[i]) };
+ if is_sep {
+ if i > component_start {
+ let to_hash = &bytes[component_start..i];
+ h.write(to_hash);
+ bytes_hashed += to_hash.len();
+ }
+
+ // skip over separator and optionally a following CurDir item
+ // since components() would normalize these away.
+ component_start = i + 1;
+
+ let tail = &bytes[component_start..];
+
+ if !verbatim {
+ component_start += match tail {
+ [b'.'] => 1,
+ [b'.', sep @ _, ..] if is_sep_byte(*sep) => 1,
+ _ => 0,
+ };
+ }
+ }
+ }
+
+ if component_start < bytes.len() {
+ let to_hash = &bytes[component_start..];
+ h.write(to_hash);
+ bytes_hashed += to_hash.len();
+ }
+
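+        // Finally, mix the total number of component bytes into the hash as well.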
+ h.write_usize(bytes_hashed);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::Eq for Path {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::PartialOrd for Path {
+ #[inline]
+ fn partial_cmp(&self, other: &Path) -> Option<cmp::Ordering> {
+ Some(compare_components(self.components(), other.components()))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl cmp::Ord for Path {
+ #[inline]
+ fn cmp(&self, other: &Path) -> cmp::Ordering {
+ compare_components(self.components(), other.components())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for Path {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for OsStr {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ Path::new(self)
+ }
+}
+
+#[stable(feature = "cow_os_str_as_ref_path", since = "1.8.0")]
+impl AsRef<Path> for Cow<'_, OsStr> {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ Path::new(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for OsString {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ Path::new(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for str {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ Path::new(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for String {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ Path::new(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<Path> for PathBuf {
+ #[inline]
+ fn as_ref(&self) -> &Path {
+ self
+ }
+}
+
+#[stable(feature = "path_into_iter", since = "1.6.0")]
+impl<'a> IntoIterator for &'a PathBuf {
+ type Item = &'a OsStr;
+ type IntoIter = Iter<'a>;
+ #[inline]
+ fn into_iter(self) -> Iter<'a> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "path_into_iter", since = "1.6.0")]
+impl<'a> IntoIterator for &'a Path {
+ type Item = &'a OsStr;
+ type IntoIter = Iter<'a>;
+ #[inline]
+ fn into_iter(self) -> Iter<'a> {
+ self.iter()
+ }
+}
+
+macro_rules! impl_cmp {
+ ($lhs:ty, $rhs: ty) => {
+ #[stable(feature = "partialeq_path", since = "1.6.0")]
+ impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool {
+ <Path as PartialEq>::eq(self, other)
+ }
+ }
+
+ #[stable(feature = "partialeq_path", since = "1.6.0")]
+ impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ #[inline]
+ fn eq(&self, other: &$lhs) -> bool {
+ <Path as PartialEq>::eq(self, other)
+ }
+ }
+
+ #[stable(feature = "cmp_path", since = "1.8.0")]
+ impl<'a, 'b> PartialOrd<$rhs> for $lhs {
+ #[inline]
+ fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
+ <Path as PartialOrd>::partial_cmp(self, other)
+ }
+ }
+
+ #[stable(feature = "cmp_path", since = "1.8.0")]
+ impl<'a, 'b> PartialOrd<$lhs> for $rhs {
+ #[inline]
+ fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
+ <Path as PartialOrd>::partial_cmp(self, other)
+ }
+ }
+ };
+}
+
+impl_cmp!(PathBuf, Path);
+impl_cmp!(PathBuf, &'a Path);
+impl_cmp!(Cow<'a, Path>, Path);
+impl_cmp!(Cow<'a, Path>, &'b Path);
+impl_cmp!(Cow<'a, Path>, PathBuf);
+
+macro_rules! impl_cmp_os_str {
+ ($lhs:ty, $rhs: ty) => {
+ #[stable(feature = "cmp_path", since = "1.8.0")]
+ impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool {
+ <Path as PartialEq>::eq(self, other.as_ref())
+ }
+ }
+
+ #[stable(feature = "cmp_path", since = "1.8.0")]
+ impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ #[inline]
+ fn eq(&self, other: &$lhs) -> bool {
+ <Path as PartialEq>::eq(self.as_ref(), other)
+ }
+ }
+
+ #[stable(feature = "cmp_path", since = "1.8.0")]
+ impl<'a, 'b> PartialOrd<$rhs> for $lhs {
+ #[inline]
+ fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
+ <Path as PartialOrd>::partial_cmp(self, other.as_ref())
+ }
+ }
+
+ #[stable(feature = "cmp_path", since = "1.8.0")]
+ impl<'a, 'b> PartialOrd<$lhs> for $rhs {
+ #[inline]
+ fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
+ <Path as PartialOrd>::partial_cmp(self.as_ref(), other)
+ }
+ }
+ };
+}
+
+impl_cmp_os_str!(PathBuf, OsStr);
+impl_cmp_os_str!(PathBuf, &'a OsStr);
+impl_cmp_os_str!(PathBuf, Cow<'a, OsStr>);
+impl_cmp_os_str!(PathBuf, OsString);
+impl_cmp_os_str!(Path, OsStr);
+impl_cmp_os_str!(Path, &'a OsStr);
+impl_cmp_os_str!(Path, Cow<'a, OsStr>);
+impl_cmp_os_str!(Path, OsString);
+impl_cmp_os_str!(&'a Path, OsStr);
+impl_cmp_os_str!(&'a Path, Cow<'b, OsStr>);
+impl_cmp_os_str!(&'a Path, OsString);
+impl_cmp_os_str!(Cow<'a, Path>, OsStr);
+impl_cmp_os_str!(Cow<'a, Path>, &'b OsStr);
+impl_cmp_os_str!(Cow<'a, Path>, OsString);
+
+#[stable(since = "1.7.0", feature = "strip_prefix")]
+impl fmt::Display for StripPrefixError {
+ #[allow(deprecated, deprecated_in_future)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.description().fmt(f)
+ }
+}
+
+#[stable(since = "1.7.0", feature = "strip_prefix")]
+impl Error for StripPrefixError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "prefix not found"
+ }
+}
+
+/// Makes the path absolute without accessing the filesystem.
+///
+/// If the path is relative, the current directory is used as the base directory.
+/// All intermediate components will be resolved according to platform-specific
+/// rules but unlike [`canonicalize`][crate::fs::canonicalize] this does not
+/// resolve symlinks and may succeed even if the path does not exist.
+///
+/// If the `path` is empty or getting the
+/// [current directory][crate::env::current_dir] fails, then an error will be
+/// returned.
+///
+/// # Examples
+///
+/// ## Posix paths
+///
+/// ```
+/// #![feature(absolute_path)]
+/// # #[cfg(unix)]
+/// fn main() -> std::io::Result<()> {
+/// use std::path::{self, Path};
+///
+/// // Relative to absolute
+/// let absolute = path::absolute("foo/./bar")?;
+/// assert!(absolute.ends_with("foo/bar"));
+///
+/// // Absolute to absolute
+/// let absolute = path::absolute("/foo//test/.././bar.rs")?;
+/// assert_eq!(absolute, Path::new("/foo/test/../bar.rs"));
+/// Ok(())
+/// }
+/// # #[cfg(not(unix))]
+/// # fn main() {}
+/// ```
+///
+/// The path is resolved using [POSIX semantics][posix-semantics] except that
+/// it stops short of resolving symlinks. This means it will keep `..`
+/// components and trailing slashes.
+///
+/// ## Windows paths
+///
+/// ```
+/// #![feature(absolute_path)]
+/// # #[cfg(windows)]
+/// fn main() -> std::io::Result<()> {
+/// use std::path::{self, Path};
+///
+/// // Relative to absolute
+/// let absolute = path::absolute("foo/./bar")?;
+/// assert!(absolute.ends_with(r"foo\bar"));
+///
+/// // Absolute to absolute
+/// let absolute = path::absolute(r"C:\foo//test\..\./bar.rs")?;
+///
+/// assert_eq!(absolute, Path::new(r"C:\foo\bar.rs"));
+/// Ok(())
+/// }
+/// # #[cfg(not(windows))]
+/// # fn main() {}
+/// ```
+///
+/// For verbatim paths this will simply return the path as given. For other
+/// paths this is currently equivalent to calling
+/// [`GetFullPathNameW`][windows-path]. This may change in the future.
+///
+/// [posix-semantics]: https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13
+/// [windows-path]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfullpathnamew
+#[unstable(feature = "absolute_path", issue = "92750")]
+pub fn absolute<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
+ let path = path.as_ref();
+ if path.as_os_str().is_empty() {
+ Err(io::const_io_error!(io::ErrorKind::InvalidInput, "cannot make an empty path absolute",))
+ } else {
+ sys::path::absolute(path)
+ }
+}
diff --git a/library/std/src/path/tests.rs b/library/std/src/path/tests.rs
new file mode 100644
index 000000000..351cf6988
--- /dev/null
+++ b/library/std/src/path/tests.rs
@@ -0,0 +1,1873 @@
+use super::*;
+
+use crate::collections::hash_map::DefaultHasher;
+use crate::collections::{BTreeSet, HashSet};
+use crate::hash::Hasher;
+use crate::rc::Rc;
+use crate::sync::Arc;
+use core::hint::black_box;
+
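+// `t!` asserts a path's decompositions: iteration over components, has_root/is_absolute,
+// parent, file_name, file_stem, extension, and file_prefix.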
+#[allow(unknown_lints, unused_macro_rules)]
+macro_rules! t (
+ ($path:expr, iter: $iter:expr) => (
+ {
+ let path = Path::new($path);
+
+ // Forward iteration
+ let comps = path.iter()
+ .map(|p| p.to_string_lossy().into_owned())
+ .collect::<Vec<String>>();
+ let exp: &[&str] = &$iter;
+ let exps = exp.iter().map(|s| s.to_string()).collect::<Vec<String>>();
+ assert!(comps == exps, "iter: Expected {:?}, found {:?}",
+ exps, comps);
+
+ // Reverse iteration
+ let comps = Path::new($path).iter().rev()
+ .map(|p| p.to_string_lossy().into_owned())
+ .collect::<Vec<String>>();
+ let exps = exps.into_iter().rev().collect::<Vec<String>>();
+ assert!(comps == exps, "iter().rev(): Expected {:?}, found {:?}",
+ exps, comps);
+ }
+ );
+
+ ($path:expr, has_root: $has_root:expr, is_absolute: $is_absolute:expr) => (
+ {
+ let path = Path::new($path);
+
+ let act_root = path.has_root();
+ assert!(act_root == $has_root, "has_root: Expected {:?}, found {:?}",
+ $has_root, act_root);
+
+ let act_abs = path.is_absolute();
+ assert!(act_abs == $is_absolute, "is_absolute: Expected {:?}, found {:?}",
+ $is_absolute, act_abs);
+ }
+ );
+
+ ($path:expr, parent: $parent:expr, file_name: $file:expr) => (
+ {
+ let path = Path::new($path);
+
+ let parent = path.parent().map(|p| p.to_str().unwrap());
+ let exp_parent: Option<&str> = $parent;
+ assert!(parent == exp_parent, "parent: Expected {:?}, found {:?}",
+ exp_parent, parent);
+
+ let file = path.file_name().map(|p| p.to_str().unwrap());
+ let exp_file: Option<&str> = $file;
+ assert!(file == exp_file, "file_name: Expected {:?}, found {:?}",
+ exp_file, file);
+ }
+ );
+
+ ($path:expr, file_stem: $file_stem:expr, extension: $extension:expr) => (
+ {
+ let path = Path::new($path);
+
+ let stem = path.file_stem().map(|p| p.to_str().unwrap());
+ let exp_stem: Option<&str> = $file_stem;
+ assert!(stem == exp_stem, "file_stem: Expected {:?}, found {:?}",
+ exp_stem, stem);
+
+ let ext = path.extension().map(|p| p.to_str().unwrap());
+ let exp_ext: Option<&str> = $extension;
+ assert!(ext == exp_ext, "extension: Expected {:?}, found {:?}",
+ exp_ext, ext);
+ }
+ );
+
+ ($path:expr, file_prefix: $file_prefix:expr, extension: $extension:expr) => (
+ {
+ let path = Path::new($path);
+
+ let prefix = path.file_prefix().map(|p| p.to_str().unwrap());
+ let exp_prefix: Option<&str> = $file_prefix;
+ assert!(prefix == exp_prefix, "file_prefix: Expected {:?}, found {:?}",
+ exp_prefix, prefix);
+
+ let ext = path.extension().map(|p| p.to_str().unwrap());
+ let exp_ext: Option<&str> = $extension;
+ assert!(ext == exp_ext, "extension: Expected {:?}, found {:?}",
+ exp_ext, ext);
+ }
+ );
+
+ ($path:expr, iter: $iter:expr,
+ has_root: $has_root:expr, is_absolute: $is_absolute:expr,
+ parent: $parent:expr, file_name: $file:expr,
+ file_stem: $file_stem:expr, extension: $extension:expr,
+ file_prefix: $file_prefix:expr) => (
+ {
+ t!($path, iter: $iter);
+ t!($path, has_root: $has_root, is_absolute: $is_absolute);
+ t!($path, parent: $parent, file_name: $file);
+ t!($path, file_stem: $file_stem, extension: $extension);
+ t!($path, file_prefix: $file_prefix, extension: $extension);
+ }
+ );
+);
+
+#[test]
+fn into() {
+ use crate::borrow::Cow;
+
+ let static_path = Path::new("/home/foo");
+ let static_cow_path: Cow<'static, Path> = static_path.into();
+ let pathbuf = PathBuf::from("/home/foo");
+
+ {
+ let path: &Path = &pathbuf;
+ let borrowed_cow_path: Cow<'_, Path> = path.into();
+
+ assert_eq!(static_cow_path, borrowed_cow_path);
+ }
+
+ let owned_cow_path: Cow<'static, Path> = pathbuf.into();
+
+ assert_eq!(static_cow_path, owned_cow_path);
+}
+
+#[test]
+#[cfg(unix)]
+pub fn test_decompositions_unix() {
+ t!("",
+ iter: [],
+ has_root: false,
+ is_absolute: false,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("/",
+ iter: ["/"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("/foo",
+ iter: ["/", "foo"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("/"),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("/foo/",
+ iter: ["/", "foo"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("/"),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/bar",
+ iter: ["foo", "bar"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("/foo/bar",
+ iter: ["/", "foo", "bar"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("/foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("///foo///",
+ iter: ["/", "foo"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("/"),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("///foo///bar",
+ iter: ["/", "foo", "bar"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("///foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("./.",
+ iter: ["."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("/..",
+ iter: ["/", ".."],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("/"),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("../",
+ iter: [".."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo/.",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/..",
+ iter: ["foo", ".."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo/./",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/./bar",
+ iter: ["foo", "bar"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("foo/../",
+ iter: ["foo", ".."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo/../bar",
+ iter: ["foo", "..", "bar"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo/.."),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("./a",
+ iter: [".", "a"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("."),
+ file_name: Some("a"),
+ file_stem: Some("a"),
+ extension: None,
+ file_prefix: Some("a")
+ );
+
+ t!(".",
+ iter: ["."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("./",
+ iter: ["."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("a/b",
+ iter: ["a", "b"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some("b"),
+ file_stem: Some("b"),
+ extension: None,
+ file_prefix: Some("b")
+ );
+
+ t!("a//b",
+ iter: ["a", "b"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some("b"),
+ file_stem: Some("b"),
+ extension: None,
+ file_prefix: Some("b")
+ );
+
+ t!("a/./b",
+ iter: ["a", "b"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some("b"),
+ file_stem: Some("b"),
+ extension: None,
+ file_prefix: Some("b")
+ );
+
+ t!("a/b/c",
+ iter: ["a", "b", "c"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a/b"),
+ file_name: Some("c"),
+ file_stem: Some("c"),
+ extension: None,
+ file_prefix: Some("c")
+ );
+
+ t!(".foo",
+ iter: [".foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some(".foo"),
+ file_stem: Some(".foo"),
+ extension: None,
+ file_prefix: Some(".foo")
+ );
+
+ t!("a/.foo",
+ iter: ["a", ".foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some(".foo"),
+ file_stem: Some(".foo"),
+ extension: None,
+ file_prefix: Some(".foo")
+ );
+
+ t!("a/.rustfmt.toml",
+ iter: ["a", ".rustfmt.toml"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some(".rustfmt.toml"),
+ file_stem: Some(".rustfmt"),
+ extension: Some("toml"),
+ file_prefix: Some(".rustfmt")
+ );
+
+ t!("a/.x.y.z",
+ iter: ["a", ".x.y.z"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some(".x.y.z"),
+ file_stem: Some(".x.y"),
+ extension: Some("z"),
+ file_prefix: Some(".x")
+ );
+}
+
+#[test]
+#[cfg(windows)]
+pub fn test_decompositions_windows() {
+ t!("",
+ iter: [],
+ has_root: false,
+ is_absolute: false,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("/",
+ iter: ["\\"],
+ has_root: true,
+ is_absolute: false,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\",
+ iter: ["\\"],
+ has_root: true,
+ is_absolute: false,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("c:",
+ iter: ["c:"],
+ has_root: false,
+ is_absolute: false,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("c:\\",
+ iter: ["c:", "\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("c:/",
+ iter: ["c:", "\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("/foo",
+ iter: ["\\", "foo"],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("/"),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("/foo/",
+ iter: ["\\", "foo"],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("/"),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/bar",
+ iter: ["foo", "bar"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("/foo/bar",
+ iter: ["\\", "foo", "bar"],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("/foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("///foo///",
+ iter: ["\\", "foo"],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("/"),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("///foo///bar",
+ iter: ["\\", "foo", "bar"],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("///foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("./.",
+ iter: ["."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("/..",
+ iter: ["\\", ".."],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("/"),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("../",
+ iter: [".."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo/.",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/..",
+ iter: ["foo", ".."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo/./",
+ iter: ["foo"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: Some("foo"),
+ file_stem: Some("foo"),
+ extension: None,
+ file_prefix: Some("foo")
+ );
+
+ t!("foo/./bar",
+ iter: ["foo", "bar"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("foo/../",
+ iter: ["foo", ".."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo"),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("foo/../bar",
+ iter: ["foo", "..", "bar"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("foo/.."),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("./a",
+ iter: [".", "a"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("."),
+ file_name: Some("a"),
+ file_stem: Some("a"),
+ extension: None,
+ file_prefix: Some("a")
+ );
+
+ t!(".",
+ iter: ["."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("./",
+ iter: ["."],
+ has_root: false,
+ is_absolute: false,
+ parent: Some(""),
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("a/b",
+ iter: ["a", "b"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some("b"),
+ file_stem: Some("b"),
+ extension: None,
+ file_prefix: Some("b")
+ );
+
+ t!("a//b",
+ iter: ["a", "b"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some("b"),
+ file_stem: Some("b"),
+ extension: None,
+ file_prefix: Some("b")
+ );
+
+ t!("a/./b",
+ iter: ["a", "b"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some("b"),
+ file_stem: Some("b"),
+ extension: None,
+ file_prefix: Some("b")
+ );
+
+ t!("a/b/c",
+ iter: ["a", "b", "c"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a/b"),
+ file_name: Some("c"),
+ file_stem: Some("c"),
+ extension: None,
+ file_prefix: Some("c")
+ );
+
+ t!("a\\b\\c",
+ iter: ["a", "b", "c"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a\\b"),
+ file_name: Some("c"),
+ file_stem: Some("c"),
+ extension: None,
+ file_prefix: Some("c")
+ );
+
+ t!("\\a",
+ iter: ["\\", "a"],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("\\"),
+ file_name: Some("a"),
+ file_stem: Some("a"),
+ extension: None,
+ file_prefix: Some("a")
+ );
+
+ t!("c:\\foo.txt",
+ iter: ["c:", "\\", "foo.txt"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("c:\\"),
+ file_name: Some("foo.txt"),
+ file_stem: Some("foo"),
+ extension: Some("txt"),
+ file_prefix: Some("foo")
+ );
+
+ t!("\\\\server\\share\\foo.txt",
+ iter: ["\\\\server\\share", "\\", "foo.txt"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\server\\share\\"),
+ file_name: Some("foo.txt"),
+ file_stem: Some("foo"),
+ extension: Some("txt"),
+ file_prefix: Some("foo")
+ );
+
+ t!("\\\\server\\share",
+ iter: ["\\\\server\\share", "\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\server",
+ iter: ["\\", "server"],
+ has_root: true,
+ is_absolute: false,
+ parent: Some("\\"),
+ file_name: Some("server"),
+ file_stem: Some("server"),
+ extension: None,
+ file_prefix: Some("server")
+ );
+
+ t!("\\\\?\\bar\\foo.txt",
+ iter: ["\\\\?\\bar", "\\", "foo.txt"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\bar\\"),
+ file_name: Some("foo.txt"),
+ file_stem: Some("foo"),
+ extension: Some("txt"),
+ file_prefix: Some("foo")
+ );
+
+ t!("\\\\?\\bar",
+ iter: ["\\\\?\\bar"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\",
+ iter: ["\\\\?\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\UNC\\server\\share\\foo.txt",
+ iter: ["\\\\?\\UNC\\server\\share", "\\", "foo.txt"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\UNC\\server\\share\\"),
+ file_name: Some("foo.txt"),
+ file_stem: Some("foo"),
+ extension: Some("txt"),
+ file_prefix: Some("foo")
+ );
+
+ t!("\\\\?\\UNC\\server",
+ iter: ["\\\\?\\UNC\\server"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\UNC\\",
+ iter: ["\\\\?\\UNC\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\C:\\foo.txt",
+ iter: ["\\\\?\\C:", "\\", "foo.txt"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\C:\\"),
+ file_name: Some("foo.txt"),
+ file_stem: Some("foo"),
+ extension: Some("txt"),
+ file_prefix: Some("foo")
+ );
+
+ t!("\\\\?\\C:\\",
+ iter: ["\\\\?\\C:", "\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\C:",
+ iter: ["\\\\?\\C:"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\foo/bar",
+ iter: ["\\\\?\\foo/bar"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\C:/foo/bar",
+ iter: ["\\\\?\\C:", "\\", "foo/bar"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\C:/"),
+ file_name: Some("foo/bar"),
+ file_stem: Some("foo/bar"),
+ extension: None,
+ file_prefix: Some("foo/bar")
+ );
+
+ t!("\\\\.\\foo\\bar",
+ iter: ["\\\\.\\foo", "\\", "bar"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\.\\foo\\"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("\\\\.\\foo",
+ iter: ["\\\\.\\foo", "\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\.\\foo/bar",
+ iter: ["\\\\.\\foo", "\\", "bar"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\.\\foo/"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
+ extension: None,
+ file_prefix: Some("bar")
+ );
+
+ t!("\\\\.\\foo\\bar/baz",
+ iter: ["\\\\.\\foo", "\\", "bar", "baz"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\.\\foo\\bar"),
+ file_name: Some("baz"),
+ file_stem: Some("baz"),
+ extension: None,
+ file_prefix: Some("baz")
+ );
+
+ t!("\\\\.\\",
+ iter: ["\\\\.\\", "\\"],
+ has_root: true,
+ is_absolute: true,
+ parent: None,
+ file_name: None,
+ file_stem: None,
+ extension: None,
+ file_prefix: None
+ );
+
+ t!("\\\\?\\a\\b\\",
+ iter: ["\\\\?\\a", "\\", "b"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\a\\"),
+ file_name: Some("b"),
+ file_stem: Some("b"),
+ extension: None,
+ file_prefix: Some("b")
+ );
+
+ t!("\\\\?\\C:\\foo.txt.zip",
+ iter: ["\\\\?\\C:", "\\", "foo.txt.zip"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\C:\\"),
+ file_name: Some("foo.txt.zip"),
+ file_stem: Some("foo.txt"),
+ extension: Some("zip"),
+ file_prefix: Some("foo")
+ );
+
+ t!("\\\\?\\C:\\.foo.txt.zip",
+ iter: ["\\\\?\\C:", "\\", ".foo.txt.zip"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\C:\\"),
+ file_name: Some(".foo.txt.zip"),
+ file_stem: Some(".foo.txt"),
+ extension: Some("zip"),
+ file_prefix: Some(".foo")
+ );
+
+ t!("\\\\?\\C:\\.foo",
+ iter: ["\\\\?\\C:", "\\", ".foo"],
+ has_root: true,
+ is_absolute: true,
+ parent: Some("\\\\?\\C:\\"),
+ file_name: Some(".foo"),
+ file_stem: Some(".foo"),
+ extension: None,
+ file_prefix: Some(".foo")
+ );
+
+ t!("a/.x.y.z",
+ iter: ["a", ".x.y.z"],
+ has_root: false,
+ is_absolute: false,
+ parent: Some("a"),
+ file_name: Some(".x.y.z"),
+ file_stem: Some(".x.y"),
+ extension: Some("z"),
+ file_prefix: Some(".x")
+ );
+}
+
+#[test]
+pub fn test_stem_ext() {
+ t!("foo",
+ file_stem: Some("foo"),
+ extension: None
+ );
+
+ t!("foo.",
+ file_stem: Some("foo"),
+ extension: Some("")
+ );
+
+ t!(".foo",
+ file_stem: Some(".foo"),
+ extension: None
+ );
+
+ t!("foo.txt",
+ file_stem: Some("foo"),
+ extension: Some("txt")
+ );
+
+ t!("foo.bar.txt",
+ file_stem: Some("foo.bar"),
+ extension: Some("txt")
+ );
+
+ t!("foo.bar.",
+ file_stem: Some("foo.bar"),
+ extension: Some("")
+ );
+
+ t!(".", file_stem: None, extension: None);
+
+ t!("..", file_stem: None, extension: None);
+
+ t!(".x.y.z", file_stem: Some(".x.y"), extension: Some("z"));
+
+ t!("..x.y.z", file_stem: Some("..x.y"), extension: Some("z"));
+
+ t!("", file_stem: None, extension: None);
+}
+
+#[test]
+pub fn test_prefix_ext() {
+ t!("foo",
+ file_prefix: Some("foo"),
+ extension: None
+ );
+
+ t!("foo.",
+ file_prefix: Some("foo"),
+ extension: Some("")
+ );
+
+ t!(".foo",
+ file_prefix: Some(".foo"),
+ extension: None
+ );
+
+ t!("foo.txt",
+ file_prefix: Some("foo"),
+ extension: Some("txt")
+ );
+
+ t!("foo.bar.txt",
+ file_prefix: Some("foo"),
+ extension: Some("txt")
+ );
+
+ t!("foo.bar.",
+ file_prefix: Some("foo"),
+ extension: Some("")
+ );
+
+ t!(".", file_prefix: None, extension: None);
+
+ t!("..", file_prefix: None, extension: None);
+
+ t!(".x.y.z", file_prefix: Some(".x"), extension: Some("z"));
+
+ t!("..x.y.z", file_prefix: Some("."), extension: Some("z"));
+
+ t!("", file_prefix: None, extension: None);
+}
+
+#[test]
+pub fn test_push() {
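+    // `tp!(path, push, expected)` pushes `push` onto a `PathBuf` built from `path`
+    // and checks the resulting string.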
+ macro_rules! tp (
+ ($path:expr, $push:expr, $expected:expr) => ( {
+ let mut actual = PathBuf::from($path);
+ actual.push($push);
+ assert!(actual.to_str() == Some($expected),
+ "pushing {:?} onto {:?}: Expected {:?}, got {:?}",
+ $push, $path, $expected, actual.to_str().unwrap());
+ });
+ );
+
+ if cfg!(unix) || cfg!(all(target_env = "sgx", target_vendor = "fortanix")) {
+ tp!("", "foo", "foo");
+ tp!("foo", "bar", "foo/bar");
+ tp!("foo/", "bar", "foo/bar");
+ tp!("foo//", "bar", "foo//bar");
+ tp!("foo/.", "bar", "foo/./bar");
+ tp!("foo./.", "bar", "foo././bar");
+ tp!("foo", "", "foo/");
+ tp!("foo", ".", "foo/.");
+ tp!("foo", "..", "foo/..");
+ tp!("foo", "/", "/");
+ tp!("/foo/bar", "/", "/");
+ tp!("/foo/bar", "/baz", "/baz");
+ tp!("/foo/bar", "./baz", "/foo/bar/./baz");
+ } else {
+ tp!("", "foo", "foo");
+ tp!("foo", "bar", r"foo\bar");
+ tp!("foo/", "bar", r"foo/bar");
+ tp!(r"foo\", "bar", r"foo\bar");
+ tp!("foo//", "bar", r"foo//bar");
+ tp!(r"foo\\", "bar", r"foo\\bar");
+ tp!("foo/.", "bar", r"foo/.\bar");
+ tp!("foo./.", "bar", r"foo./.\bar");
+ tp!(r"foo\.", "bar", r"foo\.\bar");
+ tp!(r"foo.\.", "bar", r"foo.\.\bar");
+ tp!("foo", "", "foo\\");
+ tp!("foo", ".", r"foo\.");
+ tp!("foo", "..", r"foo\..");
+ tp!("foo", "/", "/");
+ tp!("foo", r"\", r"\");
+ tp!("/foo/bar", "/", "/");
+ tp!(r"\foo\bar", r"\", r"\");
+ tp!("/foo/bar", "/baz", "/baz");
+ tp!("/foo/bar", r"\baz", r"\baz");
+ tp!("/foo/bar", "./baz", r"/foo/bar\./baz");
+ tp!("/foo/bar", r".\baz", r"/foo/bar\.\baz");
+
+ tp!("c:\\", "windows", "c:\\windows");
+ tp!("c:", "windows", "c:windows");
+
+ tp!("a\\b\\c", "d", "a\\b\\c\\d");
+ tp!("\\a\\b\\c", "d", "\\a\\b\\c\\d");
+ tp!("a\\b", "c\\d", "a\\b\\c\\d");
+ tp!("a\\b", "\\c\\d", "\\c\\d");
+ tp!("a\\b", ".", "a\\b\\.");
+ tp!("a\\b", "..\\c", "a\\b\\..\\c");
+ tp!("a\\b", "C:a.txt", "C:a.txt");
+ tp!("a\\b", "C:\\a.txt", "C:\\a.txt");
+ tp!("C:\\a", "C:\\b.txt", "C:\\b.txt");
+ tp!("C:\\a\\b\\c", "C:d", "C:d");
+ tp!("C:a\\b\\c", "C:d", "C:d");
+ tp!("C:", r"a\b\c", r"C:a\b\c");
+ tp!("C:", r"..\a", r"C:..\a");
+ tp!("\\\\server\\share\\foo", "bar", "\\\\server\\share\\foo\\bar");
+ tp!("\\\\server\\share\\foo", "C:baz", "C:baz");
+ tp!("\\\\?\\C:\\a\\b", "C:c\\d", "C:c\\d");
+ tp!("\\\\?\\C:a\\b", "C:c\\d", "C:c\\d");
+ tp!("\\\\?\\C:\\a\\b", "C:\\c\\d", "C:\\c\\d");
+ tp!("\\\\?\\foo\\bar", "baz", "\\\\?\\foo\\bar\\baz");
+ tp!("\\\\?\\UNC\\server\\share\\foo", "bar", "\\\\?\\UNC\\server\\share\\foo\\bar");
+ tp!("\\\\?\\UNC\\server\\share", "C:\\a", "C:\\a");
+ tp!("\\\\?\\UNC\\server\\share", "C:a", "C:a");
+
+ // Note: modified from old path API
+ tp!("\\\\?\\UNC\\server", "foo", "\\\\?\\UNC\\server\\foo");
+
+ tp!("C:\\a", "\\\\?\\UNC\\server\\share", "\\\\?\\UNC\\server\\share");
+ tp!("\\\\.\\foo\\bar", "baz", "\\\\.\\foo\\bar\\baz");
+ tp!("\\\\.\\foo\\bar", "C:a", "C:a");
+ // again, not sure about the following, but I'm assuming \\.\ should be verbatim
+ tp!("\\\\.\\foo", "..\\bar", "\\\\.\\foo\\..\\bar");
+
+ tp!("\\\\?\\C:", "foo", "\\\\?\\C:\\foo"); // this is a weird one
+
+ tp!(r"\\?\C:\bar", "../foo", r"\\?\C:\foo");
+ tp!(r"\\?\C:\bar", "../../foo", r"\\?\C:\foo");
+ tp!(r"\\?\C:\", "../foo", r"\\?\C:\foo");
+ tp!(r"\\?\C:", r"D:\foo/./", r"D:\foo/./");
+ tp!(r"\\?\C:", r"\\?\D:\foo\.\", r"\\?\D:\foo\.\");
+ tp!(r"\\?\A:\x\y", "/foo", r"\\?\A:\foo");
+ tp!(r"\\?\A:", r"..\foo\.", r"\\?\A:\foo");
+ tp!(r"\\?\A:\x\y", r".\foo\.", r"\\?\A:\x\y\foo");
+ tp!(r"\\?\A:\x\y", r"", r"\\?\A:\x\y\");
+ }
+}
+
+#[test]
+pub fn test_pop() {
+ macro_rules! tp (
+ ($path:expr, $expected:expr, $output:expr) => ( {
+ let mut actual = PathBuf::from($path);
+ let output = actual.pop();
+ assert!(actual.to_str() == Some($expected) && output == $output,
+ "popping from {:?}: Expected {:?}/{:?}, got {:?}/{:?}",
+ $path, $expected, $output,
+ actual.to_str().unwrap(), output);
+ });
+ );
+
+ tp!("", "", false);
+ tp!("/", "/", false);
+ tp!("foo", "", true);
+ tp!(".", "", true);
+ tp!("/foo", "/", true);
+ tp!("/foo/bar", "/foo", true);
+ tp!("foo/bar", "foo", true);
+ tp!("foo/.", "", true);
+ tp!("foo//bar", "foo", true);
+
+ if cfg!(windows) {
+ tp!("a\\b\\c", "a\\b", true);
+ tp!("\\a", "\\", true);
+ tp!("\\", "\\", false);
+
+ tp!("C:\\a\\b", "C:\\a", true);
+ tp!("C:\\a", "C:\\", true);
+ tp!("C:\\", "C:\\", false);
+ tp!("C:a\\b", "C:a", true);
+ tp!("C:a", "C:", true);
+ tp!("C:", "C:", false);
+ tp!("\\\\server\\share\\a\\b", "\\\\server\\share\\a", true);
+ tp!("\\\\server\\share\\a", "\\\\server\\share\\", true);
+ tp!("\\\\server\\share", "\\\\server\\share", false);
+ tp!("\\\\?\\a\\b\\c", "\\\\?\\a\\b", true);
+ tp!("\\\\?\\a\\b", "\\\\?\\a\\", true);
+ tp!("\\\\?\\a", "\\\\?\\a", false);
+ tp!("\\\\?\\C:\\a\\b", "\\\\?\\C:\\a", true);
+ tp!("\\\\?\\C:\\a", "\\\\?\\C:\\", true);
+ tp!("\\\\?\\C:\\", "\\\\?\\C:\\", false);
+ tp!("\\\\?\\UNC\\server\\share\\a\\b", "\\\\?\\UNC\\server\\share\\a", true);
+ tp!("\\\\?\\UNC\\server\\share\\a", "\\\\?\\UNC\\server\\share\\", true);
+ tp!("\\\\?\\UNC\\server\\share", "\\\\?\\UNC\\server\\share", false);
+ tp!("\\\\.\\a\\b\\c", "\\\\.\\a\\b", true);
+ tp!("\\\\.\\a\\b", "\\\\.\\a\\", true);
+ tp!("\\\\.\\a", "\\\\.\\a", false);
+
+ tp!("\\\\?\\a\\b\\", "\\\\?\\a\\", true);
+ }
+}
+
+#[test]
+pub fn test_set_file_name() {
+ macro_rules! tfn (
+ ($path:expr, $file:expr, $expected:expr) => ( {
+ let mut p = PathBuf::from($path);
+ p.set_file_name($file);
+ assert!(p.to_str() == Some($expected),
+ "setting file name of {:?} to {:?}: Expected {:?}, got {:?}",
+ $path, $file, $expected,
+ p.to_str().unwrap());
+ });
+ );
+
+ tfn!("foo", "foo", "foo");
+ tfn!("foo", "bar", "bar");
+ tfn!("foo", "", "");
+ tfn!("", "foo", "foo");
+ if cfg!(unix) || cfg!(all(target_env = "sgx", target_vendor = "fortanix")) {
+ tfn!(".", "foo", "./foo");
+ tfn!("foo/", "bar", "bar");
+ tfn!("foo/.", "bar", "bar");
+ tfn!("..", "foo", "../foo");
+ tfn!("foo/..", "bar", "foo/../bar");
+ tfn!("/", "foo", "/foo");
+ } else {
+ tfn!(".", "foo", r".\foo");
+ tfn!(r"foo\", "bar", r"bar");
+ tfn!(r"foo\.", "bar", r"bar");
+ tfn!("..", "foo", r"..\foo");
+ tfn!(r"foo\..", "bar", r"foo\..\bar");
+ tfn!(r"\", "foo", r"\foo");
+ }
+}
+
+#[test]
+pub fn test_set_extension() {
+ macro_rules! tfe (
+ ($path:expr, $ext:expr, $expected:expr, $output:expr) => ( {
+ let mut p = PathBuf::from($path);
+ let output = p.set_extension($ext);
+ assert!(p.to_str() == Some($expected) && output == $output,
+ "setting extension of {:?} to {:?}: Expected {:?}/{:?}, got {:?}/{:?}",
+ $path, $ext, $expected, $output,
+ p.to_str().unwrap(), output);
+ });
+ );
+
+ tfe!("foo", "txt", "foo.txt", true);
+ tfe!("foo.bar", "txt", "foo.txt", true);
+ tfe!("foo.bar.baz", "txt", "foo.bar.txt", true);
+ tfe!(".test", "txt", ".test.txt", true);
+ tfe!("foo.txt", "", "foo", true);
+ tfe!("foo", "", "foo", true);
+ tfe!("", "foo", "", false);
+ tfe!(".", "foo", ".", false);
+ tfe!("foo/", "bar", "foo.bar", true);
+ tfe!("foo/.", "bar", "foo.bar", true);
+ tfe!("..", "foo", "..", false);
+ tfe!("foo/..", "bar", "foo/..", false);
+ tfe!("/", "foo", "/", false);
+}
+
+#[test]
+fn test_eq_receivers() {
+ use crate::borrow::Cow;
+
+ let borrowed: &Path = Path::new("foo/bar");
+ let mut owned: PathBuf = PathBuf::new();
+ owned.push("foo");
+ owned.push("bar");
+ let borrowed_cow: Cow<'_, Path> = borrowed.into();
+ let owned_cow: Cow<'_, Path> = owned.clone().into();
+
+ macro_rules! t {
+ ($($current:expr),+) => {
+ $(
+ assert_eq!($current, borrowed);
+ assert_eq!($current, owned);
+ assert_eq!($current, borrowed_cow);
+ assert_eq!($current, owned_cow);
+ )+
+ }
+ }
+
+ t!(borrowed, owned, borrowed_cow, owned_cow);
+}
+
+#[test]
+pub fn test_compare() {
+ use crate::collections::hash_map::DefaultHasher;
+ use crate::hash::{Hash, Hasher};
+
+ fn hash<T: Hash>(t: T) -> u64 {
+ let mut s = DefaultHasher::new();
+ t.hash(&mut s);
+ s.finish()
+ }
+
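+    // `tc!` checks two paths against each other: equality (and hash agreement),
+    // starts_with, ends_with, and strip_prefix.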
+ macro_rules! tc (
+ ($path1:expr, $path2:expr, eq: $eq:expr,
+ starts_with: $starts_with:expr, ends_with: $ends_with:expr,
+ relative_from: $relative_from:expr) => ({
+ let path1 = Path::new($path1);
+ let path2 = Path::new($path2);
+
+ let eq = path1 == path2;
+ assert!(eq == $eq, "{:?} == {:?}, expected {:?}, got {:?}",
+ $path1, $path2, $eq, eq);
+ assert!($eq == (hash(path1) == hash(path2)),
+ "{:?} == {:?}, expected {:?}, got {} and {}",
+ $path1, $path2, $eq, hash(path1), hash(path2));
+
+ let starts_with = path1.starts_with(path2);
+ assert!(starts_with == $starts_with,
+ "{:?}.starts_with({:?}), expected {:?}, got {:?}", $path1, $path2,
+ $starts_with, starts_with);
+
+ let ends_with = path1.ends_with(path2);
+ assert!(ends_with == $ends_with,
+ "{:?}.ends_with({:?}), expected {:?}, got {:?}", $path1, $path2,
+ $ends_with, ends_with);
+
+ let relative_from = path1.strip_prefix(path2)
+ .map(|p| p.to_str().unwrap())
+ .ok();
+ let exp: Option<&str> = $relative_from;
+ assert!(relative_from == exp,
+ "{:?}.strip_prefix({:?}), expected {:?}, got {:?}",
+ $path1, $path2, exp, relative_from);
+ });
+ );
+
+ tc!("", "",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!("foo", "",
+ eq: false,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("foo")
+ );
+
+ tc!("", "foo",
+ eq: false,
+ starts_with: false,
+ ends_with: false,
+ relative_from: None
+ );
+
+ tc!("foo", "foo",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!("foo/", "foo",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!("foo/.", "foo",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!("foo/./bar", "foo/bar",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!("foo/bar", "foo",
+ eq: false,
+ starts_with: true,
+ ends_with: false,
+ relative_from: Some("bar")
+ );
+
+ tc!("foo/bar/baz", "foo/bar",
+ eq: false,
+ starts_with: true,
+ ends_with: false,
+ relative_from: Some("baz")
+ );
+
+ tc!("foo/bar", "foo/bar/baz",
+ eq: false,
+ starts_with: false,
+ ends_with: false,
+ relative_from: None
+ );
+
+ tc!("./foo/bar/", ".",
+ eq: false,
+ starts_with: true,
+ ends_with: false,
+ relative_from: Some("foo/bar")
+ );
+
+ if cfg!(windows) {
+ tc!(r"C:\src\rust\cargo-test\test\Cargo.toml",
+ r"c:\src\rust\cargo-test\test",
+ eq: false,
+ starts_with: true,
+ ends_with: false,
+ relative_from: Some("Cargo.toml")
+ );
+
+ tc!(r"c:\foo", r"C:\foo",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!(r"C:\foo\.\bar.txt", r"C:\foo\bar.txt",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!(r"C:\foo\.", r"C:\foo",
+ eq: true,
+ starts_with: true,
+ ends_with: true,
+ relative_from: Some("")
+ );
+
+ tc!(r"\\?\C:\foo\.\bar.txt", r"\\?\C:\foo\bar.txt",
+ eq: false,
+ starts_with: false,
+ ends_with: false,
+ relative_from: None
+ );
+ }
+}
+
+#[test]
+fn test_components_debug() {
+ let path = Path::new("/tmp");
+
+ let mut components = path.components();
+
+ let expected = "Components([RootDir, Normal(\"tmp\")])";
+ let actual = format!("{components:?}");
+ assert_eq!(expected, actual);
+
+ let _ = components.next().unwrap();
+ let expected = "Components([Normal(\"tmp\")])";
+ let actual = format!("{components:?}");
+ assert_eq!(expected, actual);
+
+ let _ = components.next().unwrap();
+ let expected = "Components([])";
+ let actual = format!("{components:?}");
+ assert_eq!(expected, actual);
+}
+
+#[cfg(unix)]
+#[test]
+fn test_iter_debug() {
+ let path = Path::new("/tmp");
+
+ let mut iter = path.iter();
+
+ let expected = "Iter([\"/\", \"tmp\"])";
+ let actual = format!("{iter:?}");
+ assert_eq!(expected, actual);
+
+ let _ = iter.next().unwrap();
+ let expected = "Iter([\"tmp\"])";
+ let actual = format!("{iter:?}");
+ assert_eq!(expected, actual);
+
+ let _ = iter.next().unwrap();
+ let expected = "Iter([])";
+ let actual = format!("{iter:?}");
+ assert_eq!(expected, actual);
+}
+
+#[test]
+fn into_boxed() {
+ let orig: &str = "some/sort/of/path";
+ let path = Path::new(orig);
+ let boxed: Box<Path> = Box::from(path);
+ let path_buf = path.to_owned().into_boxed_path().into_path_buf();
+ assert_eq!(path, &*boxed);
+ assert_eq!(&*boxed, &*path_buf);
+ assert_eq!(&*path_buf, path);
+}
+
+#[test]
+fn test_clone_into() {
+ let mut path_buf = PathBuf::from("supercalifragilisticexpialidocious");
+ let path = Path::new("short");
+ path.clone_into(&mut path_buf);
+ assert_eq!(path, path_buf);
+ assert!(path_buf.into_os_string().capacity() >= 15);
+}
+
+#[test]
+fn display_format_flags() {
+ assert_eq!(format!("a{:#<5}b", Path::new("").display()), "a#####b");
+ assert_eq!(format!("a{:#<5}b", Path::new("a").display()), "aa####b");
+}
+
+#[test]
+fn into_rc() {
+ let orig = "hello/world";
+ let path = Path::new(orig);
+ let rc: Rc<Path> = Rc::from(path);
+ let arc: Arc<Path> = Arc::from(path);
+
+ assert_eq!(&*rc, path);
+ assert_eq!(&*arc, path);
+
+ let rc2: Rc<Path> = Rc::from(path.to_owned());
+ let arc2: Arc<Path> = Arc::from(path.to_owned());
+
+ assert_eq!(&*rc2, path);
+ assert_eq!(&*arc2, path);
+}
+
+#[test]
+fn test_ord() {
+ macro_rules! ord(
+ ($ord:ident, $left:expr, $right:expr) => ( {
+ use core::cmp::Ordering;
+
+ let left = Path::new($left);
+ let right = Path::new($right);
+ assert_eq!(left.cmp(&right), Ordering::$ord);
+            if core::cmp::Ordering::$ord == Ordering::Equal {
+ assert_eq!(left, right);
+
+ let mut hasher = DefaultHasher::new();
+ left.hash(&mut hasher);
+ let left_hash = hasher.finish();
+ hasher = DefaultHasher::new();
+ right.hash(&mut hasher);
+ let right_hash = hasher.finish();
+
+ assert_eq!(left_hash, right_hash, "hashes for {:?} and {:?} must match", left, right);
+ } else {
+ assert_ne!(left, right);
+ }
+ });
+ );
+
+ ord!(Less, "1", "2");
+ ord!(Less, "/foo/bar", "/foo./bar");
+ ord!(Less, "foo/bar", "foo/bar.");
+ ord!(Equal, "foo/./bar", "foo/bar/");
+ ord!(Equal, "foo/bar", "foo/bar/");
+ ord!(Equal, "foo/bar", "foo/bar/.");
+ ord!(Equal, "foo/bar", "foo/bar//");
+}
+
+#[test]
+#[cfg(unix)]
+fn test_unix_absolute() {
+ use crate::path::absolute;
+
+ assert!(absolute("").is_err());
+
+ let relative = "a/b";
+ let mut expected = crate::env::current_dir().unwrap();
+ expected.push(relative);
+ assert_eq!(absolute(relative).unwrap().as_os_str(), expected.as_os_str());
+
+ // Test how components are collected.
+ assert_eq!(absolute("/a/b/c").unwrap().as_os_str(), Path::new("/a/b/c").as_os_str());
+ assert_eq!(absolute("/a//b/c").unwrap().as_os_str(), Path::new("/a/b/c").as_os_str());
+ assert_eq!(absolute("//a/b/c").unwrap().as_os_str(), Path::new("//a/b/c").as_os_str());
+ assert_eq!(absolute("///a/b/c").unwrap().as_os_str(), Path::new("/a/b/c").as_os_str());
+ assert_eq!(absolute("/a/b/c/").unwrap().as_os_str(), Path::new("/a/b/c/").as_os_str());
+ assert_eq!(
+ absolute("/a/./b/../c/.././..").unwrap().as_os_str(),
+ Path::new("/a/b/../c/../..").as_os_str()
+ );
+
+ // Test leading `.` and `..` components
+ let curdir = crate::env::current_dir().unwrap();
+ assert_eq!(absolute("./a").unwrap().as_os_str(), curdir.join("a").as_os_str());
+ assert_eq!(absolute("../a").unwrap().as_os_str(), curdir.join("../a").as_os_str()); // return /pwd/../a
+}
+
+#[test]
+#[cfg(windows)]
+fn test_windows_absolute() {
+ use crate::path::absolute;
+ // An empty path is an error.
+ assert!(absolute("").is_err());
+
+ let relative = r"a\b";
+ let mut expected = crate::env::current_dir().unwrap();
+ expected.push(relative);
+ assert_eq!(absolute(relative).unwrap().as_os_str(), expected.as_os_str());
+
+ macro_rules! unchanged(
+ ($path:expr) => {
+ assert_eq!(absolute($path).unwrap().as_os_str(), Path::new($path).as_os_str());
+ }
+ );
+
+ unchanged!(r"C:\path\to\file");
+ unchanged!(r"C:\path\to\file\");
+ unchanged!(r"\\server\share\to\file");
+ unchanged!(r"\\server.\share.\to\file");
+ unchanged!(r"\\.\PIPE\name");
+ unchanged!(r"\\.\C:\path\to\COM1");
+ unchanged!(r"\\?\C:\path\to\file");
+ unchanged!(r"\\?\UNC\server\share\to\file");
+ unchanged!(r"\\?\PIPE\name");
+ // Verbatim paths are always unchanged, no matter what.
+ unchanged!(r"\\?\path.\to/file..");
+
+ assert_eq!(
+ absolute(r"C:\path..\to.\file.").unwrap().as_os_str(),
+ Path::new(r"C:\path..\to\file").as_os_str()
+ );
+ assert_eq!(absolute(r"COM1").unwrap().as_os_str(), Path::new(r"\\.\COM1").as_os_str());
+}
+
+#[bench]
+fn bench_path_cmp_fast_path_buf_sort(b: &mut test::Bencher) {
+ let prefix = "my/home";
+ let mut paths: Vec<_> =
+ (0..1000).map(|num| PathBuf::from(prefix).join(format!("file {num}.rs"))).collect();
+
+ paths.sort();
+
+ b.iter(|| {
+ black_box(paths.as_mut_slice()).sort_unstable();
+ });
+}
+
+#[bench]
+fn bench_path_cmp_fast_path_long(b: &mut test::Bencher) {
+ let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
+ let paths: Vec<_> =
+ (0..1000).map(|num| PathBuf::from(prefix).join(format!("file {num}.rs"))).collect();
+
+ let mut set = BTreeSet::new();
+
+ paths.iter().for_each(|p| {
+ set.insert(p.as_path());
+ });
+
+ b.iter(|| {
+ set.remove(paths[500].as_path());
+ set.insert(paths[500].as_path());
+ });
+}
+
+#[bench]
+fn bench_path_cmp_fast_path_short(b: &mut test::Bencher) {
+ let prefix = "my/home";
+ let paths: Vec<_> =
+ (0..1000).map(|num| PathBuf::from(prefix).join(format!("file {num}.rs"))).collect();
+
+ let mut set = BTreeSet::new();
+
+ paths.iter().for_each(|p| {
+ set.insert(p.as_path());
+ });
+
+ b.iter(|| {
+ set.remove(paths[500].as_path());
+ set.insert(paths[500].as_path());
+ });
+}
+
+#[bench]
+fn bench_path_hashset(b: &mut test::Bencher) {
+ let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
+ let paths: Vec<_> =
+ (0..1000).map(|num| PathBuf::from(prefix).join(format!("file {num}.rs"))).collect();
+
+ let mut set = HashSet::new();
+
+ paths.iter().for_each(|p| {
+ set.insert(p.as_path());
+ });
+
+ b.iter(|| {
+ set.remove(paths[500].as_path());
+ set.insert(black_box(paths[500].as_path()))
+ });
+}
+
+#[bench]
+fn bench_path_hashset_miss(b: &mut test::Bencher) {
+ let prefix = "/my/home/is/my/castle/and/my/castle/has/a/rusty/workbench/";
+ let paths: Vec<_> =
+ (0..1000).map(|num| PathBuf::from(prefix).join(format!("file {num}.rs"))).collect();
+
+ let mut set = HashSet::new();
+
+ paths.iter().for_each(|p| {
+ set.insert(p.as_path());
+ });
+
+ let probe = PathBuf::from(prefix).join("other");
+
+ b.iter(|| set.remove(black_box(probe.as_path())));
+}
+
+#[bench]
+fn bench_hash_path_short(b: &mut test::Bencher) {
+ let mut hasher = DefaultHasher::new();
+ let path = Path::new("explorer.exe");
+
+ b.iter(|| black_box(path).hash(&mut hasher));
+
+ black_box(hasher.finish());
+}
+
+#[bench]
+fn bench_hash_path_long(b: &mut test::Bencher) {
+ let mut hasher = DefaultHasher::new();
+ let path =
+ Path::new("/aaaaa/aaaaaa/./../aaaaaaaa/bbbbbbbbbbbbb/ccccccccccc/ddddddddd/eeeeeee.fff");
+
+ b.iter(|| black_box(path).hash(&mut hasher));
+
+ black_box(hasher.finish());
+}
diff --git a/library/std/src/prelude/mod.rs b/library/std/src/prelude/mod.rs
new file mode 100644
index 000000000..c314bbbb6
--- /dev/null
+++ b/library/std/src/prelude/mod.rs
@@ -0,0 +1,148 @@
+//! # The Rust Prelude
+//!
+//! Rust comes with a variety of things in its standard library. However, if
+//! you had to manually import every single thing that you used, it would be
+//! very verbose. But importing a lot of things that a program never uses isn't
+//! good either. A balance needs to be struck.
+//!
+//! The *prelude* is the list of things that Rust automatically imports into
+//! every Rust program. It's kept as small as possible, and is focused on
+//! things, particularly traits, which are used in almost every single Rust
+//! program.
+//!
+//! # Other preludes
+//!
+//! Preludes can be seen as a pattern to make using multiple types more
+//! convenient. As such, you'll find other preludes in the standard library,
+//! such as [`std::io::prelude`]. Various libraries in the Rust ecosystem may
+//! also define their own preludes.
+//!
+//! [`std::io::prelude`]: crate::io::prelude
+//!
+//! The difference between 'the prelude' and these other preludes is that they
+//! are not automatically `use`'d, and must be imported manually. This is still
+//! easier than importing all of their constituent components.
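+//!
+//! For example, the I/O prelude brings the `Read` and `Write` traits (among
+//! others) into scope with an ordinary `use`:
+//!
+//! ```
+//! use std::io::prelude::*;
+//!
+//! // `Read::take` and `Read::read_to_end` come from the prelude's `Read` trait.
+//! let mut buf = Vec::new();
+//! std::io::repeat(b'x').take(3).read_to_end(&mut buf).unwrap();
+//! assert_eq!(buf, vec![b'x'; 3]);
+//! ```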
+//!
+//! # Prelude contents
+//!
+//! The first version of the prelude is used in Rust 2015 and Rust 2018,
+//! and lives in [`std::prelude::v1`].
+//! [`std::prelude::rust_2015`] and [`std::prelude::rust_2018`] re-export this prelude.
+//! It re-exports the following:
+//!
+//! * <code>[std::marker]::{[Copy], [Send], [Sized], [Sync], [Unpin]}</code>,
+//! marker traits that indicate fundamental properties of types.
+//! * <code>[std::ops]::{[Drop], [Fn], [FnMut], [FnOnce]}</code>, various
+//! operations for both destructors and overloading `()`.
+//! * <code>[std::mem]::[drop][mem::drop]</code>, a convenience function for explicitly
+//! dropping a value.
+//! * <code>[std::boxed]::[Box]</code>, a way to allocate values on the heap.
+//! * <code>[std::borrow]::[ToOwned]</code>, the conversion trait that defines
+//! [`to_owned`], the generic method for creating an owned type from a
+//! borrowed type.
+//! * <code>[std::clone]::[Clone]</code>, the ubiquitous trait that defines
+//! [`clone`][Clone::clone], the method for producing a copy of a value.
+//! * <code>[std::cmp]::{[PartialEq], [PartialOrd], [Eq], [Ord]}</code>, the
+//! comparison traits, which implement the comparison operators and are often
+//! seen in trait bounds.
+//! * <code>[std::convert]::{[AsRef], [AsMut], [Into], [From]}</code>, generic
+//! conversions, used by savvy API authors to create overloaded methods.
+//! * <code>[std::default]::[Default]</code>, types that have default values.
+//! * <code>[std::iter]::{[Iterator], [Extend], [IntoIterator], [DoubleEndedIterator], [ExactSizeIterator]}</code>,
+//! iterators of various
+//! kinds.
+//! * <code>[std::option]::[Option]::{[self][Option], [Some], [None]}</code>, a
+//! type which expresses the presence or absence of a value. This type is so
+//! commonly used, its variants are also exported.
+//! * <code>[std::result]::[Result]::{[self][Result], [Ok], [Err]}</code>, a type
+//! for functions that may succeed or fail. Like [`Option`], its variants are
+//! exported as well.
+//! * <code>[std::string]::{[String], [ToString]}</code>, heap-allocated strings.
+//! * <code>[std::vec]::[Vec]</code>, a growable, heap-allocated vector.
+//!
+//! The prelude used in Rust 2021, [`std::prelude::rust_2021`], includes all of the above,
+//! and in addition re-exports:
+//!
+//! * <code>[std::convert]::{[TryFrom], [TryInto]}</code>,
+//! * <code>[std::iter]::[FromIterator]</code>.
+//!
+//! [mem::drop]: crate::mem::drop
+//! [std::borrow]: crate::borrow
+//! [std::boxed]: crate::boxed
+//! [std::clone]: crate::clone
+//! [std::cmp]: crate::cmp
+//! [std::convert]: crate::convert
+//! [std::default]: crate::default
+//! [std::iter]: crate::iter
+//! [std::marker]: crate::marker
+//! [std::mem]: crate::mem
+//! [std::ops]: crate::ops
+//! [std::option]: crate::option
+//! [`std::prelude::v1`]: v1
+//! [`std::prelude::rust_2015`]: rust_2015
+//! [`std::prelude::rust_2018`]: rust_2018
+//! [`std::prelude::rust_2021`]: rust_2021
+//! [std::result]: crate::result
+//! [std::slice]: crate::slice
+//! [std::string]: crate::string
+//! [std::vec]: mod@crate::vec
+//! [TryFrom]: crate::convert::TryFrom
+//! [TryInto]: crate::convert::TryInto
+//! [FromIterator]: crate::iter::FromIterator
+//! [`to_owned`]: crate::borrow::ToOwned::to_owned
+//! [book-closures]: ../../book/ch13-01-closures.html
+//! [book-dtor]: ../../book/ch15-03-drop.html
+//! [book-enums]: ../../book/ch06-01-defining-an-enum.html
+//! [book-iter]: ../../book/ch13-02-iterators.html
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+pub mod v1;
+
+/// The 2015 version of the prelude of The Rust Standard Library.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "prelude_2015", since = "1.55.0")]
+pub mod rust_2015 {
+ #[stable(feature = "prelude_2015", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use super::v1::*;
+}
+
+/// The 2018 version of the prelude of The Rust Standard Library.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "prelude_2018", since = "1.55.0")]
+pub mod rust_2018 {
+ #[stable(feature = "prelude_2018", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use super::v1::*;
+}
+
+/// The 2021 version of the prelude of The Rust Standard Library.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "prelude_2021", since = "1.55.0")]
+pub mod rust_2021 {
+ #[stable(feature = "prelude_2021", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use super::v1::*;
+
+ #[stable(feature = "prelude_2021", since = "1.55.0")]
+ #[doc(no_inline)]
+ pub use core::prelude::rust_2021::*;
+}
+
+/// The 2024 version of the prelude of The Rust Standard Library.
+///
+/// See the [module-level documentation](self) for more.
+#[unstable(feature = "prelude_2024", issue = "none")]
+pub mod rust_2024 {
+ #[unstable(feature = "prelude_2024", issue = "none")]
+ #[doc(no_inline)]
+ pub use super::v1::*;
+
+ #[unstable(feature = "prelude_2024", issue = "none")]
+ #[doc(no_inline)]
+ pub use core::prelude::rust_2024::*;
+}
diff --git a/library/std/src/prelude/v1.rs b/library/std/src/prelude/v1.rs
new file mode 100644
index 000000000..0226c4d7a
--- /dev/null
+++ b/library/std/src/prelude/v1.rs
@@ -0,0 +1,97 @@
+//! The first version of the prelude of The Rust Standard Library.
+//!
+//! See the [module-level documentation](super) for more.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Re-exported core operators
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::marker::{Send, Sized, Sync, Unpin};
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::ops::{Drop, Fn, FnMut, FnOnce};
+
+// Re-exported functions
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::mem::drop;
+
+// Re-exported types and traits
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::convert::{AsMut, AsRef, From, Into};
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::iter::{DoubleEndedIterator, ExactSizeIterator};
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::iter::{Extend, IntoIterator, Iterator};
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::option::Option::{self, None, Some};
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::result::Result::{self, Err, Ok};
+
+// Re-exported built-in macros
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow(deprecated)]
+#[doc(no_inline)]
+pub use core::prelude::v1::{
+ assert, cfg, column, compile_error, concat, concat_idents, env, file, format_args,
+ format_args_nl, include, include_bytes, include_str, line, log_syntax, module_path, option_env,
+ stringify, trace_macros, Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd,
+};
+
+#[unstable(
+ feature = "concat_bytes",
+ issue = "87555",
+ reason = "`concat_bytes` is not stable enough for use and is subject to change"
+)]
+#[doc(no_inline)]
+pub use core::prelude::v1::concat_bytes;
+
+// Do not `doc(inline)` these `doc(hidden)` items.
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow(deprecated)]
+pub use core::prelude::v1::{RustcDecodable, RustcEncodable};
+
+// Do not `doc(no_inline)` so that they become doc items on their own
+// (no public module for them to be re-exported from).
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+pub use core::prelude::v1::{bench, derive, global_allocator, test, test_case};
+
+// Do not `doc(no_inline)` either.
+#[unstable(
+ feature = "cfg_accessible",
+ issue = "64797",
+ reason = "`cfg_accessible` is not fully implemented"
+)]
+pub use core::prelude::v1::cfg_accessible;
+
+// Do not `doc(no_inline)` either.
+#[unstable(
+ feature = "cfg_eval",
+ issue = "82679",
+ reason = "`cfg_eval` is a recently implemented feature"
+)]
+pub use core::prelude::v1::cfg_eval;
+
+// The file so far is equivalent to src/libcore/prelude/v1.rs,
+// and below to src/liballoc/prelude.rs.
+// Those files are duplicated rather than using glob imports
+// because we want docs to show these re-exports as pointing to within `std`.
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::borrow::ToOwned;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::boxed::Box;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::string::{String, ToString};
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use crate::vec::Vec;
diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs
new file mode 100644
index 000000000..b8e546164
--- /dev/null
+++ b/library/std/src/primitive_docs.rs
@@ -0,0 +1,1508 @@
+// `library/{std,core}/src/primitive_docs.rs` should have the same contents.
+// These are different files so that relative links work properly without
+// having to have `CARGO_PKG_NAME` set, but conceptually they should always be the same.
+#[doc(primitive = "bool")]
+#[doc(alias = "true")]
+#[doc(alias = "false")]
+/// The boolean type.
+///
+/// A `bool` represents a value that can only be either [`true`] or [`false`]. If you cast
+/// a `bool` into an integer, [`true`] will be 1 and [`false`] will be 0.
+///
+/// # Basic usage
+///
+/// `bool` implements various traits, such as [`BitAnd`], [`BitOr`], [`Not`], etc.,
+/// which allow us to perform boolean operations using `&`, `|` and `!`.
+///
+/// [`if`] requires a `bool` value as its conditional. [`assert!`], which is an
+/// important macro in testing, checks whether an expression is [`true`] and panics
+/// if it isn't.
+///
+/// ```
+/// let bool_val = true & false | false;
+/// assert!(!bool_val);
+/// ```
+///
+/// [`true`]: ../std/keyword.true.html
+/// [`false`]: ../std/keyword.false.html
+/// [`BitAnd`]: ops::BitAnd
+/// [`BitOr`]: ops::BitOr
+/// [`Not`]: ops::Not
+/// [`if`]: ../std/keyword.if.html
+///
+/// # Examples
+///
+/// A trivial example of the usage of `bool`:
+///
+/// ```
+/// let praise_the_borrow_checker = true;
+///
+/// // using the `if` conditional
+/// if praise_the_borrow_checker {
+/// println!("oh, yeah!");
+/// } else {
+/// println!("what?!!");
+/// }
+///
+/// // ... or, a match pattern
+/// match praise_the_borrow_checker {
+/// true => println!("keep praising!"),
+/// false => println!("you should praise!"),
+/// }
+/// ```
+///
+/// Also, since `bool` implements the [`Copy`] trait, we don't
+/// have to worry about the move semantics (just like the integer and float primitives).
+///
+/// Now an example of `bool` cast to integer type:
+///
+/// ```
+/// assert_eq!(true as i32, 1);
+/// assert_eq!(false as i32, 0);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_bool {}
+
+#[doc(primitive = "never")]
+#[doc(alias = "!")]
+//
+/// The `!` type, also called "never".
+///
+/// `!` represents the type of computations which never resolve to any value at all. For example,
+/// the [`exit`] function `fn exit(code: i32) -> !` exits the process without ever returning, and
+/// so returns `!`.
+///
+/// `break`, `continue` and `return` expressions also have type `!`. For example we are allowed to
+/// write:
+///
+/// ```
+/// #![feature(never_type)]
+/// # fn foo() -> u32 {
+/// let x: ! = {
+/// return 123
+/// };
+/// # }
+/// ```
+///
+/// Although the `let` is pointless here, it illustrates the meaning of `!`. Since `x` is never
+/// assigned a value (because `return` returns from the entire function), `x` can be given type
+/// `!`. We could also replace `return 123` with a `panic!` or a never-ending `loop` and this code
+/// would still be valid.
+///
+/// A more realistic usage of `!` is in this code:
+///
+/// ```
+/// # fn get_a_number() -> Option<u32> { None }
+/// # loop {
+/// let num: u32 = match get_a_number() {
+/// Some(num) => num,
+/// None => break,
+/// };
+/// # }
+/// ```
+///
+/// Both match arms must produce values of type [`u32`], but since `break` never produces a value
+/// at all we know it can never produce a value which isn't a [`u32`]. This illustrates another
+/// behaviour of the `!` type - expressions with type `!` will coerce into any other type.
+///
+/// [`u32`]: prim@u32
+#[doc = concat!("[`exit`]: ", include_str!("../primitive_docs/process_exit.md"))]
+///
+/// # `!` and generics
+///
+/// ## Infallible errors
+///
+/// The main place you'll see `!` used explicitly is in generic code. Consider the [`FromStr`]
+/// trait:
+///
+/// ```
+/// trait FromStr: Sized {
+/// type Err;
+/// fn from_str(s: &str) -> Result<Self, Self::Err>;
+/// }
+/// ```
+///
+/// When implementing this trait for [`String`] we need to pick a type for [`Err`]. And since
+/// converting a string into a string will never result in an error, the appropriate type is `!`.
+/// (Currently the type actually used is an enum with no variants, though this is only because `!`
+/// was added to Rust at a later date and it may change in the future.) With an [`Err`] type of
+/// `!`, if we have to call [`String::from_str`] for some reason the result will be a
+/// [`Result<String, !>`] which we can unpack like this:
+///
+/// ```
+/// #![feature(exhaustive_patterns)]
+/// use std::str::FromStr;
+/// let Ok(s) = String::from_str("hello");
+/// ```
+///
+/// Since the [`Err`] variant contains a `!`, it can never occur. If the `exhaustive_patterns`
+/// feature is present this means we can exhaustively match on [`Result<T, !>`] by just taking the
+/// [`Ok`] variant. This illustrates another behaviour of `!` - it can be used to "delete" certain
+/// enum variants from generic types like `Result`.
+///
+/// ## Infinite loops
+///
+/// While [`Result<T, !>`] is very useful for removing errors, `!` can also be used to remove
+/// successes as well. If we think of [`Result<T, !>`] as "if this function returns, it has not
+/// errored," we get a very intuitive idea of [`Result<!, E>`] as well: if the function returns, it
+/// *has* errored.
+///
+/// For example, consider the case of a simple web server, which can be simplified to:
+///
+/// ```ignore (hypothetical-example)
+/// loop {
+/// let (client, request) = get_request().expect("disconnected");
+/// let response = request.process();
+/// response.send(client);
+/// }
+/// ```
+///
+/// Currently, this isn't ideal, because we simply panic whenever we fail to get a new connection.
+/// Instead, we'd like to keep track of this error, like this:
+///
+/// ```ignore (hypothetical-example)
+/// loop {
+/// match get_request() {
+/// Err(err) => break err,
+/// Ok((client, request)) => {
+/// let response = request.process();
+/// response.send(client);
+/// },
+/// }
+/// }
+/// ```
+///
+/// Now, when the server disconnects, we exit the loop with an error instead of panicking. While it
+/// might be intuitive to simply return the error, we might want to wrap it in a [`Result<!, E>`]
+/// instead:
+///
+/// ```ignore (hypothetical-example)
+/// fn server_loop() -> Result<!, ConnectionError> {
+/// loop {
+/// let (client, request) = get_request()?;
+/// let response = request.process();
+/// response.send(client);
+/// }
+/// }
+/// ```
+///
+/// Now, we can use `?` instead of `match`, and the return type makes a lot more sense: if the loop
+/// ever stops, it means that an error occurred. We don't even have to wrap the loop in an `Ok`
+/// because `!` coerces to `Result<!, ConnectionError>` automatically.
+///
+/// [`String::from_str`]: str::FromStr::from_str
+#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
+/// [`FromStr`]: str::FromStr
+///
+/// # `!` and traits
+///
+/// When writing your own traits, `!` should have an `impl` whenever there is an obvious `impl`
+/// which doesn't `panic!`. The reason is that functions returning an `impl Trait` where `!`
+/// does not have an `impl` of `Trait` cannot diverge as their only possible code path. In other
+/// words, they can't return `!` from every code path. As an example, this code doesn't compile:
+///
+/// ```compile_fail
+/// use std::ops::Add;
+///
+/// fn foo() -> impl Add<u32> {
+/// unimplemented!()
+/// }
+/// ```
+///
+/// But this code does:
+///
+/// ```
+/// use std::ops::Add;
+///
+/// fn foo() -> impl Add<u32> {
+/// if true {
+/// unimplemented!()
+/// } else {
+/// 0
+/// }
+/// }
+/// ```
+///
+/// The reason is that, in the first example, there are many possible types that `!` could coerce
+/// to, because many types implement `Add<u32>`. However, in the second example,
+/// the `else` branch returns a `0`, which the compiler infers from the return type to be of type
+/// `u32`. Since `u32` is a concrete type, `!` can and will be coerced to it. See issue [#36375]
+/// for more information on this quirk of `!`.
+///
+/// [#36375]: https://github.com/rust-lang/rust/issues/36375
+///
+/// As it turns out, though, most traits can have an `impl` for `!`. Take [`Debug`]
+/// for example:
+///
+/// ```
+/// #![feature(never_type)]
+/// # use std::fmt;
+/// # trait Debug {
+/// # fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result;
+/// # }
+/// impl Debug for ! {
+/// fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// *self
+/// }
+/// }
+/// ```
+///
+/// Once again we're using `!`'s ability to coerce into any other type, in this case
+/// [`fmt::Result`]. Since this method takes a `&!` as an argument we know that it can never be
+/// called (because there is no value of type `!` for it to be called with). Writing `*self`
+/// essentially tells the compiler "We know that this code can never be run, so just treat the
+/// entire function body as having type [`fmt::Result`]". This pattern can be used a lot when
+/// implementing traits for `!`. Generally, any trait which only has methods which take a `self`
+/// parameter should have such an impl.
+///
+/// On the other hand, one trait which would not be appropriate to implement is [`Default`]:
+///
+/// ```
+/// trait Default {
+/// fn default() -> Self;
+/// }
+/// ```
+///
+/// Since `!` has no values, it has no default value either. It's true that we could write an
+/// `impl` for this which simply panics, but the same is true for any type (we could `impl
+/// Default` for (e.g.) [`File`] by just making [`default()`] panic.)
+///
+#[doc = concat!("[`File`]: ", include_str!("../primitive_docs/fs_file.md"))]
+/// [`Debug`]: fmt::Debug
+/// [`default()`]: Default::default
+///
+#[unstable(feature = "never_type", issue = "35121")]
+mod prim_never {}
+
+#[doc(primitive = "char")]
+#[allow(rustdoc::invalid_rust_codeblocks)]
+/// A character type.
+///
+/// The `char` type represents a single character. More specifically, since
+/// 'character' isn't a well-defined concept in Unicode, `char` is a '[Unicode
+/// scalar value]'.
+///
+/// This documentation describes a number of methods and trait implementations on the
+/// `char` type. For technical reasons, there is additional, separate
+/// documentation in [the `std::char` module](char/index.html) as well.
+///
+/// # Validity
+///
+/// A `char` is a '[Unicode scalar value]', which is any '[Unicode code point]'
+/// other than a [surrogate code point]. This has a fixed numerical definition:
+/// code points are in the range 0 to 0x10FFFF, inclusive.
+/// Surrogate code points, used by UTF-16, are in the range 0xD800 to 0xDFFF.
+///
+/// No `char` may be constructed, whether as a literal or at runtime, that is not a
+/// Unicode scalar value:
+///
+/// ```compile_fail
+/// // Each of these is a compiler error
+/// ['\u{D800}', '\u{DFFF}', '\u{110000}'];
+/// ```
+///
+/// ```should_panic
+/// // Panics; from_u32 returns None.
+/// char::from_u32(0xDE01).unwrap();
+/// ```
+///
+/// ```no_run
+/// // Undefined behaviour
+/// unsafe { char::from_u32_unchecked(0x110000) };
+/// ```
+///
+/// USVs are also the exact set of values that may be encoded in UTF-8. Because
+/// `char` values are USVs and `str` values are valid UTF-8, it is safe to store
+/// any `char` in a `str` or read any character from a `str` as a `char`.
+///
+/// The gap in valid `char` values is understood by the compiler, so in the
+/// below example the two ranges are understood to cover the whole range of
+/// possible `char` values and there is no error for a [non-exhaustive match].
+///
+/// ```
+/// let c: char = 'a';
+/// match c {
+/// '\0' ..= '\u{D7FF}' => false,
+/// '\u{E000}' ..= '\u{10FFFF}' => true,
+/// };
+/// ```
+///
+/// All USVs are valid `char` values, but not all of them represent a real
+/// character. Many USVs are not currently assigned to a character, but may be
+/// in the future ("reserved"); some will never be a character
+/// ("noncharacters"); and some may be given different meanings by different
+/// users ("private use").
+///
+/// [Unicode code point]: https://www.unicode.org/glossary/#code_point
+/// [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
+/// [non-exhaustive match]: ../book/ch06-02-match.html#matches-are-exhaustive
+/// [surrogate code point]: https://www.unicode.org/glossary/#surrogate_code_point
+///
+/// # Representation
+///
+/// `char` is always four bytes in size. This is a different representation than
+/// a given character would have as part of a [`String`]. For example:
+///
+/// ```
+/// let v = vec!['h', 'e', 'l', 'l', 'o'];
+///
+/// // five elements times four bytes for each element
+/// assert_eq!(20, v.len() * std::mem::size_of::<char>());
+///
+/// let s = String::from("hello");
+///
+/// // five elements times one byte per element
+/// assert_eq!(5, s.len() * std::mem::size_of::<u8>());
+/// ```
+///
+#[doc = concat!("[`String`]: ", include_str!("../primitive_docs/string_string.md"))]
+///
+/// As always, remember that a human intuition for 'character' might not map to
+/// Unicode's definitions. For example, despite looking similar, the 'é'
+/// character is one Unicode code point while 'é' is two Unicode code points:
+///
+/// ```
+/// let mut chars = "é".chars();
+/// // U+00e9: 'latin small letter e with acute'
+/// assert_eq!(Some('\u{00e9}'), chars.next());
+/// assert_eq!(None, chars.next());
+///
+/// let mut chars = "é".chars();
+/// // U+0065: 'latin small letter e'
+/// assert_eq!(Some('\u{0065}'), chars.next());
+/// // U+0301: 'combining acute accent'
+/// assert_eq!(Some('\u{0301}'), chars.next());
+/// assert_eq!(None, chars.next());
+/// ```
+///
+/// This means that the contents of the first string above _will_ fit into a
+/// `char` while the contents of the second string _will not_. Trying to create
+/// a `char` literal with the contents of the second string gives an error:
+///
+/// ```text
+/// error: character literal may only contain one codepoint: 'é'
+/// let c = 'é';
+/// ^^^
+/// ```
+///
+/// Another implication of the 4-byte fixed size of a `char` is that
+/// per-`char` processing can end up using a lot more memory:
+///
+/// ```
+/// let s = String::from("love: ❤️");
+/// let v: Vec<char> = s.chars().collect();
+///
+/// assert_eq!(12, std::mem::size_of_val(&s[..]));
+/// assert_eq!(32, std::mem::size_of_val(&v[..]));
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_char {}
+
+#[doc(primitive = "unit")]
+#[doc(alias = "(")]
+#[doc(alias = ")")]
+#[doc(alias = "()")]
+//
+/// The `()` type, also called "unit".
+///
+/// The `()` type has exactly one value `()`, and is used when there
+/// is no other meaningful value that could be returned. `()` is most
+/// commonly seen implicitly: functions without a `-> ...` implicitly
+/// have return type `()`, that is, these are equivalent:
+///
+/// ```rust
+/// fn long() -> () {}
+///
+/// fn short() {}
+/// ```
+///
+/// The semicolon `;` can be used to discard the result of an
+/// expression at the end of a block, making the expression (and thus
+/// the block) evaluate to `()`. For example,
+///
+/// ```rust
+/// fn returns_i64() -> i64 {
+/// 1i64
+/// }
+/// fn returns_unit() {
+/// 1i64;
+/// }
+///
+/// let is_i64 = {
+/// returns_i64()
+/// };
+/// let is_unit = {
+/// returns_i64();
+/// };
+/// ```
+///
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_unit {}
+
+// Required to make auto trait impls render.
+// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
+#[doc(hidden)]
+impl () {}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Clone for () {
+ fn clone(&self) -> Self {
+ loop {}
+ }
+}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Copy for () {
+ // empty
+}
+
+#[doc(primitive = "pointer")]
+#[doc(alias = "ptr")]
+#[doc(alias = "*")]
+#[doc(alias = "*const")]
+#[doc(alias = "*mut")]
+//
+/// Raw, unsafe pointers, `*const T`, and `*mut T`.
+///
+/// *[See also the `std::ptr` module](ptr).*
+///
+/// Working with raw pointers in Rust is uncommon, typically limited to a few patterns.
+/// Raw pointers can be unaligned or [`null`]. However, when a raw pointer is
+/// dereferenced (using the `*` operator), it must be non-null and aligned.
+///
+/// Storing through a raw pointer using `*ptr = data` calls `drop` on the old value, so
+/// [`write`] must be used if the type has drop glue and memory is not already
+/// initialized - otherwise `drop` would be called on the uninitialized memory.
+///
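+/// For instance, a minimal sketch of initializing memory that does not yet hold a
+/// valid value, where [`write`] must be used instead of `*ptr = data`:
+///
+/// ```
+/// use std::mem::MaybeUninit;
+/// use std::ptr;
+///
+/// let mut slot = MaybeUninit::<String>::uninit();
+/// // `ptr::write` moves the new value in without reading or dropping the old,
+/// // uninitialized contents.
+/// unsafe { ptr::write(slot.as_mut_ptr(), String::from("hello")) };
+/// let s = unsafe { slot.assume_init() };
+/// assert_eq!(s, "hello");
+/// ```
+///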
+/// Use the [`null`] and [`null_mut`] functions to create null pointers, and the
+/// [`is_null`] method of the `*const T` and `*mut T` types to check for null.
+/// The `*const T` and `*mut T` types also define the [`offset`] method, for
+/// pointer math.
+///
+/// # Common ways to create raw pointers
+///
+/// ## 1. Coerce a reference (`&T`) or mutable reference (`&mut T`).
+///
+/// ```
+/// let my_num: i32 = 10;
+/// let my_num_ptr: *const i32 = &my_num;
+/// let mut my_speed: i32 = 88;
+/// let my_speed_ptr: *mut i32 = &mut my_speed;
+/// ```
+///
+/// To get a pointer to a boxed value, dereference the box:
+///
+/// ```
+/// let my_num: Box<i32> = Box::new(10);
+/// let my_num_ptr: *const i32 = &*my_num;
+/// let mut my_speed: Box<i32> = Box::new(88);
+/// let my_speed_ptr: *mut i32 = &mut *my_speed;
+/// ```
+///
+/// This does not take ownership of the original allocation
+/// and requires no resource management later,
+/// but you must not use the pointer after its lifetime.
+///
+/// ## 2. Consume a box (`Box<T>`).
+///
+/// The [`into_raw`] function consumes a box and returns
+/// the raw pointer. It doesn't destroy `T` or deallocate any memory.
+///
+/// ```
+/// let my_speed: Box<i32> = Box::new(88);
+/// let my_speed: *mut i32 = Box::into_raw(my_speed);
+///
+/// // Because we took ownership of the original `Box<T>`,
+/// // we are obligated to rebuild it later so the value can be destroyed.
+/// unsafe {
+/// drop(Box::from_raw(my_speed));
+/// }
+/// ```
+///
+/// Note that here the call to [`drop`] is for clarity - it indicates
+/// that we are done with the given value and it should be destroyed.
+///
+/// ## 3. Create it using `ptr::addr_of!`
+///
+/// Instead of coercing a reference to a raw pointer, you can use the macros
+/// [`ptr::addr_of!`] (for `*const T`) and [`ptr::addr_of_mut!`] (for `*mut T`).
+/// These macros allow you to create raw pointers to fields to which you cannot
+/// create a reference (without causing undefined behaviour), such as an
+/// unaligned field. This might be necessary if packed structs or uninitialized
+/// memory is involved.
+///
+/// ```
+/// #[derive(Debug, Default, Copy, Clone)]
+/// #[repr(C, packed)]
+/// struct S {
+/// aligned: u8,
+/// unaligned: u32,
+/// }
+/// let s = S::default();
+/// let p = std::ptr::addr_of!(s.unaligned); // not allowed with coercion
+/// ```
+///
+/// ## 4. Get it from C.
+///
+/// ```
+/// # #![feature(rustc_private)]
+/// extern crate libc;
+///
+/// use std::mem;
+///
+/// unsafe {
+/// let my_num: *mut i32 = libc::malloc(mem::size_of::<i32>()) as *mut i32;
+/// if my_num.is_null() {
+/// panic!("failed to allocate memory");
+/// }
+/// libc::free(my_num as *mut libc::c_void);
+/// }
+/// ```
+///
+/// Usually you wouldn't literally use `malloc` and `free` from Rust,
+/// but C APIs hand out a lot of pointers generally, so they are a common source
+/// of raw pointers in Rust.
+///
+/// [`null`]: ptr::null
+/// [`null_mut`]: ptr::null_mut
+/// [`is_null`]: pointer::is_null
+/// [`offset`]: pointer::offset
+#[doc = concat!("[`into_raw`]: ", include_str!("../primitive_docs/box_into_raw.md"))]
+/// [`drop`]: mem::drop
+/// [`write`]: ptr::write
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_pointer {}
+
+#[doc(primitive = "array")]
+#[doc(alias = "[]")]
+#[doc(alias = "[T;N]")] // unfortunately, rustdoc doesn't have fuzzy search for aliases
+#[doc(alias = "[T; N]")]
+/// A fixed-size array, denoted `[T; N]`, for the element type, `T`, and the
+/// non-negative compile-time constant size, `N`.
+///
+/// There are two syntactic forms for creating an array:
+///
+/// * A list with each element, i.e., `[x, y, z]`.
+/// * A repeat expression `[x; N]`, which produces an array with `N` copies of `x`.
+/// The type of `x` must be [`Copy`].
+///
+/// Note that `[expr; 0]` is allowed, and produces an empty array.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
+/// Arrays of *any* size implement the following traits if the element type allows it:
+///
+/// - [`Copy`]
+/// - [`Clone`]
+/// - [`Debug`]
+/// - [`IntoIterator`] (implemented for `[T; N]`, `&[T; N]` and `&mut [T; N]`)
+/// - [`PartialEq`], [`PartialOrd`], [`Eq`], [`Ord`]
+/// - [`Hash`]
+/// - [`AsRef`], [`AsMut`]
+/// - [`Borrow`], [`BorrowMut`]
+///
+/// Arrays of sizes from 0 to 32 (inclusive) implement the [`Default`] trait
+/// if the element type allows it. As a stopgap, trait implementations are
+/// statically generated up to size 32.
+///
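+/// For example, a zero-initialized array can be obtained through [`Default`]
+/// (a minimal sketch):
+///
+/// ```
+/// // `i32` implements `Default`, and 3 <= 32, so the array does too.
+/// let zeros: [i32; 3] = Default::default();
+/// assert_eq!(zeros, [0, 0, 0]);
+/// ```
+///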
+/// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on
+/// an array. Indeed, this provides most of the API for working with arrays.
+/// Slices have a dynamic size and do not coerce to arrays.
+///
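+/// For example, `sort` and `first` are slice methods, but can be called on an
+/// array directly (a minimal sketch):
+///
+/// ```
+/// let mut a = [3, 1, 2];
+/// // Both calls go through the coercion from `[i32; 3]` to `[i32]`.
+/// a.sort();
+/// assert_eq!(a.first(), Some(&1));
+/// ```
+///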
+/// You can move elements out of an array with a [slice pattern]. If you want
+/// one element, see [`mem::replace`].
+///
+/// # Examples
+///
+/// ```
+/// let mut array: [i32; 3] = [0; 3];
+///
+/// array[1] = 1;
+/// array[2] = 2;
+///
+/// assert_eq!([1, 2], &array[1..]);
+///
+/// // This loop prints: 0 1 2
+/// for x in array {
+/// print!("{x} ");
+/// }
+/// ```
+///
+/// You can also iterate over references to the array's elements:
+///
+/// ```
+/// let array: [i32; 3] = [0; 3];
+///
+/// for x in &array { }
+/// ```
+///
+/// You can use a [slice pattern] to move elements out of an array:
+///
+/// ```
+/// fn move_away(_: String) { /* Do interesting things. */ }
+///
+/// let [john, roa] = ["John".to_string(), "Roa".to_string()];
+/// move_away(john);
+/// move_away(roa);
+/// ```
+///
+/// # Editions
+///
+/// Prior to Rust 1.53, arrays did not implement [`IntoIterator`] by value, so the method call
+/// `array.into_iter()` auto-referenced into a [slice iterator](slice::iter). Right now, the old
+/// behavior is preserved in the 2015 and 2018 editions of Rust for compatibility, ignoring
+/// [`IntoIterator`] by value. In the future, the behavior on the 2015 and 2018 editions
+/// might be made consistent with the behavior of later editions.
+///
+/// ```rust,edition2018
+/// // Rust 2015 and 2018:
+///
+/// # #![allow(array_into_iter)] // override our `deny(warnings)`
+/// let array: [i32; 3] = [0; 3];
+///
+/// // This creates a slice iterator, producing references to each value.
+/// for item in array.into_iter().enumerate() {
+/// let (i, x): (usize, &i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+///
+/// // The `array_into_iter` lint suggests this change for future compatibility:
+/// for item in array.iter().enumerate() {
+/// let (i, x): (usize, &i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+///
+/// // You can explicitly iterate an array by value using `IntoIterator::into_iter`
+/// for item in IntoIterator::into_iter(array).enumerate() {
+/// let (i, x): (usize, i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+/// ```
+///
+/// Starting in the 2021 edition, `array.into_iter()` uses `IntoIterator` normally to iterate
+/// by value, and `iter()` should be used to iterate by reference like previous editions.
+///
+/// ```rust,edition2021
+/// // Rust 2021:
+///
+/// let array: [i32; 3] = [0; 3];
+///
+/// // This iterates by reference:
+/// for item in array.iter().enumerate() {
+/// let (i, x): (usize, &i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+///
+/// // This iterates by value:
+/// for item in array.into_iter().enumerate() {
+/// let (i, x): (usize, i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+/// ```
+///
+/// Future language versions might start treating the `array.into_iter()`
+/// syntax on editions 2015 and 2018 the same as on edition 2021. So code using
+/// those older editions should still be written with this change in mind, to
+/// prevent breakage in the future. The safest way to accomplish this is to
+/// avoid the `into_iter` syntax on those editions. If an edition update is not
+/// viable/desired, there are multiple alternatives:
+/// * use `iter`, equivalent to the old behavior, creating references
+/// * use [`IntoIterator::into_iter`], equivalent to the post-2021 behavior (Rust 1.53+)
+/// * replace `for ... in array.into_iter() {` with `for ... in array {`,
+/// equivalent to the post-2021 behavior (Rust 1.53+)
+///
+/// ```rust,edition2018
+/// // Rust 2015 and 2018:
+///
+/// let array: [i32; 3] = [0; 3];
+///
+/// // This iterates by reference:
+/// for item in array.iter() {
+/// let x: &i32 = item;
+/// println!("{x}");
+/// }
+///
+/// // This iterates by value:
+/// for item in IntoIterator::into_iter(array) {
+/// let x: i32 = item;
+/// println!("{x}");
+/// }
+///
+/// // This iterates by value:
+/// for item in array {
+/// let x: i32 = item;
+/// println!("{x}");
+/// }
+///
+/// // IntoIter can also start a chain.
+/// // This iterates by value:
+/// for item in IntoIterator::into_iter(array).enumerate() {
+/// let (i, x): (usize, i32) = item;
+/// println!("array[{i}] = {x}");
+/// }
+/// ```
+///
+/// [slice]: prim@slice
+/// [`Debug`]: fmt::Debug
+/// [`Hash`]: hash::Hash
+/// [`Borrow`]: borrow::Borrow
+/// [`BorrowMut`]: borrow::BorrowMut
+/// [slice pattern]: ../reference/patterns.html#slice-patterns
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_array {}
+
+#[doc(primitive = "slice")]
+#[doc(alias = "[")]
+#[doc(alias = "]")]
+#[doc(alias = "[]")]
+/// A dynamically-sized view into a contiguous sequence, `[T]`. Contiguous here
+/// means that elements are laid out so that every element is the same
+/// distance from its neighbors.
+///
+/// *[See also the `std::slice` module](crate::slice).*
+///
+/// Slices are a view into a block of memory represented as a pointer and a
+/// length.
+///
+/// ```
+/// // slicing a Vec
+/// let vec = vec![1, 2, 3];
+/// let int_slice = &vec[..];
+/// // coercing an array to a slice
+/// let str_slice: &[&str] = &["one", "two", "three"];
+/// ```
+///
+/// Slices are either mutable or shared. The shared slice type is `&[T]`,
+/// while the mutable slice type is `&mut [T]`, where `T` represents the element
+/// type. For example, you can mutate the block of memory that a mutable slice
+/// points to:
+///
+/// ```
+/// let mut x = [1, 2, 3];
+/// let x = &mut x[..]; // Take a full slice of `x`.
+/// x[1] = 7;
+/// assert_eq!(x, &[1, 7, 3]);
+/// ```
+///
+/// As slices store the length of the sequence they refer to, they have twice
+/// the size of pointers to [`Sized`](marker/trait.Sized.html) types.
+/// Also see the reference on
+/// [dynamically sized types](../reference/dynamically-sized-types.html).
+///
+/// ```
+/// # use std::rc::Rc;
+/// let pointer_size = std::mem::size_of::<&u8>();
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<&[u8]>());
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<*const [u8]>());
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<Box<[u8]>>());
+/// assert_eq!(2 * pointer_size, std::mem::size_of::<Rc<[u8]>>());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_slice {}
+
+#[doc(primitive = "str")]
+//
+/// String slices.
+///
+/// *[See also the `std::str` module](crate::str).*
+///
+/// The `str` type, also called a 'string slice', is the most primitive string
+/// type. It is usually seen in its borrowed form, `&str`. It is also the type
+/// of string literals, `&'static str`.
+///
+/// String slices are always valid UTF-8.
+///
+/// # Examples
+///
+/// String literals are string slices:
+///
+/// ```
+/// let hello = "Hello, world!";
+///
+/// // with an explicit type annotation
+/// let hello: &'static str = "Hello, world!";
+/// ```
+///
+/// They are `'static` because they're stored directly in the final binary, and
+/// so will be valid for the `'static` duration.
+///
+/// # Representation
+///
+/// A `&str` is made up of two components: a pointer to some bytes, and a
+/// length. You can look at these with the [`as_ptr`] and [`len`] methods:
+///
+/// ```
+/// use std::slice;
+/// use std::str;
+///
+/// let story = "Once upon a time...";
+///
+/// let ptr = story.as_ptr();
+/// let len = story.len();
+///
+/// // story has nineteen bytes
+/// assert_eq!(19, len);
+///
+/// // We can re-build a str out of ptr and len. This is all unsafe because
+/// // we are responsible for making sure the two components are valid:
+/// let s = unsafe {
+/// // First, we build a &[u8]...
+/// let slice = slice::from_raw_parts(ptr, len);
+///
+/// // ... and then convert that slice into a string slice
+/// str::from_utf8(slice)
+/// };
+///
+/// assert_eq!(s, Ok(story));
+/// ```
+///
+/// [`as_ptr`]: str::as_ptr
+/// [`len`]: str::len
+///
+/// Note: This example shows the internals of `&str`. `unsafe` should not be
+/// used to get a string slice under normal circumstances. Use `as_str`
+/// instead.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_str {}
+
+#[doc(primitive = "tuple")]
+#[doc(alias = "(")]
+#[doc(alias = ")")]
+#[doc(alias = "()")]
+//
+/// A finite heterogeneous sequence, `(T, U, ..)`.
+///
+/// Let's cover each of those in turn:
+///
+/// Tuples are *finite*. In other words, a tuple has a length. Here's a tuple
+/// of length `3`:
+///
+/// ```
+/// ("hello", 5, 'c');
+/// ```
+///
+/// 'Length' is also sometimes called 'arity' here; each tuple of a different
+/// length is a different, distinct type.
+///
+/// Tuples are *heterogeneous*. This means that each element of the tuple can
+/// have a different type. In that tuple above, it has the type:
+///
+/// ```
+/// # let _:
+/// (&'static str, i32, char)
+/// # = ("hello", 5, 'c');
+/// ```
+///
+/// Tuples are a *sequence*. This means that they can be accessed by position;
+/// this is called 'tuple indexing', and it looks like this:
+///
+/// ```rust
+/// let tuple = ("hello", 5, 'c');
+///
+/// assert_eq!(tuple.0, "hello");
+/// assert_eq!(tuple.1, 5);
+/// assert_eq!(tuple.2, 'c');
+/// ```
+///
+/// The sequential nature of the tuple applies to its implementations of various
+/// traits. For example, in [`PartialOrd`] and [`Ord`], the elements are compared
+/// sequentially until the first non-equal set is found.
+///
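+/// For example, with [`PartialOrd`] the comparison is lexicographic (a minimal
+/// sketch):
+///
+/// ```
+/// // The first elements differ, so the second elements are never consulted.
+/// assert!((1, 9) < (2, 0));
+/// // The first elements are equal, so the second elements decide.
+/// assert!((1, 2) < (1, 3));
+/// ```
+///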
+/// For more about tuples, see [the book](../book/ch03-02-data-types.html#the-tuple-type).
+///
+// Hardcoded anchor in src/librustdoc/html/format.rs
+// linked to as `#trait-implementations-1`
+/// # Trait implementations
+///
+/// In this documentation the shorthand `(T₁, T₂, …, Tₙ)` is used to represent tuples of varying
+/// length. When that is used, any trait bound expressed on `T` applies to each element of the
+/// tuple independently. Note that this is a convenience notation to avoid repetitive
+/// documentation, not valid Rust syntax.
+///
+/// Due to a temporary restriction in Rust’s type system, the following traits are only
+/// implemented on tuples of arity 12 or less. In the future, this may change:
+///
+/// * [`PartialEq`]
+/// * [`Eq`]
+/// * [`PartialOrd`]
+/// * [`Ord`]
+/// * [`Debug`]
+/// * [`Default`]
+/// * [`Hash`]
+///
+/// [`Debug`]: fmt::Debug
+/// [`Hash`]: hash::Hash
+///
+/// The following traits are implemented for tuples of any length. These traits have
+/// implementations that are automatically generated by the compiler, so are not limited by
+/// missing language features.
+///
+/// * [`Clone`]
+/// * [`Copy`]
+/// * [`Send`]
+/// * [`Sync`]
+/// * [`Unpin`]
+/// * [`UnwindSafe`]
+/// * [`RefUnwindSafe`]
+///
+/// [`Unpin`]: marker::Unpin
+/// [`UnwindSafe`]: panic::UnwindSafe
+/// [`RefUnwindSafe`]: panic::RefUnwindSafe
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let tuple = ("hello", 5, 'c');
+///
+/// assert_eq!(tuple.0, "hello");
+/// ```
+///
+/// Tuples are often used as a return type when you want to return more than
+/// one value:
+///
+/// ```
+/// fn calculate_point() -> (i32, i32) {
+/// // Don't do a calculation, that's not the point of the example
+/// (4, 5)
+/// }
+///
+/// let point = calculate_point();
+///
+/// assert_eq!(point.0, 4);
+/// assert_eq!(point.1, 5);
+///
+/// // Combining this with patterns can be nicer.
+///
+/// let (x, y) = calculate_point();
+///
+/// assert_eq!(x, 4);
+/// assert_eq!(y, 5);
+/// ```
+///
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_tuple {}
+
+// Required to make auto trait impls render.
+// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
+#[doc(hidden)]
+impl<T> (T,) {}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on arbitrary-length tuples.
+impl<T: Clone> Clone for (T,) {
+ fn clone(&self) -> Self {
+ loop {}
+ }
+}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on arbitrary-length tuples.
+impl<T: Copy> Copy for (T,) {
+ // empty
+}
+
+#[doc(primitive = "f32")]
+/// A 32-bit floating point type (specifically, the "binary32" type defined in IEEE 754-2008).
+///
+/// This type can represent a wide range of decimal numbers, like `3.5`, `27`,
+/// `-113.75`, `0.0078125`, `34359738368`, `0`, `-1`. So unlike integer types
+/// (such as `i32`), floating point types can represent non-integer numbers,
+/// too.
+///
+/// However, being able to represent this wide range of numbers comes at the
+/// cost of precision: floats can only represent some of the real numbers and
+/// calculations with floats round to a nearby representable number. For example,
+/// `5.0` and `1.0` can be exactly represented as `f32`, but `1.0 / 5.0` results
+/// in `0.20000000298023223876953125` since `0.2` cannot be exactly represented
+/// as `f32`. Note, however, that printing floats with `println` and friends will
+/// often discard insignificant digits: `println!("{}", 1.0f32 / 5.0f32)` will
+/// print `0.2`.
+///
+/// Additionally, `f32` can represent some special values:
+///
+/// - −0.0: IEEE 754 floating point numbers have a bit that indicates their sign, so −0.0 is a
+/// possible value. For comparison −0.0 = +0.0, but floating point operations can carry
+/// the sign bit through arithmetic operations. This means −0.0 × +0.0 produces −0.0 and
+/// a negative number rounded to a value smaller than a float can represent also produces −0.0.
+/// - [∞](#associatedconstant.INFINITY) and
+/// [−∞](#associatedconstant.NEG_INFINITY): these result from calculations
+/// like `1.0 / 0.0`.
+/// - [NaN (not a number)](#associatedconstant.NAN): this value results from
+/// calculations like `(-1.0).sqrt()`. NaN has some potentially unexpected
+/// behavior:
+/// - It is unequal to any float, including itself! This is the reason `f32`
+/// doesn't implement the `Eq` trait.
+/// - It is also neither smaller nor greater than any float, making it
+/// impossible to sort by the default comparison operation, which is the
+/// reason `f32` doesn't implement the `Ord` trait.
+/// - It is also considered *infectious* as almost all calculations where one
+/// of the operands is NaN will also result in NaN. The explanations on this
+/// page only explicitly document behavior on NaN operands if this default
+/// is deviated from.
+/// - Lastly, there are multiple bit patterns that are considered NaN.
+/// Rust does not currently guarantee that the bit patterns of NaN are
+/// preserved over arithmetic operations, and they are not guaranteed to be
+/// portable or even fully deterministic! This means that there may be some
+/// surprising results upon inspecting the bit patterns,
+/// as the same calculations might produce NaNs with different bit patterns.
+///
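+/// A short sketch of the special values described above:
+///
+/// ```
+/// // NaN is unequal to every value, including itself, and is unordered.
+/// let nan = f32::NAN;
+/// assert!(nan != nan);
+/// assert_eq!(nan.partial_cmp(&1.0), None);
+///
+/// // -0.0 compares equal to +0.0, but keeps its sign bit.
+/// assert_eq!(-0.0_f32, 0.0_f32);
+/// assert!((-0.0_f32).is_sign_negative());
+/// ```
+///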
+/// When the number resulting from a primitive operation (addition,
+/// subtraction, multiplication, or division) on this type is not exactly
+/// representable as `f32`, it is rounded according to the roundTiesToEven
+/// direction defined in IEEE 754-2008. That means:
+///
+/// - The result is the representable value closest to the true value, if there
+/// is a unique closest representable value.
+/// - If the true value is exactly half-way between two representable values,
+/// the result is the one with an even least-significant binary digit.
+/// - If the true value's magnitude is ≥ `f32::MAX` + 2<sup>(`f32::MAX_EXP` −
+/// `f32::MANTISSA_DIGITS` − 1)</sup>, the result is ∞ or −∞ (preserving the
+/// true value's sign).
+///
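+/// For example, an addition whose true result is exactly half-way between two
+/// representable values rounds to the one with an even mantissa (a minimal sketch):
+///
+/// ```
+/// // 16_777_217 lies exactly between 16_777_216.0 and 16_777_218.0 in `f32`,
+/// // so the addition rounds to the value with the even least-significant digit.
+/// assert_eq!(16_777_216.0_f32 + 1.0, 16_777_216.0_f32);
+/// ```
+///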
+/// For more information on floating point numbers, see [Wikipedia][wikipedia].
+///
+/// *[See also the `std::f32::consts` module](crate::f32::consts).*
+///
+/// [wikipedia]: https://en.wikipedia.org/wiki/Single-precision_floating-point_format
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_f32 {}
+
+#[doc(primitive = "f64")]
+/// A 64-bit floating point type (specifically, the "binary64" type defined in IEEE 754-2008).
+///
+/// This type is very similar to [`f32`], but has increased
+/// precision by using twice as many bits. Please see [the documentation for
+/// `f32`][`f32`] or [Wikipedia on double precision
+/// values][wikipedia] for more information.
+///
+/// *[See also the `std::f64::consts` module](crate::f64::consts).*
+///
+/// [`f32`]: prim@f32
+/// [wikipedia]: https://en.wikipedia.org/wiki/Double-precision_floating-point_format
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_f64 {}
+
+#[doc(primitive = "i8")]
+//
+/// The 8-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i8 {}
+
+#[doc(primitive = "i16")]
+//
+/// The 16-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i16 {}
+
+#[doc(primitive = "i32")]
+//
+/// The 32-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i32 {}
+
+#[doc(primitive = "i64")]
+//
+/// The 64-bit signed integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_i64 {}
+
+#[doc(primitive = "i128")]
+//
+/// The 128-bit signed integer type.
+#[stable(feature = "i128", since = "1.26.0")]
+mod prim_i128 {}
+
+#[doc(primitive = "u8")]
+//
+/// The 8-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u8 {}
+
+#[doc(primitive = "u16")]
+//
+/// The 16-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u16 {}
+
+#[doc(primitive = "u32")]
+//
+/// The 32-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u32 {}
+
+#[doc(primitive = "u64")]
+//
+/// The 64-bit unsigned integer type.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_u64 {}
+
+#[doc(primitive = "u128")]
+//
+/// The 128-bit unsigned integer type.
+#[stable(feature = "i128", since = "1.26.0")]
+mod prim_u128 {}
+
+#[doc(primitive = "isize")]
+//
+/// The pointer-sized signed integer type.
+///
+/// The size of this primitive is how many bytes it takes to reference any
+/// location in memory. For example, on a 32 bit target, this is 4 bytes
+/// and on a 64 bit target, this is 8 bytes.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_isize {}
+
+#[doc(primitive = "usize")]
+//
+/// The pointer-sized unsigned integer type.
+///
+/// The size of this primitive is how many bytes it takes to reference any
+/// location in memory. For example, on a 32 bit target, this is 4 bytes
+/// and on a 64 bit target, this is 8 bytes.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_usize {}
+
+#[doc(primitive = "reference")]
+#[doc(alias = "&")]
+#[doc(alias = "&mut")]
+//
+/// References, both shared and mutable.
+///
+/// A reference represents a borrow of some owned value. You can get one by using the `&` or `&mut`
+/// operators on a value, or by using a [`ref`](../std/keyword.ref.html) or
+/// <code>[ref](../std/keyword.ref.html) [mut](../std/keyword.mut.html)</code> pattern.
+///
+/// For those familiar with pointers, a reference is just a pointer that is assumed to be
+/// aligned, not null, and pointing to memory containing a valid value of `T` - for example,
+/// <code>&[bool]</code> can only point to an allocation containing the integer values `1`
+/// ([`true`](../std/keyword.true.html)) or `0` ([`false`](../std/keyword.false.html)), but
+/// creating a <code>&[bool]</code> that points to an allocation containing
+/// the value `3` causes undefined behaviour.
+/// In fact, <code>[Option]\<&T></code> has the same memory representation as a
+/// nullable but aligned pointer, and can be passed across FFI boundaries as such.
+///
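+/// A minimal sketch of that layout guarantee:
+///
+/// ```
+/// use std::mem::size_of;
+///
+/// // `None` is represented by the (otherwise impossible) null pointer, so the
+/// // `Option` adds no extra space.
+/// assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
+/// ```
+///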
+/// In most cases, references can be used much like the original value. Field access, method
+/// calling, and indexing work the same (save for mutability rules, of course). In addition, the
+/// comparison operators transparently defer to the referent's implementation, allowing references
+/// to be compared the same as owned values.
+///
+/// References have a lifetime attached to them, which represents the scope for which the borrow is
+/// valid. A lifetime is said to "outlive" another one if its representative scope is as long or
+/// longer than the other. The `'static` lifetime is the longest lifetime, which represents the
+/// total life of the program. For example, string literals have a `'static` lifetime because the
+/// text data is embedded into the binary of the program, rather than in an allocation that needs
+/// to be dynamically managed.
+///
+/// `&mut T` references can be freely coerced into `&T` references with the same referent type, and
+/// references with longer lifetimes can be freely coerced into references with shorter ones.
+///
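+/// For instance, a `&mut T` can be passed where a `&T` is expected (a minimal
+/// sketch with a hypothetical `len_of` function):
+///
+/// ```
+/// fn len_of(s: &String) -> usize { s.len() }
+///
+/// let mut owned = String::from("hi");
+/// let exclusive: &mut String = &mut owned;
+/// // The exclusive reference is reborrowed as a shared one for the call.
+/// assert_eq!(len_of(exclusive), 2);
+/// ```
+///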
+/// Reference equality by address, instead of comparing the values pointed to, is accomplished via
+/// implicit reference-pointer coercion and raw pointer equality via [`ptr::eq`], while
+/// [`PartialEq`] compares values.
+///
+/// ```
+/// use std::ptr;
+///
+/// let five = 5;
+/// let other_five = 5;
+/// let five_ref = &five;
+/// let same_five_ref = &five;
+/// let other_five_ref = &other_five;
+///
+/// assert!(five_ref == same_five_ref);
+/// assert!(five_ref == other_five_ref);
+///
+/// assert!(ptr::eq(five_ref, same_five_ref));
+/// assert!(!ptr::eq(five_ref, other_five_ref));
+/// ```
+///
+/// For more information on how to use references, see [the book's section on "References and
+/// Borrowing"][book-refs].
+///
+/// [book-refs]: ../book/ch04-02-references-and-borrowing.html
+///
+/// # Trait implementations
+///
+/// The following traits are implemented for all `&T`, regardless of the type of its referent:
+///
+/// * [`Copy`]
+/// * [`Clone`] \(Note that this will not defer to `T`'s `Clone` implementation if it exists!)
+/// * [`Deref`]
+/// * [`Borrow`]
+/// * [`fmt::Pointer`]
+///
+/// [`Deref`]: ops::Deref
+/// [`Borrow`]: borrow::Borrow
+///
+/// `&mut T` references get all of the above except `Copy` and `Clone` (to prevent creating
+/// multiple simultaneous mutable borrows), plus the following, regardless of the type of its
+/// referent:
+///
+/// * [`DerefMut`]
+/// * [`BorrowMut`]
+///
+/// [`DerefMut`]: ops::DerefMut
+/// [`BorrowMut`]: borrow::BorrowMut
+/// [bool]: prim@bool
+///
+/// The following traits are implemented on `&T` references if the underlying `T` also implements
+/// that trait:
+///
+/// * All the traits in [`std::fmt`] except [`fmt::Pointer`] (which is implemented regardless of the type of its referent) and [`fmt::Write`]
+/// * [`PartialOrd`]
+/// * [`Ord`]
+/// * [`PartialEq`]
+/// * [`Eq`]
+/// * [`AsRef`]
+/// * [`Fn`] \(in addition, `&T` references get [`FnMut`] and [`FnOnce`] if `T: Fn`)
+/// * [`Hash`]
+/// * [`ToSocketAddrs`]
+/// * [`Send`] \(`&T` references also require <code>T: [Sync]</code>)
+///
+/// [`std::fmt`]: fmt
+/// [`Hash`]: hash::Hash
+#[doc = concat!("[`ToSocketAddrs`]: ", include_str!("../primitive_docs/net_tosocketaddrs.md"))]
+///
+/// `&mut T` references get all of the above except `ToSocketAddrs`, plus the following, if `T`
+/// implements that trait:
+///
+/// * [`AsMut`]
+/// * [`FnMut`] \(in addition, `&mut T` references get [`FnOnce`] if `T: FnMut`)
+/// * [`fmt::Write`]
+/// * [`Iterator`]
+/// * [`DoubleEndedIterator`]
+/// * [`ExactSizeIterator`]
+/// * [`FusedIterator`]
+/// * [`TrustedLen`]
+/// * [`io::Write`]
+/// * [`Read`]
+/// * [`Seek`]
+/// * [`BufRead`]
+///
+/// [`FusedIterator`]: iter::FusedIterator
+/// [`TrustedLen`]: iter::TrustedLen
+#[doc = concat!("[`Seek`]: ", include_str!("../primitive_docs/io_seek.md"))]
+#[doc = concat!("[`BufRead`]: ", include_str!("../primitive_docs/io_bufread.md"))]
+#[doc = concat!("[`Read`]: ", include_str!("../primitive_docs/io_read.md"))]
+#[doc = concat!("[`io::Write`]: ", include_str!("../primitive_docs/io_write.md"))]
+///
+/// Note that, due to method call deref coercion, trait methods can simply be called on references
+/// and behave just as they do on owned values! The implementations described here are
+/// meant for generic contexts, where the final type `T` is a type parameter or otherwise not
+/// locally known.
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_ref {}
+
+#[doc(primitive = "fn")]
+//
+/// Function pointers, like `fn(usize) -> bool`.
+///
+/// *See also the traits [`Fn`], [`FnMut`], and [`FnOnce`].*
+///
+/// [`Fn`]: ops::Fn
+/// [`FnMut`]: ops::FnMut
+/// [`FnOnce`]: ops::FnOnce
+///
+/// Function pointers are pointers that point to *code*, not data. They can be called
+/// just like functions. Like references, function pointers are, among other things, assumed to
+/// not be null, so if you want to pass a function pointer over FFI and be able to accommodate null
+/// pointers, make your type [`Option<fn()>`](core::option#options-and-pointers-nullable-pointers)
+/// with your required signature.
+///
+/// ### Safety
+///
+/// Plain function pointers are obtained by casting either plain functions, or closures that don't
+/// capture an environment:
+///
+/// ```
+/// fn add_one(x: usize) -> usize {
+/// x + 1
+/// }
+///
+/// let ptr: fn(usize) -> usize = add_one;
+/// assert_eq!(ptr(5), 6);
+///
+/// let clos: fn(usize) -> usize = |x| x + 5;
+/// assert_eq!(clos(5), 10);
+/// ```
+///
+/// In addition to varying based on their signature, function pointers come in two flavors: safe
+/// and unsafe. Plain `fn()` function pointers can only point to safe functions,
+/// while `unsafe fn()` function pointers can point to safe or unsafe functions.
+///
+/// ```
+/// fn add_one(x: usize) -> usize {
+/// x + 1
+/// }
+///
+/// unsafe fn add_one_unsafely(x: usize) -> usize {
+/// x + 1
+/// }
+///
+/// let safe_ptr: fn(usize) -> usize = add_one;
+///
+/// //ERROR: mismatched types: expected normal fn, found unsafe fn
+/// //let bad_ptr: fn(usize) -> usize = add_one_unsafely;
+///
+/// let unsafe_ptr: unsafe fn(usize) -> usize = add_one_unsafely;
+/// let really_safe_ptr: unsafe fn(usize) -> usize = add_one;
+/// ```
+///
+/// ### ABI
+///
+/// On top of that, function pointers can vary based on what ABI they use. This
+/// is achieved by adding the `extern` keyword before the type, followed by the
+/// ABI in question. The default ABI is "Rust", i.e., `fn()` is the exact same
+/// type as `extern "Rust" fn()`. A pointer to a function with C ABI would have
+/// type `extern "C" fn()`.
+///
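+/// For example (a minimal sketch with a hypothetical `c_compatible` function), the
+/// ABI is part of the pointer's type:
+///
+/// ```
+/// extern "C" fn c_compatible(x: u32) -> u32 { x + 1 }
+///
+/// // The pointer type records the "C" ABI; the default would be `fn(u32) -> u32`.
+/// let ptr: extern "C" fn(u32) -> u32 = c_compatible;
+/// assert_eq!(ptr(1), 2);
+/// ```
+///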
+/// `extern "ABI" { ... }` blocks declare functions with ABI "ABI". The default
+/// here is "C", i.e., functions declared in an `extern {...}` block have "C"
+/// ABI.
+///
+/// For more information and a list of supported ABIs, see [the nomicon's
+/// section on foreign calling conventions][nomicon-abi].
+///
+/// [nomicon-abi]: ../nomicon/ffi.html#foreign-calling-conventions
+///
+/// ### Variadic functions
+///
+/// Extern function declarations with the "C" or "cdecl" ABIs can also be *variadic*, allowing them
+/// to be called with a variable number of arguments. Normal Rust functions, even those with an
+/// `extern "ABI"`, cannot be variadic. For more information, see [the nomicon's section on
+/// variadic functions][nomicon-variadic].
+///
+/// [nomicon-variadic]: ../nomicon/ffi.html#variadic-functions
+///
+/// ### Creating function pointers
+///
+/// When `bar` is the name of a function, then the expression `bar` is *not* a
+/// function pointer. Rather, it denotes a value of an unnameable type that
+/// uniquely identifies the function `bar`. The value is zero-sized because the
+/// type already identifies the function. This has the advantage that "calling"
+/// the value (it implements the `Fn*` traits) does not require dynamic
+/// dispatch.
+///
+/// This zero-sized type *coerces* to a regular function pointer. For example:
+///
+/// ```rust
+/// use std::mem;
+///
+/// fn bar(x: i32) {}
+///
+/// let not_bar_ptr = bar; // `not_bar_ptr` is zero-sized, uniquely identifying `bar`
+/// assert_eq!(mem::size_of_val(&not_bar_ptr), 0);
+///
+/// let bar_ptr: fn(i32) = not_bar_ptr; // force coercion to function pointer
+/// assert_eq!(mem::size_of_val(&bar_ptr), mem::size_of::<usize>());
+///
+/// let footgun = &bar; // this is a shared reference to the zero-sized type identifying `bar`
+/// ```
+///
+/// The last line shows that `&bar` is not a function pointer either. Rather, it
+/// is a reference to the function-specific ZST. `&bar` is basically never what you
+/// want when `bar` is a function.
+///
+/// ### Casting to and from integers
+///
+/// You can cast function pointers directly to integers:
+///
+/// ```rust
+/// let fnptr: fn(i32) -> i32 = |x| x+2;
+/// let fnptr_addr = fnptr as usize;
+/// ```
+///
+/// However, a direct cast back is not possible. You need to use `transmute`:
+///
+/// ```rust
+/// # let fnptr: fn(i32) -> i32 = |x| x+2;
+/// # let fnptr_addr = fnptr as usize;
+/// let fnptr = fnptr_addr as *const ();
+/// let fnptr: fn(i32) -> i32 = unsafe { std::mem::transmute(fnptr) };
+/// assert_eq!(fnptr(40), 42);
+/// ```
+///
+/// Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
+/// This avoids an integer-to-pointer `transmute`, which can be problematic.
+/// Transmuting between raw pointers and function pointers (i.e., two pointer types) is fine.
+///
+/// Note that all of this is not portable to platforms where function pointers and data pointers
+/// have different sizes.
+///
+/// ### Trait implementations
+///
+/// In this documentation the shorthand `fn (T₁, T₂, …, Tₙ)` is used to represent non-variadic
+/// function pointers of varying length. Note that this is a convenience notation to avoid
+/// repetitive documentation, not valid Rust syntax.
+///
+/// Due to a temporary restriction in Rust's type system, these traits are only implemented on
+/// functions that take 12 arguments or less, with the `"Rust"` and `"C"` ABIs. In the future, this
+/// may change:
+///
+/// * [`PartialEq`]
+/// * [`Eq`]
+/// * [`PartialOrd`]
+/// * [`Ord`]
+/// * [`Hash`]
+/// * [`Pointer`]
+/// * [`Debug`]
+///
+/// The following traits are implemented for function pointers with any number of arguments and
+/// any ABI. These traits have implementations that are automatically generated by the compiler,
+/// so are not limited by missing language features:
+///
+/// * [`Clone`]
+/// * [`Copy`]
+/// * [`Send`]
+/// * [`Sync`]
+/// * [`Unpin`]
+/// * [`UnwindSafe`]
+/// * [`RefUnwindSafe`]
+///
+/// [`Hash`]: hash::Hash
+/// [`Pointer`]: fmt::Pointer
+/// [`UnwindSafe`]: panic::UnwindSafe
+/// [`RefUnwindSafe`]: panic::RefUnwindSafe
+///
+/// In addition, all *safe* function pointers implement [`Fn`], [`FnMut`], and [`FnOnce`], because
+/// these traits are specially known to the compiler.
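+///
+/// For example (a brief sketch; `takes_closure` is just an illustrative name), a `fn` pointer can
+/// be passed wherever an [`Fn`] bound is expected:
+///
+/// ```rust
+/// fn double(x: i32) -> i32 { x * 2 }
+///
+/// // Accepts anything callable as `Fn(i32) -> i32`, including plain `fn` pointers.
+/// fn takes_closure(f: impl Fn(i32) -> i32, v: i32) -> i32 { f(v) }
+///
+/// assert_eq!(takes_closure(double, 21), 42);
+/// assert_eq!(takes_closure(double as fn(i32) -> i32, 21), 42);
+/// ```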
+#[stable(feature = "rust1", since = "1.0.0")]
+mod prim_fn {}
+
+// Required to make auto trait impls render.
+// See src/librustdoc/passes/collect_trait_impls.rs:collect_trait_impls
+#[doc(hidden)]
+#[cfg(not(bootstrap))]
+impl<Ret, T> fn(T) -> Ret {}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on function pointers with any number of arguments.
+impl<Ret, T> Clone for fn(T) -> Ret {
+ fn clone(&self) -> Self {
+ loop {}
+ }
+}
+
+// Fake impl that's only really used for docs.
+#[cfg(doc)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(bootstrap), doc(fake_variadic))]
+/// This trait is implemented on function pointers with any number of arguments.
+impl<Ret, T> Copy for fn(T) -> Ret {
+ // empty
+}
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
new file mode 100644
index 000000000..d6cba7e75
--- /dev/null
+++ b/library/std/src/process.rs
@@ -0,0 +1,2210 @@
+//! A module for working with processes.
+//!
+//! This module is mostly concerned with spawning and interacting with child
+//! processes, but it also provides [`abort`] and [`exit`] for terminating the
+//! current process.
+//!
+//! # Spawning a process
+//!
+//! The [`Command`] struct is used to configure and spawn processes:
+//!
+//! ```no_run
+//! use std::process::Command;
+//!
+//! let output = Command::new("echo")
+//! .arg("Hello world")
+//! .output()
+//! .expect("Failed to execute command");
+//!
+//! assert_eq!(b"Hello world\n", output.stdout.as_slice());
+//! ```
+//!
+//! Several methods on [`Command`], such as [`spawn`] or [`output`], can be used
+//! to spawn a process. In particular, [`output`] spawns the child process and
+//! waits until the process terminates, while [`spawn`] will return a [`Child`]
+//! that represents the spawned child process.
+//!
+//! # Handling I/O
+//!
+//! The [`stdout`], [`stdin`], and [`stderr`] of a child process can be
+//! configured by passing an [`Stdio`] to the corresponding method on
+//! [`Command`]. Once spawned, they can be accessed from the [`Child`]. For
+//! example, piping output from one command into another command can be done
+//! like so:
+//!
+//! ```no_run
+//! use std::process::{Command, Stdio};
+//!
+//! // stdout must be configured with `Stdio::piped` in order to use
+//! // `echo_child.stdout`
+//! let echo_child = Command::new("echo")
+//! .arg("Oh no, a tpyo!")
+//! .stdout(Stdio::piped())
+//! .spawn()
+//! .expect("Failed to start echo process");
+//!
+//! // Note that `echo_child` is moved here, but we won't be needing
+//! // `echo_child` anymore
+//! let echo_out = echo_child.stdout.expect("Failed to open echo stdout");
+//!
+//! let mut sed_child = Command::new("sed")
+//! .arg("s/tpyo/typo/")
+//! .stdin(Stdio::from(echo_out))
+//! .stdout(Stdio::piped())
+//! .spawn()
+//! .expect("Failed to start sed process");
+//!
+//! let output = sed_child.wait_with_output().expect("Failed to wait on sed");
+//! assert_eq!(b"Oh no, a typo!\n", output.stdout.as_slice());
+//! ```
+//!
+//! Note that [`ChildStderr`] and [`ChildStdout`] implement [`Read`] and
+//! [`ChildStdin`] implements [`Write`]:
+//!
+//! ```no_run
+//! use std::process::{Command, Stdio};
+//! use std::io::Write;
+//!
+//! let mut child = Command::new("/bin/cat")
+//! .stdin(Stdio::piped())
+//! .stdout(Stdio::piped())
+//! .spawn()
+//! .expect("failed to execute child");
+//!
+//! // If the child process fills its stdout buffer, it may end up
+//! // waiting until the parent reads the stdout, and not be able to
+//! // read stdin in the meantime, causing a deadlock.
+//! // Writing from another thread ensures that stdout is being read
+//! // at the same time, avoiding the problem.
+//! let mut stdin = child.stdin.take().expect("failed to get stdin");
+//! std::thread::spawn(move || {
+//! stdin.write_all(b"test").expect("failed to write to stdin");
+//! });
+//!
+//! let output = child
+//! .wait_with_output()
+//! .expect("failed to wait on child");
+//!
+//! assert_eq!(b"test", output.stdout.as_slice());
+//! ```
+//!
+//! [`spawn`]: Command::spawn
+//! [`output`]: Command::output
+//!
+//! [`stdout`]: Command::stdout
+//! [`stdin`]: Command::stdin
+//! [`stderr`]: Command::stderr
+//!
+//! [`Write`]: io::Write
+//! [`Read`]: io::Read
+
+#![stable(feature = "process", since = "1.0.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))]
+mod tests;
+
+use crate::io::prelude::*;
+
+use crate::convert::Infallible;
+use crate::ffi::OsStr;
+use crate::fmt;
+use crate::fs;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::num::NonZeroI32;
+use crate::path::Path;
+use crate::str;
+use crate::sys::pipe::{read2, AnonPipe};
+use crate::sys::process as imp;
+#[stable(feature = "command_access", since = "1.57.0")]
+pub use crate::sys_common::process::CommandEnvs;
+use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
+
+/// Representation of a running or exited child process.
+///
+/// This structure is used to represent and manage child processes. A child
+/// process is created via the [`Command`] struct, which configures the
+/// spawning process and can itself be constructed using a builder-style
+/// interface.
+///
+/// There is no implementation of [`Drop`] for child processes,
+/// so if you do not ensure the `Child` has exited then it will continue to
+/// run, even after the `Child` handle to the child process has gone out of
+/// scope.
+///
+/// Calling [`wait`] (or other functions that wrap around it) will make
+/// the parent process wait until the child has actually exited before
+/// continuing.
+///
+/// # Warning
+///
+/// On some systems, calling [`wait`] or similar is necessary for the OS to
+/// release resources. A process that terminated but has not been waited on is
+/// still around as a "zombie". Leaving too many zombies around may exhaust
+/// global resources (for example process IDs).
+///
+/// The standard library does *not* automatically wait on child processes (not
+/// even if the `Child` is dropped); it is up to the application developer to do
+/// so. As a consequence, dropping `Child` handles without waiting on them first
+/// is not recommended in long-running applications.
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::process::Command;
+///
+/// let mut child = Command::new("/bin/cat")
+/// .arg("file.txt")
+/// .spawn()
+/// .expect("failed to execute child");
+///
+/// let ecode = child.wait()
+/// .expect("failed to wait on child");
+///
+/// assert!(ecode.success());
+/// ```
+///
+/// [`wait`]: Child::wait
+#[stable(feature = "process", since = "1.0.0")]
+pub struct Child {
+ pub(crate) handle: imp::Process,
+
+ /// The handle for writing to the child's standard input (stdin), if it has
+ /// been captured. You might find it helpful to do
+ ///
+ /// ```compile_fail,E0425
+ /// let stdin = child.stdin.take().unwrap();
+ /// ```
+ ///
+ /// to avoid partially moving the `child` and thus blocking yourself from calling
+ /// functions on `child` while using `stdin`.
+ #[stable(feature = "process", since = "1.0.0")]
+ pub stdin: Option<ChildStdin>,
+
+ /// The handle for reading from the child's standard output (stdout), if it
+ /// has been captured. You might find it helpful to do
+ ///
+ /// ```compile_fail,E0425
+ /// let stdout = child.stdout.take().unwrap();
+ /// ```
+ ///
+ /// to avoid partially moving the `child` and thus blocking yourself from calling
+ /// functions on `child` while using `stdout`.
+ #[stable(feature = "process", since = "1.0.0")]
+ pub stdout: Option<ChildStdout>,
+
+ /// The handle for reading from the child's standard error (stderr), if it
+ /// has been captured. You might find it helpful to do
+ ///
+ /// ```compile_fail,E0425
+ /// let stderr = child.stderr.take().unwrap();
+ /// ```
+ ///
+ /// to avoid partially moving the `child` and thus blocking yourself from calling
+ /// functions on `child` while using `stderr`.
+ #[stable(feature = "process", since = "1.0.0")]
+ pub stderr: Option<ChildStderr>,
+}
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl crate::sealed::Sealed for Child {}
+
+impl AsInner<imp::Process> for Child {
+ fn as_inner(&self) -> &imp::Process {
+ &self.handle
+ }
+}
+
+impl FromInner<(imp::Process, imp::StdioPipes)> for Child {
+ fn from_inner((handle, io): (imp::Process, imp::StdioPipes)) -> Child {
+ Child {
+ handle,
+ stdin: io.stdin.map(ChildStdin::from_inner),
+ stdout: io.stdout.map(ChildStdout::from_inner),
+ stderr: io.stderr.map(ChildStderr::from_inner),
+ }
+ }
+}
+
+impl IntoInner<imp::Process> for Child {
+ fn into_inner(self) -> imp::Process {
+ self.handle
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Child {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Child")
+ .field("stdin", &self.stdin)
+ .field("stdout", &self.stdout)
+ .field("stderr", &self.stderr)
+ .finish_non_exhaustive()
+ }
+}
+
+/// A handle to a child process's standard input (stdin).
+///
+/// This struct is used in the [`stdin`] field on [`Child`].
+///
+/// When an instance of `ChildStdin` is [dropped], the `ChildStdin`'s underlying
+/// file handle will be closed. If the child process was blocked on input prior
+/// to being dropped, it will become unblocked after dropping.
+///
+/// [`stdin`]: Child::stdin
+/// [dropped]: Drop
+#[stable(feature = "process", since = "1.0.0")]
+pub struct ChildStdin {
+ inner: AnonPipe,
+}
+
+// In addition to the `impl`s here, `ChildStdin` also has `impl`s for
+// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
+// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
+// `AsHandle`/`From<OwnedHandle>`/`Into<OwnedHandle>` and
+// `AsRawHandle`/`IntoRawHandle`/`FromRawHandle` on Windows.
+
+#[stable(feature = "process", since = "1.0.0")]
+impl Write for ChildStdin {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&*self).write(buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ (&*self).write_vectored(bufs)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ io::Write::is_write_vectored(&&*self)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (&*self).flush()
+ }
+}
+
+#[stable(feature = "write_mt", since = "1.48.0")]
+impl Write for &ChildStdin {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.write(buf)
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.write_vectored(bufs)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl AsInner<AnonPipe> for ChildStdin {
+ fn as_inner(&self) -> &AnonPipe {
+ &self.inner
+ }
+}
+
+impl IntoInner<AnonPipe> for ChildStdin {
+ fn into_inner(self) -> AnonPipe {
+ self.inner
+ }
+}
+
+impl FromInner<AnonPipe> for ChildStdin {
+ fn from_inner(pipe: AnonPipe) -> ChildStdin {
+ ChildStdin { inner: pipe }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for ChildStdin {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ChildStdin").finish_non_exhaustive()
+ }
+}
+
+/// A handle to a child process's standard output (stdout).
+///
+/// This struct is used in the [`stdout`] field on [`Child`].
+///
+/// When an instance of `ChildStdout` is [dropped], the `ChildStdout`'s
+/// underlying file handle will be closed.
+///
+/// [`stdout`]: Child::stdout
+/// [dropped]: Drop
+#[stable(feature = "process", since = "1.0.0")]
+pub struct ChildStdout {
+ inner: AnonPipe,
+}
+
+// In addition to the `impl`s here, `ChildStdout` also has `impl`s for
+// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
+// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
+// `AsHandle`/`From<OwnedHandle>`/`Into<OwnedHandle>` and
+// `AsRawHandle`/`IntoRawHandle`/`FromRawHandle` on Windows.
+
+#[stable(feature = "process", since = "1.0.0")]
+impl Read for ChildStdout {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+}
+
+impl AsInner<AnonPipe> for ChildStdout {
+ fn as_inner(&self) -> &AnonPipe {
+ &self.inner
+ }
+}
+
+impl IntoInner<AnonPipe> for ChildStdout {
+ fn into_inner(self) -> AnonPipe {
+ self.inner
+ }
+}
+
+impl FromInner<AnonPipe> for ChildStdout {
+ fn from_inner(pipe: AnonPipe) -> ChildStdout {
+ ChildStdout { inner: pipe }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for ChildStdout {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ChildStdout").finish_non_exhaustive()
+ }
+}
+
+/// A handle to a child process's stderr.
+///
+/// This struct is used in the [`stderr`] field on [`Child`].
+///
+/// When an instance of `ChildStderr` is [dropped], the `ChildStderr`'s
+/// underlying file handle will be closed.
+///
+/// [`stderr`]: Child::stderr
+/// [dropped]: Drop
+#[stable(feature = "process", since = "1.0.0")]
+pub struct ChildStderr {
+ inner: AnonPipe,
+}
+
+// In addition to the `impl`s here, `ChildStderr` also has `impl`s for
+// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
+// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
+// `AsHandle`/`From<OwnedHandle>`/`Into<OwnedHandle>` and
+// `AsRawHandle`/`IntoRawHandle`/`FromRawHandle` on Windows.
+
+#[stable(feature = "process", since = "1.0.0")]
+impl Read for ChildStderr {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+}
+
+impl AsInner<AnonPipe> for ChildStderr {
+ fn as_inner(&self) -> &AnonPipe {
+ &self.inner
+ }
+}
+
+impl IntoInner<AnonPipe> for ChildStderr {
+ fn into_inner(self) -> AnonPipe {
+ self.inner
+ }
+}
+
+impl FromInner<AnonPipe> for ChildStderr {
+ fn from_inner(pipe: AnonPipe) -> ChildStderr {
+ ChildStderr { inner: pipe }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for ChildStderr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ChildStderr").finish_non_exhaustive()
+ }
+}
+
+/// A process builder, providing fine-grained control
+/// over how a new process should be spawned.
+///
+/// A default configuration can be
+/// generated using `Command::new(program)`, where `program` gives a path to the
+/// program to be executed. Additional builder methods allow the configuration
+/// to be changed (for example, by adding arguments) prior to spawning:
+///
+/// ```
+/// use std::process::Command;
+///
+/// let output = if cfg!(target_os = "windows") {
+/// Command::new("cmd")
+/// .args(["/C", "echo hello"])
+/// .output()
+/// .expect("failed to execute process")
+/// } else {
+/// Command::new("sh")
+/// .arg("-c")
+/// .arg("echo hello")
+/// .output()
+/// .expect("failed to execute process")
+/// };
+///
+/// let hello = output.stdout;
+/// ```
+///
+/// `Command` can be reused to spawn multiple processes. The builder methods
+/// change the command without needing to immediately spawn the process.
+///
+/// ```no_run
+/// use std::process::Command;
+///
+/// let mut echo_hello = Command::new("sh");
+/// echo_hello.arg("-c")
+/// .arg("echo hello");
+/// let hello_1 = echo_hello.output().expect("failed to execute process");
+/// let hello_2 = echo_hello.output().expect("failed to execute process");
+/// ```
+///
+/// Similarly, you can call builder methods after spawning a process and then
+/// spawn a new process with the modified settings.
+///
+/// ```no_run
+/// use std::process::Command;
+///
+/// let mut list_dir = Command::new("ls");
+///
+/// // Execute `ls` in the current directory of the program.
+/// list_dir.status().expect("process failed to execute");
+///
+/// println!();
+///
+/// // Change `ls` to execute in the root directory.
+/// list_dir.current_dir("/");
+///
+/// // And then execute `ls` again but in the root directory.
+/// list_dir.status().expect("process failed to execute");
+/// ```
+#[stable(feature = "process", since = "1.0.0")]
+pub struct Command {
+ inner: imp::Command,
+}
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl crate::sealed::Sealed for Command {}
+
+impl Command {
+ /// Constructs a new `Command` for launching the program at
+ /// path `program`, with the following default configuration:
+ ///
+ /// * No arguments to the program
+ /// * Inherit the current process's environment
+ /// * Inherit the current process's working directory
+ /// * Inherit stdin/stdout/stderr for [`spawn`] or [`status`], but create pipes for [`output`]
+ ///
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
+ ///
+ /// Builder methods are provided to change these defaults and
+ /// otherwise configure the process.
+ ///
+ /// If `program` is not an absolute path, the `PATH` will be searched in
+ /// an OS-defined way.
+ ///
+ /// The search path to be used may be controlled by setting the
+ /// `PATH` environment variable on the Command,
+ /// but this has some implementation limitations on Windows
+ /// (see issue #37519).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("sh")
+ /// .spawn()
+ /// .expect("sh command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn new<S: AsRef<OsStr>>(program: S) -> Command {
+ Command { inner: imp::Command::new(program.as_ref()) }
+ }
+
+ /// Adds an argument to pass to the program.
+ ///
+ /// Only one argument can be passed per use. So instead of:
+ ///
+ /// ```no_run
+ /// # std::process::Command::new("sh")
+ /// .arg("-C /path/to/repo")
+ /// # ;
+ /// ```
+ ///
+ /// usage would be:
+ ///
+ /// ```no_run
+ /// # std::process::Command::new("sh")
+ /// .arg("-C")
+ /// .arg("/path/to/repo")
+ /// # ;
+ /// ```
+ ///
+ /// To pass multiple arguments see [`args`].
+ ///
+ /// [`args`]: Command::args
+ ///
+ /// Note that the argument is not passed through a shell, but given
+ /// literally to the program. This means that shell syntax like quotes,
+ /// escaped characters, word splitting, glob patterns, substitution, etc.
+ /// have no effect.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("ls")
+ /// .arg("-l")
+ /// .arg("-a")
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
+ self.inner.arg(arg.as_ref());
+ self
+ }
+
+ /// Adds multiple arguments to pass to the program.
+ ///
+ /// To pass a single argument see [`arg`].
+ ///
+ /// [`arg`]: Command::arg
+ ///
+ /// Note that the arguments are not passed through a shell, but given
+ /// literally to the program. This means that shell syntax like quotes,
+ /// escaped characters, word splitting, glob patterns, substitution, etc.
+ /// have no effect.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("ls")
+ /// .args(["-l", "-a"])
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn args<I, S>(&mut self, args: I) -> &mut Command
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ for arg in args {
+ self.arg(arg.as_ref());
+ }
+ self
+ }
+
+ /// Inserts or updates an environment variable mapping.
+ ///
+ /// Note that environment variable names are case-insensitive (but case-preserving) on Windows,
+ /// and case-sensitive on all other platforms.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("ls")
+ /// .env("PATH", "/bin")
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
+ where
+ K: AsRef<OsStr>,
+ V: AsRef<OsStr>,
+ {
+ self.inner.env_mut().set(key.as_ref(), val.as_ref());
+ self
+ }
+
+ /// Adds or updates multiple environment variable mappings.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ /// use std::env;
+ /// use std::collections::HashMap;
+ ///
+ /// let filtered_env : HashMap<String, String> =
+ /// env::vars().filter(|&(ref k, _)|
+ /// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH"
+ /// ).collect();
+ ///
+ /// Command::new("printenv")
+ /// .stdin(Stdio::null())
+ /// .stdout(Stdio::inherit())
+ /// .env_clear()
+ /// .envs(&filtered_env)
+ /// .spawn()
+ /// .expect("printenv failed to start");
+ /// ```
+ #[stable(feature = "command_envs", since = "1.19.0")]
+ pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Command
+ where
+ I: IntoIterator<Item = (K, V)>,
+ K: AsRef<OsStr>,
+ V: AsRef<OsStr>,
+ {
+ for (ref key, ref val) in vars {
+ self.inner.env_mut().set(key.as_ref(), val.as_ref());
+ }
+ self
+ }
+
+ /// Removes an environment variable mapping.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("ls")
+ /// .env_remove("PATH")
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn env_remove<K: AsRef<OsStr>>(&mut self, key: K) -> &mut Command {
+ self.inner.env_mut().remove(key.as_ref());
+ self
+ }
+
+ /// Clears the entire environment map for the child process.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("ls")
+ /// .env_clear()
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn env_clear(&mut self) -> &mut Command {
+ self.inner.env_mut().clear();
+ self
+ }
+
+ /// Sets the working directory for the child process.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous
+ /// whether it should be interpreted relative to the parent's working
+ /// directory or relative to `current_dir`. The behavior in this case is
+ /// platform-specific and unstable, and it's recommended to use
+ /// [`canonicalize`] to get an absolute program path instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("ls")
+ /// .current_dir("/bin")
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ ///
+ /// [`canonicalize`]: crate::fs::canonicalize
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command {
+ self.inner.cwd(dir.as_ref().as_ref());
+ self
+ }
+
+ /// Configuration for the child process's standard input (stdin) handle.
+ ///
+ /// Defaults to [`inherit`] when used with [`spawn`] or [`status`], and
+ /// defaults to [`piped`] when used with [`output`].
+ ///
+ /// [`inherit`]: Stdio::inherit
+ /// [`piped`]: Stdio::piped
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// Command::new("ls")
+ /// .stdin(Stdio::null())
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.inner.stdin(cfg.into().0);
+ self
+ }
+
+ /// Configuration for the child process's standard output (stdout) handle.
+ ///
+ /// Defaults to [`inherit`] when used with [`spawn`] or [`status`], and
+ /// defaults to [`piped`] when used with [`output`].
+ ///
+ /// [`inherit`]: Stdio::inherit
+ /// [`piped`]: Stdio::piped
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// Command::new("ls")
+ /// .stdout(Stdio::null())
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.inner.stdout(cfg.into().0);
+ self
+ }
+
+ /// Configuration for the child process's standard error (stderr) handle.
+ ///
+ /// Defaults to [`inherit`] when used with [`spawn`] or [`status`], and
+ /// defaults to [`piped`] when used with [`output`].
+ ///
+ /// [`inherit`]: Stdio::inherit
+ /// [`piped`]: Stdio::piped
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// Command::new("ls")
+ /// .stderr(Stdio::null())
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.inner.stderr(cfg.into().0);
+ self
+ }
+
+ /// Executes the command as a child process, returning a handle to it.
+ ///
+ /// By default, stdin, stdout and stderr are inherited from the parent.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// Command::new("ls")
+ /// .spawn()
+ /// .expect("ls command failed to start");
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn spawn(&mut self) -> io::Result<Child> {
+ self.inner.spawn(imp::Stdio::Inherit, true).map(Child::from_inner)
+ }
+
+ /// Executes the command as a child process, waiting for it to finish and
+ /// collecting all of its output.
+ ///
+ /// By default, stdout and stderr are captured (and used to provide the
+ /// resulting output). Stdin is not inherited from the parent and any
+ /// attempt by the child process to read from the stdin stream will result
+ /// in the stream immediately closing.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::process::Command;
+ /// use std::io::{self, Write};
+ /// let output = Command::new("/bin/cat")
+ /// .arg("file.txt")
+ /// .output()
+ /// .expect("failed to execute process");
+ ///
+ /// println!("status: {}", output.status);
+ /// io::stdout().write_all(&output.stdout).unwrap();
+ /// io::stderr().write_all(&output.stderr).unwrap();
+ ///
+ /// assert!(output.status.success());
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn output(&mut self) -> io::Result<Output> {
+ self.inner
+ .spawn(imp::Stdio::MakePipe, false)
+ .map(Child::from_inner)
+ .and_then(|p| p.wait_with_output())
+ }
+
+ /// Executes a command as a child process, waiting for it to finish and
+ /// collecting its status.
+ ///
+ /// By default, stdin, stdout and stderr are inherited from the parent.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::process::Command;
+ ///
+ /// let status = Command::new("/bin/cat")
+ /// .arg("file.txt")
+ /// .status()
+ /// .expect("failed to execute process");
+ ///
+ /// println!("process finished with: {status}");
+ ///
+ /// assert!(status.success());
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn status(&mut self) -> io::Result<ExitStatus> {
+ self.inner
+ .spawn(imp::Stdio::Inherit, true)
+ .map(Child::from_inner)
+ .and_then(|mut p| p.wait())
+ }
+
+ /// Returns the path to the program that was given to [`Command::new`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::process::Command;
+ ///
+ /// let cmd = Command::new("echo");
+ /// assert_eq!(cmd.get_program(), "echo");
+ /// ```
+ #[must_use]
+ #[stable(feature = "command_access", since = "1.57.0")]
+ pub fn get_program(&self) -> &OsStr {
+ self.inner.get_program()
+ }
+
+ /// Returns an iterator of the arguments that will be passed to the program.
+ ///
+ /// This does not include the path to the program as the first argument;
+ /// it only includes the arguments specified with [`Command::arg`] and
+ /// [`Command::args`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsStr;
+ /// use std::process::Command;
+ ///
+ /// let mut cmd = Command::new("echo");
+ /// cmd.arg("first").arg("second");
+ /// let args: Vec<&OsStr> = cmd.get_args().collect();
+ /// assert_eq!(args, &["first", "second"]);
+ /// ```
+ #[stable(feature = "command_access", since = "1.57.0")]
+ pub fn get_args(&self) -> CommandArgs<'_> {
+ CommandArgs { inner: self.inner.get_args() }
+ }
+
+ /// Returns an iterator of the environment variables that will be set when
+ /// the process is spawned.
+ ///
+ /// Each element is a tuple `(&OsStr, Option<&OsStr>)`, where the first
+ /// value is the key, and the second is the value, which is [`None`] if
+ /// the environment variable is to be explicitly removed.
+ ///
+ /// This only includes environment variables explicitly set with
+ /// [`Command::env`], [`Command::envs`], and [`Command::env_remove`]. It
+ /// does not include environment variables that will be inherited by the
+ /// child process.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsStr;
+ /// use std::process::Command;
+ ///
+ /// let mut cmd = Command::new("ls");
+ /// cmd.env("TERM", "dumb").env_remove("TZ");
+ /// let envs: Vec<(&OsStr, Option<&OsStr>)> = cmd.get_envs().collect();
+ /// assert_eq!(envs, &[
+ /// (OsStr::new("TERM"), Some(OsStr::new("dumb"))),
+ /// (OsStr::new("TZ"), None)
+ /// ]);
+ /// ```
+ #[stable(feature = "command_access", since = "1.57.0")]
+ pub fn get_envs(&self) -> CommandEnvs<'_> {
+ self.inner.get_envs()
+ }
+
+ /// Returns the working directory for the child process.
+ ///
+ /// This returns [`None`] if the working directory will not be changed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::path::Path;
+ /// use std::process::Command;
+ ///
+ /// let mut cmd = Command::new("ls");
+ /// assert_eq!(cmd.get_current_dir(), None);
+ /// cmd.current_dir("/bin");
+ /// assert_eq!(cmd.get_current_dir(), Some(Path::new("/bin")));
+ /// ```
+ #[must_use]
+ #[stable(feature = "command_access", since = "1.57.0")]
+ pub fn get_current_dir(&self) -> Option<&Path> {
+ self.inner.get_current_dir()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Command {
+ /// Formats the program and arguments of a `Command` for display. Any
+ /// non-UTF-8 data is lossily converted using the UTF-8 replacement
+ /// character.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.fmt(f)
+ }
+}
+
+impl AsInner<imp::Command> for Command {
+ fn as_inner(&self) -> &imp::Command {
+ &self.inner
+ }
+}
+
+impl AsInnerMut<imp::Command> for Command {
+ fn as_inner_mut(&mut self) -> &mut imp::Command {
+ &mut self.inner
+ }
+}
+
+/// An iterator over the command arguments.
+///
+/// This struct is created by [`Command::get_args`]. See its documentation for
+/// more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "command_access", since = "1.57.0")]
+#[derive(Debug)]
+pub struct CommandArgs<'a> {
+ inner: imp::CommandArgs<'a>,
+}
+
+#[stable(feature = "command_access", since = "1.57.0")]
+impl<'a> Iterator for CommandArgs<'a> {
+ type Item = &'a OsStr;
+ fn next(&mut self) -> Option<&'a OsStr> {
+ self.inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "command_access", since = "1.57.0")]
+impl<'a> ExactSizeIterator for CommandArgs<'a> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+ fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+}
+
+/// The output of a finished process.
+///
+/// This is returned in a Result by either the [`output`] method of a
+/// [`Command`], or the [`wait_with_output`] method of a [`Child`]
+/// process.
+///
+/// [`output`]: Command::output
+/// [`wait_with_output`]: Child::wait_with_output
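+///
+/// # Examples
+///
+/// A minimal sketch of inspecting an `Output` (assuming an `echo` binary is available):
+///
+/// ```no_run
+/// use std::process::Command;
+///
+/// let output = Command::new("echo")
+///     .arg("hello")
+///     .output()
+///     .expect("failed to execute echo");
+///
+/// // A zero exit status, the captured stdout bytes, and an empty stderr.
+/// assert!(output.status.success());
+/// assert_eq!(output.stdout, b"hello\n");
+/// assert!(output.stderr.is_empty());
+/// ```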
+#[derive(PartialEq, Eq, Clone)]
+#[stable(feature = "process", since = "1.0.0")]
+pub struct Output {
+ /// The status (exit code) of the process.
+ #[stable(feature = "process", since = "1.0.0")]
+ pub status: ExitStatus,
+ /// The data that the process wrote to stdout.
+ #[stable(feature = "process", since = "1.0.0")]
+ pub stdout: Vec<u8>,
+ /// The data that the process wrote to stderr.
+ #[stable(feature = "process", since = "1.0.0")]
+ pub stderr: Vec<u8>,
+}
+
+// If either stdout or stderr is a valid UTF-8 string, it prints the string;
+// otherwise it prints the byte sequence instead.
+#[stable(feature = "process_output_debug", since = "1.7.0")]
+impl fmt::Debug for Output {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let stdout_utf8 = str::from_utf8(&self.stdout);
+ let stdout_debug: &dyn fmt::Debug = match stdout_utf8 {
+ Ok(ref str) => str,
+ Err(_) => &self.stdout,
+ };
+
+ let stderr_utf8 = str::from_utf8(&self.stderr);
+ let stderr_debug: &dyn fmt::Debug = match stderr_utf8 {
+ Ok(ref str) => str,
+ Err(_) => &self.stderr,
+ };
+
+ fmt.debug_struct("Output")
+ .field("status", &self.status)
+ .field("stdout", stdout_debug)
+ .field("stderr", stderr_debug)
+ .finish()
+ }
+}
+
+/// Describes what to do with a standard I/O stream for a child process when
+/// passed to the [`stdin`], [`stdout`], and [`stderr`] methods of [`Command`].
+///
+/// [`stdin`]: Command::stdin
+/// [`stdout`]: Command::stdout
+/// [`stderr`]: Command::stderr
+#[stable(feature = "process", since = "1.0.0")]
+pub struct Stdio(imp::Stdio);
+
+impl Stdio {
+ /// A new pipe should be arranged to connect the parent and child processes.
+ ///
+ /// # Examples
+ ///
+ /// With stdout:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let output = Command::new("echo")
+ /// .arg("Hello, world!")
+ /// .stdout(Stdio::piped())
+ /// .output()
+ /// .expect("Failed to execute command");
+ ///
+ /// assert_eq!(String::from_utf8_lossy(&output.stdout), "Hello, world!\n");
+ /// // Nothing echoed to console
+ /// ```
+ ///
+ /// With stdin:
+ ///
+ /// ```no_run
+ /// use std::io::Write;
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let mut child = Command::new("rev")
+ /// .stdin(Stdio::piped())
+ /// .stdout(Stdio::piped())
+ /// .spawn()
+ /// .expect("Failed to spawn child process");
+ ///
+ /// let mut stdin = child.stdin.take().expect("Failed to open stdin");
+ /// std::thread::spawn(move || {
+ /// stdin.write_all("Hello, world!".as_bytes()).expect("Failed to write to stdin");
+ /// });
+ ///
+ /// let output = child.wait_with_output().expect("Failed to read stdout");
+ /// assert_eq!(String::from_utf8_lossy(&output.stdout), "!dlrow ,olleH");
+ /// ```
+ ///
+ /// Writing more than a pipe buffer's worth of input to stdin without also reading
+ /// stdout and stderr at the same time may cause a deadlock.
+ /// This is an issue when running any program that doesn't guarantee that it reads
+ /// its entire stdin before writing more than a pipe buffer's worth of output.
+ /// The size of a pipe buffer varies on different targets.
+ ///
+ #[must_use]
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn piped() -> Stdio {
+ Stdio(imp::Stdio::MakePipe)
+ }
+
+ /// The child inherits from the corresponding parent descriptor.
+ ///
+ /// # Examples
+ ///
+ /// With stdout:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let output = Command::new("echo")
+ /// .arg("Hello, world!")
+ /// .stdout(Stdio::inherit())
+ /// .output()
+ /// .expect("Failed to execute command");
+ ///
+ /// assert_eq!(String::from_utf8_lossy(&output.stdout), "");
+ /// // "Hello, world!" echoed to console
+ /// ```
+ ///
+ /// With stdin:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ /// use std::io::{self, Write};
+ ///
+ /// let output = Command::new("rev")
+ /// .stdin(Stdio::inherit())
+ /// .stdout(Stdio::piped())
+ /// .output()
+ /// .expect("Failed to execute command");
+ ///
+ /// print!("You piped in the reverse of: ");
+ /// io::stdout().write_all(&output.stdout).unwrap();
+ /// ```
+ #[must_use]
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn inherit() -> Stdio {
+ Stdio(imp::Stdio::Inherit)
+ }
+
+ /// This stream will be ignored. This is the equivalent of attaching the
+ /// stream to `/dev/null`.
+ ///
+ /// # Examples
+ ///
+ /// With stdout:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let output = Command::new("echo")
+ /// .arg("Hello, world!")
+ /// .stdout(Stdio::null())
+ /// .output()
+ /// .expect("Failed to execute command");
+ ///
+ /// assert_eq!(String::from_utf8_lossy(&output.stdout), "");
+ /// // Nothing echoed to console
+ /// ```
+ ///
+ /// With stdin:
+ ///
+ /// ```no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let output = Command::new("rev")
+ /// .stdin(Stdio::null())
+ /// .stdout(Stdio::piped())
+ /// .output()
+ /// .expect("Failed to execute command");
+ ///
+ /// assert_eq!(String::from_utf8_lossy(&output.stdout), "");
+ /// // Ignores any piped-in input
+ /// ```
+ #[must_use]
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn null() -> Stdio {
+ Stdio(imp::Stdio::Null)
+ }
+
+ /// Returns `true` if this requires [`Command`] to create a new pipe.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(stdio_makes_pipe)]
+ /// use std::process::Stdio;
+ ///
+ /// let io = Stdio::piped();
+ /// assert_eq!(io.makes_pipe(), true);
+ /// ```
+ #[unstable(feature = "stdio_makes_pipe", issue = "98288")]
+ pub fn makes_pipe(&self) -> bool {
+ matches!(self.0, imp::Stdio::MakePipe)
+ }
+}
+
+impl FromInner<imp::Stdio> for Stdio {
+ fn from_inner(inner: imp::Stdio) -> Stdio {
+ Stdio(inner)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Stdio {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Stdio").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<ChildStdin> for Stdio {
+ /// Converts a [`ChildStdin`] into a [`Stdio`].
+ ///
+ /// # Examples
+ ///
+ /// `ChildStdin` will be converted to `Stdio` using `Stdio::from` under the hood.
+ ///
+ /// ```rust,no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let reverse = Command::new("rev")
+ /// .stdin(Stdio::piped())
+ /// .spawn()
+ /// .expect("failed reverse command");
+ ///
+ /// let _echo = Command::new("echo")
+ /// .arg("Hello, world!")
+ /// .stdout(reverse.stdin.unwrap()) // Converted into a Stdio here
+ /// .output()
+ /// .expect("failed echo command");
+ ///
+ /// // "!dlrow ,olleH" echoed to console
+ /// ```
+ fn from(child: ChildStdin) -> Stdio {
+ Stdio::from_inner(child.into_inner().into())
+ }
+}
+
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<ChildStdout> for Stdio {
+ /// Converts a [`ChildStdout`] into a [`Stdio`].
+ ///
+ /// # Examples
+ ///
+ /// `ChildStdout` will be converted to `Stdio` using `Stdio::from` under the hood.
+ ///
+ /// ```rust,no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let hello = Command::new("echo")
+ /// .arg("Hello, world!")
+ /// .stdout(Stdio::piped())
+ /// .spawn()
+ /// .expect("failed echo command");
+ ///
+ /// let reverse = Command::new("rev")
+ /// .stdin(hello.stdout.unwrap()) // Converted into a Stdio here
+ /// .output()
+ /// .expect("failed reverse command");
+ ///
+ /// assert_eq!(reverse.stdout, b"!dlrow ,olleH\n");
+ /// ```
+ fn from(child: ChildStdout) -> Stdio {
+ Stdio::from_inner(child.into_inner().into())
+ }
+}
+
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<ChildStderr> for Stdio {
+ /// Converts a [`ChildStderr`] into a [`Stdio`].
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let reverse = Command::new("rev")
+ /// .arg("non_existing_file.txt")
+ /// .stderr(Stdio::piped())
+ /// .spawn()
+ /// .expect("failed reverse command");
+ ///
+ /// let cat = Command::new("cat")
+ /// .arg("-")
+ /// .stdin(reverse.stderr.unwrap()) // Converted into a Stdio here
+ /// .output()
+ /// .expect("failed echo command");
+ ///
+ /// assert_eq!(
+ /// String::from_utf8_lossy(&cat.stdout),
+ /// "rev: cannot open non_existing_file.txt: No such file or directory\n"
+ /// );
+ /// ```
+ fn from(child: ChildStderr) -> Stdio {
+ Stdio::from_inner(child.into_inner().into())
+ }
+}
+
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<fs::File> for Stdio {
+ /// Converts a [`File`](fs::File) into a [`Stdio`].
+ ///
+ /// # Examples
+ ///
+ /// `File` will be converted to `Stdio` using `Stdio::from` under the hood.
+ ///
+ /// ```rust,no_run
+ /// use std::fs::File;
+ /// use std::process::Command;
+ ///
+ /// // With the `foo.txt` file containing "Hello, world!"
+ /// let file = File::open("foo.txt").unwrap();
+ ///
+ /// let reverse = Command::new("rev")
+ /// .stdin(file) // Implicit File conversion into a Stdio
+ /// .output()
+ /// .expect("failed reverse command");
+ ///
+ /// assert_eq!(reverse.stdout, b"!dlrow ,olleH");
+ /// ```
+ fn from(file: fs::File) -> Stdio {
+ Stdio::from_inner(file.into_inner().into())
+ }
+}
+
+/// Describes the result of a process after it has terminated.
+///
+/// This `struct` is used to represent the exit status or other termination of a child process.
+/// Child processes are created via the [`Command`] struct and their exit
+/// status is exposed through the [`status`] method, or the [`wait`] method
+/// of a [`Child`] process.
+///
+/// An `ExitStatus` represents every possible disposition of a process. On Unix this
+/// is the **wait status**. It is *not* simply an *exit status* (a value passed to `exit`).
+///
+/// For proper error reporting of failed processes, print the value of `ExitStatus` or
+/// `ExitStatusError` using their implementations of [`Display`](crate::fmt::Display).
+///
+/// # Differences from `ExitCode`
+///
+/// [`ExitCode`] is intended for terminating the currently running process, via
+/// the `Termination` trait, in contrast to `ExitStatus`, which represents the
+/// termination of a child process. These APIs are separate due to platform
+/// compatibility differences and their expected usage; it is not generally
+/// possible to exactly reproduce an `ExitStatus` from a child for the current
+/// process after the fact.
+///
+/// [`status`]: Command::status
+/// [`wait`]: Child::wait
+//
+// We speak slightly loosely (here and in various other places in the stdlib docs) about `exit`
+// vs `_exit`. Naming of Unix system calls is not standardised across Unices, so terminology is a
+// matter of convention and tradition. For clarity we usually speak of `exit`, even when we might
+// mean an underlying system call such as `_exit`.
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[stable(feature = "process", since = "1.0.0")]
+pub struct ExitStatus(imp::ExitStatus);
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl crate::sealed::Sealed for ExitStatus {}
+
+impl ExitStatus {
+ /// Was termination successful? Returns `Ok(())` if the process terminated successfully, or an `Err` containing an `ExitStatusError` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(exit_status_error)]
+ /// # if cfg!(unix) {
+ /// use std::process::Command;
+ ///
+ /// let status = Command::new("ls")
+ /// .arg("/dev/nonexistent")
+ /// .status()
+ /// .expect("ls could not be executed");
+ ///
+ /// println!("ls: {status}");
+ /// status.exit_ok().expect_err("/dev/nonexistent could be listed!");
+ /// # } // cfg!(unix)
+ /// ```
+ #[unstable(feature = "exit_status_error", issue = "84908")]
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ self.0.exit_ok().map_err(ExitStatusError)
+ }
+
+ /// Was termination successful? Signal termination is not considered a
+ /// success, and success is defined as a zero exit status.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::process::Command;
+ ///
+ /// let status = Command::new("mkdir")
+ /// .arg("projects")
+ /// .status()
+ /// .expect("failed to execute mkdir");
+ ///
+ /// if status.success() {
+ /// println!("'projects/' directory created");
+ /// } else {
+ /// println!("failed to create 'projects/' directory: {status}");
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn success(&self) -> bool {
+ self.0.exit_ok().is_ok()
+ }
+
+ /// Returns the exit code of the process, if any.
+ ///
+ /// In Unix terms the return value is the **exit status**: the value passed to `exit`, if the
+ /// process finished by calling `exit`. Note that on Unix the exit status is truncated to 8
+ /// bits, and that values that didn't come from a program's call to `exit` may be invented by the
+ /// runtime system (often, for example, 255, 254, 127 or 126).
+ ///
+ /// On Unix, this will return `None` if the process was terminated by a signal.
+ /// [`ExitStatusExt`](crate::os::unix::process::ExitStatusExt) is an
+ /// extension trait for extracting any such signal, and other details, from the `ExitStatus`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// let status = Command::new("mkdir")
+ /// .arg("projects")
+ /// .status()
+ /// .expect("failed to execute mkdir");
+ ///
+ /// match status.code() {
+ /// Some(code) => println!("Exited with status code: {code}"),
+ /// None => println!("Process terminated by signal")
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn code(&self) -> Option<i32> {
+ self.0.code()
+ }
+}
+
+impl AsInner<imp::ExitStatus> for ExitStatus {
+ fn as_inner(&self) -> &imp::ExitStatus {
+ &self.0
+ }
+}
+
+impl FromInner<imp::ExitStatus> for ExitStatus {
+ fn from_inner(s: imp::ExitStatus) -> ExitStatus {
+ ExitStatus(s)
+ }
+}
+
+#[stable(feature = "process", since = "1.0.0")]
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl crate::sealed::Sealed for ExitStatusError {}
+
+/// Describes the result of a process after it has failed.
+///
+/// Produced by the [`.exit_ok`](ExitStatus::exit_ok) method on [`ExitStatus`].
+///
+/// # Examples
+///
+/// ```
+/// #![feature(exit_status_error)]
+/// # if cfg!(unix) {
+/// use std::process::{Command, ExitStatusError};
+///
+/// fn run(cmd: &str) -> Result<(),ExitStatusError> {
+/// Command::new(cmd).status().unwrap().exit_ok()?;
+/// Ok(())
+/// }
+///
+/// run("true").unwrap();
+/// run("false").unwrap_err();
+/// # } // cfg!(unix)
+/// ```
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[unstable(feature = "exit_status_error", issue = "84908")]
+// The definition of imp::ExitStatusError should ideally be such that
+// Result<(), imp::ExitStatusError> has an identical representation to imp::ExitStatus.
+pub struct ExitStatusError(imp::ExitStatusError);
+
+#[unstable(feature = "exit_status_error", issue = "84908")]
+impl ExitStatusError {
+ /// Reports the exit code, if applicable, from an `ExitStatusError`.
+ ///
+ /// In Unix terms the return value is the **exit status**: the value passed to `exit`, if the
+ /// process finished by calling `exit`. Note that on Unix the exit status is truncated to 8
+ /// bits, and that values that didn't come from a program's call to `exit` may be invented by the
+ /// runtime system (often, for example, 255, 254, 127 or 126).
+ ///
+ /// On Unix, this will return `None` if the process was terminated by a signal. If you want to
+ /// handle such situations specially, consider using methods from
+ /// [`ExitStatusExt`](crate::os::unix::process::ExitStatusExt).
+ ///
+ /// If the process finished by calling `exit` with a nonzero value, this will return
+ /// that exit status.
+ ///
+ /// If the error was something else, it will return `None`.
+ ///
+ /// If the process exited successfully (i.e., by calling `exit(0)`), there is no
+ /// `ExitStatusError`. So the return value from `ExitStatusError::code()` is always nonzero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(exit_status_error)]
+ /// # #[cfg(unix)] {
+ /// use std::process::Command;
+ ///
+ /// let bad = Command::new("false").status().unwrap().exit_ok().unwrap_err();
+ /// assert_eq!(bad.code(), Some(1));
+ /// # } // #[cfg(unix)]
+ /// ```
+ #[must_use]
+ pub fn code(&self) -> Option<i32> {
+ self.code_nonzero().map(Into::into)
+ }
+
+ /// Reports the exit code, if applicable, from an `ExitStatusError`, as a `NonZero`
+ ///
+ /// This is exactly like [`code()`](Self::code), except that it returns a `NonZeroI32`.
+ ///
+ /// Plain `code`, returning a plain integer, is provided because it is often more convenient.
+ /// The returned value from `code()` is indeed also nonzero; use `code_nonzero()` when you want
+ /// a type-level guarantee of nonzeroness.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(exit_status_error)]
+ /// # if cfg!(unix) {
+ /// use std::num::NonZeroI32;
+ /// use std::process::Command;
+ ///
+ /// let bad = Command::new("false").status().unwrap().exit_ok().unwrap_err();
+ /// assert_eq!(bad.code_nonzero().unwrap(), NonZeroI32::try_from(1).unwrap());
+ /// # } // cfg!(unix)
+ /// ```
+ #[must_use]
+ pub fn code_nonzero(&self) -> Option<NonZeroI32> {
+ self.0.code()
+ }
+
+ /// Converts an `ExitStatusError` (back) to an `ExitStatus`.
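+ ///
+ /// # Examples
+ ///
+ /// A brief sketch, following the pattern of the examples above:
+ ///
+ /// ```
+ /// #![feature(exit_status_error)]
+ /// # if cfg!(unix) {
+ /// use std::process::Command;
+ ///
+ /// let err = Command::new("false").status().unwrap().exit_ok().unwrap_err();
+ /// // Recover the original `ExitStatus` from the error.
+ /// assert!(!err.into_status().success());
+ /// # } // cfg!(unix)
+ /// ```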
+ #[must_use]
+ pub fn into_status(&self) -> ExitStatus {
+ ExitStatus(self.0.into())
+ }
+}
+
+#[unstable(feature = "exit_status_error", issue = "84908")]
+impl Into<ExitStatus> for ExitStatusError {
+ fn into(self) -> ExitStatus {
+ ExitStatus(self.0.into())
+ }
+}
+
+#[unstable(feature = "exit_status_error", issue = "84908")]
+impl fmt::Display for ExitStatusError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "process exited unsuccessfully: {}", self.into_status())
+ }
+}
+
+#[unstable(feature = "exit_status_error", issue = "84908")]
+impl crate::error::Error for ExitStatusError {}
+
+/// This type represents the status code the current process can return
+/// to its parent under normal termination.
+///
+/// `ExitCode` is intended to be consumed only by the standard library (via
+/// [`Termination::report()`]), and intentionally does not provide accessors, nor
+/// implementations of traits such as `PartialEq`, `Eq`, or `Hash`. Instead the
+/// standard library provides the canonical `SUCCESS` and `FAILURE` exit codes, as
+/// well as `From<u8> for ExitCode` for constructing other arbitrary exit codes.
+///
+/// # Portability
+///
+/// Numeric values used in this type don't have portable meanings, and
+/// different platforms may mask different amounts of them.
+///
+/// For the platform's canonical successful and unsuccessful codes, see
+/// the [`SUCCESS`] and [`FAILURE`] associated items.
+///
+/// [`SUCCESS`]: ExitCode::SUCCESS
+/// [`FAILURE`]: ExitCode::FAILURE
+///
+/// # Differences from `ExitStatus`
+///
+/// `ExitCode` is intended for terminating the currently running process, via
+/// the `Termination` trait, in contrast to [`ExitStatus`], which represents the
+/// termination of a child process. These APIs are separate due to platform
+/// compatibility differences and their expected usage; it is not generally
+/// possible to exactly reproduce an `ExitStatus` from a child for the current
+/// process after the fact.
+///
+/// # Examples
+///
+/// `ExitCode` can be returned from the `main` function of a crate, as it implements
+/// [`Termination`]:
+///
+/// ```
+/// use std::process::ExitCode;
+/// # fn check_foo() -> bool { true }
+///
+/// fn main() -> ExitCode {
+/// if !check_foo() {
+/// return ExitCode::from(42);
+/// }
+///
+/// ExitCode::SUCCESS
+/// }
+/// ```
+#[derive(Clone, Copy, Debug)]
+#[stable(feature = "process_exitcode", since = "1.61.0")]
+pub struct ExitCode(imp::ExitCode);
+
+/// Allows extension traits within `std`.
+#[unstable(feature = "sealed", issue = "none")]
+impl crate::sealed::Sealed for ExitCode {}
+
+#[stable(feature = "process_exitcode", since = "1.61.0")]
+impl ExitCode {
+ /// The canonical `ExitCode` for successful termination on this platform.
+ ///
+ /// Note that a `()`-returning `main` implicitly results in a successful
+ /// termination, so there's no need to return this from `main` unless
+ /// you're also returning other possible codes.
+ #[stable(feature = "process_exitcode", since = "1.61.0")]
+ pub const SUCCESS: ExitCode = ExitCode(imp::ExitCode::SUCCESS);
+
+ /// The canonical `ExitCode` for unsuccessful termination on this platform.
+ ///
+ /// If you're only returning this and `SUCCESS` from `main`, consider
+ /// instead returning `Err(_)` and `Ok(())` respectively, which will
+ /// return the same codes (but will also `eprintln!` the error).
+ #[stable(feature = "process_exitcode", since = "1.61.0")]
+ pub const FAILURE: ExitCode = ExitCode(imp::ExitCode::FAILURE);
+
+ /// Exit the current process with the given `ExitCode`.
+ ///
+ /// Note that this has the same caveats as [`process::exit()`][exit], namely that this function
+ /// terminates the process immediately, so no destructors on the current stack or any other
+ /// thread's stack will be run. If a clean shutdown is needed, it is recommended to simply
+ /// return this ExitCode from the `main` function, as demonstrated in the [type
+ /// documentation](#examples).
+ ///
+ /// # Differences from `process::exit()`
+ ///
+ /// `process::exit()` accepts any `i32` value as the exit code for the process; however, there
+ /// are platforms that only use a subset of that value (see [`process::exit` platform-specific
+ /// behavior][exit#platform-specific-behavior]). `ExitCode` exists because of this; only
+ /// `ExitCode`s that are supported by a majority of our platforms can be created, so those
+ /// problems don't exist (as much) with this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(exitcode_exit_method)]
+ /// # use std::process::ExitCode;
+ /// # use std::fmt;
+ /// # enum UhOhError { GenericProblem, Specific, WithCode { exit_code: ExitCode, _x: () } }
+ /// # impl fmt::Display for UhOhError {
+ /// # fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { unimplemented!() }
+ /// # }
+ /// // there's no way to gracefully recover from an UhOhError, so we just
+ /// // print a message and exit
+ /// fn handle_unrecoverable_error(err: UhOhError) -> ! {
+ /// eprintln!("UH OH! {err}");
+ /// let code = match err {
+ /// UhOhError::GenericProblem => ExitCode::FAILURE,
+ /// UhOhError::Specific => ExitCode::from(3),
+ /// UhOhError::WithCode { exit_code, .. } => exit_code,
+ /// };
+ /// code.exit_process()
+ /// }
+ /// ```
+ #[unstable(feature = "exitcode_exit_method", issue = "97100")]
+ pub fn exit_process(self) -> ! {
+ exit(self.to_i32())
+ }
+}
+
+impl ExitCode {
+ // This is private/perma-unstable because ExitCode is opaque; we don't know that i32 will serve
+ // all use cases. For example, Windows seems to use u32 while Unix uses the 8th-15th bits of an
+ // i32, so we likely want to isolate users from anything that could restrict the
+ // platform-specific representation of an ExitCode.
+ //
+ // More info: https://internals.rust-lang.org/t/mini-pre-rfc-redesigning-process-exitstatus/5426
+ /// Convert an `ExitCode` into an i32
+ #[unstable(
+ feature = "process_exitcode_internals",
+ reason = "exposed only for libstd",
+ issue = "none"
+ )]
+ #[inline]
+ #[doc(hidden)]
+ pub fn to_i32(self) -> i32 {
+ self.0.as_i32()
+ }
+}
+
+#[stable(feature = "process_exitcode", since = "1.61.0")]
+impl From<u8> for ExitCode {
+ /// Constructs an `ExitCode` from an arbitrary `u8` value.
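+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// use std::process::ExitCode;
+ ///
+ /// // Any `u8` can be turned into an `ExitCode`; 42 is an arbitrary choice here.
+ /// let _exit_code = ExitCode::from(42);
+ /// ```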
+ fn from(code: u8) -> Self {
+ ExitCode(imp::ExitCode::from(code))
+ }
+}
+
+impl AsInner<imp::ExitCode> for ExitCode {
+ fn as_inner(&self) -> &imp::ExitCode {
+ &self.0
+ }
+}
+
+impl FromInner<imp::ExitCode> for ExitCode {
+ fn from_inner(s: imp::ExitCode) -> ExitCode {
+ ExitCode(s)
+ }
+}
+
+impl Child {
+ /// Forces the child process to exit. If the child has already exited, an [`InvalidInput`]
+ /// error is returned.
+ ///
+ /// The mapping to [`ErrorKind`]s is not part of the compatibility contract of the function.
+ ///
+ /// This is equivalent to sending a SIGKILL on Unix platforms.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// let mut command = Command::new("yes");
+ /// if let Ok(mut child) = command.spawn() {
+ /// child.kill().expect("command wasn't running");
+ /// } else {
+ /// println!("yes command didn't start");
+ /// }
+ /// ```
+ ///
+ /// [`ErrorKind`]: io::ErrorKind
+ /// [`InvalidInput`]: io::ErrorKind::InvalidInput
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn kill(&mut self) -> io::Result<()> {
+ self.handle.kill()
+ }
+
+ /// Returns the OS-assigned process identifier associated with this child.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// let mut command = Command::new("ls");
+ /// if let Ok(child) = command.spawn() {
+ /// println!("Child's ID is {}", child.id());
+ /// } else {
+ /// println!("ls command didn't start");
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "process_id", since = "1.3.0")]
+ pub fn id(&self) -> u32 {
+ self.handle.id()
+ }
+
+ /// Waits for the child to exit completely, returning the status that it
+ /// exited with. This function will continue to have the same return value
+ /// after it has been called at least once.
+ ///
+ /// The stdin handle to the child process, if any, will be closed
+ /// before waiting. This helps avoid deadlock: it ensures that the
+ /// child does not block waiting for input from the parent, while
+ /// the parent waits for the child to exit.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// let mut command = Command::new("ls");
+ /// if let Ok(mut child) = command.spawn() {
+ /// child.wait().expect("command wasn't running");
+ /// println!("Child has finished its execution!");
+ /// } else {
+ /// println!("ls command didn't start");
+ /// }
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn wait(&mut self) -> io::Result<ExitStatus> {
+ drop(self.stdin.take());
+ self.handle.wait().map(ExitStatus)
+ }
+
+ /// Attempts to collect the exit status of the child if it has already
+ /// exited.
+ ///
+ /// This function will not block the calling thread and will only
+ /// check to see if the child process has exited or not. If the child has
+ /// exited then on Unix the process ID is reaped. This function is
+ /// guaranteed to repeatedly return a successful exit status so long as the
+ /// child has already exited.
+ ///
+ /// If the child has exited, then `Ok(Some(status))` is returned. If the
+ /// exit status is not available at this time then `Ok(None)` is returned.
+ /// If an error occurs, then that error is returned.
+ ///
+ /// Note that unlike `wait`, this function will not attempt to drop stdin.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::Command;
+ ///
+ /// let mut child = Command::new("ls").spawn().unwrap();
+ ///
+ /// match child.try_wait() {
+ /// Ok(Some(status)) => println!("exited with: {status}"),
+ /// Ok(None) => {
+ /// println!("status not ready yet, let's really wait");
+ /// let res = child.wait();
+ /// println!("result: {res:?}");
+ /// }
+ /// Err(e) => println!("error attempting to wait: {e}"),
+ /// }
+ /// ```
+ #[stable(feature = "process_try_wait", since = "1.18.0")]
+ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ Ok(self.handle.try_wait()?.map(ExitStatus))
+ }
+
+ /// Simultaneously waits for the child to exit and collect all remaining
+ /// output on the stdout/stderr handles, returning an `Output`
+ /// instance.
+ ///
+ /// The stdin handle to the child process, if any, will be closed
+ /// before waiting. This helps avoid deadlock: it ensures that the
+ /// child does not block waiting for input from the parent, while
+ /// the parent waits for the child to exit.
+ ///
+ /// By default, stdin, stdout and stderr are inherited from the parent.
+ /// In order to capture the output into this `Result<Output>` it is
+ /// necessary to create new pipes between parent and child. Use
+ /// `stdout(Stdio::piped())` or `stderr(Stdio::piped())`, respectively.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::process::{Command, Stdio};
+ ///
+ /// let child = Command::new("/bin/cat")
+ /// .arg("file.txt")
+ /// .stdout(Stdio::piped())
+ /// .spawn()
+ /// .expect("failed to execute child");
+ ///
+ /// let output = child
+ /// .wait_with_output()
+ /// .expect("failed to wait on child");
+ ///
+ /// assert!(output.status.success());
+ /// ```
+ #[stable(feature = "process", since = "1.0.0")]
+ pub fn wait_with_output(mut self) -> io::Result<Output> {
+ drop(self.stdin.take());
+
+ let (mut stdout, mut stderr) = (Vec::new(), Vec::new());
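+ // Drain whichever output pipes were requested. When both stdout and stderr
+ // are piped, read them concurrently so the child cannot deadlock by blocking
+ // on a full pipe buffer that we are not currently reading.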
+ match (self.stdout.take(), self.stderr.take()) {
+ (None, None) => {}
+ (Some(mut out), None) => {
+ let res = out.read_to_end(&mut stdout);
+ res.unwrap();
+ }
+ (None, Some(mut err)) => {
+ let res = err.read_to_end(&mut stderr);
+ res.unwrap();
+ }
+ (Some(out), Some(err)) => {
+ let res = read2(out.inner, &mut stdout, err.inner, &mut stderr);
+ res.unwrap();
+ }
+ }
+
+ let status = self.wait()?;
+ Ok(Output { status, stdout, stderr })
+ }
+}
+
+/// Terminates the current process with the specified exit code.
+///
+/// This function will never return and will immediately terminate the current
+/// process. The exit code is passed through to the underlying OS and will be
+/// available for consumption by another process.
+///
+/// Note that because this function never returns, and that it terminates the
+/// process, no destructors on the current stack or any other thread's stack
+/// will be run. If a clean shutdown is needed it is recommended to only call
+/// this function at a known point where there are no more destructors left
+/// to run; or, preferably, simply return a type implementing [`Termination`]
+/// (such as [`ExitCode`] or `Result`) from the `main` function and avoid this
+/// function altogether:
+///
+/// ```
+/// # use std::io::Error as MyError;
+/// fn main() -> Result<(), MyError> {
+/// // ...
+/// Ok(())
+/// }
+/// ```
+///
+/// ## Platform-specific behavior
+///
+/// **Unix**: On Unix-like platforms, it is unlikely that all 32 bits of `exit`
+/// will be visible to a parent process inspecting the exit code. On most
+/// Unix-like platforms, only the eight least-significant bits are considered.
+///
+/// For example, the exit code for this example will be `0` on Linux, but `256`
+/// on Windows:
+///
+/// ```no_run
+/// use std::process;
+///
+/// process::exit(0x0100);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn exit(code: i32) -> ! {
+ crate::rt::cleanup();
+ crate::sys::os::exit(code)
+}
+
+/// Terminates the process in an abnormal fashion.
+///
+/// The function will never return and will immediately terminate the current
+/// process in a platform specific "abnormal" manner.
+///
+/// Note that because this function never returns, and that it terminates the
+/// process, no destructors on the current stack or any other thread's stack
+/// will be run.
+///
+/// Rust IO buffers (eg, from `BufWriter`) will not be flushed.
+/// Likewise, C stdio buffers will (on most platforms) not be flushed.
+///
+/// This is in contrast to the default behaviour of [`panic!`] which unwinds
+/// the current thread's stack and calls all destructors.
+/// When `panic="abort"` is set, either as an argument to `rustc` or in a
+/// crate's Cargo.toml, [`panic!`] and `abort` are similar. However,
+/// [`panic!`] will still call the [panic hook] while `abort` will not.
+///
+/// If a clean shutdown is needed it is recommended to only call
+/// this function at a known point where there are no more destructors left
+/// to run.
+///
+/// The process's termination will be similar to that from the C `abort()`
+/// function. On Unix, the process will terminate with signal `SIGABRT`, which
+/// typically means that the shell prints "Aborted".
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::process;
+///
+/// fn main() {
+/// println!("aborting");
+///
+/// process::abort();
+///
+/// // execution never gets here
+/// }
+/// ```
+///
+/// The `abort` function terminates the process, so the destructor will not
+/// get run on the example below:
+///
+/// ```no_run
+/// use std::process;
+///
+/// struct HasDrop;
+///
+/// impl Drop for HasDrop {
+/// fn drop(&mut self) {
+/// println!("This will never be printed!");
+/// }
+/// }
+///
+/// fn main() {
+/// let _x = HasDrop;
+/// process::abort();
+/// // the destructor implemented for HasDrop will never get run
+/// }
+/// ```
+///
+/// [panic hook]: crate::panic::set_hook
+#[stable(feature = "process_abort", since = "1.17.0")]
+#[cold]
+pub fn abort() -> ! {
+ crate::sys::abort_internal();
+}
+
+/// Returns the OS-assigned process identifier associated with this process.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```no_run
+/// use std::process;
+///
+/// println!("My pid is {}", process::id());
+/// ```
+#[must_use]
+#[stable(feature = "getpid", since = "1.26.0")]
+pub fn id() -> u32 {
+ crate::sys::os::getpid()
+}
+
+/// A trait for implementing arbitrary return types in the `main` function.
+///
+/// The C `main` function only supports returning integers, so every type
+/// implementing the `Termination` trait has to be converted to an integer.
+///
+/// The default implementations return `libc::EXIT_SUCCESS` to indicate
+/// successful execution; in case of a failure, `libc::EXIT_FAILURE` is returned.
+///
+/// Because different runtimes have different specifications for the return value
+/// of the `main` function, this trait is likely to be available only on the
+/// standard library's runtime, for convenience. Other runtimes are not required
+/// to provide similar functionality.
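+///
+/// # Examples
+///
+/// A minimal sketch of returning a custom type from `main`; the
+/// `ProgramOutcome` type here is purely illustrative:
+///
+/// ```
+/// use std::process::{ExitCode, Termination};
+///
+/// enum ProgramOutcome {
+///     Done,
+///     Failed(u8),
+/// }
+///
+/// impl Termination for ProgramOutcome {
+///     fn report(self) -> ExitCode {
+///         match self {
+///             ProgramOutcome::Done => ExitCode::SUCCESS,
+///             ProgramOutcome::Failed(n) => ExitCode::from(n),
+///         }
+///     }
+/// }
+///
+/// fn main() -> ProgramOutcome {
+///     ProgramOutcome::Done
+/// }
+/// ```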
+#[cfg_attr(not(test), lang = "termination")]
+#[stable(feature = "termination_trait_lib", since = "1.61.0")]
+#[rustc_on_unimplemented(
+ message = "`main` has invalid return type `{Self}`",
+ label = "`main` can only return types that implement `{Termination}`"
+)]
+pub trait Termination {
+ /// Is called to get the representation of the value as a status code.
+ /// This status code is returned to the operating system.
+ #[stable(feature = "termination_trait_lib", since = "1.61.0")]
+ fn report(self) -> ExitCode;
+}
+
+#[stable(feature = "termination_trait_lib", since = "1.61.0")]
+impl Termination for () {
+ #[inline]
+ fn report(self) -> ExitCode {
+ ExitCode::SUCCESS
+ }
+}
+
+#[stable(feature = "termination_trait_lib", since = "1.61.0")]
+impl Termination for ! {
+ fn report(self) -> ExitCode {
+ self
+ }
+}
+
+#[stable(feature = "termination_trait_lib", since = "1.61.0")]
+impl Termination for Infallible {
+ fn report(self) -> ExitCode {
+ match self {}
+ }
+}
+
+#[stable(feature = "termination_trait_lib", since = "1.61.0")]
+impl Termination for ExitCode {
+ #[inline]
+ fn report(self) -> ExitCode {
+ self
+ }
+}
+
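+/// When the value is an `Err`, its `Debug` representation is written to
+/// standard error and [`ExitCode::FAILURE`] is reported; otherwise the report
+/// of the `Ok` value's own [`Termination`] implementation is returned.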
+#[stable(feature = "termination_trait_lib", since = "1.61.0")]
+impl<T: Termination, E: fmt::Debug> Termination for Result<T, E> {
+ fn report(self) -> ExitCode {
+ match self {
+ Ok(val) => val.report(),
+ Err(err) => {
+ // Ignore error if the write fails, for example because stderr is
+ // already closed. There is not much point panicking at this point.
+ let _ = writeln!(io::stderr(), "Error: {err:?}");
+ ExitCode::FAILURE
+ }
+ }
+ }
+}
diff --git a/library/std/src/process/tests.rs b/library/std/src/process/tests.rs
new file mode 100644
index 000000000..955ad6891
--- /dev/null
+++ b/library/std/src/process/tests.rs
@@ -0,0 +1,458 @@
+use crate::io::prelude::*;
+
+use super::{Command, Output, Stdio};
+use crate::io::ErrorKind;
+use crate::str;
+
+fn known_command() -> Command {
+ if cfg!(windows) { Command::new("help") } else { Command::new("echo") }
+}
+
+#[cfg(target_os = "android")]
+fn shell_cmd() -> Command {
+ Command::new("/system/bin/sh")
+}
+
+#[cfg(not(target_os = "android"))]
+fn shell_cmd() -> Command {
+ Command::new("/bin/sh")
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn smoke() {
+ let p = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "exit 0"]).spawn()
+ } else {
+ shell_cmd().arg("-c").arg("true").spawn()
+ };
+ assert!(p.is_ok());
+ let mut p = p.unwrap();
+ assert!(p.wait().unwrap().success());
+}
+
+#[test]
+#[cfg_attr(target_os = "android", ignore)]
+fn smoke_failure() {
+ match Command::new("if-this-is-a-binary-then-the-world-has-ended").spawn() {
+ Ok(..) => panic!(),
+ Err(..) => {}
+ }
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn exit_reported_right() {
+ let p = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "exit 1"]).spawn()
+ } else {
+ shell_cmd().arg("-c").arg("false").spawn()
+ };
+ assert!(p.is_ok());
+ let mut p = p.unwrap();
+ assert!(p.wait().unwrap().code() == Some(1));
+ drop(p.wait());
+}
+
+#[test]
+#[cfg(unix)]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn signal_reported_right() {
+ use crate::os::unix::process::ExitStatusExt;
+
+ let mut p = shell_cmd().arg("-c").arg("read a").stdin(Stdio::piped()).spawn().unwrap();
+ p.kill().unwrap();
+ match p.wait().unwrap().signal() {
+ Some(9) => {}
+ result => panic!("not terminated by signal 9 (instead, {result:?})"),
+ }
+}
+
+pub fn run_output(mut cmd: Command) -> String {
+ let p = cmd.spawn();
+ assert!(p.is_ok());
+ let mut p = p.unwrap();
+ assert!(p.stdout.is_some());
+ let mut ret = String::new();
+ p.stdout.as_mut().unwrap().read_to_string(&mut ret).unwrap();
+ assert!(p.wait().unwrap().success());
+ ret
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn stdout_works() {
+ if cfg!(target_os = "windows") {
+ let mut cmd = Command::new("cmd");
+ cmd.args(&["/C", "echo foobar"]).stdout(Stdio::piped());
+ assert_eq!(run_output(cmd), "foobar\r\n");
+ } else {
+ let mut cmd = shell_cmd();
+ cmd.arg("-c").arg("echo foobar").stdout(Stdio::piped());
+ assert_eq!(run_output(cmd), "foobar\n");
+ }
+}
+
+#[test]
+#[cfg_attr(any(windows, target_os = "vxworks"), ignore)]
+fn set_current_dir_works() {
+ let mut cmd = shell_cmd();
+ cmd.arg("-c").arg("pwd").current_dir("/").stdout(Stdio::piped());
+ assert_eq!(run_output(cmd), "/\n");
+}
+
+#[test]
+#[cfg_attr(any(windows, target_os = "vxworks"), ignore)]
+fn stdin_works() {
+ let mut p = shell_cmd()
+ .arg("-c")
+ .arg("read line; echo $line")
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()
+ .unwrap();
+ p.stdin.as_mut().unwrap().write_all("foobar".as_bytes()).unwrap();
+ drop(p.stdin.take());
+ let mut out = String::new();
+ p.stdout.as_mut().unwrap().read_to_string(&mut out).unwrap();
+ assert!(p.wait().unwrap().success());
+ assert_eq!(out, "foobar\n");
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn test_process_status() {
+ let mut status = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "exit 1"]).status().unwrap()
+ } else {
+ shell_cmd().arg("-c").arg("false").status().unwrap()
+ };
+ assert!(status.code() == Some(1));
+
+ status = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "exit 0"]).status().unwrap()
+ } else {
+ shell_cmd().arg("-c").arg("true").status().unwrap()
+ };
+ assert!(status.success());
+}
+
+#[test]
+fn test_process_output_fail_to_start() {
+ match Command::new("/no-binary-by-this-name-should-exist").output() {
+ Err(e) => assert_eq!(e.kind(), ErrorKind::NotFound),
+ Ok(..) => panic!(),
+ }
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn test_process_output_output() {
+ let Output { status, stdout, stderr } = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "echo hello"]).output().unwrap()
+ } else {
+ shell_cmd().arg("-c").arg("echo hello").output().unwrap()
+ };
+ let output_str = str::from_utf8(&stdout).unwrap();
+
+ assert!(status.success());
+ assert_eq!(output_str.trim().to_string(), "hello");
+ assert_eq!(stderr, Vec::new());
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn test_process_output_error() {
+ let Output { status, stdout, stderr } = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "mkdir ."]).output().unwrap()
+ } else {
+ Command::new("mkdir").arg("./").output().unwrap()
+ };
+
+ assert!(status.code().is_some());
+ assert!(status.code() != Some(0));
+ assert_eq!(stdout, Vec::new());
+ assert!(!stderr.is_empty());
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn test_finish_once() {
+ let mut prog = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "exit 1"]).spawn().unwrap()
+ } else {
+ shell_cmd().arg("-c").arg("false").spawn().unwrap()
+ };
+ assert!(prog.wait().unwrap().code() == Some(1));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn test_finish_twice() {
+ let mut prog = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "exit 1"]).spawn().unwrap()
+ } else {
+ shell_cmd().arg("-c").arg("false").spawn().unwrap()
+ };
+ assert!(prog.wait().unwrap().code() == Some(1));
+ assert!(prog.wait().unwrap().code() == Some(1));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn test_wait_with_output_once() {
+ let prog = if cfg!(target_os = "windows") {
+ Command::new("cmd").args(&["/C", "echo hello"]).stdout(Stdio::piped()).spawn().unwrap()
+ } else {
+ shell_cmd().arg("-c").arg("echo hello").stdout(Stdio::piped()).spawn().unwrap()
+ };
+
+ let Output { status, stdout, stderr } = prog.wait_with_output().unwrap();
+ let output_str = str::from_utf8(&stdout).unwrap();
+
+ assert!(status.success());
+ assert_eq!(output_str.trim().to_string(), "hello");
+ assert_eq!(stderr, Vec::new());
+}
+
+#[cfg(all(unix, not(target_os = "android")))]
+pub fn env_cmd() -> Command {
+ Command::new("env")
+}
+#[cfg(target_os = "android")]
+pub fn env_cmd() -> Command {
+ let mut cmd = Command::new("/system/bin/sh");
+ cmd.arg("-c").arg("set");
+ cmd
+}
+
+#[cfg(windows)]
+pub fn env_cmd() -> Command {
+ let mut cmd = Command::new("cmd");
+ cmd.arg("/c").arg("set");
+ cmd
+}
+
+#[test]
+#[cfg_attr(target_os = "vxworks", ignore)]
+fn test_override_env() {
+ use crate::env;
+
+ // In some build environments (such as chrooted Nix builds), `env` can
+ // only be found in the explicitly-provided PATH env variable, not in
+ // default places such as /bin or /usr/bin. So we need to pass through
+ // PATH to our sub-process.
+ let mut cmd = env_cmd();
+ cmd.env_clear().env("RUN_TEST_NEW_ENV", "123");
+ if let Some(p) = env::var_os("PATH") {
+ cmd.env("PATH", &p);
+ }
+ let result = cmd.output().unwrap();
+ let output = String::from_utf8_lossy(&result.stdout).to_string();
+
+ assert!(
+ output.contains("RUN_TEST_NEW_ENV=123"),
+ "didn't find RUN_TEST_NEW_ENV inside of:\n\n{output}",
+ );
+}
+
+#[test]
+#[cfg_attr(target_os = "vxworks", ignore)]
+fn test_add_to_env() {
+ let result = env_cmd().env("RUN_TEST_NEW_ENV", "123").output().unwrap();
+ let output = String::from_utf8_lossy(&result.stdout).to_string();
+
+ assert!(
+ output.contains("RUN_TEST_NEW_ENV=123"),
+ "didn't find RUN_TEST_NEW_ENV inside of:\n\n{output}"
+ );
+}
+
+#[test]
+#[cfg_attr(target_os = "vxworks", ignore)]
+fn test_capture_env_at_spawn() {
+ use crate::env;
+
+ let mut cmd = env_cmd();
+ cmd.env("RUN_TEST_NEW_ENV1", "123");
+
+ // This variable will not be present if the environment has already
+ // been captured above.
+ env::set_var("RUN_TEST_NEW_ENV2", "456");
+ let result = cmd.output().unwrap();
+ env::remove_var("RUN_TEST_NEW_ENV2");
+
+ let output = String::from_utf8_lossy(&result.stdout).to_string();
+
+ assert!(
+ output.contains("RUN_TEST_NEW_ENV1=123"),
+ "didn't find RUN_TEST_NEW_ENV1 inside of:\n\n{output}"
+ );
+ assert!(
+ output.contains("RUN_TEST_NEW_ENV2=456"),
+ "didn't find RUN_TEST_NEW_ENV2 inside of:\n\n{output}"
+ );
+}
+
+// Regression tests for #30858.
+#[test]
+fn test_interior_nul_in_progname_is_error() {
+ match Command::new("has-some-\0\0s-inside").spawn() {
+ Err(e) => assert_eq!(e.kind(), ErrorKind::InvalidInput),
+ Ok(_) => panic!(),
+ }
+}
+
+#[test]
+fn test_interior_nul_in_arg_is_error() {
+ match known_command().arg("has-some-\0\0s-inside").spawn() {
+ Err(e) => assert_eq!(e.kind(), ErrorKind::InvalidInput),
+ Ok(_) => panic!(),
+ }
+}
+
+#[test]
+fn test_interior_nul_in_args_is_error() {
+ match known_command().args(&["has-some-\0\0s-inside"]).spawn() {
+ Err(e) => assert_eq!(e.kind(), ErrorKind::InvalidInput),
+ Ok(_) => panic!(),
+ }
+}
+
+#[test]
+fn test_interior_nul_in_current_dir_is_error() {
+ match known_command().current_dir("has-some-\0\0s-inside").spawn() {
+ Err(e) => assert_eq!(e.kind(), ErrorKind::InvalidInput),
+ Ok(_) => panic!(),
+ }
+}
+
+// Regression tests for #30862.
+#[test]
+#[cfg_attr(target_os = "vxworks", ignore)]
+fn test_interior_nul_in_env_key_is_error() {
+ match env_cmd().env("has-some-\0\0s-inside", "value").spawn() {
+ Err(e) => assert_eq!(e.kind(), ErrorKind::InvalidInput),
+ Ok(_) => panic!(),
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "vxworks", ignore)]
+fn test_interior_nul_in_env_value_is_error() {
+ match env_cmd().env("key", "has-some-\0\0s-inside").spawn() {
+ Err(e) => assert_eq!(e.kind(), ErrorKind::InvalidInput),
+ Ok(_) => panic!(),
+ }
+}
+
+/// Tests that process creation flags work by debugging a process.
+/// Other creation flags make it hard or impossible to detect
+/// behavioral changes in the process.
+#[test]
+#[cfg(windows)]
+fn test_creation_flags() {
+ use crate::os::windows::process::CommandExt;
+ use crate::sys::c::{BOOL, DWORD, INFINITE};
+ #[repr(C, packed)]
+ struct DEBUG_EVENT {
+ pub event_code: DWORD,
+ pub process_id: DWORD,
+ pub thread_id: DWORD,
+ // This is a union in the real struct, but we don't
+ // need this data for the purposes of this test.
+ pub _junk: [u8; 164],
+ }
+
+ extern "system" {
+ fn WaitForDebugEvent(lpDebugEvent: *mut DEBUG_EVENT, dwMilliseconds: DWORD) -> BOOL;
+ fn ContinueDebugEvent(
+ dwProcessId: DWORD,
+ dwThreadId: DWORD,
+ dwContinueStatus: DWORD,
+ ) -> BOOL;
+ }
+
+ const DEBUG_PROCESS: DWORD = 1;
+ const EXIT_PROCESS_DEBUG_EVENT: DWORD = 5;
+ const DBG_EXCEPTION_NOT_HANDLED: DWORD = 0x80010001;
+
+ let mut child =
+ Command::new("cmd").creation_flags(DEBUG_PROCESS).stdin(Stdio::piped()).spawn().unwrap();
+ child.stdin.take().unwrap().write_all(b"exit\r\n").unwrap();
+ let mut events = 0;
+ let mut event = DEBUG_EVENT { event_code: 0, process_id: 0, thread_id: 0, _junk: [0; 164] };
+ loop {
+ if unsafe { WaitForDebugEvent(&mut event as *mut DEBUG_EVENT, INFINITE) } == 0 {
+ panic!("WaitForDebugEvent failed!");
+ }
+ events += 1;
+
+ if event.event_code == EXIT_PROCESS_DEBUG_EVENT {
+ break;
+ }
+
+ if unsafe {
+ ContinueDebugEvent(event.process_id, event.thread_id, DBG_EXCEPTION_NOT_HANDLED)
+ } == 0
+ {
+ panic!("ContinueDebugEvent failed!");
+ }
+ }
+ assert!(events > 0);
+}
+
+#[test]
+fn test_command_implements_send_sync() {
+ fn take_send_sync_type<T: Send + Sync>(_: T) {}
+ take_send_sync_type(Command::new(""))
+}
+
+// Ensure that starting a process with no environment variables works on Windows.
+// This will fail if the environment block is ill-formed.
+#[test]
+#[cfg(windows)]
+fn env_empty() {
+ let p = Command::new("cmd").args(&["/C", "exit 0"]).env_clear().spawn();
+ assert!(p.is_ok());
+}
+
+// See issue #91991
+#[test]
+#[cfg(windows)]
+fn run_bat_script() {
+ let tempdir = crate::sys_common::io::test::tmpdir();
+ let script_path = tempdir.join("hello.cmd");
+
+ crate::fs::write(&script_path, "@echo Hello, %~1!").unwrap();
+ let output = Command::new(&script_path)
+ .arg("fellow Rustaceans")
+ .stdout(crate::process::Stdio::piped())
+ .spawn()
+ .unwrap()
+ .wait_with_output()
+ .unwrap();
+ assert!(output.status.success());
+ assert_eq!(String::from_utf8_lossy(&output.stdout).trim(), "Hello, fellow Rustaceans!");
+}
+
+// See issue #95178
+#[test]
+#[cfg(windows)]
+fn run_canonical_bat_script() {
+ let tempdir = crate::sys_common::io::test::tmpdir();
+ let script_path = tempdir.join("hello.cmd");
+
+ crate::fs::write(&script_path, "@echo Hello, %~1!").unwrap();
+
+ // Try using a canonical path
+ let output = Command::new(&script_path.canonicalize().unwrap())
+ .arg("fellow Rustaceans")
+ .stdout(crate::process::Stdio::piped())
+ .spawn()
+ .unwrap()
+ .wait_with_output()
+ .unwrap();
+ assert!(output.status.success());
+ assert_eq!(String::from_utf8_lossy(&output.stdout).trim(), "Hello, fellow Rustaceans!");
+}
diff --git a/library/std/src/rt.rs b/library/std/src/rt.rs
new file mode 100644
index 000000000..663537a05
--- /dev/null
+++ b/library/std/src/rt.rs
@@ -0,0 +1,150 @@
+//! Runtime services
+//!
+//! The `rt` module provides a narrow set of runtime services,
+//! including the global heap (exported in `heap`) and unwinding and
+//! backtrace support. The APIs in this module are highly unstable,
+//! and should be considered as private implementation details for the
+//! time being.
+
+#![unstable(
+ feature = "rt",
+ reason = "this public module should not exist and is highly likely \
+ to disappear",
+ issue = "none"
+)]
+#![doc(hidden)]
+#![deny(unsafe_op_in_unsafe_fn)]
+#![allow(unused_macros)]
+
+use crate::ffi::CString;
+
+// Re-export some of our utilities which are expected by other crates.
+pub use crate::panicking::{begin_panic, panic_count};
+pub use core::panicking::{panic_display, panic_fmt};
+
+use crate::sync::Once;
+use crate::sys;
+use crate::sys_common::thread_info;
+use crate::thread::Thread;
+
+// Prints to the "panic output". Depending on the platform, this may be:
+// - the standard error output
+// - some dedicated platform specific output
+// - nothing (so this macro is a no-op)
+macro_rules! rtprintpanic {
+ ($($t:tt)*) => {
+ if let Some(mut out) = crate::sys::stdio::panic_output() {
+ let _ = crate::io::Write::write_fmt(&mut out, format_args!($($t)*));
+ }
+ }
+}
+
+macro_rules! rtabort {
+ ($($t:tt)*) => {
+ {
+ rtprintpanic!("fatal runtime error: {}\n", format_args!($($t)*));
+ crate::sys::abort_internal();
+ }
+ }
+}
+
+macro_rules! rtassert {
+ ($e:expr) => {
+ if !$e {
+ rtabort!(concat!("assertion failed: ", stringify!($e)));
+ }
+ };
+}
+
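+// Aborts via `rtabort!` with a diagnostic message if `$e` is not the expected
+// `$ok` variant, e.g. `rtunwrap!(Ok, CString::new("main"))` yields the value or aborts.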
+macro_rules! rtunwrap {
+ ($ok:ident, $e:expr) => {
+ match $e {
+ $ok(v) => v,
+ ref err => {
+ let err = err.as_ref().map(drop); // map Ok/Some which might not be Debug
+ rtabort!(concat!("unwrap failed: ", stringify!($e), " = {:?}"), err)
+ }
+ }
+ };
+}
+
+// One-time runtime initialization.
+// Runs before `main`.
+// SAFETY: must be called only once during runtime initialization.
+// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+#[cfg_attr(test, allow(dead_code))]
+unsafe fn init(argc: isize, argv: *const *const u8) {
+ unsafe {
+ sys::init(argc, argv);
+
+ let main_guard = sys::thread::guard::init();
+ // Next, set up the current Thread with the guard information we just
+ // created. Note that this isn't necessary in general for new threads,
+ // but we just do this to name the main thread and to give it correct
+ // info about the stack bounds.
+ let thread = Thread::new(Some(rtunwrap!(Ok, CString::new("main"))));
+ thread_info::set(main_guard, thread);
+ }
+}
+
+// One-time runtime cleanup.
+// Runs after `main` or at program exit.
+// NOTE: this is not guaranteed to run, for example when the program aborts.
+pub(crate) fn cleanup() {
+ static CLEANUP: Once = Once::new();
+ CLEANUP.call_once(|| unsafe {
+ // Flush stdout and disable buffering.
+ crate::io::cleanup();
+ // SAFETY: Only called once during runtime cleanup.
+ sys::cleanup();
+ });
+}
+
+// To reduce the amount of code generated for the new `lang_start`, this
+// function does the real work.
+#[cfg(not(test))]
+fn lang_start_internal(
+ main: &(dyn Fn() -> i32 + Sync + crate::panic::RefUnwindSafe),
+ argc: isize,
+ argv: *const *const u8,
+) -> Result<isize, !> {
+ use crate::{mem, panic};
+ let rt_abort = move |e| {
+ mem::forget(e);
+ rtabort!("initialization or cleanup bug");
+ };
+ // Guard against the code called by this function from unwinding outside of the Rust-controlled
+ // code, which is UB. This is a requirement imposed by a combination of how the
+ // `#[lang="start"]` attribute is implemented as well as by the implementation of the panicking
+ // mechanism itself.
+ //
+ // There are a couple of instances where unwinding can begin. First is inside of the
+ // `rt::init`, `rt::cleanup` and similar functions controlled by libstd. In those instances a
+ // panic is a libstd implementation bug. A quite likely one too, as there isn't any way to
+ // prevent libstd from accidentally introducing a panic to these functions. Another is from
+ // user code from `main` or, more nefariously, as described in e.g. issue #86030.
+ // SAFETY: Only called once during runtime initialization.
+ panic::catch_unwind(move || unsafe { init(argc, argv) }).map_err(rt_abort)?;
+ let ret_code = panic::catch_unwind(move || panic::catch_unwind(main).unwrap_or(101) as isize)
+ .map_err(move |e| {
+ mem::forget(e);
+ rtabort!("drop of the panic payload panicked");
+ });
+ panic::catch_unwind(cleanup).map_err(rt_abort)?;
+ ret_code
+}
+
+#[cfg(not(test))]
+#[lang = "start"]
+fn lang_start<T: crate::process::Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
+) -> isize {
+ let Ok(v) = lang_start_internal(
+ &move || crate::sys_common::backtrace::__rust_begin_short_backtrace(main).report().to_i32(),
+ argc,
+ argv,
+ );
+ v
+}
diff --git a/library/std/src/sync/barrier.rs b/library/std/src/sync/barrier.rs
new file mode 100644
index 000000000..11836b7b6
--- /dev/null
+++ b/library/std/src/sync/barrier.rs
@@ -0,0 +1,174 @@
+#[cfg(test)]
+mod tests;
+
+use crate::fmt;
+use crate::sync::{Condvar, Mutex};
+
+/// A barrier enables multiple threads to synchronize the beginning
+/// of some computation.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::{Arc, Barrier};
+/// use std::thread;
+///
+/// let mut handles = Vec::with_capacity(10);
+/// let barrier = Arc::new(Barrier::new(10));
+/// for _ in 0..10 {
+/// let c = Arc::clone(&barrier);
+/// // The same messages will be printed together.
+/// // You will NOT see any interleaving.
+/// handles.push(thread::spawn(move|| {
+/// println!("before wait");
+/// c.wait();
+/// println!("after wait");
+/// }));
+/// }
+/// // Wait for other threads to finish.
+/// for handle in handles {
+/// handle.join().unwrap();
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Barrier {
+ lock: Mutex<BarrierState>,
+ cvar: Condvar,
+ num_threads: usize,
+}
+
+// The inner state of a barrier.
+struct BarrierState {
+ // The number of threads currently waiting at the barrier.
+ count: usize,
+ // Incremented each time the barrier releases, so that waiters from an
+ // earlier generation are not confused with a later round of waiters.
+ generation_id: usize,
+}
+
+/// A `BarrierWaitResult` is returned by [`Barrier::wait()`] when all threads
+/// in the [`Barrier`] have rendezvoused.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::Barrier;
+///
+/// let barrier = Barrier::new(1);
+/// let barrier_wait_result = barrier.wait();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct BarrierWaitResult(bool);
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Barrier {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Barrier").finish_non_exhaustive()
+ }
+}
+
+impl Barrier {
+ /// Creates a new barrier that can block a given number of threads.
+ ///
+ /// A barrier will block `n`-1 threads which call [`wait()`] and then wake
+ /// up all threads at once when the `n`th thread calls [`wait()`].
+ ///
+ /// [`wait()`]: Barrier::wait
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Barrier;
+ ///
+ /// let barrier = Barrier::new(10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new(n: usize) -> Barrier {
+ Barrier {
+ lock: Mutex::new(BarrierState { count: 0, generation_id: 0 }),
+ cvar: Condvar::new(),
+ num_threads: n,
+ }
+ }
+
+ /// Blocks the current thread until all threads have rendezvoused here.
+ ///
+ /// Barriers are re-usable after all threads have rendezvoused once, and can
+ /// be used continuously.
+ ///
+ /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
+ /// returns `true` from [`BarrierWaitResult::is_leader()`] when returning
+ /// from this function, and all other threads will receive a result that
+ /// will return `false` from [`BarrierWaitResult::is_leader()`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Barrier};
+ /// use std::thread;
+ ///
+ /// let mut handles = Vec::with_capacity(10);
+ /// let barrier = Arc::new(Barrier::new(10));
+ /// for _ in 0..10 {
+ /// let c = Arc::clone(&barrier);
+ /// // The same messages will be printed together.
+ /// // You will NOT see any interleaving.
+ /// handles.push(thread::spawn(move|| {
+ /// println!("before wait");
+ /// c.wait();
+ /// println!("after wait");
+ /// }));
+ /// }
+ /// // Wait for other threads to finish.
+ /// for handle in handles {
+ /// handle.join().unwrap();
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn wait(&self) -> BarrierWaitResult {
+ let mut lock = self.lock.lock().unwrap();
+ let local_gen = lock.generation_id;
+ lock.count += 1;
+ if lock.count < self.num_threads {
+ // We need a while loop to guard against spurious wakeups.
+ // https://en.wikipedia.org/wiki/Spurious_wakeup
+ while local_gen == lock.generation_id {
+ lock = self.cvar.wait(lock).unwrap();
+ }
+ BarrierWaitResult(false)
+ } else {
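+ // This thread's arrival completed the barrier: reset the count for the
+ // next round, advance the generation, and wake every waiting thread.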
+ lock.count = 0;
+ lock.generation_id = lock.generation_id.wrapping_add(1);
+ self.cvar.notify_all();
+ BarrierWaitResult(true)
+ }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for BarrierWaitResult {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BarrierWaitResult").field("is_leader", &self.is_leader()).finish()
+ }
+}
+
+impl BarrierWaitResult {
+ /// Returns `true` if this thread is the "leader thread" for the call to
+ /// [`Barrier::wait()`].
+ ///
+ /// Only one thread will have `true` returned from their result, all other
+ /// threads will have `false` returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Barrier;
+ ///
+ /// let barrier = Barrier::new(1);
+ /// let barrier_wait_result = barrier.wait();
+ /// println!("{:?}", barrier_wait_result.is_leader());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn is_leader(&self) -> bool {
+ self.0
+ }
+}
diff --git a/library/std/src/sync/barrier/tests.rs b/library/std/src/sync/barrier/tests.rs
new file mode 100644
index 000000000..834a3e751
--- /dev/null
+++ b/library/std/src/sync/barrier/tests.rs
@@ -0,0 +1,35 @@
+use crate::sync::mpsc::{channel, TryRecvError};
+use crate::sync::{Arc, Barrier};
+use crate::thread;
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_barrier() {
+ const N: usize = 10;
+
+ let barrier = Arc::new(Barrier::new(N));
+ let (tx, rx) = channel();
+
+ for _ in 0..N - 1 {
+ let c = barrier.clone();
+ let tx = tx.clone();
+ thread::spawn(move || {
+ tx.send(c.wait().is_leader()).unwrap();
+ });
+ }
+
+ // At this point, all spawned threads should be blocked,
+ // so we shouldn't get anything from the port
+ assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));
+
+ let mut leader_found = barrier.wait().is_leader();
+
+ // Now, the barrier is cleared and we should get data.
+ for _ in 0..N - 1 {
+ if rx.recv().unwrap() {
+ assert!(!leader_found);
+ leader_found = true;
+ }
+ }
+ assert!(leader_found);
+}
diff --git a/library/std/src/sync/condvar.rs b/library/std/src/sync/condvar.rs
new file mode 100644
index 000000000..eb1e7135a
--- /dev/null
+++ b/library/std/src/sync/condvar.rs
@@ -0,0 +1,564 @@
+#[cfg(test)]
+mod tests;
+
+use crate::fmt;
+use crate::sync::{mutex, poison, LockResult, MutexGuard, PoisonError};
+use crate::sys_common::condvar as sys;
+use crate::time::{Duration, Instant};
+
+/// A type indicating whether a timed wait on a condition variable returned
+/// due to a time out or not.
+///
+/// It is returned by the [`wait_timeout`] method.
+///
+/// [`wait_timeout`]: Condvar::wait_timeout
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+#[stable(feature = "wait_timeout", since = "1.5.0")]
+pub struct WaitTimeoutResult(bool);
+
+impl WaitTimeoutResult {
+ /// Returns `true` if the wait was known to have timed out.
+ ///
+ /// # Examples
+ ///
+ /// This example spawns a thread which will update the boolean value and
+ /// then wait 100 milliseconds before notifying the condvar.
+ ///
+ /// The main thread will wait with a timeout on the condvar and then leave
+ /// once the boolean has been updated and notified.
+ ///
+ /// ```
+ /// use std::sync::{Arc, Condvar, Mutex};
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move || {
+ /// let (lock, cvar) = &*pair2;
+ ///
+ /// // Let's wait 20 milliseconds before notifying the condvar.
+ /// thread::sleep(Duration::from_millis(20));
+ ///
+ /// let mut started = lock.lock().unwrap();
+ /// // We update the boolean value.
+ /// *started = true;
+ /// cvar.notify_one();
+ /// });
+ ///
+ /// // Wait for the thread to start up.
+ /// let (lock, cvar) = &*pair;
+ /// let mut started = lock.lock().unwrap();
+ /// loop {
+ /// // Let's put a timeout on the condvar's wait.
+ /// let result = cvar.wait_timeout(started, Duration::from_millis(10)).unwrap();
+ /// // 10 milliseconds have passed, or maybe the value changed!
+ /// started = result.0;
+ /// if *started {
+ /// // We received the notification and the value has been updated, we can leave.
+ /// break
+ /// }
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "wait_timeout", since = "1.5.0")]
+ pub fn timed_out(&self) -> bool {
+ self.0
+ }
+}
+
+/// A Condition Variable
+///
+/// Condition variables represent the ability to block a thread such that it
+/// consumes no CPU time while waiting for an event to occur. Condition
+/// variables are typically associated with a boolean predicate (a condition)
+/// and a mutex. The predicate is always verified inside of the mutex before
+/// determining that a thread must block.
+///
+/// Functions in this module will block the current **thread** of execution.
+/// Note that any attempt to use multiple mutexes on the same condition
+/// variable may result in a runtime panic.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::{Arc, Mutex, Condvar};
+/// use std::thread;
+///
+/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
+/// let pair2 = Arc::clone(&pair);
+///
+/// // Inside of our lock, spawn a new thread, and then wait for it to start.
+/// thread::spawn(move|| {
+/// let (lock, cvar) = &*pair2;
+/// let mut started = lock.lock().unwrap();
+/// *started = true;
+/// // We notify the condvar that the value has changed.
+/// cvar.notify_one();
+/// });
+///
+/// // Wait for the thread to start up.
+/// let (lock, cvar) = &*pair;
+/// let mut started = lock.lock().unwrap();
+/// while !*started {
+/// started = cvar.wait(started).unwrap();
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Condvar {
+ inner: sys::Condvar,
+}
+
+impl Condvar {
+ /// Creates a new condition variable which is ready to be waited on and
+ /// notified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Condvar;
+ ///
+ /// let condvar = Condvar::new();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new() -> Condvar {
+ Condvar { inner: sys::Condvar::new() }
+ }
+
+ /// Blocks the current thread until this condition variable receives a
+ /// notification.
+ ///
+ /// This function will atomically unlock the mutex specified (represented by
+ /// `guard`) and block the current thread. This means that any calls
+ /// to [`notify_one`] or [`notify_all`] which happen logically after the
+ /// mutex is unlocked are candidates to wake this thread up. When this
+ /// function call returns, the lock specified will have been re-acquired.
+ ///
+ /// Note that this function is susceptible to spurious wakeups. Condition
+ /// variables normally have a boolean predicate associated with them, and
+ /// the predicate must always be checked each time this function returns to
+ /// protect against spurious wakeups.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the mutex being waited on is
+ /// poisoned when this thread re-acquires the lock. For more information,
+ /// see information about [poisoning] on the [`Mutex`] type.
+ ///
+ /// # Panics
+ ///
+ /// This function may [`panic!`] if it is used with more than one mutex
+ /// over time.
+ ///
+ /// [`notify_one`]: Self::notify_one
+ /// [`notify_all`]: Self::notify_all
+ /// [poisoning]: super::Mutex#poisoning
+ /// [`Mutex`]: super::Mutex
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex, Condvar};
+ /// use std::thread;
+ ///
+ /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move|| {
+ /// let (lock, cvar) = &*pair2;
+ /// let mut started = lock.lock().unwrap();
+ /// *started = true;
+ /// // We notify the condvar that the value has changed.
+ /// cvar.notify_one();
+ /// });
+ ///
+ /// // Wait for the thread to start up.
+ /// let (lock, cvar) = &*pair;
+ /// let mut started = lock.lock().unwrap();
+ /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
+ /// while !*started {
+ /// started = cvar.wait(started).unwrap();
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
+ let poisoned = unsafe {
+ let lock = mutex::guard_lock(&guard);
+ self.inner.wait(lock);
+ mutex::guard_poison(&guard).get()
+ };
+ if poisoned { Err(PoisonError::new(guard)) } else { Ok(guard) }
+ }
+
+ /// Blocks the current thread until this condition variable receives a
+ /// notification and the provided condition is false.
+ ///
+ /// This function will atomically unlock the mutex specified (represented by
+ /// `guard`) and block the current thread. This means that any calls
+ /// to [`notify_one`] or [`notify_all`] which happen logically after the
+ /// mutex is unlocked are candidates to wake this thread up. When this
+ /// function call returns, the lock specified will have been re-acquired.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the mutex being waited on is
+ /// poisoned when this thread re-acquires the lock. For more information,
+ /// see information about [poisoning] on the [`Mutex`] type.
+ ///
+ /// [`notify_one`]: Self::notify_one
+ /// [`notify_all`]: Self::notify_all
+ /// [poisoning]: super::Mutex#poisoning
+ /// [`Mutex`]: super::Mutex
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex, Condvar};
+ /// use std::thread;
+ ///
+ /// let pair = Arc::new((Mutex::new(true), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move|| {
+ /// let (lock, cvar) = &*pair2;
+ /// let mut pending = lock.lock().unwrap();
+ /// *pending = false;
+ /// // We notify the condvar that the value has changed.
+ /// cvar.notify_one();
+ /// });
+ ///
+ /// // Wait for the thread to start up.
+ /// let (lock, cvar) = &*pair;
+ /// // As long as the value inside the `Mutex<bool>` is `true`, we wait.
+ /// let _guard = cvar.wait_while(lock.lock().unwrap(), |pending| { *pending }).unwrap();
+ /// ```
+ #[stable(feature = "wait_until", since = "1.42.0")]
+ pub fn wait_while<'a, T, F>(
+ &self,
+ mut guard: MutexGuard<'a, T>,
+ mut condition: F,
+ ) -> LockResult<MutexGuard<'a, T>>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ while condition(&mut *guard) {
+ guard = self.wait(guard)?;
+ }
+ Ok(guard)
+ }
+
+ /// Waits on this condition variable for a notification, timing out after a
+ /// specified duration.
+ ///
+ /// The semantics of this function are equivalent to [`wait`]
+ /// except that the thread will be blocked for roughly no longer
+ /// than `ms` milliseconds. This method should not be used for
+ /// precise timing due to anomalies such as preemption or platform
+ /// differences that might not cause the maximum amount of time
+ /// waited to be precisely `ms`.
+ ///
+ /// Note that the best effort is made to ensure that the time waited is
+ /// measured with a monotonic clock, and not affected by the changes made to
+ /// the system time.
+ ///
+ /// The returned boolean is `false` only if the timeout is known
+ /// to have elapsed.
+ ///
+ /// Like [`wait`], the lock specified will be re-acquired when this function
+ /// returns, regardless of whether the timeout elapsed or not.
+ ///
+ /// [`wait`]: Self::wait
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex, Condvar};
+ /// use std::thread;
+ ///
+ /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move|| {
+ /// let (lock, cvar) = &*pair2;
+ /// let mut started = lock.lock().unwrap();
+ /// *started = true;
+ /// // We notify the condvar that the value has changed.
+ /// cvar.notify_one();
+ /// });
+ ///
+ /// // Wait for the thread to start up.
+ /// let (lock, cvar) = &*pair;
+ /// let mut started = lock.lock().unwrap();
+ /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
+ /// loop {
+ /// let result = cvar.wait_timeout_ms(started, 10).unwrap();
+ /// // 10 milliseconds have passed, or maybe the value changed!
+ /// started = result.0;
+ /// if *started {
+ /// // We received the notification and the value has been updated, we can leave.
+ /// break
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.6.0", note = "replaced by `std::sync::Condvar::wait_timeout`")]
+ pub fn wait_timeout_ms<'a, T>(
+ &self,
+ guard: MutexGuard<'a, T>,
+ ms: u32,
+ ) -> LockResult<(MutexGuard<'a, T>, bool)> {
+ let res = self.wait_timeout(guard, Duration::from_millis(ms as u64));
+ poison::map_result(res, |(a, b)| (a, !b.timed_out()))
+ }
+
+ /// Waits on this condition variable for a notification, timing out after a
+ /// specified duration.
+ ///
+ /// The semantics of this function are equivalent to [`wait`] except that
+ /// the thread will be blocked for roughly no longer than `dur`. This
+ /// method should not be used for precise timing due to anomalies such as
+ /// preemption or platform differences that might not cause the maximum
+ /// amount of time waited to be precisely `dur`.
+ ///
+ /// Note that the best effort is made to ensure that the time waited is
+ /// measured with a monotonic clock, and not affected by the changes made to
+ /// the system time. This function is susceptible to spurious wakeups.
+ /// Condition variables normally have a boolean predicate associated with
+ /// them, and the predicate must always be checked each time this function
+ /// returns to protect against spurious wakeups. Additionally, it is
+ /// typically desirable for the timeout to not exceed some duration in
+ /// spite of spurious wakes, thus the sleep-duration is decremented by the
+ /// amount slept. Alternatively, use the `wait_timeout_while` method
+ /// to wait with a timeout while a predicate is true.
+ ///
+ /// The returned [`WaitTimeoutResult`] value indicates if the timeout is
+ /// known to have elapsed.
+ ///
+ /// Like [`wait`], the lock specified will be re-acquired when this function
+ /// returns, regardless of whether the timeout elapsed or not.
+ ///
+ /// [`wait`]: Self::wait
+ /// [`wait_timeout_while`]: Self::wait_timeout_while
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex, Condvar};
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move|| {
+ /// let (lock, cvar) = &*pair2;
+ /// let mut started = lock.lock().unwrap();
+ /// *started = true;
+ /// // We notify the condvar that the value has changed.
+ /// cvar.notify_one();
+ /// });
+ ///
+ /// // wait for the thread to start up
+ /// let (lock, cvar) = &*pair;
+ /// let mut started = lock.lock().unwrap();
+ /// // as long as the value inside the `Mutex<bool>` is `false`, we wait
+ /// loop {
+ /// let result = cvar.wait_timeout(started, Duration::from_millis(10)).unwrap();
+ /// // 10 milliseconds have passed, or maybe the value changed!
+ /// started = result.0;
+ /// if *started {
+ /// // We received the notification and the value has been updated, we can leave.
+ /// break
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "wait_timeout", since = "1.5.0")]
+ pub fn wait_timeout<'a, T>(
+ &self,
+ guard: MutexGuard<'a, T>,
+ dur: Duration,
+ ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
+ let (poisoned, result) = unsafe {
+ let lock = mutex::guard_lock(&guard);
+ let success = self.inner.wait_timeout(lock, dur);
+ (mutex::guard_poison(&guard).get(), WaitTimeoutResult(!success))
+ };
+ if poisoned { Err(PoisonError::new((guard, result))) } else { Ok((guard, result)) }
+ }
+
+ /// Waits on this condition variable for a notification, timing out after a
+ /// specified duration.
+ ///
+ /// The semantics of this function are equivalent to [`wait_while`] except
+ /// that the thread will be blocked for roughly no longer than `dur`. This
+ /// method should not be used for precise timing due to anomalies such as
+ /// preemption or platform differences that might not cause the maximum
+ /// amount of time waited to be precisely `dur`.
+ ///
+ /// Note that the best effort is made to ensure that the time waited is
+ /// measured with a monotonic clock, and not affected by the changes made to
+ /// the system time.
+ ///
+ /// The returned [`WaitTimeoutResult`] value indicates if the timeout is
+ /// known to have elapsed without the condition being met.
+ ///
+ /// Like [`wait_while`], the lock specified will be re-acquired when this
+ /// function returns, regardless of whether the timeout elapsed or not.
+ ///
+ /// [`wait_while`]: Self::wait_while
+ /// [`wait_timeout`]: Self::wait_timeout
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex, Condvar};
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let pair = Arc::new((Mutex::new(true), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move|| {
+ /// let (lock, cvar) = &*pair2;
+ /// let mut pending = lock.lock().unwrap();
+ /// *pending = false;
+ /// // We notify the condvar that the value has changed.
+ /// cvar.notify_one();
+ /// });
+ ///
+ /// // wait for the thread to start up
+ /// let (lock, cvar) = &*pair;
+ /// let result = cvar.wait_timeout_while(
+ /// lock.lock().unwrap(),
+ /// Duration::from_millis(100),
+ /// |&mut pending| pending,
+ /// ).unwrap();
+ /// if result.1.timed_out() {
+ /// // timed-out without the condition ever evaluating to false.
+ /// }
+ /// // access the locked mutex via result.0
+ /// ```
+ #[stable(feature = "wait_timeout_until", since = "1.42.0")]
+ pub fn wait_timeout_while<'a, T, F>(
+ &self,
+ mut guard: MutexGuard<'a, T>,
+ dur: Duration,
+ mut condition: F,
+ ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let start = Instant::now();
+ loop {
+ if !condition(&mut *guard) {
+ return Ok((guard, WaitTimeoutResult(false)));
+ }
+ let timeout = match dur.checked_sub(start.elapsed()) {
+ Some(timeout) => timeout,
+ None => return Ok((guard, WaitTimeoutResult(true))),
+ };
+ guard = self.wait_timeout(guard, timeout)?.0;
+ }
+ }
+
+ /// Wakes up one blocked thread on this condvar.
+ ///
+ /// If there is a blocked thread on this condition variable, then it will
+ /// be woken up from its call to [`wait`] or [`wait_timeout`]. Calls to
+ /// `notify_one` are not buffered in any way.
+ ///
+ /// To wake up all threads, see [`notify_all`].
+ ///
+ /// [`wait`]: Self::wait
+ /// [`wait_timeout`]: Self::wait_timeout
+ /// [`notify_all`]: Self::notify_all
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex, Condvar};
+ /// use std::thread;
+ ///
+ /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move|| {
+ /// let (lock, cvar) = &*pair2;
+ /// let mut started = lock.lock().unwrap();
+ /// *started = true;
+ /// // We notify the condvar that the value has changed.
+ /// cvar.notify_one();
+ /// });
+ ///
+ /// // Wait for the thread to start up.
+ /// let (lock, cvar) = &*pair;
+ /// let mut started = lock.lock().unwrap();
+ /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
+ /// while !*started {
+ /// started = cvar.wait(started).unwrap();
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn notify_one(&self) {
+ self.inner.notify_one()
+ }
+
+ /// Wakes up all blocked threads on this condvar.
+ ///
+ /// This method will ensure that any current waiters on the condition
+ /// variable are awoken. Calls to `notify_all()` are not buffered in any
+ /// way.
+ ///
+ /// To wake up only one thread, see [`notify_one`].
+ ///
+ /// [`notify_one`]: Self::notify_one
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex, Condvar};
+ /// use std::thread;
+ ///
+ /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ /// let pair2 = Arc::clone(&pair);
+ ///
+ /// thread::spawn(move|| {
+ /// let (lock, cvar) = &*pair2;
+ /// let mut started = lock.lock().unwrap();
+ /// *started = true;
+ /// // We notify the condvar that the value has changed.
+ /// cvar.notify_all();
+ /// });
+ ///
+ /// // Wait for the thread to start up.
+ /// let (lock, cvar) = &*pair;
+ /// let mut started = lock.lock().unwrap();
+ /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
+ /// while !*started {
+ /// started = cvar.wait(started).unwrap();
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn notify_all(&self) {
+ self.inner.notify_all()
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Condvar {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Condvar").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "condvar_default", since = "1.10.0")]
+impl Default for Condvar {
+ /// Creates a `Condvar` which is ready to be waited on and notified.
+ fn default() -> Condvar {
+ Condvar::new()
+ }
+}
diff --git a/library/std/src/sync/condvar/tests.rs b/library/std/src/sync/condvar/tests.rs
new file mode 100644
index 000000000..24f467f0b
--- /dev/null
+++ b/library/std/src/sync/condvar/tests.rs
@@ -0,0 +1,190 @@
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::mpsc::channel;
+use crate::sync::{Arc, Condvar, Mutex};
+use crate::thread;
+use crate::time::Duration;
+
+#[test]
+fn smoke() {
+ let c = Condvar::new();
+ c.notify_one();
+ c.notify_all();
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn notify_one() {
+ let m = Arc::new(Mutex::new(()));
+ let m2 = m.clone();
+ let c = Arc::new(Condvar::new());
+ let c2 = c.clone();
+
+ let g = m.lock().unwrap();
+ let _t = thread::spawn(move || {
+ let _g = m2.lock().unwrap();
+ c2.notify_one();
+ });
+ let g = c.wait(g).unwrap();
+ drop(g);
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn notify_all() {
+ const N: usize = 10;
+
+ let data = Arc::new((Mutex::new(0), Condvar::new()));
+ let (tx, rx) = channel();
+ for _ in 0..N {
+ let data = data.clone();
+ let tx = tx.clone();
+ thread::spawn(move || {
+ let &(ref lock, ref cond) = &*data;
+ let mut cnt = lock.lock().unwrap();
+ *cnt += 1;
+ if *cnt == N {
+ tx.send(()).unwrap();
+ }
+ while *cnt != 0 {
+ cnt = cond.wait(cnt).unwrap();
+ }
+ tx.send(()).unwrap();
+ });
+ }
+ drop(tx);
+
+ let &(ref lock, ref cond) = &*data;
+ rx.recv().unwrap();
+ let mut cnt = lock.lock().unwrap();
+ *cnt = 0;
+ cond.notify_all();
+ drop(cnt);
+
+ for _ in 0..N {
+ rx.recv().unwrap();
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn wait_while() {
+ let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ let pair2 = pair.clone();
+
+ // Inside of our lock, spawn a new thread, and then wait for it to start.
+ thread::spawn(move || {
+ let &(ref lock, ref cvar) = &*pair2;
+ let mut started = lock.lock().unwrap();
+ *started = true;
+ // We notify the condvar that the value has changed.
+ cvar.notify_one();
+ });
+
+ // Wait for the thread to start up.
+ let &(ref lock, ref cvar) = &*pair;
+ let guard = cvar.wait_while(lock.lock().unwrap(), |started| !*started);
+ assert!(*guard.unwrap());
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn wait_timeout_wait() {
+ let m = Arc::new(Mutex::new(()));
+ let c = Arc::new(Condvar::new());
+
+ loop {
+ let g = m.lock().unwrap();
+ let (_g, no_timeout) = c.wait_timeout(g, Duration::from_millis(1)).unwrap();
+ // Spurious wakeups mean the wait may return before the timeout elapses,
+ // so if it did not actually time out, run the test again.
+ if !no_timeout.timed_out() {
+ continue;
+ }
+
+ break;
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn wait_timeout_while_wait() {
+ let m = Arc::new(Mutex::new(()));
+ let c = Arc::new(Condvar::new());
+
+ let g = m.lock().unwrap();
+ let (_g, wait) = c.wait_timeout_while(g, Duration::from_millis(1), |_| true).unwrap();
+ // The predicate never becomes false, so the wait can only end by timing out.
+ assert!(wait.timed_out());
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn wait_timeout_while_instant_satisfy() {
+ let m = Arc::new(Mutex::new(()));
+ let c = Arc::new(Condvar::new());
+
+ let g = m.lock().unwrap();
+ let (_g, wait) = c.wait_timeout_while(g, Duration::from_millis(0), |_| false).unwrap();
+ // Ensure it didn't time out, even though we were given no time at all.
+ assert!(!wait.timed_out());
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn wait_timeout_while_wake() {
+ let pair = Arc::new((Mutex::new(false), Condvar::new()));
+ let pair_copy = pair.clone();
+
+ let &(ref m, ref c) = &*pair;
+ let g = m.lock().unwrap();
+ let _t = thread::spawn(move || {
+ let &(ref lock, ref cvar) = &*pair_copy;
+ let mut started = lock.lock().unwrap();
+ thread::sleep(Duration::from_millis(1));
+ *started = true;
+ cvar.notify_one();
+ });
+ let (g2, wait) = c
+ .wait_timeout_while(g, Duration::from_millis(u64::MAX), |&mut notified| !notified)
+ .unwrap();
+ // Ensure the wait ended because we were notified, not because it timed out.
+ assert!(!wait.timed_out());
+ assert!(*g2);
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn wait_timeout_wake() {
+ let m = Arc::new(Mutex::new(()));
+ let c = Arc::new(Condvar::new());
+
+ loop {
+ let g = m.lock().unwrap();
+
+ let c2 = c.clone();
+ let m2 = m.clone();
+
+ let notified = Arc::new(AtomicBool::new(false));
+ let notified_copy = notified.clone();
+
+ let t = thread::spawn(move || {
+ let _g = m2.lock().unwrap();
+ thread::sleep(Duration::from_millis(1));
+ notified_copy.store(true, Ordering::SeqCst);
+ c2.notify_one();
+ });
+ let (g, timeout_res) = c.wait_timeout(g, Duration::from_millis(u64::MAX)).unwrap();
+ assert!(!timeout_res.timed_out());
+ // Spurious wakeups mean the wait may return before we are notified,
+ // so if we were not actually notified, run the test again.
+ if !notified.load(Ordering::SeqCst) {
+ t.join().unwrap();
+ continue;
+ }
+ drop(g);
+
+ t.join().unwrap();
+
+ break;
+ }
+}
diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs
new file mode 100644
index 000000000..535cc1c42
--- /dev/null
+++ b/library/std/src/sync/lazy_lock.rs
@@ -0,0 +1,121 @@
+use crate::cell::Cell;
+use crate::fmt;
+use crate::ops::Deref;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::sync::OnceLock;
+
+/// A value which is initialized on the first access.
+///
+/// This type is a thread-safe `Lazy`, and can be used in statics.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::collections::HashMap;
+///
+/// use std::sync::LazyLock;
+///
+/// static HASHMAP: LazyLock<HashMap<i32, String>> = LazyLock::new(|| {
+/// println!("initializing");
+/// let mut m = HashMap::new();
+/// m.insert(13, "Spica".to_string());
+/// m.insert(74, "Hoyten".to_string());
+/// m
+/// });
+///
+/// fn main() {
+/// println!("ready");
+/// std::thread::spawn(|| {
+/// println!("{:?}", HASHMAP.get(&13));
+/// }).join().unwrap();
+/// println!("{:?}", HASHMAP.get(&74));
+///
+/// // Prints:
+/// // ready
+/// // initializing
+/// // Some("Spica")
+/// // Some("Hoyten")
+/// }
+/// ```
+#[unstable(feature = "once_cell", issue = "74465")]
+pub struct LazyLock<T, F = fn() -> T> {
+ cell: OnceLock<T>,
+ init: Cell<Option<F>>,
+}
+
+impl<T, F> LazyLock<T, F> {
+ /// Creates a new lazy value with the given initializing
+ /// function.
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub const fn new(f: F) -> LazyLock<T, F> {
+ LazyLock { cell: OnceLock::new(), init: Cell::new(Some(f)) }
+ }
+}
+
+impl<T, F: FnOnce() -> T> LazyLock<T, F> {
+ /// Forces the evaluation of this lazy value and
+ /// returns a reference to the result. This is equivalent
+ /// to the `Deref` impl, but is explicit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::LazyLock;
+ ///
+ /// let lazy = LazyLock::new(|| 92);
+ ///
+ /// assert_eq!(LazyLock::force(&lazy), &92);
+ /// assert_eq!(&*lazy, &92);
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn force(this: &LazyLock<T, F>) -> &T {
+ this.cell.get_or_init(|| match this.init.take() {
+ Some(f) => f(),
+ None => panic!("Lazy instance has previously been poisoned"),
+ })
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ LazyLock::force(self)
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Default> Default for LazyLock<T> {
+ /// Creates a new lazy value using `Default` as the initializing function.
+ fn default() -> LazyLock<T> {
+ LazyLock::new(T::default)
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Lazy").field("cell", &self.cell).finish_non_exhaustive()
+ }
+}
+
+// We never create a `&F` from a `&LazyLock<T, F>`, so it is fine
+// not to impl `Sync` for `F`.
+// We do create a `&mut Option<F>` in `force`, but this is
+// properly synchronized, so it only happens once
+// and therefore does not contribute to this impl.
+#[unstable(feature = "once_cell", issue = "74465")]
+unsafe impl<T, F: Send> Sync for LazyLock<T, F> where OnceLock<T>: Sync {}
+// auto-derived `Send` impl is OK.
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> where OnceLock<T>: RefUnwindSafe {}
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T, F: UnwindSafe> UnwindSafe for LazyLock<T, F> where OnceLock<T>: UnwindSafe {}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/sync/lazy_lock/tests.rs b/library/std/src/sync/lazy_lock/tests.rs
new file mode 100644
index 000000000..f11b66bfc
--- /dev/null
+++ b/library/std/src/sync/lazy_lock/tests.rs
@@ -0,0 +1,143 @@
+use crate::{
+ cell::LazyCell,
+ panic,
+ sync::{
+ atomic::{AtomicUsize, Ordering::SeqCst},
+ Mutex,
+ },
+ sync::{LazyLock, OnceLock},
+ thread,
+};
+
+fn spawn_and_wait<R: Send + 'static>(f: impl FnOnce() -> R + Send + 'static) -> R {
+ thread::spawn(f).join().unwrap()
+}
+
+#[test]
+fn lazy_default() {
+ static CALLED: AtomicUsize = AtomicUsize::new(0);
+
+ struct Foo(u8);
+ impl Default for Foo {
+ fn default() -> Self {
+ CALLED.fetch_add(1, SeqCst);
+ Foo(42)
+ }
+ }
+
+ let lazy: LazyCell<Mutex<Foo>> = <_>::default();
+
+ assert_eq!(CALLED.load(SeqCst), 0);
+
+ assert_eq!(lazy.lock().unwrap().0, 42);
+ assert_eq!(CALLED.load(SeqCst), 1);
+
+ lazy.lock().unwrap().0 = 21;
+
+ assert_eq!(lazy.lock().unwrap().0, 21);
+ assert_eq!(CALLED.load(SeqCst), 1);
+}
+
+#[test]
+fn lazy_poisoning() {
+ let x: LazyCell<String> = LazyCell::new(|| panic!("kaboom"));
+ for _ in 0..2 {
+ let res = panic::catch_unwind(panic::AssertUnwindSafe(|| x.len()));
+ assert!(res.is_err());
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn sync_lazy_new() {
+ static CALLED: AtomicUsize = AtomicUsize::new(0);
+ static SYNC_LAZY: LazyLock<i32> = LazyLock::new(|| {
+ CALLED.fetch_add(1, SeqCst);
+ 92
+ });
+
+ assert_eq!(CALLED.load(SeqCst), 0);
+
+ spawn_and_wait(|| {
+ let y = *SYNC_LAZY - 30;
+ assert_eq!(y, 62);
+ assert_eq!(CALLED.load(SeqCst), 1);
+ });
+
+ let y = *SYNC_LAZY - 30;
+ assert_eq!(y, 62);
+ assert_eq!(CALLED.load(SeqCst), 1);
+}
+
+#[test]
+fn sync_lazy_default() {
+ static CALLED: AtomicUsize = AtomicUsize::new(0);
+
+ struct Foo(u8);
+ impl Default for Foo {
+ fn default() -> Self {
+ CALLED.fetch_add(1, SeqCst);
+ Foo(42)
+ }
+ }
+
+ let lazy: LazyLock<Mutex<Foo>> = <_>::default();
+
+ assert_eq!(CALLED.load(SeqCst), 0);
+
+ assert_eq!(lazy.lock().unwrap().0, 42);
+ assert_eq!(CALLED.load(SeqCst), 1);
+
+ lazy.lock().unwrap().0 = 21;
+
+ assert_eq!(lazy.lock().unwrap().0, 21);
+ assert_eq!(CALLED.load(SeqCst), 1);
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn static_sync_lazy() {
+ static XS: LazyLock<Vec<i32>> = LazyLock::new(|| {
+ let mut xs = Vec::new();
+ xs.push(1);
+ xs.push(2);
+ xs.push(3);
+ xs
+ });
+
+ spawn_and_wait(|| {
+ assert_eq!(&*XS, &vec![1, 2, 3]);
+ });
+
+ assert_eq!(&*XS, &vec![1, 2, 3]);
+}
+
+#[test]
+fn static_sync_lazy_via_fn() {
+ fn xs() -> &'static Vec<i32> {
+ static XS: OnceLock<Vec<i32>> = OnceLock::new();
+ XS.get_or_init(|| {
+ let mut xs = Vec::new();
+ xs.push(1);
+ xs.push(2);
+ xs.push(3);
+ xs
+ })
+ }
+ assert_eq!(xs(), &vec![1, 2, 3]);
+}
+
+#[test]
+fn sync_lazy_poisoning() {
+ let x: LazyLock<String> = LazyLock::new(|| panic!("kaboom"));
+ for _ in 0..2 {
+ let res = panic::catch_unwind(|| x.len());
+ assert!(res.is_err());
+ }
+}
+
+#[test]
+fn is_sync_send() {
+ fn assert_traits<T: Send + Sync>() {}
+ assert_traits::<LazyLock<String>>();
+}
diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs
new file mode 100644
index 000000000..7b507a169
--- /dev/null
+++ b/library/std/src/sync/mod.rs
@@ -0,0 +1,189 @@
+//! Useful synchronization primitives.
+//!
+//! ## The need for synchronization
+//!
+//! Conceptually, a Rust program is a series of operations which will
+//! be executed on a computer. The timeline of events happening in the
+//! program is consistent with the order of the operations in the code.
+//!
+//! Consider the following code, operating on some global static variables:
+//!
+//! ```rust
+//! static mut A: u32 = 0;
+//! static mut B: u32 = 0;
+//! static mut C: u32 = 0;
+//!
+//! fn main() {
+//! unsafe {
+//! A = 3;
+//! B = 4;
+//! A = A + B;
+//! C = B;
+//! println!("{A} {B} {C}");
+//! C = A;
+//! }
+//! }
+//! ```
+//!
+//! It appears as if some variables stored in memory are changed, an addition
+//! is performed, the result is stored in `A`, and the variable `C` is
+//! modified twice.
+//!
+//! When only a single thread is involved, the results are as expected:
+//! the line `7 4 4` gets printed.
+//!
+//! As for what happens behind the scenes, when optimizations are enabled the
+//! final generated machine code might look very different from the code:
+//!
+//! - The first store to `C` might be moved before the store to `A` or `B`,
+//! _as if_ we had written `C = 4; A = 3; B = 4`.
+//!
+//! - Assignment of `A + B` to `A` might be removed, since the sum can be stored
+//! in a temporary location until it gets printed, with the global variable
+//! never getting updated.
+//!
+//! - The final result could be determined just by looking at the code
+//! at compile time, so [constant folding] might turn the whole
+//! block into a simple `println!("7 4 4")`.
+//!
+//! The compiler is allowed to perform any combination of these
+//! optimizations, as long as the final optimized code, when executed,
+//! produces the same results as the one without optimizations.
+//!
+//! Due to the [concurrency] involved in modern computers, assumptions
+//! about the program's execution order are often wrong. Access to
+//! global variables can lead to nondeterministic results, **even if**
+//! compiler optimizations are disabled, and it is **still possible**
+//! to introduce synchronization bugs.
+//!
+//! Note that thanks to Rust's safety guarantees, accessing global (static)
+//! variables requires `unsafe` code, assuming we don't use any of the
+//! synchronization primitives in this module.
+//!
+//! [constant folding]: https://en.wikipedia.org/wiki/Constant_folding
+//! [concurrency]: https://en.wikipedia.org/wiki/Concurrency_(computer_science)
+//!
+//! ## Out-of-order execution
+//!
+//! Instructions can execute in a different order from the one we define, due to
+//! various reasons:
+//!
+//! - The **compiler** reordering instructions: If the compiler can issue an
+//! instruction at an earlier point, it will try to do so. For example, it
+//! might hoist memory loads to the top of a code block, so that the CPU can
+//! start [prefetching] the values from memory.
+//!
+//! In single-threaded scenarios, this can cause issues when writing
+//! signal handlers or certain kinds of low-level code.
+//! Use [compiler fences] to prevent this reordering.
+//!
+//! - A **single processor** executing instructions [out-of-order]:
+//! Modern CPUs are capable of [superscalar] execution,
+//! i.e., multiple instructions might be executing at the same time,
+//! even though the machine code describes a sequential process.
+//!
+//! This kind of reordering is handled transparently by the CPU.
+//!
+//! - A **multiprocessor** system executing multiple hardware threads
+//! at the same time: In multi-threaded scenarios, you can use two
+//! kinds of primitives to deal with synchronization:
+//! - [memory fences] to ensure memory accesses are made visible to
+//! other CPUs in the right order.
+//! - [atomic operations] to ensure simultaneous access to the same
+//! memory location doesn't lead to undefined behavior.
+//!
+//! [prefetching]: https://en.wikipedia.org/wiki/Cache_prefetching
+//! [compiler fences]: crate::sync::atomic::compiler_fence
+//! [out-of-order]: https://en.wikipedia.org/wiki/Out-of-order_execution
+//! [superscalar]: https://en.wikipedia.org/wiki/Superscalar_processor
+//! [memory fences]: crate::sync::atomic::fence
+//! [atomic operations]: crate::sync::atomic
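+//!
+//! As a minimal sketch (added here, not part of the original text) of the
+//! atomic operations listed above, two threads can share a flag through an
+//! `AtomicBool` without any `unsafe` code:
+//!
+//! ```
+//! use std::sync::atomic::{AtomicBool, Ordering};
+//! use std::sync::Arc;
+//! use std::thread;
+//!
+//! let ready = Arc::new(AtomicBool::new(false));
+//! let ready2 = Arc::clone(&ready);
+//!
+//! let t = thread::spawn(move || {
+//!     // `Release` pairs with the `Acquire` load below.
+//!     ready2.store(true, Ordering::Release);
+//! });
+//!
+//! t.join().unwrap();
+//! assert!(ready.load(Ordering::Acquire));
+//! ```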
+//!
+//! ## Higher-level synchronization objects
+//!
+//! Most of the low-level synchronization primitives are quite error-prone and
+//! inconvenient to use, which is why the standard library also exposes some
+//! higher-level synchronization objects.
+//!
+//! These abstractions can be built out of lower-level primitives.
+//! For efficiency, the sync objects in the standard library are usually
+//! implemented with help from the operating system's kernel, which is
+//! able to reschedule the threads while they are blocked on acquiring
+//! a lock.
+//!
+//! The following is an overview of the available synchronization
+//! objects:
+//!
+//! - [`Arc`]: Atomically Reference-Counted pointer, which can be used
+//! in multithreaded environments to prolong the lifetime of some
+//! data until all the threads have finished using it.
+//!
+//! - [`Barrier`]: Ensures multiple threads will wait for each other
+//! to reach a point in the program, before continuing execution all
+//! together.
+//!
+//! - [`Condvar`]: Condition Variable, providing the ability to block
+//! a thread while waiting for an event to occur.
+//!
+//! - [`mpsc`]: Multi-producer, single-consumer queues, used for
+//! message-based communication. Can provide a lightweight
+//! inter-thread synchronization mechanism, at the cost of some
+//! extra memory.
+//!
+//! - [`Mutex`]: Mutual Exclusion mechanism, which ensures that at
+//! most one thread at a time is able to access some data.
+//!
+//! - [`Once`]: Used for thread-safe, one-time initialization of a
+//! global variable.
+//!
+//! - [`RwLock`]: Provides a mutual exclusion mechanism which allows
+//! multiple readers at the same time, while allowing only one
+//! writer at a time. In some cases, this can be more efficient than
+//! a mutex.
+//!
+//! [`Arc`]: crate::sync::Arc
+//! [`Barrier`]: crate::sync::Barrier
+//! [`Condvar`]: crate::sync::Condvar
+//! [`mpsc`]: crate::sync::mpsc
+//! [`Mutex`]: crate::sync::Mutex
+//! [`Once`]: crate::sync::Once
+//! [`RwLock`]: crate::sync::RwLock
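+//!
+//! As a quick illustration (a sketch added here, not part of the original
+//! overview), an [`Arc`]-wrapped [`Mutex`] lets several threads update the
+//! same value:
+//!
+//! ```
+//! use std::sync::{Arc, Mutex};
+//! use std::thread;
+//!
+//! let counter = Arc::new(Mutex::new(0));
+//! let handles: Vec<_> = (0..4)
+//!     .map(|_| {
+//!         let counter = Arc::clone(&counter);
+//!         thread::spawn(move || *counter.lock().unwrap() += 1)
+//!     })
+//!     .collect();
+//! for handle in handles {
+//!     handle.join().unwrap();
+//! }
+//! assert_eq!(*counter.lock().unwrap(), 4);
+//! ```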
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use alloc_crate::sync::{Arc, Weak};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::sync::atomic;
+#[unstable(feature = "exclusive_wrapper", issue = "98407")]
+pub use core::sync::Exclusive;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::barrier::{Barrier, BarrierWaitResult};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::condvar::{Condvar, WaitTimeoutResult};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::mutex::{Mutex, MutexGuard};
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+pub use self::once::{Once, OnceState, ONCE_INIT};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::poison::{LockResult, PoisonError, TryLockError, TryLockResult};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+#[unstable(feature = "once_cell", issue = "74465")]
+pub use self::lazy_lock::LazyLock;
+#[unstable(feature = "once_cell", issue = "74465")]
+pub use self::once_lock::OnceLock;
+
+pub mod mpsc;
+
+mod barrier;
+mod condvar;
+mod lazy_lock;
+mod mutex;
+mod once;
+mod once_lock;
+mod poison;
+mod rwlock;
diff --git a/library/std/src/sync/mpsc/blocking.rs b/library/std/src/sync/mpsc/blocking.rs
new file mode 100644
index 000000000..021df7b09
--- /dev/null
+++ b/library/std/src/sync/mpsc/blocking.rs
@@ -0,0 +1,82 @@
+//! Generic support for building blocking abstractions.
+
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::Arc;
+use crate::thread::{self, Thread};
+use crate::time::Instant;
+
+struct Inner {
+ thread: Thread,
+ woken: AtomicBool,
+}
+
+unsafe impl Send for Inner {}
+unsafe impl Sync for Inner {}
+
+#[derive(Clone)]
+pub struct SignalToken {
+ inner: Arc<Inner>,
+}
+
+pub struct WaitToken {
+ inner: Arc<Inner>,
+}
+
+impl !Send for WaitToken {}
+
+impl !Sync for WaitToken {}
+
+pub fn tokens() -> (WaitToken, SignalToken) {
+ let inner = Arc::new(Inner { thread: thread::current(), woken: AtomicBool::new(false) });
+ let wait_token = WaitToken { inner: inner.clone() };
+ let signal_token = SignalToken { inner };
+ (wait_token, signal_token)
+}
+
+impl SignalToken {
+ pub fn signal(&self) -> bool {
+ let wake = self
+ .inner
+ .woken
+ .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+ .is_ok();
+ if wake {
+ self.inner.thread.unpark();
+ }
+ wake
+ }
+
+ /// Converts to an unsafe raw pointer. Useful for storing in a pipe's state
+ /// flag.
+ #[inline]
+ pub unsafe fn to_raw(self) -> *mut u8 {
+ Arc::into_raw(self.inner) as *mut u8
+ }
+
+ /// Converts from an unsafe raw pointer. Useful for retrieving a pipe's state
+ /// flag.
+ #[inline]
+ pub unsafe fn from_raw(signal_ptr: *mut u8) -> SignalToken {
+ SignalToken { inner: Arc::from_raw(signal_ptr as *mut Inner) }
+ }
+}
+
+impl WaitToken {
+ pub fn wait(self) {
+ while !self.inner.woken.load(Ordering::SeqCst) {
+ thread::park()
+ }
+ }
+
+ /// Returns `true` if we wake up normally.
+ pub fn wait_max_until(self, end: Instant) -> bool {
+ while !self.inner.woken.load(Ordering::SeqCst) {
+ let now = Instant::now();
+ if now >= end {
+ return false;
+ }
+ thread::park_timeout(end - now)
+ }
+ true
+ }
+}
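+
+// A sketch of how these tokens are intended to be used by the channel
+// flavors in the sibling modules (illustrative only, not the real call
+// sites):
+//
+//     let (wait_token, signal_token) = tokens();
+//     // The receiving side stashes `signal_token` where a sender can find
+//     // it (e.g. as a raw pointer via `to_raw`) and then parks itself:
+//     wait_token.wait();
+//     // A sender later calls `signal_token.signal()`, which flips `woken`
+//     // and unparks the receiving thread at most once.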
diff --git a/library/std/src/sync/mpsc/cache_aligned.rs b/library/std/src/sync/mpsc/cache_aligned.rs
new file mode 100644
index 000000000..9197f0d6e
--- /dev/null
+++ b/library/std/src/sync/mpsc/cache_aligned.rs
@@ -0,0 +1,25 @@
+use crate::ops::{Deref, DerefMut};
+
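+// Wrapper that aligns its contents to a cache line: 128 bytes on aarch64,
+// 64 bytes elsewhere (see the `repr(align)` attributes below). This keeps
+// values used by different threads on separate lines, avoiding false sharing.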
+#[derive(Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(target_arch = "aarch64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "aarch64"), repr(align(64)))]
+pub(super) struct CacheAligned<T>(pub T);
+
+impl<T> Deref for CacheAligned<T> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<T> DerefMut for CacheAligned<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl<T> CacheAligned<T> {
+ pub(super) fn new(t: T) -> Self {
+ CacheAligned(t)
+ }
+}
diff --git a/library/std/src/sync/mpsc/mod.rs b/library/std/src/sync/mpsc/mod.rs
new file mode 100644
index 000000000..e85a87239
--- /dev/null
+++ b/library/std/src/sync/mpsc/mod.rs
@@ -0,0 +1,1669 @@
+//! Multi-producer, single-consumer FIFO queue communication primitives.
+//!
+//! This module provides message-based communication over channels, concretely
+//! defined among three types:
+//!
+//! * [`Sender`]
+//! * [`SyncSender`]
+//! * [`Receiver`]
+//!
+//! A [`Sender`] or [`SyncSender`] is used to send data to a [`Receiver`]. Both
+//! senders are clone-able (multi-producer) such that many threads can send
+//! simultaneously to one receiver (single-consumer).
+//!
+//! These channels come in two flavors:
+//!
+//! 1. An asynchronous, infinitely buffered channel. The [`channel`] function
+//! will return a `(Sender, Receiver)` tuple where all sends will be
+//! **asynchronous** (they never block). The channel conceptually has an
+//! infinite buffer.
+//!
+//! 2. A synchronous, bounded channel. The [`sync_channel`] function will
+//! return a `(SyncSender, Receiver)` tuple where the storage for pending
+//! messages is a pre-allocated buffer of a fixed size. All sends will be
+//! **synchronous** by blocking until there is buffer space available. Note
+//! that a bound of 0 is allowed, causing the channel to become a "rendezvous"
+//! channel where each sender atomically hands off a message to a receiver.
+//!
+//! [`send`]: Sender::send
+//!
+//! ## Disconnection
+//!
+//! The send and receive operations on channels will all return a [`Result`]
+//! indicating whether the operation succeeded or not. An unsuccessful operation
+//! is normally indicative of the other half of a channel having "hung up" by
+//! being dropped in its corresponding thread.
+//!
+//! Once half of a channel has been deallocated, most operations can no longer
+//! continue to make progress, so [`Err`] will be returned. Many applications
+//! will continue to [`unwrap`] the results returned from this module,
+//! instigating a propagation of failure among threads if one unexpectedly dies.
+//!
+//! [`unwrap`]: Result::unwrap
+//!
+//! # Examples
+//!
+//! Simple usage:
+//!
+//! ```
+//! use std::thread;
+//! use std::sync::mpsc::channel;
+//!
+//! // Create a simple streaming channel
+//! let (tx, rx) = channel();
+//! thread::spawn(move|| {
+//! tx.send(10).unwrap();
+//! });
+//! assert_eq!(rx.recv().unwrap(), 10);
+//! ```
+//!
+//! Shared usage:
+//!
+//! ```
+//! use std::thread;
+//! use std::sync::mpsc::channel;
+//!
+//! // Create a shared channel that can be sent along from many threads
+//! // where tx is the sending half (tx for transmission), and rx is the receiving
+//! // half (rx for receiving).
+//! let (tx, rx) = channel();
+//! for i in 0..10 {
+//! let tx = tx.clone();
+//! thread::spawn(move|| {
+//! tx.send(i).unwrap();
+//! });
+//! }
+//!
+//! for _ in 0..10 {
+//! let j = rx.recv().unwrap();
+//! assert!(0 <= j && j < 10);
+//! }
+//! ```
+//!
+//! Propagating panics:
+//!
+//! ```
+//! use std::sync::mpsc::channel;
+//!
+//! // The call to recv() will return an error because the channel has already
+//! // hung up (or been deallocated)
+//! let (tx, rx) = channel::<i32>();
+//! drop(tx);
+//! assert!(rx.recv().is_err());
+//! ```
+//!
+//! Synchronous channels:
+//!
+//! ```
+//! use std::thread;
+//! use std::sync::mpsc::sync_channel;
+//!
+//! let (tx, rx) = sync_channel::<i32>(0);
+//! thread::spawn(move|| {
+//! // This will wait for the parent thread to start receiving
+//! tx.send(53).unwrap();
+//! });
+//! rx.recv().unwrap();
+//! ```
+//!
+//! Unbounded receive loop:
+//!
+//! ```
+//! use std::sync::mpsc::sync_channel;
+//! use std::thread;
+//!
+//! let (tx, rx) = sync_channel(3);
+//!
+//! for _ in 0..3 {
+//! // It would work the same without the thread and the clone here,
+//! // since one `tx` would still be left.
+//! let tx = tx.clone();
+//! // cloned tx dropped within thread
+//! thread::spawn(move || tx.send("ok").unwrap());
+//! }
+//!
+//! // Drop the last sender to stop `rx` from waiting for more messages.
+//! // The program will not complete if we comment this out.
+//! // **All** senders (`tx` and its clones) need to be dropped for `rx` to return `Err`.
+//! drop(tx);
+//!
+//! // Unbounded receiver waiting for all senders to complete.
+//! while let Ok(msg) = rx.recv() {
+//! println!("{msg}");
+//! }
+//!
+//! println!("completed");
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod sync_tests;
+
+// A description of how Rust's channel implementation works
+//
+// Channels are supposed to be the basic building block for all other
+// concurrent primitives that are used in Rust. As a result, the channel type
+// needs to be highly optimized, flexible, and broad enough for use everywhere.
+//
+// All of the channels are built on lock-free data structures, and the
+// channels themselves are consequently lock-free data structures as well. As
+// always with lock-free code, this is very much "here be dragons" territory,
+// especially because I'm unaware of any academic papers that have gone into
+// great depth about channels of these flavors.
+//
+// ## Flavors of channels
+//
+// From the perspective of a consumer of this library, there is only one flavor
+// of channel. This channel can be used as a stream and cloned to allow multiple
+// senders. Under the hood, however, there are actually three flavors of
+// channels in play.
+//
+// * Flavor::Oneshots - these channels are highly optimized for the one-send use
+// case. They contain as few atomics as possible and
+// involve exactly one allocation.
+// * Streams - these channels are optimized for the non-shared use case. They
+// use a different concurrent queue that is more tailored for this
+// use case. The initial allocation of this flavor of channel is not
+// optimized.
+// * Shared - this is the most general form of channel that this module offers,
+// a channel with multiple senders. This type is as optimized as it
+// can be, but the previous two types mentioned are much faster for
+// their use-cases.
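+//
+// As a rough sketch of how these flavors play out in the code below (see
+// `Sender::send` and `Sender::clone`), a channel starts out as a Oneshot and
+// is upgraded on demand:
+//
+//     let (tx, rx) = channel::<i32>();   // starts as Flavor::Oneshot
+//     tx.send(1).unwrap();               // first send stays Oneshot
+//     tx.send(2).unwrap();               // second send upgrades to Stream
+//     let tx2 = tx.clone();              // cloning upgrades to Shared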
+//
+// ## Concurrent queues
+//
+// The basic idea of Rust's Sender/Receiver types is that send() never blocks,
+// but recv() obviously blocks. This means that under the hood there must be
+// some shared and concurrent queue holding all of the actual data.
+//
+// With two flavors of channels, two flavors of queues are also used. We have
+// chosen to use queues from a well-known author that are abbreviated as SPSC
+// and MPSC (single producer, single consumer and multiple producer, single
+// consumer). SPSC queues are used for streams while MPSC queues are used for
+// shared channels.
+//
+// ### SPSC optimizations
+//
+// The SPSC queue found online is essentially a linked list of nodes where one
+// half of the nodes are the "queue of data" and the other half of nodes are a
+// cache of unused nodes. The unused nodes are used such that an allocation is
+// not required on every push() and a free doesn't need to happen on every
+// pop().
+//
+// As found online, however, the cache of nodes is of an infinite size. This
+// means that if a channel at one point in its life had 50k items in the queue,
+// then the queue will always have the capacity for 50k items. I believed that
+// this was an unnecessary limitation of the implementation, so I have altered
+// the queue to optionally have a bound on the cache size.
+//
+// By default, streams will have an unbounded SPSC queue with a small-ish cache
+// size. The hope is that the cache is still large enough to have very fast
+// send() operations while remaining small enough that millions of channels
+// can coexist at once.
+//
+// ### MPSC optimizations
+//
+// Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
+// a linked list under the hood to earn its unboundedness, but I have not put
+// forth much effort into having a cache of nodes similar to the SPSC queue.
+//
+// For now, I believe that this is "ok" because shared channels are not the most
+// common type, but soon we may wish to revisit this queue choice and determine
+// another candidate for backend storage of shared channels.
+//
+// ## Overview of the Implementation
+//
+// Now that there's a little background on the concurrent queues used, it's
+// worth going into much more detail about the channels themselves. The basic
+// pseudocode for a send/recv are:
+//
+//
+//     send(t)                             recv()
+//       queue.push(t)                       return if queue.pop()
+//       if increment() == -1                  deschedule {
+//         wakeup()                              if decrement() > 0
+//                                                 cancel_deschedule()
+//                                             }
+//                                           queue.pop()
+//
+// As mentioned before, there are no locks in this implementation, only atomic
+// instructions are used.
+//
+// ### The internal atomic counter
+//
+// Every channel has a shared counter with each half to keep track of the size
+// of the queue. This counter is used to abort descheduling by the receiver and
+// to know when to wake up on the sending side.
+//
+// As seen in the pseudocode, senders will increment this count and receivers
+// will decrement the count. The theory behind this is that if a sender sees a
+// -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
+// then it doesn't need to block.
+//
+// The recv() method has a beginning call to pop(), and if successful, it needs
+// to decrement the count. It is a crucial implementation detail that this
+// decrement does *not* happen to the shared counter. If this were the case,
+// then it would be possible for the counter to be very negative when there were
+// no receivers waiting, in which case the senders would have to determine when
+// it was actually appropriate to wake up a receiver.
+//
+// Instead, the "steal count" is kept track of separately (not atomically
+// because it's only used by receivers), and then the decrement() call when
+// descheduling will lump in all of the recent steals into one large decrement.
+//
+// The implication of this is that if a sender sees a -1 count, then there's
+// guaranteed to be a waiter waiting!
+//
+// ## Native Implementation
+//
+// A major goal of these channels is to work seamlessly on and off the runtime.
+// All of the previous race conditions have been worded in terms of
+// scheduler-isms (which are obviously not available without the runtime).
+//
+// For now, native usage of channels (off the runtime) will fall back onto
+// mutexes/cond vars for descheduling/atomic decisions. The no-contention path
+// is still entirely lock-free, the "deschedule" blocks above are surrounded by
+// a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
+// condition variable.
+//
+// ## Select
+//
+// Being able to support selection over channels has greatly influenced this
+// design, and not only does selection need to work inside the runtime, but also
+// outside the runtime.
+//
+// The implementation is fairly straightforward. The goal of select() is not to
+// return some data, but only to return which channel can receive data without
+// blocking. The implementation is essentially the entire blocking procedure
+// followed by an increment as soon as it's woken up. The cancellation procedure
+// involves an increment and swapping out of to_wake to acquire ownership of the
+// thread to unblock.
+//
+// Sadly this current implementation requires multiple allocations, so I have
+// seen the throughput of select() be much worse than it should be. I do not
+// believe that there is anything fundamental that needs to change about these
+// channels, however, in order to support a more efficient select().
+//
+// FIXME: Select is now removed, so these factors are ready to be cleaned up!
+//
+// # Conclusion
+//
+// And now that you've seen all the races that I found and attempted to fix,
+// here's the code for you to find some more!
+
+use crate::cell::UnsafeCell;
+use crate::error;
+use crate::fmt;
+use crate::mem;
+use crate::sync::Arc;
+use crate::time::{Duration, Instant};
+
+mod blocking;
+mod mpsc_queue;
+mod oneshot;
+mod shared;
+mod spsc_queue;
+mod stream;
+mod sync;
+
+mod cache_aligned;
+
+/// The receiving half of Rust's [`channel`] (or [`sync_channel`]) type.
+/// This half can only be owned by one thread.
+///
+/// Messages sent to the channel can be retrieved using [`recv`].
+///
+/// [`recv`]: Receiver::recv
+///
+/// # Examples
+///
+/// ```rust
+/// use std::sync::mpsc::channel;
+/// use std::thread;
+/// use std::time::Duration;
+///
+/// let (send, recv) = channel();
+///
+/// thread::spawn(move || {
+/// send.send("Hello world!").unwrap();
+/// thread::sleep(Duration::from_secs(2)); // block for two seconds
+/// send.send("Delayed for 2 seconds").unwrap();
+/// });
+///
+/// println!("{}", recv.recv().unwrap()); // Received immediately
+/// println!("Waiting...");
+/// println!("{}", recv.recv().unwrap()); // Received after 2 seconds
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Receiver")]
+pub struct Receiver<T> {
+ inner: UnsafeCell<Flavor<T>>,
+}
+
+// The receiver port can be sent from place to place, so long as it
+// is not used to receive non-sendable things.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for Receiver<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> !Sync for Receiver<T> {}
+
+/// An iterator over messages on a [`Receiver`], created by [`iter`].
+///
+/// This iterator will block whenever [`next`] is called,
+/// waiting for a new message, and [`None`] will be returned
+/// when the corresponding channel has hung up.
+///
+/// [`iter`]: Receiver::iter
+/// [`next`]: Iterator::next
+///
+/// # Examples
+///
+/// ```rust
+/// use std::sync::mpsc::channel;
+/// use std::thread;
+///
+/// let (send, recv) = channel();
+///
+/// thread::spawn(move || {
+/// send.send(1u8).unwrap();
+/// send.send(2u8).unwrap();
+/// send.send(3u8).unwrap();
+/// });
+///
+/// for x in recv.iter() {
+/// println!("Got: {x}");
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Iter<'a, T: 'a> {
+ rx: &'a Receiver<T>,
+}
+
+/// An iterator that attempts to yield all pending values for a [`Receiver`],
+/// created by [`try_iter`].
+///
+/// [`None`] will be returned when there are no pending values remaining or
+/// if the corresponding channel has hung up.
+///
+/// This iterator will never block the caller in order to wait for data to
+/// become available. Instead, it will return [`None`].
+///
+/// [`try_iter`]: Receiver::try_iter
+///
+/// # Examples
+///
+/// ```rust
+/// use std::sync::mpsc::channel;
+/// use std::thread;
+/// use std::time::Duration;
+///
+/// let (sender, receiver) = channel();
+///
+/// // Nothing is in the buffer yet
+/// assert!(receiver.try_iter().next().is_none());
+/// println!("Nothing in the buffer...");
+///
+/// thread::spawn(move || {
+/// sender.send(1).unwrap();
+/// sender.send(2).unwrap();
+/// sender.send(3).unwrap();
+/// });
+///
+/// println!("Going to sleep...");
+/// thread::sleep(Duration::from_secs(2)); // block for two seconds
+///
+/// for x in receiver.try_iter() {
+/// println!("Got: {x}");
+/// }
+/// ```
+#[stable(feature = "receiver_try_iter", since = "1.15.0")]
+#[derive(Debug)]
+pub struct TryIter<'a, T: 'a> {
+ rx: &'a Receiver<T>,
+}
+
+/// An owning iterator over messages on a [`Receiver`],
+/// created by [`into_iter`].
+///
+/// This iterator will block whenever [`next`]
+/// is called, waiting for a new message, and [`None`] will be
+/// returned if the corresponding channel has hung up.
+///
+/// [`into_iter`]: Receiver::into_iter
+/// [`next`]: Iterator::next
+///
+/// # Examples
+///
+/// ```rust
+/// use std::sync::mpsc::channel;
+/// use std::thread;
+///
+/// let (send, recv) = channel();
+///
+/// thread::spawn(move || {
+/// send.send(1u8).unwrap();
+/// send.send(2u8).unwrap();
+/// send.send(3u8).unwrap();
+/// });
+///
+/// for x in recv.into_iter() {
+/// println!("Got: {x}");
+/// }
+/// ```
+#[stable(feature = "receiver_into_iter", since = "1.1.0")]
+#[derive(Debug)]
+pub struct IntoIter<T> {
+ rx: Receiver<T>,
+}
+
+/// The sending-half of Rust's asynchronous [`channel`] type. This half can only be
+/// owned by one thread, but it can be cloned to send to other threads.
+///
+/// Messages can be sent through this channel with [`send`].
+///
+/// Note: all senders (the original and the clones) need to be dropped for the receiver
+/// to stop blocking while waiting for messages with [`Receiver::recv`].
+///
+/// [`send`]: Sender::send
+///
+/// # Examples
+///
+/// ```rust
+/// use std::sync::mpsc::channel;
+/// use std::thread;
+///
+/// let (sender, receiver) = channel();
+/// let sender2 = sender.clone();
+///
+/// // First thread owns sender
+/// thread::spawn(move || {
+/// sender.send(1).unwrap();
+/// });
+///
+/// // Second thread owns sender2
+/// thread::spawn(move || {
+/// sender2.send(2).unwrap();
+/// });
+///
+/// let msg = receiver.recv().unwrap();
+/// let msg2 = receiver.recv().unwrap();
+///
+/// assert_eq!(3, msg + msg2);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Sender<T> {
+ inner: UnsafeCell<Flavor<T>>,
+}
+
+// The send port can be sent from place to place, so long as it
+// is not used to send non-sendable things.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for Sender<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> !Sync for Sender<T> {}
+
+/// The sending-half of Rust's synchronous [`sync_channel`] type.
+///
+/// Messages can be sent through this channel with [`send`] or [`try_send`].
+///
+/// [`send`] will block if there is no space in the internal buffer.
+///
+/// [`send`]: SyncSender::send
+/// [`try_send`]: SyncSender::try_send
+///
+/// # Examples
+///
+/// ```rust
+/// use std::sync::mpsc::sync_channel;
+/// use std::thread;
+///
+/// // Create a sync_channel with buffer size 2
+/// let (sync_sender, receiver) = sync_channel(2);
+/// let sync_sender2 = sync_sender.clone();
+///
+/// // First thread owns sync_sender
+/// thread::spawn(move || {
+/// sync_sender.send(1).unwrap();
+/// sync_sender.send(2).unwrap();
+/// });
+///
+/// // Second thread owns sync_sender2
+/// thread::spawn(move || {
+/// sync_sender2.send(3).unwrap();
+/// // thread will now block since the buffer is full
+/// println!("Thread unblocked!");
+/// });
+///
+/// let mut msg;
+///
+/// msg = receiver.recv().unwrap();
+/// println!("message {msg} received");
+///
+/// // "Thread unblocked!" will be printed now
+///
+/// msg = receiver.recv().unwrap();
+/// println!("message {msg} received");
+///
+/// msg = receiver.recv().unwrap();
+///
+/// println!("message {msg} received");
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SyncSender<T> {
+ inner: Arc<sync::Packet<T>>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for SyncSender<T> {}
+
+/// An error returned from the [`Sender::send`] or [`SyncSender::send`]
+/// function on **channel**s.
+///
+/// A **send** operation can only fail if the receiving end of a channel is
+/// disconnected, implying that the data could never be received. The error
+/// contains the data being sent as a payload so it can be recovered.
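+///
+/// # Examples
+///
+/// A short illustration of recovering the payload from the error (a sketch
+/// added here; see also [`Sender::send`]):
+///
+/// ```
+/// use std::sync::mpsc::channel;
+///
+/// let (tx, rx) = channel();
+/// // With the receiver gone, the send fails and hands the value back.
+/// drop(rx);
+/// assert_eq!(tx.send(5).unwrap_err().0, 5);
+/// ```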
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub struct SendError<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
+
+/// An error returned from the [`recv`] function on a [`Receiver`].
+///
+/// The [`recv`] operation can only fail if the sending half of a
+/// [`channel`] (or [`sync_channel`]) is disconnected, implying that no further
+/// messages will ever be received.
+///
+/// [`recv`]: Receiver::recv
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RecvError;
+
+/// This enumeration is the list of the possible reasons that [`try_recv`] could
+/// not return data when called. This can occur with both a [`channel`] and
+/// a [`sync_channel`].
+///
+/// [`try_recv`]: Receiver::try_recv
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum TryRecvError {
+ /// This **channel** is currently empty, but the **Sender**(s) have not yet
+ /// disconnected, so data may yet become available.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Empty,
+
+ /// The **channel**'s sending half has become disconnected, and there will
+ /// never be any more data received on it.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Disconnected,
+}
+
+/// This enumeration is the list of possible errors that made [`recv_timeout`]
+/// unable to return data when called. This can occur with both a [`channel`] and
+/// a [`sync_channel`].
+///
+/// [`recv_timeout`]: Receiver::recv_timeout
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
+pub enum RecvTimeoutError {
+ /// This **channel** is currently empty, but the **Sender**(s) have not yet
+ /// disconnected, so data may yet become available.
+ #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
+ Timeout,
+ /// The **channel**'s sending half has become disconnected, and there will
+ /// never be any more data received on it.
+ #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
+ Disconnected,
+}
+
+/// This enumeration is the list of the possible error outcomes for the
+/// [`try_send`] method.
+///
+/// [`try_send`]: SyncSender::try_send
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub enum TrySendError<T> {
+ /// The data could not be sent on the [`sync_channel`] because it would require that
+ /// the caller block to send the data.
+ ///
+ /// If this is a buffered channel, then the buffer is full at this time. If
+ /// this is not a buffered channel, then there is no [`Receiver`] available to
+ /// acquire the data.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Full(#[stable(feature = "rust1", since = "1.0.0")] T),
+
+ /// This [`sync_channel`]'s receiving half has disconnected, so the data could not be
+ /// sent. The data is returned to the caller in this case.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Disconnected(#[stable(feature = "rust1", since = "1.0.0")] T),
+}
+
+enum Flavor<T> {
+ Oneshot(Arc<oneshot::Packet<T>>),
+ Stream(Arc<stream::Packet<T>>),
+ Shared(Arc<shared::Packet<T>>),
+ Sync(Arc<sync::Packet<T>>),
+}
+
+#[doc(hidden)]
+trait UnsafeFlavor<T> {
+ fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>>;
+ unsafe fn inner_mut(&self) -> &mut Flavor<T> {
+ &mut *self.inner_unsafe().get()
+ }
+ unsafe fn inner(&self) -> &Flavor<T> {
+ &*self.inner_unsafe().get()
+ }
+}
+impl<T> UnsafeFlavor<T> for Sender<T> {
+ fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> {
+ &self.inner
+ }
+}
+impl<T> UnsafeFlavor<T> for Receiver<T> {
+ fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> {
+ &self.inner
+ }
+}
+
+/// Creates a new asynchronous channel, returning the sender/receiver halves.
+/// All data sent on the [`Sender`] will become available on the [`Receiver`] in
+/// the same order as it was sent, and no [`send`] will block the calling thread
+/// (this channel has an "infinite buffer", unlike [`sync_channel`], which will
+/// block after its buffer limit is reached). [`recv`] will block until a message
+/// is available while there is at least one [`Sender`] alive (including clones).
+///
+/// The [`Sender`] can be cloned to [`send`] to the same channel multiple times, but
+/// only one [`Receiver`] is supported.
+///
+/// If the [`Receiver`] is disconnected while trying to [`send`] with the
+/// [`Sender`], the [`send`] method will return a [`SendError`]. Similarly, if the
+/// [`Sender`] is disconnected while trying to [`recv`], the [`recv`] method will
+/// return a [`RecvError`].
+///
+/// [`send`]: Sender::send
+/// [`recv`]: Receiver::recv
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::mpsc::channel;
+/// use std::thread;
+///
+/// let (sender, receiver) = channel();
+///
+/// // Spawn off an expensive computation
+/// thread::spawn(move|| {
+/// # fn expensive_computation() {}
+/// sender.send(expensive_computation()).unwrap();
+/// });
+///
+/// // Do some useful work for awhile
+///
+/// // Let's see what that answer was
+/// println!("{:?}", receiver.recv().unwrap());
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ let a = Arc::new(oneshot::Packet::new());
+ (Sender::new(Flavor::Oneshot(a.clone())), Receiver::new(Flavor::Oneshot(a)))
+}
+
+/// Creates a new synchronous, bounded channel.
+/// All data sent on the [`SyncSender`] will become available on the [`Receiver`]
+/// in the same order as it was sent. Like asynchronous [`channel`]s, the
+/// [`Receiver`] will block until a message becomes available. `sync_channel`
+/// differs greatly in the semantics of the sender, however.
+///
+/// This channel has an internal buffer on which messages will be queued.
+/// `bound` specifies the buffer size. When the internal buffer becomes full,
+/// future sends will *block* waiting for the buffer to open up. Note that a
+/// buffer size of 0 is valid, in which case this becomes a "rendezvous" channel
+/// where each [`send`] will not return until a [`recv`] is paired with it.
+///
+/// The [`SyncSender`] can be cloned to [`send`] to the same channel multiple
+/// times, but only one [`Receiver`] is supported.
+///
+/// Like asynchronous channels, if the [`Receiver`] is disconnected while trying
+/// to [`send`] with the [`SyncSender`], the [`send`] method will return a
+/// [`SendError`]. Similarly, if the [`SyncSender`] is disconnected while trying
+/// to [`recv`], the [`recv`] method will return a [`RecvError`].
+///
+/// [`send`]: SyncSender::send
+/// [`recv`]: Receiver::recv
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::mpsc::sync_channel;
+/// use std::thread;
+///
+/// let (sender, receiver) = sync_channel(1);
+///
+/// // this returns immediately
+/// sender.send(1).unwrap();
+///
+/// thread::spawn(move|| {
+/// // this will block until the previous message has been received
+/// sender.send(2).unwrap();
+/// });
+///
+/// assert_eq!(receiver.recv().unwrap(), 1);
+/// assert_eq!(receiver.recv().unwrap(), 2);
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
+ let a = Arc::new(sync::Packet::new(bound));
+ (SyncSender::new(a.clone()), Receiver::new(Flavor::Sync(a)))
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Sender
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Sender<T> {
+ fn new(inner: Flavor<T>) -> Sender<T> {
+ Sender { inner: UnsafeCell::new(inner) }
+ }
+
+ /// Attempts to send a value on this channel, returning it back if it could
+ /// not be sent.
+ ///
+ /// A successful send occurs when it is determined that the other end of
+ /// the channel has not hung up already. An unsuccessful send would be one
+ /// where the corresponding receiver has already been deallocated. Note
+ /// that a return value of [`Err`] means that the data will never be
+ /// received, but a return value of [`Ok`] does *not* mean that the data
+ /// will be received. It is possible for the corresponding receiver to
+ /// hang up immediately after this function returns [`Ok`].
+ ///
+ /// This method will never block the current thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::mpsc::channel;
+ ///
+ /// let (tx, rx) = channel();
+ ///
+ /// // This send is always successful
+ /// tx.send(1).unwrap();
+ ///
+ /// // This send will fail because the receiver is gone
+ /// drop(rx);
+ /// assert_eq!(tx.send(1).unwrap_err().0, 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ let (new_inner, ret) = match *unsafe { self.inner() } {
+ Flavor::Oneshot(ref p) => {
+ if !p.sent() {
+ return p.send(t).map_err(SendError);
+ } else {
+ let a = Arc::new(stream::Packet::new());
+ let rx = Receiver::new(Flavor::Stream(a.clone()));
+ match p.upgrade(rx) {
+ oneshot::UpSuccess => {
+ let ret = a.send(t);
+ (a, ret)
+ }
+ oneshot::UpDisconnected => (a, Err(t)),
+ oneshot::UpWoke(token) => {
+ // This send cannot panic because the thread is
+ // asleep (we're looking at it), so the receiver
+ // can't go away.
+ a.send(t).ok().unwrap();
+ token.signal();
+ (a, Ok(()))
+ }
+ }
+ }
+ }
+ Flavor::Stream(ref p) => return p.send(t).map_err(SendError),
+ Flavor::Shared(ref p) => return p.send(t).map_err(SendError),
+ Flavor::Sync(..) => unreachable!(),
+ };
+
+ unsafe {
+ let tmp = Sender::new(Flavor::Stream(new_inner));
+ mem::swap(self.inner_mut(), tmp.inner_mut());
+ }
+ ret.map_err(SendError)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Sender<T> {
+ /// Clone a sender to send to other threads.
+ ///
+ /// Note that all senders (including the original) need to be dropped
+ /// in order for [`Receiver::recv`] to stop blocking.
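+ ///
+ /// # Examples
+ ///
+ /// A sketch of the usual pattern: clone the sender once per worker thread,
+ /// then drop the original so the receiver can observe disconnection.
+ ///
+ /// ```
+ /// use std::sync::mpsc::channel;
+ /// use std::thread;
+ ///
+ /// let (tx, rx) = channel();
+ /// for i in 0..3 {
+ ///     let tx = tx.clone();
+ ///     thread::spawn(move || tx.send(i).unwrap());
+ /// }
+ /// drop(tx);
+ ///
+ /// let mut received: Vec<i32> = rx.iter().collect();
+ /// received.sort();
+ /// assert_eq!(received, vec![0, 1, 2]);
+ /// ```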
+ fn clone(&self) -> Sender<T> {
+ let packet = match *unsafe { self.inner() } {
+ Flavor::Oneshot(ref p) => {
+ let a = Arc::new(shared::Packet::new());
+ {
+ let guard = a.postinit_lock();
+ let rx = Receiver::new(Flavor::Shared(a.clone()));
+ let sleeper = match p.upgrade(rx) {
+ oneshot::UpSuccess | oneshot::UpDisconnected => None,
+ oneshot::UpWoke(task) => Some(task),
+ };
+ a.inherit_blocker(sleeper, guard);
+ }
+ a
+ }
+ Flavor::Stream(ref p) => {
+ let a = Arc::new(shared::Packet::new());
+ {
+ let guard = a.postinit_lock();
+ let rx = Receiver::new(Flavor::Shared(a.clone()));
+ let sleeper = match p.upgrade(rx) {
+ stream::UpSuccess | stream::UpDisconnected => None,
+ stream::UpWoke(task) => Some(task),
+ };
+ a.inherit_blocker(sleeper, guard);
+ }
+ a
+ }
+ Flavor::Shared(ref p) => {
+ p.clone_chan();
+ return Sender::new(Flavor::Shared(p.clone()));
+ }
+ Flavor::Sync(..) => unreachable!(),
+ };
+
+ unsafe {
+ let tmp = Sender::new(Flavor::Shared(packet.clone()));
+ mem::swap(self.inner_mut(), tmp.inner_mut());
+ }
+ Sender::new(Flavor::Shared(packet))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ match *unsafe { self.inner() } {
+ Flavor::Oneshot(ref p) => p.drop_chan(),
+ Flavor::Stream(ref p) => p.drop_chan(),
+ Flavor::Shared(ref p) => p.drop_chan(),
+ Flavor::Sync(..) => unreachable!(),
+ }
+ }
+}
+
+#[stable(feature = "mpsc_debug", since = "1.8.0")]
+impl<T> fmt::Debug for Sender<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Sender").finish_non_exhaustive()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SyncSender
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> SyncSender<T> {
+ fn new(inner: Arc<sync::Packet<T>>) -> SyncSender<T> {
+ SyncSender { inner }
+ }
+
+ /// Sends a value on this synchronous channel.
+ ///
+ /// This function will *block* until space in the internal buffer becomes
+ /// available or a receiver is available to hand off the message to.
+ ///
+ /// Note that a successful send does *not* guarantee that the receiver will
+ /// ever see the data if there is a buffer on this channel. Items may be
+ /// enqueued in the internal buffer for the receiver to receive at a later
+ /// time. If the buffer size is 0, however, the channel becomes a rendezvous
+ /// channel and it guarantees that the receiver has indeed received
+ /// the data if this function returns success.
+ ///
+ /// This function will never panic, but it may return [`Err`] if the
+ /// [`Receiver`] has disconnected and is no longer able to receive
+ /// information.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::sync::mpsc::sync_channel;
+ /// use std::thread;
+ ///
+ /// // Create a rendezvous sync_channel with buffer size 0
+ /// let (sync_sender, receiver) = sync_channel(0);
+ ///
+ /// thread::spawn(move || {
+ /// println!("sending message...");
+ /// sync_sender.send(1).unwrap();
+ /// // Thread is now blocked until the message is received
+ ///
+ /// println!("...message received!");
+ /// });
+ ///
+ /// let msg = receiver.recv().unwrap();
+ /// assert_eq!(1, msg);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn send(&self, t: T) -> Result<(), SendError<T>> {
+ self.inner.send(t).map_err(SendError)
+ }
+
+ /// Attempts to send a value on this channel without blocking.
+ ///
+ /// This method differs from [`send`] by returning immediately if the
+ /// channel's buffer is full or no receiver is waiting to acquire some
+ /// data. Compared with [`send`], this function has two failure cases
+ /// instead of one (one for disconnection, one for a full buffer).
+ ///
+ /// See [`send`] for notes about guarantees of whether the
+ /// receiver has received the data or not if this function is successful.
+ ///
+ /// [`send`]: Self::send
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::sync::mpsc::sync_channel;
+ /// use std::thread;
+ ///
+ /// // Create a sync_channel with buffer size 1
+ /// let (sync_sender, receiver) = sync_channel(1);
+ /// let sync_sender2 = sync_sender.clone();
+ ///
+ /// // First thread owns sync_sender
+ /// thread::spawn(move || {
+ /// sync_sender.send(1).unwrap();
+ /// sync_sender.send(2).unwrap();
+ /// // Thread blocked
+ /// });
+ ///
+ /// // Second thread owns sync_sender2
+ /// thread::spawn(move || {
+ /// // This will return an error and send
+ /// // no message if the buffer is full
+ /// let _ = sync_sender2.try_send(3);
+ /// });
+ ///
+ /// let mut msg;
+ /// msg = receiver.recv().unwrap();
+ /// println!("message {msg} received");
+ ///
+ /// msg = receiver.recv().unwrap();
+ /// println!("message {msg} received");
+ ///
+ /// // Third message may have never been sent
+ /// match receiver.try_recv() {
+ /// Ok(msg) => println!("message {msg} received"),
+ /// Err(_) => println!("the third message was never sent"),
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
+ self.inner.try_send(t)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for SyncSender<T> {
+ fn clone(&self) -> SyncSender<T> {
+ self.inner.clone_chan();
+ SyncSender::new(self.inner.clone())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Drop for SyncSender<T> {
+ fn drop(&mut self) {
+ self.inner.drop_chan();
+ }
+}
+
+#[stable(feature = "mpsc_debug", since = "1.8.0")]
+impl<T> fmt::Debug for SyncSender<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SyncSender").finish_non_exhaustive()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Receiver
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Receiver<T> {
+ fn new(inner: Flavor<T>) -> Receiver<T> {
+ Receiver { inner: UnsafeCell::new(inner) }
+ }
+
+ /// Attempts to return a pending value on this receiver without blocking.
+ ///
+ /// This method will never block the caller in order to wait for data to
+ /// become available. Instead, this will always return immediately with a
+ /// possible option of pending data on the channel.
+ ///
+ /// This is useful for a flavor of "optimistic check" before deciding to
+ /// block on a receiver.
+ ///
+ /// Compared with [`recv`], this function has two failure cases instead of one
+ /// (one for disconnection, one for an empty buffer).
+ ///
+ /// [`recv`]: Self::recv
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::sync::mpsc::{Receiver, channel};
+ ///
+ /// let (_, receiver): (_, Receiver<i32>) = channel();
+ ///
+ /// assert!(receiver.try_recv().is_err());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_recv(&self) -> Result<T, TryRecvError> {
+ loop {
+ let new_port = match *unsafe { self.inner() } {
+ Flavor::Oneshot(ref p) => match p.try_recv() {
+ Ok(t) => return Ok(t),
+ Err(oneshot::Empty) => return Err(TryRecvError::Empty),
+ Err(oneshot::Disconnected) => return Err(TryRecvError::Disconnected),
+ Err(oneshot::Upgraded(rx)) => rx,
+ },
+ Flavor::Stream(ref p) => match p.try_recv() {
+ Ok(t) => return Ok(t),
+ Err(stream::Empty) => return Err(TryRecvError::Empty),
+ Err(stream::Disconnected) => return Err(TryRecvError::Disconnected),
+ Err(stream::Upgraded(rx)) => rx,
+ },
+ Flavor::Shared(ref p) => match p.try_recv() {
+ Ok(t) => return Ok(t),
+ Err(shared::Empty) => return Err(TryRecvError::Empty),
+ Err(shared::Disconnected) => return Err(TryRecvError::Disconnected),
+ },
+ Flavor::Sync(ref p) => match p.try_recv() {
+ Ok(t) => return Ok(t),
+ Err(sync::Empty) => return Err(TryRecvError::Empty),
+ Err(sync::Disconnected) => return Err(TryRecvError::Disconnected),
+ },
+ };
+ unsafe {
+ mem::swap(self.inner_mut(), new_port.inner_mut());
+ }
+ }
+ }
+
+ /// Attempts to wait for a value on this receiver, returning an error if the
+ /// corresponding channel has hung up.
+ ///
+ /// This function will always block the current thread if there is no data
+ /// available and it's possible for more data to be sent (at least one sender
+ /// still exists). Once a message is sent to the corresponding [`Sender`]
+ /// (or [`SyncSender`]), this receiver will wake up and return that
+ /// message.
+ ///
+ /// If the corresponding [`Sender`] has disconnected, or it disconnects while
+ /// this call is blocking, this call will wake up and return [`Err`] to
+ /// indicate that no more messages can ever be received on this channel.
+ /// However, since channels are buffered, messages sent before the disconnect
+ /// will still be properly received.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::mpsc;
+ /// use std::thread;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ /// let handle = thread::spawn(move || {
+ /// send.send(1u8).unwrap();
+ /// });
+ ///
+ /// handle.join().unwrap();
+ ///
+ /// assert_eq!(Ok(1), recv.recv());
+ /// ```
+ ///
+ /// Buffering behavior:
+ ///
+ /// ```
+ /// use std::sync::mpsc;
+ /// use std::thread;
+ /// use std::sync::mpsc::RecvError;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ /// let handle = thread::spawn(move || {
+ /// send.send(1u8).unwrap();
+ /// send.send(2).unwrap();
+ /// send.send(3).unwrap();
+ /// drop(send);
+ /// });
+ ///
+ /// // wait for the thread to join so we ensure the sender is dropped
+ /// handle.join().unwrap();
+ ///
+ /// assert_eq!(Ok(1), recv.recv());
+ /// assert_eq!(Ok(2), recv.recv());
+ /// assert_eq!(Ok(3), recv.recv());
+ /// assert_eq!(Err(RecvError), recv.recv());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn recv(&self) -> Result<T, RecvError> {
+ loop {
+ let new_port = match *unsafe { self.inner() } {
+ Flavor::Oneshot(ref p) => match p.recv(None) {
+ Ok(t) => return Ok(t),
+ Err(oneshot::Disconnected) => return Err(RecvError),
+ Err(oneshot::Upgraded(rx)) => rx,
+ Err(oneshot::Empty) => unreachable!(),
+ },
+ Flavor::Stream(ref p) => match p.recv(None) {
+ Ok(t) => return Ok(t),
+ Err(stream::Disconnected) => return Err(RecvError),
+ Err(stream::Upgraded(rx)) => rx,
+ Err(stream::Empty) => unreachable!(),
+ },
+ Flavor::Shared(ref p) => match p.recv(None) {
+ Ok(t) => return Ok(t),
+ Err(shared::Disconnected) => return Err(RecvError),
+ Err(shared::Empty) => unreachable!(),
+ },
+ Flavor::Sync(ref p) => return p.recv(None).map_err(|_| RecvError),
+ };
+ unsafe {
+ mem::swap(self.inner_mut(), new_port.inner_mut());
+ }
+ }
+ }
+
+ /// Attempts to wait for a value on this receiver, returning an error if the
+ /// corresponding channel has hung up, or if it waits more than `timeout`.
+ ///
+ /// This function will always block the current thread if there is no data
+ /// available and it's possible for more data to be sent (at least one sender
+ /// still exists). Once a message is sent to the corresponding [`Sender`]
+ /// (or [`SyncSender`]), this receiver will wake up and return that
+ /// message.
+ ///
+ /// If the corresponding [`Sender`] has disconnected, or it disconnects while
+ /// this call is blocking, this call will wake up and return [`Err`] to
+ /// indicate that no more messages can ever be received on this channel.
+ /// However, since channels are buffered, messages sent before the disconnect
+ /// will still be properly received.
+ ///
+ /// # Known Issues
+ ///
+ /// There is currently a known issue (see [`#39364`]) that causes `recv_timeout`
+ /// to panic unexpectedly with the following example:
+ ///
+ /// ```no_run
+ /// use std::sync::mpsc::channel;
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let (tx, rx) = channel::<String>();
+ ///
+ /// thread::spawn(move || {
+ /// let d = Duration::from_millis(10);
+ /// loop {
+ /// println!("recv");
+ /// let _r = rx.recv_timeout(d);
+ /// }
+ /// });
+ ///
+ /// thread::sleep(Duration::from_millis(100));
+ /// let _c1 = tx.clone();
+ ///
+ /// thread::sleep(Duration::from_secs(1));
+ /// ```
+ ///
+ /// [`#39364`]: https://github.com/rust-lang/rust/issues/39364
+ ///
+ /// # Examples
+ ///
+ /// Successfully receiving value before encountering timeout:
+ ///
+ /// ```no_run
+ /// use std::thread;
+ /// use std::time::Duration;
+ /// use std::sync::mpsc;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ ///
+ /// thread::spawn(move || {
+ /// send.send('a').unwrap();
+ /// });
+ ///
+ /// assert_eq!(
+ /// recv.recv_timeout(Duration::from_millis(400)),
+ /// Ok('a')
+ /// );
+ /// ```
+ ///
+ /// Receiving an error upon reaching timeout:
+ ///
+ /// ```no_run
+ /// use std::thread;
+ /// use std::time::Duration;
+ /// use std::sync::mpsc;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ ///
+ /// thread::spawn(move || {
+ /// thread::sleep(Duration::from_millis(800));
+ /// send.send('a').unwrap();
+ /// });
+ ///
+ /// assert_eq!(
+ /// recv.recv_timeout(Duration::from_millis(400)),
+ /// Err(mpsc::RecvTimeoutError::Timeout)
+ /// );
+ /// ```
+ #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
+ pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> {
+ // Do an optimistic try_recv to avoid the performance impact of
+ // Instant::now() in the full-channel case.
+ match self.try_recv() {
+ Ok(result) => Ok(result),
+ Err(TryRecvError::Disconnected) => Err(RecvTimeoutError::Disconnected),
+ Err(TryRecvError::Empty) => match Instant::now().checked_add(timeout) {
+ Some(deadline) => self.recv_deadline(deadline),
+ // So far in the future that it's practically the same as waiting indefinitely.
+ None => self.recv().map_err(RecvTimeoutError::from),
+ },
+ }
+ }
+
+ /// Attempts to wait for a value on this receiver, returning an error if the
+ /// corresponding channel has hung up, or if `deadline` is reached.
+ ///
+ /// This function will always block the current thread if there is no data
+ /// available and it's possible for more data to be sent. Once a message is
+ /// sent to the corresponding [`Sender`] (or [`SyncSender`]), then this
+ /// receiver will wake up and return that message.
+ ///
+ /// If the corresponding [`Sender`] has disconnected, or it disconnects while
+ /// this call is blocking, this call will wake up and return [`Err`] to
+ /// indicate that no more messages can ever be received on this channel.
+ /// However, since channels are buffered, messages sent before the disconnect
+ /// will still be properly received.
+ ///
+ /// # Examples
+ ///
+ /// Successfully receiving value before reaching deadline:
+ ///
+ /// ```no_run
+ /// #![feature(deadline_api)]
+ /// use std::thread;
+ /// use std::time::{Duration, Instant};
+ /// use std::sync::mpsc;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ ///
+ /// thread::spawn(move || {
+ /// send.send('a').unwrap();
+ /// });
+ ///
+ /// assert_eq!(
+ /// recv.recv_deadline(Instant::now() + Duration::from_millis(400)),
+ /// Ok('a')
+ /// );
+ /// ```
+ ///
+ /// Receiving an error upon reaching deadline:
+ ///
+ /// ```no_run
+ /// #![feature(deadline_api)]
+ /// use std::thread;
+ /// use std::time::{Duration, Instant};
+ /// use std::sync::mpsc;
+ ///
+ /// let (send, recv) = mpsc::channel();
+ ///
+ /// thread::spawn(move || {
+ /// thread::sleep(Duration::from_millis(800));
+ /// send.send('a').unwrap();
+ /// });
+ ///
+ /// assert_eq!(
+ /// recv.recv_deadline(Instant::now() + Duration::from_millis(400)),
+ /// Err(mpsc::RecvTimeoutError::Timeout)
+ /// );
+ /// ```
+ #[unstable(feature = "deadline_api", issue = "46316")]
+ pub fn recv_deadline(&self, deadline: Instant) -> Result<T, RecvTimeoutError> {
+ use self::RecvTimeoutError::*;
+
+ loop {
+ let port_or_empty = match *unsafe { self.inner() } {
+ Flavor::Oneshot(ref p) => match p.recv(Some(deadline)) {
+ Ok(t) => return Ok(t),
+ Err(oneshot::Disconnected) => return Err(Disconnected),
+ Err(oneshot::Upgraded(rx)) => Some(rx),
+ Err(oneshot::Empty) => None,
+ },
+ Flavor::Stream(ref p) => match p.recv(Some(deadline)) {
+ Ok(t) => return Ok(t),
+ Err(stream::Disconnected) => return Err(Disconnected),
+ Err(stream::Upgraded(rx)) => Some(rx),
+ Err(stream::Empty) => None,
+ },
+ Flavor::Shared(ref p) => match p.recv(Some(deadline)) {
+ Ok(t) => return Ok(t),
+ Err(shared::Disconnected) => return Err(Disconnected),
+ Err(shared::Empty) => None,
+ },
+ Flavor::Sync(ref p) => match p.recv(Some(deadline)) {
+ Ok(t) => return Ok(t),
+ Err(sync::Disconnected) => return Err(Disconnected),
+ Err(sync::Empty) => None,
+ },
+ };
+
+ if let Some(new_port) = port_or_empty {
+ unsafe {
+ mem::swap(self.inner_mut(), new_port.inner_mut());
+ }
+ }
+
+ // If we're already past the deadline and we're here without
+ // data, return a timeout; otherwise try again.
+ if Instant::now() >= deadline {
+ return Err(Timeout);
+ }
+ }
+ }
+
+ /// Returns an iterator that will block waiting for messages, but never
+ /// [`panic!`]. It will return [`None`] when the channel has hung up.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::sync::mpsc::channel;
+ /// use std::thread;
+ ///
+ /// let (send, recv) = channel();
+ ///
+ /// thread::spawn(move || {
+ /// send.send(1).unwrap();
+ /// send.send(2).unwrap();
+ /// send.send(3).unwrap();
+ /// });
+ ///
+ /// let mut iter = recv.iter();
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), Some(3));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { rx: self }
+ }
+
+ /// Returns an iterator that will attempt to yield all pending values.
+ /// It will return `None` if there are no more pending values or if the
+ /// channel has hung up. The iterator will never [`panic!`] or block the
+ /// user by waiting for values.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::sync::mpsc::channel;
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let (sender, receiver) = channel();
+ ///
+ /// // nothing is in the buffer yet
+ /// assert!(receiver.try_iter().next().is_none());
+ ///
+ /// thread::spawn(move || {
+ /// thread::sleep(Duration::from_secs(1));
+ /// sender.send(1).unwrap();
+ /// sender.send(2).unwrap();
+ /// sender.send(3).unwrap();
+ /// });
+ ///
+ /// // nothing is in the buffer yet
+ /// assert!(receiver.try_iter().next().is_none());
+ ///
+ /// // block for two seconds
+ /// thread::sleep(Duration::from_secs(2));
+ ///
+ /// let mut iter = receiver.try_iter();
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), Some(3));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[stable(feature = "receiver_try_iter", since = "1.15.0")]
+ pub fn try_iter(&self) -> TryIter<'_, T> {
+ TryIter { rx: self }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.rx.recv().ok()
+ }
+}
+
+#[stable(feature = "receiver_try_iter", since = "1.15.0")]
+impl<'a, T> Iterator for TryIter<'a, T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.rx.try_recv().ok()
+ }
+}
+
+#[stable(feature = "receiver_into_iter", since = "1.1.0")]
+impl<'a, T> IntoIterator for &'a Receiver<T> {
+ type Item = T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "receiver_into_iter", since = "1.1.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ self.rx.recv().ok()
+ }
+}
+
+#[stable(feature = "receiver_into_iter", since = "1.1.0")]
+impl<T> IntoIterator for Receiver<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { rx: self }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ match *unsafe { self.inner() } {
+ Flavor::Oneshot(ref p) => p.drop_port(),
+ Flavor::Stream(ref p) => p.drop_port(),
+ Flavor::Shared(ref p) => p.drop_port(),
+ Flavor::Sync(ref p) => p.drop_port(),
+ }
+ }
+}
+
+#[stable(feature = "mpsc_debug", since = "1.8.0")]
+impl<T> fmt::Debug for Receiver<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Receiver").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Debug for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SendError").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Display for SendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "sending on a closed channel".fmt(f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Send> error::Error for SendError<T> {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "sending on a closed channel"
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Debug for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ TrySendError::Full(..) => "Full(..)".fmt(f),
+ TrySendError::Disconnected(..) => "Disconnected(..)".fmt(f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Display for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ TrySendError::Full(..) => "sending on a full channel".fmt(f),
+ TrySendError::Disconnected(..) => "sending on a closed channel".fmt(f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Send> error::Error for TrySendError<T> {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ match *self {
+ TrySendError::Full(..) => "sending on a full channel",
+ TrySendError::Disconnected(..) => "sending on a closed channel",
+ }
+ }
+}
+
+#[stable(feature = "mpsc_error_conversions", since = "1.24.0")]
+impl<T> From<SendError<T>> for TrySendError<T> {
+ /// Converts a `SendError<T>` into a `TrySendError<T>`.
+ ///
+ /// This conversion always returns a `TrySendError::Disconnected` containing the data in the `SendError<T>`.
+ ///
+ /// No data is allocated on the heap.
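+ ///
+ /// # Examples
+ ///
+ /// A minimal illustration of the conversion:
+ ///
+ /// ```
+ /// use std::sync::mpsc::{SendError, TrySendError};
+ ///
+ /// // A send error always carries the value that could not be sent.
+ /// let send_error: SendError<i32> = SendError(5);
+ /// let try_send_error = TrySendError::from(send_error);
+ /// assert!(matches!(try_send_error, TrySendError::Disconnected(5)));
+ /// ```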
+ fn from(err: SendError<T>) -> TrySendError<T> {
+ match err {
+ SendError(t) => TrySendError::Disconnected(t),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for RecvError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "receiving on a closed channel".fmt(f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl error::Error for RecvError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "receiving on a closed channel"
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for TryRecvError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ TryRecvError::Empty => "receiving on an empty channel".fmt(f),
+ TryRecvError::Disconnected => "receiving on a closed channel".fmt(f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl error::Error for TryRecvError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ match *self {
+ TryRecvError::Empty => "receiving on an empty channel",
+ TryRecvError::Disconnected => "receiving on a closed channel",
+ }
+ }
+}
+
+#[stable(feature = "mpsc_error_conversions", since = "1.24.0")]
+impl From<RecvError> for TryRecvError {
+ /// Converts a `RecvError` into a `TryRecvError`.
+ ///
+ /// This conversion always returns `TryRecvError::Disconnected`.
+ ///
+ /// No data is allocated on the heap.
+ fn from(err: RecvError) -> TryRecvError {
+ match err {
+ RecvError => TryRecvError::Disconnected,
+ }
+ }
+}
+
+#[stable(feature = "mpsc_recv_timeout_error", since = "1.15.0")]
+impl fmt::Display for RecvTimeoutError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ RecvTimeoutError::Timeout => "timed out waiting on channel".fmt(f),
+ RecvTimeoutError::Disconnected => "channel is empty and sending half is closed".fmt(f),
+ }
+ }
+}
+
+#[stable(feature = "mpsc_recv_timeout_error", since = "1.15.0")]
+impl error::Error for RecvTimeoutError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ match *self {
+ RecvTimeoutError::Timeout => "timed out waiting on channel",
+ RecvTimeoutError::Disconnected => "channel is empty and sending half is closed",
+ }
+ }
+}
+
+#[stable(feature = "mpsc_error_conversions", since = "1.24.0")]
+impl From<RecvError> for RecvTimeoutError {
+ /// Converts a `RecvError` into a `RecvTimeoutError`.
+ ///
+ /// This conversion always returns `RecvTimeoutError::Disconnected`.
+ ///
+ /// No data is allocated on the heap.
+ fn from(err: RecvError) -> RecvTimeoutError {
+ match err {
+ RecvError => RecvTimeoutError::Disconnected,
+ }
+ }
+}
diff --git a/library/std/src/sync/mpsc/mpsc_queue.rs b/library/std/src/sync/mpsc/mpsc_queue.rs
new file mode 100644
index 000000000..cdd64a5de
--- /dev/null
+++ b/library/std/src/sync/mpsc/mpsc_queue.rs
@@ -0,0 +1,117 @@
+//! A mostly lock-free multi-producer, single-consumer queue.
+//!
+//! This module contains an implementation of a concurrent MPSC queue. This
+//! queue can be used to share data between threads, and is also used as the
+//! building block of channels in Rust.
+//!
+//! Note that the current implementation of this queue has a caveat in its `pop`
+//! method; see that method's documentation for more information. Due to this
+//! caveat, this queue might not be appropriate for all use cases.
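+//!
+//! A minimal sketch of how the queue is used, written with the re-exported
+//! `PopResult` variants (illustrative only; this type is internal to `std`
+//! and not publicly exported, so the example is not compiled as a doctest):
+//!
+//! ```ignore
+//! let queue = Queue::new();
+//! queue.push(1);
+//! queue.push(2);
+//!
+//! // Drain the queue, yielding while a concurrent push is still in flight.
+//! loop {
+//!     match queue.pop() {
+//!         Data(value) => println!("got {value}"),
+//!         Empty => break,
+//!         Inconsistent => std::thread::yield_now(),
+//!     }
+//! }
+//! ```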
+
+// https://www.1024cores.net/home/lock-free-algorithms
+// /queues/non-intrusive-mpsc-node-based-queue
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+pub use self::PopResult::*;
+
+use core::cell::UnsafeCell;
+use core::ptr;
+
+use crate::boxed::Box;
+use crate::sync::atomic::{AtomicPtr, Ordering};
+
+/// A result of the `pop` function.
+pub enum PopResult<T> {
+ /// Some data has been popped
+ Data(T),
+ /// The queue is empty
+ Empty,
+ /// The queue is in an inconsistent state. Popping data should succeed, but
+ /// some pushers have yet to make enough progress to allow a pop to
+ /// succeed. It is recommended that a pop() occur "in the near future" in
+ /// order to see if the sender has made progress or not.
+ Inconsistent,
+}
+
+struct Node<T> {
+ next: AtomicPtr<Node<T>>,
+ value: Option<T>,
+}
+
+/// The multi-producer single-consumer structure. This is not cloneable, but it
+/// may be safely shared so long as it is guaranteed that there is only one
+/// popper at a time (many pushers are allowed).
+pub struct Queue<T> {
+ head: AtomicPtr<Node<T>>,
+ tail: UnsafeCell<*mut Node<T>>,
+}
+
+unsafe impl<T: Send> Send for Queue<T> {}
+unsafe impl<T: Send> Sync for Queue<T> {}
+
+impl<T> Node<T> {
+ unsafe fn new(v: Option<T>) -> *mut Node<T> {
+ Box::into_raw(box Node { next: AtomicPtr::new(ptr::null_mut()), value: v })
+ }
+}
+
+impl<T> Queue<T> {
+ /// Creates a new queue that is safe to share among multiple producers and
+ /// one consumer.
+ pub fn new() -> Queue<T> {
+ let stub = unsafe { Node::new(None) };
+ Queue { head: AtomicPtr::new(stub), tail: UnsafeCell::new(stub) }
+ }
+
+ /// Pushes a new value onto this queue.
+ pub fn push(&self, t: T) {
+ unsafe {
+ let n = Node::new(Some(t));
+ let prev = self.head.swap(n, Ordering::AcqRel);
+ (*prev).next.store(n, Ordering::Release);
+ }
+ }
+
+ /// Pops some data from this queue.
+ ///
+ /// Note that the current implementation means that this function cannot
+ /// return `Option<T>`. It is possible for this queue to be in an
+ /// inconsistent state where many pushes have succeeded and completely
+ /// finished, but pops cannot return `Some(t)`. This inconsistent state
+ /// happens when a pusher is pre-empted at an inopportune moment.
+ ///
+ /// This inconsistent state means that this queue does indeed have data, but
+ /// the consumer cannot currently access it.
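+ ///
+ /// For illustration, a caller might handle `Inconsistent` by yielding and
+ /// retrying, much like the shared channel flavor does (sketch only, assuming
+ /// `queue` is a `Queue` that a producer is concurrently pushing to):
+ ///
+ /// ```ignore
+ /// let value = loop {
+ ///     match queue.pop() {
+ ///         Data(t) => break Some(t),
+ ///         Empty => break None,
+ ///         // A pusher was pre-empted mid-push; give it a moment to finish.
+ ///         Inconsistent => std::thread::yield_now(),
+ ///     }
+ /// };
+ /// ```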
+ pub fn pop(&self) -> PopResult<T> {
+ unsafe {
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Ordering::Acquire);
+
+ if !next.is_null() {
+ *self.tail.get() = next;
+ assert!((*tail).value.is_none());
+ assert!((*next).value.is_some());
+ let ret = (*next).value.take().unwrap();
+ let _: Box<Node<T>> = Box::from_raw(tail);
+ return Data(ret);
+ }
+
+ if self.head.load(Ordering::Acquire) == tail { Empty } else { Inconsistent }
+ }
+ }
+}
+
+impl<T> Drop for Queue<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let mut cur = *self.tail.get();
+ while !cur.is_null() {
+ let next = (*cur).next.load(Ordering::Relaxed);
+ let _: Box<Node<T>> = Box::from_raw(cur);
+ cur = next;
+ }
+ }
+ }
+}
diff --git a/library/std/src/sync/mpsc/mpsc_queue/tests.rs b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
new file mode 100644
index 000000000..9f4f31ed0
--- /dev/null
+++ b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
@@ -0,0 +1,47 @@
+use super::{Data, Empty, Inconsistent, Queue};
+use crate::sync::mpsc::channel;
+use crate::sync::Arc;
+use crate::thread;
+
+#[test]
+fn test_full() {
+ let q: Queue<Box<_>> = Queue::new();
+ q.push(Box::new(1));
+ q.push(Box::new(2));
+}
+
+#[test]
+fn test() {
+ let nthreads = 8;
+ let nmsgs = 1000;
+ let q = Queue::new();
+ match q.pop() {
+ Empty => {}
+ Inconsistent | Data(..) => panic!(),
+ }
+ let (tx, rx) = channel();
+ let q = Arc::new(q);
+
+ for _ in 0..nthreads {
+ let tx = tx.clone();
+ let q = q.clone();
+ thread::spawn(move || {
+ for i in 0..nmsgs {
+ q.push(i);
+ }
+ tx.send(()).unwrap();
+ });
+ }
+
+ let mut i = 0;
+ while i < nthreads * nmsgs {
+ match q.pop() {
+ Empty | Inconsistent => {}
+ Data(_) => i += 1,
+ }
+ }
+ drop(tx);
+ for _ in 0..nthreads {
+ rx.recv().unwrap();
+ }
+}
diff --git a/library/std/src/sync/mpsc/oneshot.rs b/library/std/src/sync/mpsc/oneshot.rs
new file mode 100644
index 000000000..0e259b8ae
--- /dev/null
+++ b/library/std/src/sync/mpsc/oneshot.rs
@@ -0,0 +1,315 @@
+/// Oneshot channels/ports
+///
+/// This is the initial flavor of channels/ports used for the comm module. This is
+/// an optimization for the one-use case of a channel. The major optimization of
+/// this type is to have one and exactly one allocation when the chan/port pair
+/// is created.
+///
+/// Another possible optimization would be to not use an Arc box because
+/// in theory we know when the shared packet can be deallocated (no real need
+/// for the atomic reference counting), but I was having trouble figuring out
+/// how to destroy the data early in a drop of a Port.
+///
+/// # Implementation
+///
+/// Oneshots are implemented around one atomic pointer-sized state variable.
+/// This variable both indicates the state of the port/chan and stores any
+/// thread blocked on the port. All atomic operations happen on this one word.
+///
+/// In order to upgrade a oneshot channel, an upgrade is considered a disconnect
+/// on behalf of the channel side of things (it can be mentally thought of as
+/// consuming the port). This upgrade is then also stored in the shared packet.
+/// The one caveat to consider is that when a port sees a disconnected channel
+/// it must check for data because there is no "data plus upgrade" state.
+pub use self::Failure::*;
+use self::MyUpgrade::*;
+pub use self::UpgradeResult::*;
+
+use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering};
+use crate::sync::mpsc::blocking::{self, SignalToken};
+use crate::sync::mpsc::Receiver;
+use crate::time::Instant;
+
+// Various states you can find a port in.
+const EMPTY: *mut u8 = ptr::invalid_mut::<u8>(0); // initial state: no data, no blocked receiver
+const DATA: *mut u8 = ptr::invalid_mut::<u8>(1); // data ready for receiver to take
+const DISCONNECTED: *mut u8 = ptr::invalid_mut::<u8>(2); // channel is disconnected OR upgraded
+// Any other value represents a pointer to a SignalToken value. The
+// protocol ensures that when the state moves *to* a pointer,
+// ownership of the token is given to the packet, and when the state
+// moves *from* a pointer, ownership of the token is transferred to
+// whoever changed the state.
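+//
+// A rough sketch of the common transitions (illustrative only; the methods
+// below are authoritative):
+//
+//   EMPTY --send--------------> DATA --try_recv--> EMPTY
+//   EMPTY --recv (blocks)-----> <SignalToken ptr> --send--> DATA + wakeup
+//   any   --drop_chan / drop_port / upgrade--> DISCONNECTED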
+
+pub struct Packet<T> {
+ // Internal state of the chan/port pair (stores the blocked thread as well)
+ state: AtomicPtr<u8>,
+ // One-shot data slot location
+ data: UnsafeCell<Option<T>>,
+ // when used for the second time, a oneshot channel must be upgraded, and
+ // this contains the slot for the upgrade
+ upgrade: UnsafeCell<MyUpgrade<T>>,
+}
+
+pub enum Failure<T> {
+ Empty,
+ Disconnected,
+ Upgraded(Receiver<T>),
+}
+
+pub enum UpgradeResult {
+ UpSuccess,
+ UpDisconnected,
+ UpWoke(SignalToken),
+}
+
+enum MyUpgrade<T> {
+ NothingSent,
+ SendUsed,
+ GoUp(Receiver<T>),
+}
+
+impl<T> Packet<T> {
+ pub fn new() -> Packet<T> {
+ Packet {
+ data: UnsafeCell::new(None),
+ upgrade: UnsafeCell::new(NothingSent),
+ state: AtomicPtr::new(EMPTY),
+ }
+ }
+
+ pub fn send(&self, t: T) -> Result<(), T> {
+ unsafe {
+ // Sanity check
+ match *self.upgrade.get() {
+ NothingSent => {}
+ _ => panic!("sending on a oneshot that's already sent on "),
+ }
+ assert!((*self.data.get()).is_none());
+ ptr::write(self.data.get(), Some(t));
+ ptr::write(self.upgrade.get(), SendUsed);
+
+ match self.state.swap(DATA, Ordering::SeqCst) {
+ // Sent the data, no one was waiting
+ EMPTY => Ok(()),
+
+ // Couldn't send the data, the port hung up first. Return the data
+ // back up the stack.
+ DISCONNECTED => {
+ self.state.swap(DISCONNECTED, Ordering::SeqCst);
+ ptr::write(self.upgrade.get(), NothingSent);
+ Err((&mut *self.data.get()).take().unwrap())
+ }
+
+ // Not possible, these are one-use channels
+ DATA => unreachable!(),
+
+ // There is a thread waiting on the other end. We leave the 'DATA'
+ // state inside so it'll pick it up on the other end.
+ ptr => {
+ SignalToken::from_raw(ptr).signal();
+ Ok(())
+ }
+ }
+ }
+ }
+
+ // Just tests whether this channel has been sent on or not; this is only
+ // safe to use from the sender.
+ pub fn sent(&self) -> bool {
+ unsafe { !matches!(*self.upgrade.get(), NothingSent) }
+ }
+
+ pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure<T>> {
+ // Attempt to not block the thread (it's a little expensive). If it looks
+ // like we're not empty, then immediately go through to `try_recv`.
+ if self.state.load(Ordering::SeqCst) == EMPTY {
+ let (wait_token, signal_token) = blocking::tokens();
+ let ptr = unsafe { signal_token.to_raw() };
+
+ // race with senders to enter the blocking state
+ if self.state.compare_exchange(EMPTY, ptr, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
+ if let Some(deadline) = deadline {
+ let timed_out = !wait_token.wait_max_until(deadline);
+ // Try to reset the state
+ if timed_out {
+ self.abort_selection().map_err(Upgraded)?;
+ }
+ } else {
+ wait_token.wait();
+ debug_assert!(self.state.load(Ordering::SeqCst) != EMPTY);
+ }
+ } else {
+ // drop the signal token, since we never blocked
+ drop(unsafe { SignalToken::from_raw(ptr) });
+ }
+ }
+
+ self.try_recv()
+ }
+
+ pub fn try_recv(&self) -> Result<T, Failure<T>> {
+ unsafe {
+ match self.state.load(Ordering::SeqCst) {
+ EMPTY => Err(Empty),
+
+ // We saw some data on the channel, but the channel can be used
+ // again to send us an upgrade. As a result, we need to re-insert
+ // into the channel that there's no data available (otherwise we'll
+ // just see DATA next time). This is done as a cmpxchg because if
+ // the state changes under our feet we'd rather just see that state
+ // change.
+ DATA => {
+ let _ = self.state.compare_exchange(
+ DATA,
+ EMPTY,
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ );
+ match (&mut *self.data.get()).take() {
+ Some(data) => Ok(data),
+ None => unreachable!(),
+ }
+ }
+
+ // There's no guarantee that we receive before an upgrade happens,
+ // and an upgrade flags the channel as disconnected, so when we see
+ // this we first need to check if there's data available and *then*
+ // we go through and process the upgrade.
+ DISCONNECTED => match (&mut *self.data.get()).take() {
+ Some(data) => Ok(data),
+ None => match ptr::replace(self.upgrade.get(), SendUsed) {
+ SendUsed | NothingSent => Err(Disconnected),
+ GoUp(upgrade) => Err(Upgraded(upgrade)),
+ },
+ },
+
+ // We are the sole receiver; there cannot be a blocking
+ // receiver already.
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ // Returns whether the upgrade was completed. If the upgrade wasn't
+ // completed, then the port couldn't get sent to the other half (it will
+ // never receive it).
+ pub fn upgrade(&self, up: Receiver<T>) -> UpgradeResult {
+ unsafe {
+ let prev = match *self.upgrade.get() {
+ NothingSent => NothingSent,
+ SendUsed => SendUsed,
+ _ => panic!("upgrading again"),
+ };
+ ptr::write(self.upgrade.get(), GoUp(up));
+
+ match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
+ // If the channel is empty or has data on it, then we're good to go.
+ // Senders will check the data before the upgrade (in case we
+ // plastered over the DATA state).
+ DATA | EMPTY => UpSuccess,
+
+ // If the other end is already disconnected, then we failed the
+ // upgrade. Be sure to trash the port we were given.
+ DISCONNECTED => {
+ ptr::replace(self.upgrade.get(), prev);
+ UpDisconnected
+ }
+
+ // If someone's waiting, we gotta wake them up
+ ptr => UpWoke(SignalToken::from_raw(ptr)),
+ }
+ }
+ }
+
+ pub fn drop_chan(&self) {
+ match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
+ DATA | DISCONNECTED | EMPTY => {}
+
+ // If someone's waiting, we gotta wake them up
+ ptr => unsafe {
+ SignalToken::from_raw(ptr).signal();
+ },
+ }
+ }
+
+ pub fn drop_port(&self) {
+ match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
+ // An empty channel has nothing to do, and a remotely disconnected
+ // channel also has nothing to do b/c we're about to run the drop
+ // glue
+ DISCONNECTED | EMPTY => {}
+
+ // There's data on the channel, so make sure we destroy it promptly.
+ // This is why not using an arc is a little difficult (need the box
+ // to stay valid while we take the data).
+ DATA => unsafe {
+ (&mut *self.data.get()).take().unwrap();
+ },
+
+ // We're the only ones that can block on this port
+ _ => unreachable!(),
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+ // Remove a previous selecting thread from this port. This ensures that the
+ // blocked thread will no longer be visible to any other threads.
+ //
+ // The return value indicates whether there's data on this port.
+ pub fn abort_selection(&self) -> Result<bool, Receiver<T>> {
+ let state = match self.state.load(Ordering::SeqCst) {
+ // Each of these states means that no further activity will happen
+ // with regard to aborting the selection
+ s @ (EMPTY | DATA | DISCONNECTED) => s,
+
+ // If we've got a blocked thread, then use an atomic to gain ownership
+ // of it (may fail)
+ ptr => self
+ .state
+ .compare_exchange(ptr, EMPTY, Ordering::SeqCst, Ordering::SeqCst)
+ .unwrap_or_else(|x| x),
+ };
+
+ // Now that we've got ownership of our state, figure out what to do
+ // about it.
+ match state {
+ EMPTY => unreachable!(),
+ // our thread used for select was stolen
+ DATA => Ok(true),
+
+ // If the other end has hung up, then we have complete ownership
+ // of the port. First, check if there was data waiting for us. This
+ // is possible if the other end sent something and then hung up.
+ //
+ // We then need to check to see if there was an upgrade requested,
+ // and if so, the upgraded port needs to have its selection aborted.
+ DISCONNECTED => unsafe {
+ if (*self.data.get()).is_some() {
+ Ok(true)
+ } else {
+ match ptr::replace(self.upgrade.get(), SendUsed) {
+ GoUp(port) => Err(port),
+ _ => Ok(true),
+ }
+ }
+ },
+
+ // We woke ourselves up from select.
+ ptr => unsafe {
+ drop(SignalToken::from_raw(ptr));
+ Ok(false)
+ },
+ }
+ }
+}
+
+impl<T> Drop for Packet<T> {
+ fn drop(&mut self) {
+ assert_eq!(self.state.load(Ordering::SeqCst), DISCONNECTED);
+ }
+}
diff --git a/library/std/src/sync/mpsc/shared.rs b/library/std/src/sync/mpsc/shared.rs
new file mode 100644
index 000000000..51917bd96
--- /dev/null
+++ b/library/std/src/sync/mpsc/shared.rs
@@ -0,0 +1,501 @@
+/// Shared channels.
+///
+/// This is the flavor of channels which are not necessarily optimized for any
+/// particular use case, but are the most general in how they are used. Shared
+/// channels are cloneable, allowing for multiple senders.
+///
+/// High level implementation details can be found in the comment of the parent
+/// module. You'll also note that the implementation of the shared and stream
+/// channels are quite similar, and this is no coincidence!
+pub use self::Failure::*;
+use self::StartResult::*;
+
+use core::cmp;
+use core::intrinsics::abort;
+
+use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicBool, AtomicIsize, AtomicPtr, AtomicUsize, Ordering};
+use crate::sync::mpsc::blocking::{self, SignalToken};
+use crate::sync::mpsc::mpsc_queue as mpsc;
+use crate::sync::{Mutex, MutexGuard};
+use crate::thread;
+use crate::time::Instant;
+
+const DISCONNECTED: isize = isize::MIN;
+const FUDGE: isize = 1024;
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+#[cfg(test)]
+const MAX_STEALS: isize = 5;
+#[cfg(not(test))]
+const MAX_STEALS: isize = 1 << 20;
+const EMPTY: *mut u8 = ptr::null_mut(); // initial state: no data, no blocked receiver
+
+pub struct Packet<T> {
+ queue: mpsc::Queue<T>,
+ cnt: AtomicIsize, // How many items are on this channel
+ steals: UnsafeCell<isize>, // How many times has a port received without blocking?
+ to_wake: AtomicPtr<u8>, // SignalToken for wake up
+
+ // The number of channels which are currently using this packet.
+ channels: AtomicUsize,
+
+ // See the discussion in Port::drop and the channel send methods for what
+ // these are used for
+ port_dropped: AtomicBool,
+ sender_drain: AtomicIsize,
+
+ // this lock protects various portions of this implementation during
+ // select()
+ select_lock: Mutex<()>,
+}
+
+pub enum Failure {
+ Empty,
+ Disconnected,
+}
+
+#[derive(PartialEq, Eq)]
+enum StartResult {
+ Installed,
+ Abort,
+}
+
+impl<T> Packet<T> {
+ // Creation of a packet *must* be followed by a call to postinit_lock
+ // and later by inherit_blocker
+ pub fn new() -> Packet<T> {
+ Packet {
+ queue: mpsc::Queue::new(),
+ cnt: AtomicIsize::new(0),
+ steals: UnsafeCell::new(0),
+ to_wake: AtomicPtr::new(EMPTY),
+ channels: AtomicUsize::new(2),
+ port_dropped: AtomicBool::new(false),
+ sender_drain: AtomicIsize::new(0),
+ select_lock: Mutex::new(()),
+ }
+ }
+
+ // This function should be used after the newly created Packet
+ // has been wrapped with an Arc.
+ // Otherwise the mutex data would be duplicated while cloning,
+ // which could cause problems on platforms where it is
+ // represented by an opaque data structure.
+ pub fn postinit_lock(&self) -> MutexGuard<'_, ()> {
+ self.select_lock.lock().unwrap()
+ }
+
+ // This function is used at the creation of a shared packet to inherit a
+ // previously blocked thread. This is done to prevent spurious wakeups of
+ // threads in select().
+ //
+ // This can only be called at channel-creation time
+ pub fn inherit_blocker(&self, token: Option<SignalToken>, guard: MutexGuard<'_, ()>) {
+ if let Some(token) = token {
+ assert_eq!(self.cnt.load(Ordering::SeqCst), 0);
+ assert_eq!(self.to_wake.load(Ordering::SeqCst), EMPTY);
+ self.to_wake.store(unsafe { token.to_raw() }, Ordering::SeqCst);
+ self.cnt.store(-1, Ordering::SeqCst);
+
+ // This store is a little sketchy. What's happening here is that
+ // we're transferring a blocker from a oneshot or stream channel to
+ // this shared channel. In doing so, we never spuriously wake them
+ // up and rather only wake them up at the appropriate time. This
+ // implementation of shared channels assumes that any blocking
+ // recv() will undo the increment of steals performed in try_recv()
+ // once the recv is complete. This thread that we're inheriting,
+ // however, is not in the middle of recv. Hence, the first time we
+ // wake them up, they're going to wake up from their old port, move
+ // on to the upgraded port, and then call the block recv() function.
+ //
+ // When calling this function, they'll find there's data immediately
+ // available, counting it as a steal. This in fact wasn't a steal
+ // because we appropriately blocked them waiting for data.
+ //
+ // To offset this bad increment, we initially set the steal count to
+ // -1. You'll find some special code in abort_selection() as well to
+ // ensure that this -1 steal count doesn't escape too far.
+ unsafe {
+ *self.steals.get() = -1;
+ }
+ }
+
+ // When the shared packet is constructed, we grabbed this lock. The
+ // purpose of this lock is to ensure that abort_selection() doesn't
+ // interfere with this method. After we unlock this lock, we're
+ // signifying that we're done modifying self.cnt and self.to_wake and
+ // the port is ready for the world to continue using it.
+ drop(guard);
+ }
+
+ pub fn send(&self, t: T) -> Result<(), T> {
+ // See Port::drop for what's going on
+ if self.port_dropped.load(Ordering::SeqCst) {
+ return Err(t);
+ }
+
+ // Note that the multiple sender case is a little trickier
+ // semantically than the single sender case. The logic for
+ // incrementing is "add and if disconnected store disconnected".
+ // This could end up leading some senders to believe that there
+ // wasn't a disconnect if in fact there was a disconnect. This means
+ // that while one thread is attempting to re-store the disconnected
+ // states, other threads could walk through merrily incrementing
+ // this very-negative disconnected count. To prevent senders from
+ // spuriously attempting to send when the channel is actually
+ // disconnected, the count has a ranged check here.
+ //
+ // This is also done for another reason. Remember that the return
+ // value of this function is:
+ //
+ // `true` == the data *may* be received, this essentially has no
+ // meaning
+ // `false` == the data will *never* be received, this has a lot of
+ // meaning
+ //
+ // In the SPSC case, we have a check of 'queue.is_empty()' to see
+ // whether the data was actually received, but this same condition
+ // means nothing in a multi-producer context. As a result, this
+ // preflight check serves as the definitive "this will never be
+ // received". Once we get beyond this check, we have permanently
+ // entered the realm of "this may be received"
+ if self.cnt.load(Ordering::SeqCst) < DISCONNECTED + FUDGE {
+ return Err(t);
+ }
+
+ self.queue.push(t);
+ match self.cnt.fetch_add(1, Ordering::SeqCst) {
+ -1 => {
+ self.take_to_wake().signal();
+ }
+
+ // In this case, we have possibly failed to send our data, and
+ // we need to consider re-popping the data in order to fully
+ // destroy it. We must arbitrate among the multiple senders,
+ // however, because the queues that we're using are
+ // single-consumer queues. In order to do this, all exiting
+ // pushers will use an atomic count in order to count those
+ // flowing through. Pushers who see 0 are required to drain as
+ // much as possible, and then can only exit when they are the
+ // only pusher (otherwise they must try again).
+ n if n < DISCONNECTED + FUDGE => {
+ // see the comment in 'try' for a shared channel for why this
+ // window of "not disconnected" is ok.
+ self.cnt.store(DISCONNECTED, Ordering::SeqCst);
+
+ if self.sender_drain.fetch_add(1, Ordering::SeqCst) == 0 {
+ loop {
+ // drain the queue, for info on the thread yield see the
+ // discussion in try_recv
+ loop {
+ match self.queue.pop() {
+ mpsc::Data(..) => {}
+ mpsc::Empty => break,
+ mpsc::Inconsistent => thread::yield_now(),
+ }
+ }
+ // maybe we're done, if we're not the last ones
+ // here, then we need to go try again.
+ if self.sender_drain.fetch_sub(1, Ordering::SeqCst) == 1 {
+ break;
+ }
+ }
+
+ // At this point, there may still be data on the queue,
+ // but only if the count hasn't been incremented and
+ // some other sender hasn't finished pushing data just
+ // yet. That sender in question will drain its own data.
+ }
+ }
+
+ // Can't make any assumptions about this case like in the SPSC case.
+ _ => {}
+ }
+
+ Ok(())
+ }
+
+ pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure> {
+ // This code is essentially the exact same as that found in the stream
+ // case (see stream.rs)
+ match self.try_recv() {
+ Err(Empty) => {}
+ data => return data,
+ }
+
+ let (wait_token, signal_token) = blocking::tokens();
+ if self.decrement(signal_token) == Installed {
+ if let Some(deadline) = deadline {
+ let timed_out = !wait_token.wait_max_until(deadline);
+ if timed_out {
+ self.abort_selection(false);
+ }
+ } else {
+ wait_token.wait();
+ }
+ }
+
+ match self.try_recv() {
+ data @ Ok(..) => unsafe {
+ *self.steals.get() -= 1;
+ data
+ },
+ data => data,
+ }
+ }
+
+ // Essentially the exact same thing as the stream decrement function.
+ // Returns true if blocking should proceed.
+ fn decrement(&self, token: SignalToken) -> StartResult {
+ unsafe {
+ assert_eq!(
+ self.to_wake.load(Ordering::SeqCst),
+ EMPTY,
+ "This is a known bug in the Rust standard library. See https://github.com/rust-lang/rust/issues/39364"
+ );
+ let ptr = token.to_raw();
+ self.to_wake.store(ptr, Ordering::SeqCst);
+
+ let steals = ptr::replace(self.steals.get(), 0);
+
+ match self.cnt.fetch_sub(1 + steals, Ordering::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, Ordering::SeqCst);
+ }
+ // If we factor in our steals and notice that the channel has no
+ // data, we successfully sleep
+ n => {
+ assert!(n >= 0);
+ if n - steals <= 0 {
+ return Installed;
+ }
+ }
+ }
+
+ self.to_wake.store(EMPTY, Ordering::SeqCst);
+ drop(SignalToken::from_raw(ptr));
+ Abort
+ }
+ }
+
+ pub fn try_recv(&self) -> Result<T, Failure> {
+ let ret = match self.queue.pop() {
+ mpsc::Data(t) => Some(t),
+ mpsc::Empty => None,
+
+ // This is a bit of an interesting case. The channel is reported as
+ // having data available, but our pop() has failed due to the queue
+ // being in an inconsistent state. This means that there is some
+ // pusher somewhere which has yet to complete, but we are guaranteed
+ // that a pop will eventually succeed. In this case, we spin in a
+ // yield loop because the remote sender should finish their enqueue
+ // operation "very quickly".
+ //
+ // Avoiding this yield loop would require a different queue
+ // abstraction which provides the guarantee that after M pushes have
+ // succeeded, at least M pops will succeed. The current queues
+ // guarantee that if there are N active pushes, you can pop N times
+ // once all N have finished.
+ mpsc::Inconsistent => {
+ let data;
+ loop {
+ thread::yield_now();
+ match self.queue.pop() {
+ mpsc::Data(t) => {
+ data = t;
+ break;
+ }
+ mpsc::Empty => panic!("inconsistent => empty"),
+ mpsc::Inconsistent => {}
+ }
+ }
+ Some(data)
+ }
+ };
+ match ret {
+ // See the discussion in the stream implementation for why we
+ // might decrement steals.
+ Some(data) => unsafe {
+ if *self.steals.get() > MAX_STEALS {
+ match self.cnt.swap(0, Ordering::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, Ordering::SeqCst);
+ }
+ n => {
+ let m = cmp::min(n, *self.steals.get());
+ *self.steals.get() -= m;
+ self.bump(n - m);
+ }
+ }
+ assert!(*self.steals.get() >= 0);
+ }
+ *self.steals.get() += 1;
+ Ok(data)
+ },
+
+ // See the discussion in the stream implementation for why we try
+ // again.
+ None => {
+ match self.cnt.load(Ordering::SeqCst) {
+ n if n != DISCONNECTED => Err(Empty),
+ _ => {
+ match self.queue.pop() {
+ mpsc::Data(t) => Ok(t),
+ mpsc::Empty => Err(Disconnected),
+ // with no senders, an inconsistency is impossible.
+ mpsc::Inconsistent => unreachable!(),
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Prepares this shared packet for a channel clone, essentially just bumping
+ // a refcount.
+ pub fn clone_chan(&self) {
+ let old_count = self.channels.fetch_add(1, Ordering::SeqCst);
+
+ // See comments on Arc::clone() on why we do this (for `mem::forget`).
+ if old_count > MAX_REFCOUNT {
+ abort();
+ }
+ }
+
+ // Decrement the reference count on a channel. This is called whenever a
+ // Chan is dropped and may end up waking up a receiver. It's the receiver's
+ // responsibility on the other end to figure out that we've disconnected.
+ pub fn drop_chan(&self) {
+ match self.channels.fetch_sub(1, Ordering::SeqCst) {
+ 1 => {}
+ n if n > 1 => return,
+ n => panic!("bad number of channels left {n}"),
+ }
+
+ match self.cnt.swap(DISCONNECTED, Ordering::SeqCst) {
+ -1 => {
+ self.take_to_wake().signal();
+ }
+ DISCONNECTED => {}
+ n => {
+ assert!(n >= 0);
+ }
+ }
+ }
+
+ // See the long discussion inside of stream.rs for why the queue is drained,
+ // and why it is done in this fashion.
+ pub fn drop_port(&self) {
+ self.port_dropped.store(true, Ordering::SeqCst);
+ let mut steals = unsafe { *self.steals.get() };
+ while {
+ match self.cnt.compare_exchange(
+ steals,
+ DISCONNECTED,
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ ) {
+ Ok(_) => false,
+ Err(old) => old != DISCONNECTED,
+ }
+ } {
+ // See the discussion in 'try_recv' for why we yield
+ // control of this thread.
+ loop {
+ match self.queue.pop() {
+ mpsc::Data(..) => {
+ steals += 1;
+ }
+ mpsc::Empty | mpsc::Inconsistent => break,
+ }
+ }
+ }
+ }
+
+ // Consumes ownership of the 'to_wake' field.
+ fn take_to_wake(&self) -> SignalToken {
+ let ptr = self.to_wake.load(Ordering::SeqCst);
+ self.to_wake.store(EMPTY, Ordering::SeqCst);
+ assert!(ptr != EMPTY);
+ unsafe { SignalToken::from_raw(ptr) }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+ // increment the count on the channel (used for selection)
+ fn bump(&self, amt: isize) -> isize {
+ match self.cnt.fetch_add(amt, Ordering::SeqCst) {
+ DISCONNECTED => {
+ self.cnt.store(DISCONNECTED, Ordering::SeqCst);
+ DISCONNECTED
+ }
+ n => n,
+ }
+ }
+
+ // Cancels a previous thread waiting on this port, returning whether there's
+ // data on the port.
+ //
+ // This is similar to the stream implementation (hence fewer comments), but
+ // uses a different value for the "steals" variable.
+ pub fn abort_selection(&self, _was_upgrade: bool) -> bool {
+ // Before we do anything else, we bounce on this lock. The reason for
+ // doing this is to ensure that any upgrade-in-progress is gone and
+ // done with. Without this bounce, we can race with inherit_blocker
+ // about looking at and dealing with to_wake. Once we have acquired the
+ // lock, we are guaranteed that inherit_blocker is done.
+ {
+ let _guard = self.select_lock.lock().unwrap();
+ }
+
+ // Like the stream implementation, we want to make sure that the count
+ // on the channel goes non-negative. We don't know how negative the
+ // stream currently is, so instead of using a steal value of 1, we load
+ // the channel count and figure out what we should do to make it
+ // positive.
+ let steals = {
+ let cnt = self.cnt.load(Ordering::SeqCst);
+ if cnt < 0 && cnt != DISCONNECTED { -cnt } else { 0 }
+ };
+ let prev = self.bump(steals + 1);
+
+ if prev == DISCONNECTED {
+ assert_eq!(self.to_wake.load(Ordering::SeqCst), EMPTY);
+ true
+ } else {
+ let cur = prev + steals + 1;
+ assert!(cur >= 0);
+ if prev < 0 {
+ drop(self.take_to_wake());
+ } else {
+ while self.to_wake.load(Ordering::SeqCst) != EMPTY {
+ thread::yield_now();
+ }
+ }
+ unsafe {
+ // if the number of steals is -1, it was the pre-emptive -1 steal
+ // count from when we inherited a blocker. This is fine because
+ // we're just going to overwrite it with a real value.
+ let old = self.steals.get();
+ assert!(*old == 0 || *old == -1);
+ *old = steals;
+ prev >= 0
+ }
+ }
+ }
+}
+
+impl<T> Drop for Packet<T> {
+ fn drop(&mut self) {
+ // Note that this load is not only an assert for correctness about
+ // disconnection, but also a proper fence before the read of
+ // `to_wake`, so this assert cannot be removed with also removing
+ // the `to_wake` assert.
+ assert_eq!(self.cnt.load(Ordering::SeqCst), DISCONNECTED);
+ assert_eq!(self.to_wake.load(Ordering::SeqCst), EMPTY);
+ assert_eq!(self.channels.load(Ordering::SeqCst), 0);
+ }
+}
diff --git a/library/std/src/sync/mpsc/spsc_queue.rs b/library/std/src/sync/mpsc/spsc_queue.rs
new file mode 100644
index 000000000..7e745eb31
--- /dev/null
+++ b/library/std/src/sync/mpsc/spsc_queue.rs
@@ -0,0 +1,236 @@
+//! A single-producer single-consumer concurrent queue
+//!
+//! This module contains the implementation of an SPSC queue which can be used
+//! concurrently between two threads. This data structure is safe to use
+//! provided that there is only one pusher and one popper at a time.
+
+// https://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use core::cell::UnsafeCell;
+use core::ptr;
+
+use crate::boxed::Box;
+use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+
+use super::cache_aligned::CacheAligned;
+
+// Node within the linked list queue of messages to send
+struct Node<T> {
+ // FIXME: this could be an uninitialized T if we're careful enough, and
+ // that would reduce memory usage (and be a bit faster).
+ // is it worth it?
+ value: Option<T>, // nullable for re-use of nodes
+ cached: bool, // This node goes into the node cache
+ next: AtomicPtr<Node<T>>, // next node in the queue
+}
+
+/// The single-producer single-consumer queue. This structure is not cloneable,
+/// but it can be safely shared in an Arc if it is guaranteed that there
+/// is only one popper and one pusher touching the queue at any one point in
+/// time.
+pub struct Queue<T, ProducerAddition = (), ConsumerAddition = ()> {
+ // consumer fields
+ consumer: CacheAligned<Consumer<T, ConsumerAddition>>,
+
+ // producer fields
+ producer: CacheAligned<Producer<T, ProducerAddition>>,
+}
+
+struct Consumer<T, Addition> {
+ tail: UnsafeCell<*mut Node<T>>, // where to pop from
+ tail_prev: AtomicPtr<Node<T>>, // previous tail, from which the producer reclaims nodes
+ cache_bound: usize, // maximum cache size
+ cached_nodes: AtomicUsize, // number of nodes marked as cacheable
+ addition: Addition,
+}
+
+struct Producer<T, Addition> {
+ head: UnsafeCell<*mut Node<T>>, // where to push to
+ first: UnsafeCell<*mut Node<T>>, // where to get new nodes from
+ tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail
+ addition: Addition,
+}
+
+unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> {}
+
+unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> {}
+
+impl<T> Node<T> {
+ fn new() -> *mut Node<T> {
+ Box::into_raw(box Node {
+ value: None,
+ cached: false,
+ next: AtomicPtr::new(ptr::null_mut::<Node<T>>()),
+ })
+ }
+}
+
+impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> {
+ /// Creates a new queue with the given additional elements in the producer
+ /// and consumer portions of the queue.
+ ///
+ /// Due to the performance implications of cache contention,
+ /// we wish to keep fields used mainly by the producer on a separate cache
+ /// line from those used by the consumer.
+ /// Since cache lines are usually 64 bytes, it is unreasonably expensive to
+ /// allocate one for small fields, so we allow users to insert additional
+ /// fields into the cache lines already allocated by this queue for the producer
+ /// and consumer.
+ ///
+ /// This is unsafe as the type system doesn't enforce a single
+ /// consumer-producer relationship. It also allows the consumer to `pop`
+ /// items while there is a `peek` active due to all methods having a
+ /// non-mutable receiver.
+ ///
+ /// # Arguments
+ ///
+ /// * `bound` - This queue is implemented with a linked list, which means
+ /// that a push always allocates a node. In order to amortize
+ /// this cost, an internal cache of nodes is maintained to
+ /// prevent an allocation from always being necessary. This
+ /// bound is the limit on the size of the cache (if desired).
+ /// If the value is 0, then the cache has no bound. Otherwise,
+ /// the cache will never grow larger than `bound` (although the
+ /// queue itself could be much larger). See the example below.
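+ ///
+ /// # Examples
+ ///
+ /// A sketch of constructing and using the queue (illustrative only; this type
+ /// is internal to `std`, and the caller must uphold the single-producer,
+ /// single-consumer contract):
+ ///
+ /// ```ignore
+ /// // Cache up to 128 nodes; no extra producer/consumer fields are attached.
+ /// let queue = unsafe { Queue::with_additions(128, (), ()) };
+ /// queue.push("hello");
+ /// assert_eq!(queue.pop(), Some("hello"));
+ /// assert_eq!(queue.pop(), None);
+ /// ```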
+ pub unsafe fn with_additions(
+ bound: usize,
+ producer_addition: ProducerAddition,
+ consumer_addition: ConsumerAddition,
+ ) -> Self {
+ let n1 = Node::new();
+ let n2 = Node::new();
+ (*n1).next.store(n2, Ordering::Relaxed);
+ Queue {
+ consumer: CacheAligned::new(Consumer {
+ tail: UnsafeCell::new(n2),
+ tail_prev: AtomicPtr::new(n1),
+ cache_bound: bound,
+ cached_nodes: AtomicUsize::new(0),
+ addition: consumer_addition,
+ }),
+ producer: CacheAligned::new(Producer {
+ head: UnsafeCell::new(n2),
+ first: UnsafeCell::new(n1),
+ tail_copy: UnsafeCell::new(n1),
+ addition: producer_addition,
+ }),
+ }
+ }
+
+ /// Pushes a new value onto this queue. Note that to use this function
+ /// safely, it must be externally guaranteed that there is only one pusher.
+ pub fn push(&self, t: T) {
+ unsafe {
+ // Acquire a node (which either uses a cached one or allocates a new
+ // one), and then append this to the 'head' node.
+ let n = self.alloc();
+ assert!((*n).value.is_none());
+ (*n).value = Some(t);
+ (*n).next.store(ptr::null_mut(), Ordering::Relaxed);
+ (**self.producer.head.get()).next.store(n, Ordering::Release);
+ *(&self.producer.head).get() = n;
+ }
+ }
+
+ unsafe fn alloc(&self) -> *mut Node<T> {
+ // First try to see if we can consume the 'first' node for our uses.
+ if *self.producer.first.get() != *self.producer.tail_copy.get() {
+ let ret = *self.producer.first.get();
+ *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
+ return ret;
+ }
+ // If the above fails, then update our copy of the tail and try
+ // again.
+ *self.producer.0.tail_copy.get() = self.consumer.tail_prev.load(Ordering::Acquire);
+ if *self.producer.first.get() != *self.producer.tail_copy.get() {
+ let ret = *self.producer.first.get();
+ *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
+ return ret;
+ }
+ // If all of that fails, then we have to allocate a new node
+ // (there's nothing in the node cache).
+ Node::new()
+ }
+
+ /// Attempts to pop a value from this queue. Remember that to use this type
+ /// safely you must ensure that there is only one popper at a time.
+ pub fn pop(&self) -> Option<T> {
+ unsafe {
+ // The `tail` node is not actually a used node, but rather a
+ // sentinel from where we should start popping from. Hence, look at
+ // tail's next field and see if we can use it. If we do a pop, then
+ // the current tail node is a candidate for going into the cache.
+ let tail = *self.consumer.tail.get();
+ let next = (*tail).next.load(Ordering::Acquire);
+ if next.is_null() {
+ return None;
+ }
+ assert!((*next).value.is_some());
+ let ret = (*next).value.take();
+
+ *self.consumer.0.tail.get() = next;
+ if self.consumer.cache_bound == 0 {
+ self.consumer.tail_prev.store(tail, Ordering::Release);
+ } else {
+ let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed);
+ if cached_nodes < self.consumer.cache_bound && !(*tail).cached {
+ self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed);
+ (*tail).cached = true;
+ }
+
+ if (*tail).cached {
+ self.consumer.tail_prev.store(tail, Ordering::Release);
+ } else {
+ (*self.consumer.tail_prev.load(Ordering::Relaxed))
+ .next
+ .store(next, Ordering::Relaxed);
+ // We have successfully erased all references to 'tail', so
+ // now we can safely drop it.
+ let _: Box<Node<T>> = Box::from_raw(tail);
+ }
+ }
+ ret
+ }
+ }
+
+ /// Attempts to peek at the head of the queue, returning `None` if the queue
+ /// has no data currently.
+ ///
+ /// # Warning
+ /// The reference returned is invalid if it is not used before the consumer
+ /// pops the value off the queue. If the producer then pushes another value
+ /// onto the queue, it will overwrite the value pointed to by the reference.
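+ ///
+ /// For illustration (sketch only; this type is internal to `std`):
+ ///
+ /// ```ignore
+ /// let queue = unsafe { Queue::with_additions(0, (), ()) };
+ /// queue.push(1);
+ ///
+ /// // Peeking does not remove the value; it is still there for the next pop.
+ /// if let Some(value) = queue.peek() {
+ ///     assert_eq!(*value, 1);
+ /// }
+ /// assert_eq!(queue.pop(), Some(1));
+ /// ```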
+ pub fn peek(&self) -> Option<&mut T> {
+ // This is essentially the same as above with all the popping bits
+ // stripped out.
+ unsafe {
+ let tail = *self.consumer.tail.get();
+ let next = (*tail).next.load(Ordering::Acquire);
+ if next.is_null() { None } else { (*next).value.as_mut() }
+ }
+ }
+
+ pub fn producer_addition(&self) -> &ProducerAddition {
+ &self.producer.addition
+ }
+
+ pub fn consumer_addition(&self) -> &ConsumerAddition {
+ &self.consumer.addition
+ }
+}
+
+impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> {
+ fn drop(&mut self) {
+ unsafe {
+ let mut cur = *self.producer.first.get();
+ while !cur.is_null() {
+ let next = (*cur).next.load(Ordering::Relaxed);
+ let _n: Box<Node<T>> = Box::from_raw(cur);
+ cur = next;
+ }
+ }
+ }
+}
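A hedged usage sketch of the queue above (hypothetical code, assuming the module context of spsc_queue.rs and the one-pusher/one-popper contract documented on `push` and `pop`): the `ProducerAddition` and `ConsumerAddition` type parameters let a wrapper keep per-endpoint state next to the queue and reach it through `producer_addition()` / `consumer_addition()`, which is how stream.rs below uses them.

use crate::sync::atomic::{AtomicUsize, Ordering};

fn addition_sketch() {
    // One counter lives on the producer side, one on the consumer side.
    // `with_additions` is unsafe because the caller promises the SPSC contract.
    let q = unsafe { Queue::with_additions(0, AtomicUsize::new(0), AtomicUsize::new(0)) };

    q.push("hello");
    q.producer_addition().fetch_add(1, Ordering::Relaxed); // record a push

    assert_eq!(q.pop(), Some("hello"));
    q.consumer_addition().fetch_add(1, Ordering::Relaxed); // record a pop
}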
diff --git a/library/std/src/sync/mpsc/spsc_queue/tests.rs b/library/std/src/sync/mpsc/spsc_queue/tests.rs
new file mode 100644
index 000000000..467ef3dbd
--- /dev/null
+++ b/library/std/src/sync/mpsc/spsc_queue/tests.rs
@@ -0,0 +1,101 @@
+use super::Queue;
+use crate::sync::mpsc::channel;
+use crate::sync::Arc;
+use crate::thread;
+
+#[test]
+fn smoke() {
+ unsafe {
+ let queue = Queue::with_additions(0, (), ());
+ queue.push(1);
+ queue.push(2);
+ assert_eq!(queue.pop(), Some(1));
+ assert_eq!(queue.pop(), Some(2));
+ assert_eq!(queue.pop(), None);
+ queue.push(3);
+ queue.push(4);
+ assert_eq!(queue.pop(), Some(3));
+ assert_eq!(queue.pop(), Some(4));
+ assert_eq!(queue.pop(), None);
+ }
+}
+
+#[test]
+fn peek() {
+ unsafe {
+ let queue = Queue::with_additions(0, (), ());
+ queue.push(vec![1]);
+
+        // Ensure the borrow checker works
+ match queue.peek() {
+ Some(vec) => {
+ assert_eq!(&*vec, &[1]);
+ }
+ None => unreachable!(),
+ }
+
+ match queue.pop() {
+ Some(vec) => {
+ assert_eq!(&*vec, &[1]);
+ }
+ None => unreachable!(),
+ }
+ }
+}
+
+#[test]
+fn drop_full() {
+ unsafe {
+ let q: Queue<Box<_>> = Queue::with_additions(0, (), ());
+ q.push(Box::new(1));
+ q.push(Box::new(2));
+ }
+}
+
+#[test]
+fn smoke_bound() {
+ unsafe {
+ let q = Queue::with_additions(0, (), ());
+ q.push(1);
+ q.push(2);
+ assert_eq!(q.pop(), Some(1));
+ assert_eq!(q.pop(), Some(2));
+ assert_eq!(q.pop(), None);
+ q.push(3);
+ q.push(4);
+ assert_eq!(q.pop(), Some(3));
+ assert_eq!(q.pop(), Some(4));
+ assert_eq!(q.pop(), None);
+ }
+}
+
+#[test]
+fn stress() {
+ unsafe {
+ stress_bound(0);
+ stress_bound(1);
+ }
+
+ unsafe fn stress_bound(bound: usize) {
+ let q = Arc::new(Queue::with_additions(bound, (), ()));
+
+ let (tx, rx) = channel();
+ let q2 = q.clone();
+ let _t = thread::spawn(move || {
+ for _ in 0..100000 {
+ loop {
+ match q2.pop() {
+ Some(1) => break,
+ Some(_) => panic!(),
+ None => {}
+ }
+ }
+ }
+ tx.send(()).unwrap();
+ });
+ for _ in 0..100000 {
+ q.push(1);
+ }
+ rx.recv().unwrap();
+ }
+}
diff --git a/library/std/src/sync/mpsc/stream.rs b/library/std/src/sync/mpsc/stream.rs
new file mode 100644
index 000000000..4c3812c79
--- /dev/null
+++ b/library/std/src/sync/mpsc/stream.rs
@@ -0,0 +1,457 @@
+/// Stream channels
+///
+/// This is the flavor of channels which are optimized for one sender and one
+/// receiver. The sender will be upgraded to a shared channel if the channel is
+/// cloned.
+///
+/// High level implementation details can be found in the comment of the parent
+/// module.
+pub use self::Failure::*;
+use self::Message::*;
+pub use self::UpgradeResult::*;
+
+use core::cmp;
+
+use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::thread;
+use crate::time::Instant;
+
+use crate::sync::atomic::{AtomicBool, AtomicIsize, AtomicPtr, Ordering};
+use crate::sync::mpsc::blocking::{self, SignalToken};
+use crate::sync::mpsc::spsc_queue as spsc;
+use crate::sync::mpsc::Receiver;
+
+const DISCONNECTED: isize = isize::MIN;
+#[cfg(test)]
+const MAX_STEALS: isize = 5;
+#[cfg(not(test))]
+const MAX_STEALS: isize = 1 << 20;
+const EMPTY: *mut u8 = ptr::null_mut(); // initial state: no data, no blocked receiver
+
+pub struct Packet<T> {
+ // internal queue for all messages
+ queue: spsc::Queue<Message<T>, ProducerAddition, ConsumerAddition>,
+}
+
+struct ProducerAddition {
+ cnt: AtomicIsize, // How many items are on this channel
+ to_wake: AtomicPtr<u8>, // SignalToken for the blocked thread to wake up
+
+ port_dropped: AtomicBool, // flag if the channel has been destroyed.
+}
+
+struct ConsumerAddition {
+ steals: UnsafeCell<isize>, // How many times has a port received without blocking?
+}
+
+pub enum Failure<T> {
+ Empty,
+ Disconnected,
+ Upgraded(Receiver<T>),
+}
+
+pub enum UpgradeResult {
+ UpSuccess,
+ UpDisconnected,
+ UpWoke(SignalToken),
+}
+
+// Any message could contain an "upgrade request" to a new shared port, so the
+// internal queue is not a queue of T, but rather of Message<T>
+enum Message<T> {
+ Data(T),
+ GoUp(Receiver<T>),
+}
+
+impl<T> Packet<T> {
+ pub fn new() -> Packet<T> {
+ Packet {
+ queue: unsafe {
+ spsc::Queue::with_additions(
+ 128,
+ ProducerAddition {
+ cnt: AtomicIsize::new(0),
+ to_wake: AtomicPtr::new(EMPTY),
+
+ port_dropped: AtomicBool::new(false),
+ },
+ ConsumerAddition { steals: UnsafeCell::new(0) },
+ )
+ },
+ }
+ }
+
+ pub fn send(&self, t: T) -> Result<(), T> {
+        // If the other port has deterministically gone away, then we definitely
+        // must return the data back up the stack. Otherwise, the data is
+ // considered as being sent.
+ if self.queue.producer_addition().port_dropped.load(Ordering::SeqCst) {
+ return Err(t);
+ }
+
+ match self.do_send(Data(t)) {
+ UpSuccess | UpDisconnected => {}
+ UpWoke(token) => {
+ token.signal();
+ }
+ }
+ Ok(())
+ }
+
+ pub fn upgrade(&self, up: Receiver<T>) -> UpgradeResult {
+ // If the port has gone away, then there's no need to proceed any
+ // further.
+ if self.queue.producer_addition().port_dropped.load(Ordering::SeqCst) {
+ return UpDisconnected;
+ }
+
+ self.do_send(GoUp(up))
+ }
+
+ fn do_send(&self, t: Message<T>) -> UpgradeResult {
+ self.queue.push(t);
+ match self.queue.producer_addition().cnt.fetch_add(1, Ordering::SeqCst) {
+ // As described in the mod's doc comment, -1 == wakeup
+ -1 => UpWoke(self.take_to_wake()),
+            // As described before, SPSC queues must be >= -2
+ -2 => UpSuccess,
+
+ // Be sure to preserve the disconnected state, and the return value
+ // in this case is going to be whether our data was received or not.
+            // This manifests itself in whether we have an empty queue or not.
+            //
+            // Primarily, we are required to drain the queue here because the port
+ // will never remove this data. We can only have at most one item to
+ // drain (the port drains the rest).
+ DISCONNECTED => {
+ self.queue.producer_addition().cnt.store(DISCONNECTED, Ordering::SeqCst);
+ let first = self.queue.pop();
+ let second = self.queue.pop();
+ assert!(second.is_none());
+
+ match first {
+ Some(..) => UpSuccess, // we failed to send the data
+ None => UpDisconnected, // we successfully sent data
+ }
+ }
+
+ // Otherwise we just sent some data on a non-waiting queue, so just
+ // make sure the world is sane and carry on!
+ n => {
+ assert!(n >= 0);
+ UpSuccess
+ }
+ }
+ }
+
+ // Consumes ownership of the 'to_wake' field.
+ fn take_to_wake(&self) -> SignalToken {
+ let ptr = self.queue.producer_addition().to_wake.load(Ordering::SeqCst);
+ self.queue.producer_addition().to_wake.store(EMPTY, Ordering::SeqCst);
+ assert!(ptr != EMPTY);
+ unsafe { SignalToken::from_raw(ptr) }
+ }
+
+ // Decrements the count on the channel for a sleeper, returning the sleeper
+ // back if it shouldn't sleep. Note that this is the location where we take
+ // steals into account.
+ fn decrement(&self, token: SignalToken) -> Result<(), SignalToken> {
+ assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
+ let ptr = unsafe { token.to_raw() };
+ self.queue.producer_addition().to_wake.store(ptr, Ordering::SeqCst);
+
+ let steals = unsafe { ptr::replace(self.queue.consumer_addition().steals.get(), 0) };
+
+ match self.queue.producer_addition().cnt.fetch_sub(1 + steals, Ordering::SeqCst) {
+ DISCONNECTED => {
+ self.queue.producer_addition().cnt.store(DISCONNECTED, Ordering::SeqCst);
+ }
+ // If we factor in our steals and notice that the channel has no
+ // data, we successfully sleep
+ n => {
+ assert!(n >= 0);
+ if n - steals <= 0 {
+ return Ok(());
+ }
+ }
+ }
+
+ self.queue.producer_addition().to_wake.store(EMPTY, Ordering::SeqCst);
+ Err(unsafe { SignalToken::from_raw(ptr) })
+ }
+
+ pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure<T>> {
+ // Optimistic preflight check (scheduling is expensive).
+ match self.try_recv() {
+ Err(Empty) => {}
+ data => return data,
+ }
+
+ // Welp, our channel has no data. Deschedule the current thread and
+ // initiate the blocking protocol.
+ let (wait_token, signal_token) = blocking::tokens();
+ if self.decrement(signal_token).is_ok() {
+ if let Some(deadline) = deadline {
+ let timed_out = !wait_token.wait_max_until(deadline);
+ if timed_out {
+ self.abort_selection(/* was_upgrade = */ false).map_err(Upgraded)?;
+ }
+ } else {
+ wait_token.wait();
+ }
+ }
+
+ match self.try_recv() {
+ // Messages which actually popped from the queue shouldn't count as
+ // a steal, so offset the decrement here (we already have our
+ // "steal" factored into the channel count above).
+ data @ (Ok(..) | Err(Upgraded(..))) => unsafe {
+ *self.queue.consumer_addition().steals.get() -= 1;
+ data
+ },
+
+ data => data,
+ }
+ }
+
+ pub fn try_recv(&self) -> Result<T, Failure<T>> {
+ match self.queue.pop() {
+ // If we stole some data, record to that effect (this will be
+ // factored into cnt later on).
+ //
+ // Note that we don't allow steals to grow without bound in order to
+ // prevent eventual overflow of either steals or cnt as an overflow
+ // would have catastrophic results. Sometimes, steals > cnt, but
+ // other times cnt > steals, so we don't know the relation between
+ // steals and cnt. This code path is executed only rarely, so we do
+ // a pretty slow operation, of swapping 0 into cnt, taking steals
+ // down as much as possible (without going negative), and then
+ // adding back in whatever we couldn't factor into steals.
+ Some(data) => unsafe {
+ if *self.queue.consumer_addition().steals.get() > MAX_STEALS {
+ match self.queue.producer_addition().cnt.swap(0, Ordering::SeqCst) {
+ DISCONNECTED => {
+ self.queue
+ .producer_addition()
+ .cnt
+ .store(DISCONNECTED, Ordering::SeqCst);
+ }
+ n => {
+ let m = cmp::min(n, *self.queue.consumer_addition().steals.get());
+ *self.queue.consumer_addition().steals.get() -= m;
+ self.bump(n - m);
+ }
+ }
+ assert!(*self.queue.consumer_addition().steals.get() >= 0);
+ }
+ *self.queue.consumer_addition().steals.get() += 1;
+ match data {
+ Data(t) => Ok(t),
+ GoUp(up) => Err(Upgraded(up)),
+ }
+ },
+
+ None => {
+ match self.queue.producer_addition().cnt.load(Ordering::SeqCst) {
+ n if n != DISCONNECTED => Err(Empty),
+
+ // This is a little bit of a tricky case. We failed to pop
+ // data above, and then we have viewed that the channel is
+ // disconnected. In this window more data could have been
+ // sent on the channel. It doesn't really make sense to
+ // return that the channel is disconnected when there's
+ // actually data on it, so be extra sure there's no data by
+ // popping one more time.
+ //
+ // We can ignore steals because the other end is
+ // disconnected and we'll never need to really factor in our
+ // steals again.
+ _ => match self.queue.pop() {
+ Some(Data(t)) => Ok(t),
+ Some(GoUp(up)) => Err(Upgraded(up)),
+ None => Err(Disconnected),
+ },
+ }
+ }
+ }
+ }
+
+ pub fn drop_chan(&self) {
+ // Dropping a channel is pretty simple, we just flag it as disconnected
+        // and then wake up a blocker if there is one.
+ match self.queue.producer_addition().cnt.swap(DISCONNECTED, Ordering::SeqCst) {
+ -1 => {
+ self.take_to_wake().signal();
+ }
+ DISCONNECTED => {}
+ n => {
+ assert!(n >= 0);
+ }
+ }
+ }
+
+ pub fn drop_port(&self) {
+ // Dropping a port seems like a fairly trivial thing. In theory all we
+ // need to do is flag that we're disconnected and then everything else
+ // can take over (we don't have anyone to wake up).
+ //
+ // The catch for Ports is that we want to drop the entire contents of
+ // the queue. There are multiple reasons for having this property, the
+ // largest of which is that if another chan is waiting in this channel
+ // (but not received yet), then waiting on that port will cause a
+ // deadlock.
+ //
+ // So if we accept that we must now destroy the entire contents of the
+ // queue, this code may make a bit more sense. The tricky part is that
+ // we can't let any in-flight sends go un-dropped, we have to make sure
+ // *everything* is dropped and nothing new will come onto the channel.
+
+ // The first thing we do is set a flag saying that we're done for. All
+ // sends are gated on this flag, so we're immediately guaranteed that
+ // there are a bounded number of active sends that we'll have to deal
+ // with.
+ self.queue.producer_addition().port_dropped.store(true, Ordering::SeqCst);
+
+ // Now that we're guaranteed to deal with a bounded number of senders,
+ // we need to drain the queue. This draining process happens atomically
+ // with respect to the "count" of the channel. If the count is nonzero
+ // (with steals taken into account), then there must be data on the
+ // channel. In this case we drain everything and then try again. We will
+ // continue to fail while active senders send data while we're dropping
+ // data, but eventually we're guaranteed to break out of this loop
+ // (because there is a bounded number of senders).
+ let mut steals = unsafe { *self.queue.consumer_addition().steals.get() };
+ while {
+ match self.queue.producer_addition().cnt.compare_exchange(
+ steals,
+ DISCONNECTED,
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ ) {
+ Ok(_) => false,
+ Err(old) => old != DISCONNECTED,
+ }
+ } {
+ while self.queue.pop().is_some() {
+ steals += 1;
+ }
+ }
+
+ // At this point in time, we have gated all future senders from sending,
+ // and we have flagged the channel as being disconnected. The senders
+ // still have some responsibility, however, because some sends might not
+ // complete until after we flag the disconnection. There are more
+        // details in the sending methods that see DISCONNECTED.
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // select implementation
+ ////////////////////////////////////////////////////////////////////////////
+
+ // increment the count on the channel (used for selection)
+ fn bump(&self, amt: isize) -> isize {
+ match self.queue.producer_addition().cnt.fetch_add(amt, Ordering::SeqCst) {
+ DISCONNECTED => {
+ self.queue.producer_addition().cnt.store(DISCONNECTED, Ordering::SeqCst);
+ DISCONNECTED
+ }
+ n => n,
+ }
+ }
+
+ // Removes a previous thread from being blocked in this port
+ pub fn abort_selection(&self, was_upgrade: bool) -> Result<bool, Receiver<T>> {
+ // If we're aborting selection after upgrading from a oneshot, then
+        // we're guaranteed that no one is waiting. The only way that we could
+ // have seen the upgrade is if data was actually sent on the channel
+ // half again. For us, this means that there is guaranteed to be data on
+ // this channel. Furthermore, we're guaranteed that there was no
+ // start_selection previously, so there's no need to modify `self.cnt`
+ // at all.
+ //
+ // Hence, because of these invariants, we immediately return `Ok(true)`.
+ // Note that the data might not actually be sent on the channel just yet.
+ // The other end could have flagged the upgrade but not sent data to
+        // this end. This is fine because we know it's a small bounded window
+ // of time until the data is actually sent.
+ if was_upgrade {
+ assert_eq!(unsafe { *self.queue.consumer_addition().steals.get() }, 0);
+ assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
+ return Ok(true);
+ }
+
+ // We want to make sure that the count on the channel goes non-negative,
+ // and in the stream case we can have at most one steal, so just assume
+ // that we had one steal.
+ let steals = 1;
+ let prev = self.bump(steals + 1);
+
+ // If we were previously disconnected, then we know for sure that there
+ // is no thread in to_wake, so just keep going
+ let has_data = if prev == DISCONNECTED {
+ assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
+ true // there is data, that data is that we're disconnected
+ } else {
+ let cur = prev + steals + 1;
+ assert!(cur >= 0);
+
+ // If the previous count was negative, then we just made things go
+ // positive, hence we passed the -1 boundary and we're responsible
+ // for removing the to_wake() field and trashing it.
+ //
+ // If the previous count was positive then we're in a tougher
+ // situation. A possible race is that a sender just incremented
+ // through -1 (meaning it's going to try to wake a thread up), but it
+ // hasn't yet read the to_wake. In order to prevent a future recv()
+ // from waking up too early (this sender picking up the plastered
+ // over to_wake), we spin loop here waiting for to_wake to be 0.
+ // Note that this entire select() implementation needs an overhaul,
+ // and this is *not* the worst part of it, so this is not done as a
+ // final solution but rather out of necessity for now to get
+ // something working.
+ if prev < 0 {
+ drop(self.take_to_wake());
+ } else {
+ while self.queue.producer_addition().to_wake.load(Ordering::SeqCst) != EMPTY {
+ thread::yield_now();
+ }
+ }
+ unsafe {
+ assert_eq!(*self.queue.consumer_addition().steals.get(), 0);
+ *self.queue.consumer_addition().steals.get() = steals;
+ }
+
+ // if we were previously positive, then there's surely data to
+ // receive
+ prev >= 0
+ };
+
+ // Now that we've determined that this queue "has data", we peek at the
+ // queue to see if the data is an upgrade or not. If it's an upgrade,
+ // then we need to destroy this port and abort selection on the
+ // upgraded port.
+ if has_data {
+ match self.queue.peek() {
+ Some(&mut GoUp(..)) => match self.queue.pop() {
+ Some(GoUp(port)) => Err(port),
+ _ => unreachable!(),
+ },
+ _ => Ok(true),
+ }
+ } else {
+ Ok(false)
+ }
+ }
+}
+
+impl<T> Drop for Packet<T> {
+ fn drop(&mut self) {
+ // Note that this load is not only an assert for correctness about
+ // disconnection, but also a proper fence before the read of
+        // `to_wake`, so this assert cannot be removed without also removing
+ // the `to_wake` assert.
+ assert_eq!(self.queue.producer_addition().cnt.load(Ordering::SeqCst), DISCONNECTED);
+ assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
+ }
+}
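To make the counter protocol above easier to follow, here is a hedged lifecycle sketch (hypothetical code, assuming the module context of stream.rs); it only calls methods defined in this file, and the state descriptions in the comments restate what `do_send`, `drop_chan`, and `drop_port` document.

fn packet_lifecycle_sketch() {
    let p = Packet::new();

    // A send bumps `cnt` from 0 to 1; nobody is blocked, so no wakeup happens.
    p.send(7).unwrap();

    // try_recv pops the message and records one "steal" on the consumer side.
    assert!(matches!(p.try_recv(), Ok(7)));

    // The sender going away swaps `cnt` to DISCONNECTED (and would signal a
    // blocked receiver via `take_to_wake` if `cnt` had been -1).
    p.drop_chan();

    // The receiver going away sets `port_dropped` and drains any leftover
    // messages; by this point `cnt` is DISCONNECTED and `to_wake` is EMPTY,
    // which is what the Drop impl asserts.
    p.drop_port();
}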
diff --git a/library/std/src/sync/mpsc/sync.rs b/library/std/src/sync/mpsc/sync.rs
new file mode 100644
index 000000000..733761671
--- /dev/null
+++ b/library/std/src/sync/mpsc/sync.rs
@@ -0,0 +1,495 @@
+use self::Blocker::*;
+/// Synchronous channels/ports
+///
+/// This channel implementation differs significantly from the asynchronous
+/// implementations found next to it (oneshot/stream/share). This is an
+/// implementation of a synchronous, bounded buffer channel.
+///
+/// Each channel is created with some amount of backing buffer, and sends will
+/// *block* until buffer space becomes available. A buffer size of 0 is valid,
+/// which means that every successful send is paired with a successful recv.
+///
+/// This flavor of channels defines a new `send_opt` method for channels, which
+/// sends a message without panicking the thread if it cannot be delivered.
+///
+/// Another major difference is that send() will *always* return back the data
+/// if it couldn't be sent. This is because it is deterministically known when
+/// the data is received and when it is not received.
+///
+/// Implementation-wise, it can all be summed up with "use a mutex plus some
+/// logic". The mutex used here is an OS native mutex, meaning that no user code
+/// is run inside of the mutex (to prevent context switching). This
+/// implementation shares almost all code for the buffered and unbuffered cases
+/// of a synchronous channel. There are a few branches for the unbuffered case,
+/// but they're mostly just relevant to blocking senders.
+pub use self::Failure::*;
+
+use core::intrinsics::abort;
+use core::mem;
+use core::ptr;
+
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::mpsc::blocking::{self, SignalToken, WaitToken};
+use crate::sync::{Mutex, MutexGuard};
+use crate::time::Instant;
+
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+pub struct Packet<T> {
+ /// Only field outside of the mutex. Just done for kicks, but mainly because
+ /// the other shared channel already had the code implemented
+ channels: AtomicUsize,
+
+ lock: Mutex<State<T>>,
+}
+
+unsafe impl<T: Send> Send for Packet<T> {}
+
+unsafe impl<T: Send> Sync for Packet<T> {}
+
+struct State<T> {
+ disconnected: bool, // Is the channel disconnected yet?
+ queue: Queue, // queue of senders waiting to send data
+ blocker: Blocker, // currently blocked thread on this channel
+ buf: Buffer<T>, // storage for buffered messages
+ cap: usize, // capacity of this channel
+
+ /// A curious flag used to indicate whether a sender failed or succeeded in
+ /// blocking. This is used to transmit information back to the thread that it
+ /// must dequeue its message from the buffer because it was not received.
+ /// This is only relevant in the 0-buffer case. This obviously cannot be
+ /// safely constructed, but it's guaranteed to always have a valid pointer
+ /// value.
+ canceled: Option<&'static mut bool>,
+}
+
+unsafe impl<T: Send> Send for State<T> {}
+
+/// Possible flavors of threads who can be blocked on this channel.
+enum Blocker {
+ BlockedSender(SignalToken),
+ BlockedReceiver(SignalToken),
+ NoneBlocked,
+}
+
+/// Simple queue for threading threads together. Nodes are stack-allocated, so
+/// this structure is not safe at all
+struct Queue {
+ head: *mut Node,
+ tail: *mut Node,
+}
+
+struct Node {
+ token: Option<SignalToken>,
+ next: *mut Node,
+}
+
+unsafe impl Send for Node {}
+
+/// A simple ring-buffer
+struct Buffer<T> {
+ buf: Vec<Option<T>>,
+ start: usize,
+ size: usize,
+}
+
+#[derive(Debug)]
+pub enum Failure {
+ Empty,
+ Disconnected,
+}
+
+/// Atomically blocks the current thread, placing it into `slot`, unlocking `lock`
+/// in the meantime. This re-locks the mutex upon returning.
+fn wait<'a, 'b, T>(
+ lock: &'a Mutex<State<T>>,
+ mut guard: MutexGuard<'b, State<T>>,
+ f: fn(SignalToken) -> Blocker,
+) -> MutexGuard<'a, State<T>> {
+ let (wait_token, signal_token) = blocking::tokens();
+ match mem::replace(&mut guard.blocker, f(signal_token)) {
+ NoneBlocked => {}
+ _ => unreachable!(),
+ }
+ drop(guard); // unlock
+ wait_token.wait(); // block
+ lock.lock().unwrap() // relock
+}
+
+/// Same as wait, but waiting at most until `deadline`.
+fn wait_timeout_receiver<'a, 'b, T>(
+ lock: &'a Mutex<State<T>>,
+ deadline: Instant,
+ mut guard: MutexGuard<'b, State<T>>,
+ success: &mut bool,
+) -> MutexGuard<'a, State<T>> {
+ let (wait_token, signal_token) = blocking::tokens();
+ match mem::replace(&mut guard.blocker, BlockedReceiver(signal_token)) {
+ NoneBlocked => {}
+ _ => unreachable!(),
+ }
+ drop(guard); // unlock
+ *success = wait_token.wait_max_until(deadline); // block
+ let mut new_guard = lock.lock().unwrap(); // relock
+ if !*success {
+ abort_selection(&mut new_guard);
+ }
+ new_guard
+}
+
+fn abort_selection<T>(guard: &mut MutexGuard<'_, State<T>>) -> bool {
+ match mem::replace(&mut guard.blocker, NoneBlocked) {
+ NoneBlocked => true,
+ BlockedSender(token) => {
+ guard.blocker = BlockedSender(token);
+ true
+ }
+ BlockedReceiver(token) => {
+ drop(token);
+ false
+ }
+ }
+}
+
+/// Wakes up a thread, dropping the lock at the correct time
+fn wakeup<T>(token: SignalToken, guard: MutexGuard<'_, State<T>>) {
+ // We need to be careful to wake up the waiting thread *outside* of the mutex
+ // in case it incurs a context switch.
+ drop(guard);
+ token.signal();
+}
+
+impl<T> Packet<T> {
+ pub fn new(capacity: usize) -> Packet<T> {
+ Packet {
+ channels: AtomicUsize::new(1),
+ lock: Mutex::new(State {
+ disconnected: false,
+ blocker: NoneBlocked,
+ cap: capacity,
+ canceled: None,
+ queue: Queue { head: ptr::null_mut(), tail: ptr::null_mut() },
+ buf: Buffer {
+ buf: (0..capacity + if capacity == 0 { 1 } else { 0 }).map(|_| None).collect(),
+ start: 0,
+ size: 0,
+ },
+ }),
+ }
+ }
+
+ // wait until a send slot is available, returning locked access to
+ // the channel state.
+ fn acquire_send_slot(&self) -> MutexGuard<'_, State<T>> {
+ let mut node = Node { token: None, next: ptr::null_mut() };
+ loop {
+ let mut guard = self.lock.lock().unwrap();
+ // are we ready to go?
+ if guard.disconnected || guard.buf.size() < guard.buf.capacity() {
+ return guard;
+ }
+ // no room; actually block
+ let wait_token = guard.queue.enqueue(&mut node);
+ drop(guard);
+ wait_token.wait();
+ }
+ }
+
+ pub fn send(&self, t: T) -> Result<(), T> {
+ let mut guard = self.acquire_send_slot();
+ if guard.disconnected {
+ return Err(t);
+ }
+ guard.buf.enqueue(t);
+
+ match mem::replace(&mut guard.blocker, NoneBlocked) {
+ // if our capacity is 0, then we need to wait for a receiver to be
+ // available to take our data. After waiting, we check again to make
+ // sure the port didn't go away in the meantime. If it did, we need
+ // to hand back our data.
+ NoneBlocked if guard.cap == 0 => {
+ let mut canceled = false;
+ assert!(guard.canceled.is_none());
+ guard.canceled = Some(unsafe { mem::transmute(&mut canceled) });
+ let mut guard = wait(&self.lock, guard, BlockedSender);
+ if canceled { Err(guard.buf.dequeue()) } else { Ok(()) }
+ }
+
+ // success, we buffered some data
+ NoneBlocked => Ok(()),
+
+ // success, someone's about to receive our buffered data.
+ BlockedReceiver(token) => {
+ wakeup(token, guard);
+ Ok(())
+ }
+
+ BlockedSender(..) => panic!("lolwut"),
+ }
+ }
+
+ pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
+ let mut guard = self.lock.lock().unwrap();
+ if guard.disconnected {
+ Err(super::TrySendError::Disconnected(t))
+ } else if guard.buf.size() == guard.buf.capacity() {
+ Err(super::TrySendError::Full(t))
+ } else if guard.cap == 0 {
+ // With capacity 0, even though we have buffer space we can't
+ // transfer the data unless there's a receiver waiting.
+ match mem::replace(&mut guard.blocker, NoneBlocked) {
+ NoneBlocked => Err(super::TrySendError::Full(t)),
+ BlockedSender(..) => unreachable!(),
+ BlockedReceiver(token) => {
+ guard.buf.enqueue(t);
+ wakeup(token, guard);
+ Ok(())
+ }
+ }
+ } else {
+ // If the buffer has some space and the capacity isn't 0, then we
+ // just enqueue the data for later retrieval, ensuring to wake up
+ // any blocked receiver if there is one.
+ assert!(guard.buf.size() < guard.buf.capacity());
+ guard.buf.enqueue(t);
+ match mem::replace(&mut guard.blocker, NoneBlocked) {
+ BlockedReceiver(token) => wakeup(token, guard),
+ NoneBlocked => {}
+ BlockedSender(..) => unreachable!(),
+ }
+ Ok(())
+ }
+ }
+
+ // Receives a message from this channel
+ //
+ // When reading this, remember that there can only ever be one receiver at
+    // a time.
+ pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure> {
+ let mut guard = self.lock.lock().unwrap();
+
+ let mut woke_up_after_waiting = false;
+ // Wait for the buffer to have something in it. No need for a
+ // while loop because we're the only receiver.
+ if !guard.disconnected && guard.buf.size() == 0 {
+ if let Some(deadline) = deadline {
+ guard =
+ wait_timeout_receiver(&self.lock, deadline, guard, &mut woke_up_after_waiting);
+ } else {
+ guard = wait(&self.lock, guard, BlockedReceiver);
+ woke_up_after_waiting = true;
+ }
+ }
+
+ // N.B., channel could be disconnected while waiting, so the order of
+ // these conditionals is important.
+ if guard.disconnected && guard.buf.size() == 0 {
+ return Err(Disconnected);
+ }
+
+ // Pick up the data, wake up our neighbors, and carry on
+ assert!(guard.buf.size() > 0 || (deadline.is_some() && !woke_up_after_waiting));
+
+ if guard.buf.size() == 0 {
+ return Err(Empty);
+ }
+
+ let ret = guard.buf.dequeue();
+ self.wakeup_senders(woke_up_after_waiting, guard);
+ Ok(ret)
+ }
+
+ pub fn try_recv(&self) -> Result<T, Failure> {
+ let mut guard = self.lock.lock().unwrap();
+
+ // Easy cases first
+ if guard.disconnected && guard.buf.size() == 0 {
+ return Err(Disconnected);
+ }
+ if guard.buf.size() == 0 {
+ return Err(Empty);
+ }
+
+ // Be sure to wake up neighbors
+ let ret = Ok(guard.buf.dequeue());
+ self.wakeup_senders(false, guard);
+ ret
+ }
+
+ // Wake up pending senders after some data has been received
+ //
+ // * `waited` - flag if the receiver blocked to receive some data, or if it
+ // just picked up some data on the way out
+ // * `guard` - the lock guard that is held over this channel's lock
+ fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<'_, State<T>>) {
+ let pending_sender1: Option<SignalToken> = guard.queue.dequeue();
+
+ // If this is a no-buffer channel (cap == 0), then if we didn't wait we
+ // need to ACK the sender. If we waited, then the sender waking us up
+ // was already the ACK.
+ let pending_sender2 = if guard.cap == 0 && !waited {
+ match mem::replace(&mut guard.blocker, NoneBlocked) {
+ NoneBlocked => None,
+ BlockedReceiver(..) => unreachable!(),
+ BlockedSender(token) => {
+ guard.canceled.take();
+ Some(token)
+ }
+ }
+ } else {
+ None
+ };
+ mem::drop(guard);
+
+ // only outside of the lock do we wake up the pending threads
+ if let Some(token) = pending_sender1 {
+ token.signal();
+ }
+ if let Some(token) = pending_sender2 {
+ token.signal();
+ }
+ }
+
+ // Prepares this shared packet for a channel clone, essentially just bumping
+ // a refcount.
+ pub fn clone_chan(&self) {
+ let old_count = self.channels.fetch_add(1, Ordering::SeqCst);
+
+ // See comments on Arc::clone() on why we do this (for `mem::forget`).
+ if old_count > MAX_REFCOUNT {
+ abort();
+ }
+ }
+
+ pub fn drop_chan(&self) {
+ // Only flag the channel as disconnected if we're the last channel
+ match self.channels.fetch_sub(1, Ordering::SeqCst) {
+ 1 => {}
+ _ => return,
+ }
+
+ // Not much to do other than wake up a receiver if one's there
+ let mut guard = self.lock.lock().unwrap();
+ if guard.disconnected {
+ return;
+ }
+ guard.disconnected = true;
+ match mem::replace(&mut guard.blocker, NoneBlocked) {
+ NoneBlocked => {}
+ BlockedSender(..) => unreachable!(),
+ BlockedReceiver(token) => wakeup(token, guard),
+ }
+ }
+
+ pub fn drop_port(&self) {
+ let mut guard = self.lock.lock().unwrap();
+
+ if guard.disconnected {
+ return;
+ }
+ guard.disconnected = true;
+
+ // If the capacity is 0, then the sender may want its data back after
+ // we're disconnected. Otherwise it's now our responsibility to destroy
+ // the buffered data. As with many other portions of this code, this
+ // needs to be careful to destroy the data *outside* of the lock to
+ // prevent deadlock.
+ let _data = if guard.cap != 0 { mem::take(&mut guard.buf.buf) } else { Vec::new() };
+ let mut queue =
+ mem::replace(&mut guard.queue, Queue { head: ptr::null_mut(), tail: ptr::null_mut() });
+
+ let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) {
+ NoneBlocked => None,
+ BlockedSender(token) => {
+ *guard.canceled.take().unwrap() = true;
+ Some(token)
+ }
+ BlockedReceiver(..) => unreachable!(),
+ };
+ mem::drop(guard);
+
+ while let Some(token) = queue.dequeue() {
+ token.signal();
+ }
+ if let Some(token) = waiter {
+ token.signal();
+ }
+ }
+}
+
+impl<T> Drop for Packet<T> {
+ fn drop(&mut self) {
+ assert_eq!(self.channels.load(Ordering::SeqCst), 0);
+ let mut guard = self.lock.lock().unwrap();
+ assert!(guard.queue.dequeue().is_none());
+ assert!(guard.canceled.is_none());
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Buffer, a simple ring buffer backed by Vec<T>
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Buffer<T> {
+ fn enqueue(&mut self, t: T) {
+ let pos = (self.start + self.size) % self.buf.len();
+ self.size += 1;
+ let prev = mem::replace(&mut self.buf[pos], Some(t));
+ assert!(prev.is_none());
+ }
+
+ fn dequeue(&mut self) -> T {
+ let start = self.start;
+ self.size -= 1;
+ self.start = (self.start + 1) % self.buf.len();
+ let result = &mut self.buf[start];
+ result.take().unwrap()
+ }
+
+ fn size(&self) -> usize {
+ self.size
+ }
+ fn capacity(&self) -> usize {
+ self.buf.len()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Queue, a simple queue to enqueue threads with (stack-allocated nodes)
+////////////////////////////////////////////////////////////////////////////////
+
+impl Queue {
+ fn enqueue(&mut self, node: &mut Node) -> WaitToken {
+ let (wait_token, signal_token) = blocking::tokens();
+ node.token = Some(signal_token);
+ node.next = ptr::null_mut();
+
+ if self.tail.is_null() {
+ self.head = node as *mut Node;
+ self.tail = node as *mut Node;
+ } else {
+ unsafe {
+ (*self.tail).next = node as *mut Node;
+ self.tail = node as *mut Node;
+ }
+ }
+
+ wait_token
+ }
+
+ fn dequeue(&mut self) -> Option<SignalToken> {
+ if self.head.is_null() {
+ return None;
+ }
+ let node = self.head;
+ self.head = unsafe { (*node).next };
+ if self.head.is_null() {
+ self.tail = ptr::null_mut();
+ }
+ unsafe {
+ (*node).next = ptr::null_mut();
+ Some((*node).token.take().unwrap())
+ }
+ }
+}
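The buffered-versus-rendezvous distinction from the module comment shows up directly in `try_send`. A hedged sketch (hypothetical code, assuming the module context of sync.rs; the public-API equivalents are exercised in sync_tests.rs below):

fn capacity_sketch() {
    // Capacity 1: one message can be buffered without a receiver waiting.
    let buffered = Packet::new(1);
    assert!(buffered.try_send(7).is_ok());
    assert!(matches!(buffered.try_recv(), Ok(7)));
    buffered.drop_chan(); // release the last sender so Packet's Drop asserts hold

    // Capacity 0: a rendezvous channel. With no receiver blocked in `recv`,
    // `try_send` reports Full and hands the value back.
    let rendezvous = Packet::new(0);
    assert!(matches!(rendezvous.try_send(7), Err(super::TrySendError::Full(7))));
    rendezvous.drop_chan();
}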
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
new file mode 100644
index 000000000..e58649bab
--- /dev/null
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -0,0 +1,647 @@
+use super::*;
+use crate::env;
+use crate::thread;
+use crate::time::Duration;
+
+pub fn stress_factor() -> usize {
+ match env::var("RUST_TEST_STRESS") {
+ Ok(val) => val.parse().unwrap(),
+ Err(..) => 1,
+ }
+}
+
+#[test]
+fn smoke() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ tx.send(1).unwrap();
+ assert_eq!(rx.recv().unwrap(), 1);
+}
+
+#[test]
+fn drop_full() {
+ let (tx, _rx) = sync_channel::<Box<isize>>(1);
+ tx.send(Box::new(1)).unwrap();
+}
+
+#[test]
+fn smoke_shared() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ tx.send(1).unwrap();
+ assert_eq!(rx.recv().unwrap(), 1);
+ let tx = tx.clone();
+ tx.send(1).unwrap();
+ assert_eq!(rx.recv().unwrap(), 1);
+}
+
+#[test]
+fn recv_timeout() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Err(RecvTimeoutError::Timeout));
+ tx.send(1).unwrap();
+ assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(1));
+}
+
+#[test]
+fn smoke_threads() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let _t = thread::spawn(move || {
+ tx.send(1).unwrap();
+ });
+ assert_eq!(rx.recv().unwrap(), 1);
+}
+
+#[test]
+fn smoke_port_gone() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ drop(rx);
+ assert!(tx.send(1).is_err());
+}
+
+#[test]
+fn smoke_shared_port_gone2() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ drop(rx);
+ let tx2 = tx.clone();
+ drop(tx);
+ assert!(tx2.send(1).is_err());
+}
+
+#[test]
+fn port_gone_concurrent() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let _t = thread::spawn(move || {
+ rx.recv().unwrap();
+ });
+ while tx.send(1).is_ok() {}
+}
+
+#[test]
+fn port_gone_concurrent_shared() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let tx2 = tx.clone();
+ let _t = thread::spawn(move || {
+ rx.recv().unwrap();
+ });
+ while tx.send(1).is_ok() && tx2.send(1).is_ok() {}
+}
+
+#[test]
+fn smoke_chan_gone() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ drop(tx);
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn smoke_chan_gone_shared() {
+ let (tx, rx) = sync_channel::<()>(0);
+ let tx2 = tx.clone();
+ drop(tx);
+ drop(tx2);
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn chan_gone_concurrent() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ thread::spawn(move || {
+ tx.send(1).unwrap();
+ tx.send(1).unwrap();
+ });
+ while rx.recv().is_ok() {}
+}
+
+#[test]
+fn stress() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ thread::spawn(move || {
+ for _ in 0..10000 {
+ tx.send(1).unwrap();
+ }
+ });
+ for _ in 0..10000 {
+ assert_eq!(rx.recv().unwrap(), 1);
+ }
+}
+
+#[test]
+fn stress_recv_timeout_two_threads() {
+ let (tx, rx) = sync_channel::<i32>(0);
+
+ thread::spawn(move || {
+ for _ in 0..10000 {
+ tx.send(1).unwrap();
+ }
+ });
+
+ let mut recv_count = 0;
+ loop {
+ match rx.recv_timeout(Duration::from_millis(1)) {
+ Ok(v) => {
+ assert_eq!(v, 1);
+ recv_count += 1;
+ }
+ Err(RecvTimeoutError::Timeout) => continue,
+ Err(RecvTimeoutError::Disconnected) => break,
+ }
+ }
+
+ assert_eq!(recv_count, 10000);
+}
+
+#[test]
+fn stress_recv_timeout_shared() {
+ const AMT: u32 = 1000;
+ const NTHREADS: u32 = 8;
+ let (tx, rx) = sync_channel::<i32>(0);
+ let (dtx, drx) = sync_channel::<()>(0);
+
+ thread::spawn(move || {
+ let mut recv_count = 0;
+ loop {
+ match rx.recv_timeout(Duration::from_millis(10)) {
+ Ok(v) => {
+ assert_eq!(v, 1);
+ recv_count += 1;
+ }
+ Err(RecvTimeoutError::Timeout) => continue,
+ Err(RecvTimeoutError::Disconnected) => break,
+ }
+ }
+
+ assert_eq!(recv_count, AMT * NTHREADS);
+ assert!(rx.try_recv().is_err());
+
+ dtx.send(()).unwrap();
+ });
+
+ for _ in 0..NTHREADS {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ for _ in 0..AMT {
+ tx.send(1).unwrap();
+ }
+ });
+ }
+
+ drop(tx);
+
+ drx.recv().unwrap();
+}
+
+#[test]
+fn stress_shared() {
+ const AMT: u32 = 1000;
+ const NTHREADS: u32 = 8;
+ let (tx, rx) = sync_channel::<i32>(0);
+ let (dtx, drx) = sync_channel::<()>(0);
+
+ thread::spawn(move || {
+ for _ in 0..AMT * NTHREADS {
+ assert_eq!(rx.recv().unwrap(), 1);
+ }
+ match rx.try_recv() {
+ Ok(..) => panic!(),
+ _ => {}
+ }
+ dtx.send(()).unwrap();
+ });
+
+ for _ in 0..NTHREADS {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ for _ in 0..AMT {
+ tx.send(1).unwrap();
+ }
+ });
+ }
+ drop(tx);
+ drx.recv().unwrap();
+}
+
+#[test]
+fn oneshot_single_thread_close_port_first() {
+ // Simple test of closing without sending
+ let (_tx, rx) = sync_channel::<i32>(0);
+ drop(rx);
+}
+
+#[test]
+fn oneshot_single_thread_close_chan_first() {
+ // Simple test of closing without sending
+ let (tx, _rx) = sync_channel::<i32>(0);
+ drop(tx);
+}
+
+#[test]
+fn oneshot_single_thread_send_port_close() {
+ // Testing that the sender cleans up the payload if receiver is closed
+ let (tx, rx) = sync_channel::<Box<i32>>(0);
+ drop(rx);
+ assert!(tx.send(Box::new(0)).is_err());
+}
+
+#[test]
+fn oneshot_single_thread_recv_chan_close() {
+ // Receiving on a closed chan will panic
+ let res = thread::spawn(move || {
+ let (tx, rx) = sync_channel::<i32>(0);
+ drop(tx);
+ rx.recv().unwrap();
+ })
+ .join();
+ // What is our res?
+ assert!(res.is_err());
+}
+
+#[test]
+fn oneshot_single_thread_send_then_recv() {
+ let (tx, rx) = sync_channel::<Box<i32>>(1);
+ tx.send(Box::new(10)).unwrap();
+ assert!(*rx.recv().unwrap() == 10);
+}
+
+#[test]
+fn oneshot_single_thread_try_send_open() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ assert_eq!(tx.try_send(10), Ok(()));
+ assert!(rx.recv().unwrap() == 10);
+}
+
+#[test]
+fn oneshot_single_thread_try_send_closed() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ drop(rx);
+ assert_eq!(tx.try_send(10), Err(TrySendError::Disconnected(10)));
+}
+
+#[test]
+fn oneshot_single_thread_try_send_closed2() {
+ let (tx, _rx) = sync_channel::<i32>(0);
+ assert_eq!(tx.try_send(10), Err(TrySendError::Full(10)));
+}
+
+#[test]
+fn oneshot_single_thread_try_recv_open() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ tx.send(10).unwrap();
+ assert!(rx.recv() == Ok(10));
+}
+
+#[test]
+fn oneshot_single_thread_try_recv_closed() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ drop(tx);
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn oneshot_single_thread_try_recv_closed_with_data() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ tx.send(10).unwrap();
+ drop(tx);
+ assert_eq!(rx.try_recv(), Ok(10));
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
+}
+
+#[test]
+fn oneshot_single_thread_peek_data() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Empty));
+ tx.send(10).unwrap();
+ assert_eq!(rx.try_recv(), Ok(10));
+}
+
+#[test]
+fn oneshot_single_thread_peek_close() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ drop(tx);
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
+}
+
+#[test]
+fn oneshot_single_thread_peek_open() {
+ let (_tx, rx) = sync_channel::<i32>(0);
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Empty));
+}
+
+#[test]
+fn oneshot_multi_task_recv_then_send() {
+ let (tx, rx) = sync_channel::<Box<i32>>(0);
+ let _t = thread::spawn(move || {
+ assert!(*rx.recv().unwrap() == 10);
+ });
+
+ tx.send(Box::new(10)).unwrap();
+}
+
+#[test]
+fn oneshot_multi_task_recv_then_close() {
+ let (tx, rx) = sync_channel::<Box<i32>>(0);
+ let _t = thread::spawn(move || {
+ drop(tx);
+ });
+ let res = thread::spawn(move || {
+ assert!(*rx.recv().unwrap() == 10);
+ })
+ .join();
+ assert!(res.is_err());
+}
+
+#[test]
+fn oneshot_multi_thread_close_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let _t = thread::spawn(move || {
+ drop(rx);
+ });
+ drop(tx);
+ }
+}
+
+#[test]
+fn oneshot_multi_thread_send_close_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let _t = thread::spawn(move || {
+ drop(rx);
+ });
+ let _ = thread::spawn(move || {
+ tx.send(1).unwrap();
+ })
+ .join();
+ }
+}
+
+#[test]
+fn oneshot_multi_thread_recv_close_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let _t = thread::spawn(move || {
+ let res = thread::spawn(move || {
+ rx.recv().unwrap();
+ })
+ .join();
+ assert!(res.is_err());
+ });
+ let _t = thread::spawn(move || {
+ thread::spawn(move || {
+ drop(tx);
+ });
+ });
+ }
+}
+
+#[test]
+fn oneshot_multi_thread_send_recv_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = sync_channel::<Box<i32>>(0);
+ let _t = thread::spawn(move || {
+ tx.send(Box::new(10)).unwrap();
+ });
+ assert!(*rx.recv().unwrap() == 10);
+ }
+}
+
+#[test]
+fn stream_send_recv_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = sync_channel::<Box<i32>>(0);
+
+ send(tx, 0);
+ recv(rx, 0);
+
+ fn send(tx: SyncSender<Box<i32>>, i: i32) {
+ if i == 10 {
+ return;
+ }
+
+ thread::spawn(move || {
+ tx.send(Box::new(i)).unwrap();
+ send(tx, i + 1);
+ });
+ }
+
+ fn recv(rx: Receiver<Box<i32>>, i: i32) {
+ if i == 10 {
+ return;
+ }
+
+ thread::spawn(move || {
+ assert!(*rx.recv().unwrap() == i);
+ recv(rx, i + 1);
+ });
+ }
+ }
+}
+
+#[test]
+fn recv_a_lot() {
+ // Regression test that we don't run out of stack in scheduler context
+ let (tx, rx) = sync_channel(10000);
+ for _ in 0..10000 {
+ tx.send(()).unwrap();
+ }
+ for _ in 0..10000 {
+ rx.recv().unwrap();
+ }
+}
+
+#[test]
+fn shared_chan_stress() {
+ let (tx, rx) = sync_channel(0);
+ let total = stress_factor() + 100;
+ for _ in 0..total {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ tx.send(()).unwrap();
+ });
+ }
+
+ for _ in 0..total {
+ rx.recv().unwrap();
+ }
+}
+
+#[test]
+fn test_nested_recv_iter() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let (total_tx, total_rx) = sync_channel::<i32>(0);
+
+ let _t = thread::spawn(move || {
+ let mut acc = 0;
+ for x in rx.iter() {
+ acc += x;
+ }
+ total_tx.send(acc).unwrap();
+ });
+
+ tx.send(3).unwrap();
+ tx.send(1).unwrap();
+ tx.send(2).unwrap();
+ drop(tx);
+ assert_eq!(total_rx.recv().unwrap(), 6);
+}
+
+#[test]
+fn test_recv_iter_break() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let (count_tx, count_rx) = sync_channel(0);
+
+ let _t = thread::spawn(move || {
+ let mut count = 0;
+ for x in rx.iter() {
+ if count >= 3 {
+ break;
+ } else {
+ count += x;
+ }
+ }
+ count_tx.send(count).unwrap();
+ });
+
+ tx.send(2).unwrap();
+ tx.send(2).unwrap();
+ tx.send(2).unwrap();
+ let _ = tx.try_send(2);
+ drop(tx);
+ assert_eq!(count_rx.recv().unwrap(), 4);
+}
+
+#[test]
+fn try_recv_states() {
+ let (tx1, rx1) = sync_channel::<i32>(1);
+ let (tx2, rx2) = sync_channel::<()>(1);
+ let (tx3, rx3) = sync_channel::<()>(1);
+ let _t = thread::spawn(move || {
+ rx2.recv().unwrap();
+ tx1.send(1).unwrap();
+ tx3.send(()).unwrap();
+ rx2.recv().unwrap();
+ drop(tx1);
+ tx3.send(()).unwrap();
+ });
+
+ assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty));
+ tx2.send(()).unwrap();
+ rx3.recv().unwrap();
+ assert_eq!(rx1.try_recv(), Ok(1));
+ assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty));
+ tx2.send(()).unwrap();
+ rx3.recv().unwrap();
+ assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected));
+}
+
+// This used to end up in a livelock inside of the Receiver destructor
+// because the internal state of the Shared packet was corrupted.
+#[test]
+fn destroy_upgraded_shared_port_when_sender_still_active() {
+ let (tx, rx) = sync_channel::<()>(0);
+ let (tx2, rx2) = sync_channel::<()>(0);
+ let _t = thread::spawn(move || {
+ rx.recv().unwrap(); // wait on a oneshot
+ drop(rx); // destroy a shared
+ tx2.send(()).unwrap();
+ });
+ // make sure the other thread has gone to sleep
+ for _ in 0..5000 {
+ thread::yield_now();
+ }
+
+ // upgrade to a shared chan and send a message
+ let t = tx.clone();
+ drop(tx);
+ t.send(()).unwrap();
+
+ // wait for the child thread to exit before we exit
+ rx2.recv().unwrap();
+}
+
+#[test]
+fn send1() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let _t = thread::spawn(move || {
+ rx.recv().unwrap();
+ });
+ assert_eq!(tx.send(1), Ok(()));
+}
+
+#[test]
+fn send2() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let _t = thread::spawn(move || {
+ drop(rx);
+ });
+ assert!(tx.send(1).is_err());
+}
+
+#[test]
+fn send3() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ assert_eq!(tx.send(1), Ok(()));
+ let _t = thread::spawn(move || {
+ drop(rx);
+ });
+ assert!(tx.send(1).is_err());
+}
+
+#[test]
+fn send4() {
+ let (tx, rx) = sync_channel::<i32>(0);
+ let tx2 = tx.clone();
+ let (done, donerx) = channel();
+ let done2 = done.clone();
+ let _t = thread::spawn(move || {
+ assert!(tx.send(1).is_err());
+ done.send(()).unwrap();
+ });
+ let _t = thread::spawn(move || {
+ assert!(tx2.send(2).is_err());
+ done2.send(()).unwrap();
+ });
+ drop(rx);
+ donerx.recv().unwrap();
+ donerx.recv().unwrap();
+}
+
+#[test]
+fn try_send1() {
+ let (tx, _rx) = sync_channel::<i32>(0);
+ assert_eq!(tx.try_send(1), Err(TrySendError::Full(1)));
+}
+
+#[test]
+fn try_send2() {
+ let (tx, _rx) = sync_channel::<i32>(1);
+ assert_eq!(tx.try_send(1), Ok(()));
+ assert_eq!(tx.try_send(1), Err(TrySendError::Full(1)));
+}
+
+#[test]
+fn try_send3() {
+ let (tx, rx) = sync_channel::<i32>(1);
+ assert_eq!(tx.try_send(1), Ok(()));
+ drop(rx);
+ assert_eq!(tx.try_send(1), Err(TrySendError::Disconnected(1)));
+}
+
+#[test]
+fn issue_15761() {
+ fn repro() {
+ let (tx1, rx1) = sync_channel::<()>(3);
+ let (tx2, rx2) = sync_channel::<()>(3);
+
+ let _t = thread::spawn(move || {
+ rx1.recv().unwrap();
+ tx2.try_send(()).unwrap();
+ });
+
+ tx1.try_send(()).unwrap();
+ rx2.recv().unwrap();
+ }
+
+ for _ in 0..100 {
+ repro()
+ }
+}
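The internal `recv` methods in this patch take an absolute `Option<Instant>` deadline, while the public `recv_timeout` used throughout these tests takes a relative `Duration`, so some conversion between the two is needed. A hedged sketch of that conversion (hypothetical helper name; presumably the overflow guard is what tests like `very_long_recv_timeout_wont_panic` below exercise):

use crate::time::{Duration, Instant};

// Turn a relative timeout into the absolute deadline expected by the internal
// `recv(deadline: Option<Instant>)` methods. `checked_add` returns None instead
// of panicking when the timeout is too large to represent as an Instant.
fn deadline_from_timeout(timeout: Duration) -> Option<Instant> {
    Instant::now().checked_add(timeout)
}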
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs
new file mode 100644
index 000000000..4deb3e596
--- /dev/null
+++ b/library/std/src/sync/mpsc/tests.rs
@@ -0,0 +1,706 @@
+use super::*;
+use crate::env;
+use crate::thread;
+use crate::time::{Duration, Instant};
+
+pub fn stress_factor() -> usize {
+ match env::var("RUST_TEST_STRESS") {
+ Ok(val) => val.parse().unwrap(),
+ Err(..) => 1,
+ }
+}
+
+#[test]
+fn smoke() {
+ let (tx, rx) = channel::<i32>();
+ tx.send(1).unwrap();
+ assert_eq!(rx.recv().unwrap(), 1);
+}
+
+#[test]
+fn drop_full() {
+ let (tx, _rx) = channel::<Box<isize>>();
+ tx.send(Box::new(1)).unwrap();
+}
+
+#[test]
+fn drop_full_shared() {
+ let (tx, _rx) = channel::<Box<isize>>();
+ drop(tx.clone());
+ drop(tx.clone());
+ tx.send(Box::new(1)).unwrap();
+}
+
+#[test]
+fn smoke_shared() {
+ let (tx, rx) = channel::<i32>();
+ tx.send(1).unwrap();
+ assert_eq!(rx.recv().unwrap(), 1);
+ let tx = tx.clone();
+ tx.send(1).unwrap();
+ assert_eq!(rx.recv().unwrap(), 1);
+}
+
+#[test]
+fn smoke_threads() {
+ let (tx, rx) = channel::<i32>();
+ let _t = thread::spawn(move || {
+ tx.send(1).unwrap();
+ });
+ assert_eq!(rx.recv().unwrap(), 1);
+}
+
+#[test]
+fn smoke_port_gone() {
+ let (tx, rx) = channel::<i32>();
+ drop(rx);
+ assert!(tx.send(1).is_err());
+}
+
+#[test]
+fn smoke_shared_port_gone() {
+ let (tx, rx) = channel::<i32>();
+ drop(rx);
+ assert!(tx.send(1).is_err())
+}
+
+#[test]
+fn smoke_shared_port_gone2() {
+ let (tx, rx) = channel::<i32>();
+ drop(rx);
+ let tx2 = tx.clone();
+ drop(tx);
+ assert!(tx2.send(1).is_err());
+}
+
+#[test]
+fn port_gone_concurrent() {
+ let (tx, rx) = channel::<i32>();
+ let _t = thread::spawn(move || {
+ rx.recv().unwrap();
+ });
+ while tx.send(1).is_ok() {}
+}
+
+#[test]
+fn port_gone_concurrent_shared() {
+ let (tx, rx) = channel::<i32>();
+ let tx2 = tx.clone();
+ let _t = thread::spawn(move || {
+ rx.recv().unwrap();
+ });
+ while tx.send(1).is_ok() && tx2.send(1).is_ok() {}
+}
+
+#[test]
+fn smoke_chan_gone() {
+ let (tx, rx) = channel::<i32>();
+ drop(tx);
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn smoke_chan_gone_shared() {
+ let (tx, rx) = channel::<()>();
+ let tx2 = tx.clone();
+ drop(tx);
+ drop(tx2);
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn chan_gone_concurrent() {
+ let (tx, rx) = channel::<i32>();
+ let _t = thread::spawn(move || {
+ tx.send(1).unwrap();
+ tx.send(1).unwrap();
+ });
+ while rx.recv().is_ok() {}
+}
+
+#[test]
+fn stress() {
+ let (tx, rx) = channel::<i32>();
+ let t = thread::spawn(move || {
+ for _ in 0..10000 {
+ tx.send(1).unwrap();
+ }
+ });
+ for _ in 0..10000 {
+ assert_eq!(rx.recv().unwrap(), 1);
+ }
+ t.join().ok().expect("thread panicked");
+}
+
+#[test]
+fn stress_shared() {
+ const AMT: u32 = 10000;
+ const NTHREADS: u32 = 8;
+ let (tx, rx) = channel::<i32>();
+
+ let t = thread::spawn(move || {
+ for _ in 0..AMT * NTHREADS {
+ assert_eq!(rx.recv().unwrap(), 1);
+ }
+ match rx.try_recv() {
+ Ok(..) => panic!(),
+ _ => {}
+ }
+ });
+
+ for _ in 0..NTHREADS {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ for _ in 0..AMT {
+ tx.send(1).unwrap();
+ }
+ });
+ }
+ drop(tx);
+ t.join().ok().expect("thread panicked");
+}
+
+#[test]
+fn send_from_outside_runtime() {
+ let (tx1, rx1) = channel::<()>();
+ let (tx2, rx2) = channel::<i32>();
+ let t1 = thread::spawn(move || {
+ tx1.send(()).unwrap();
+ for _ in 0..40 {
+ assert_eq!(rx2.recv().unwrap(), 1);
+ }
+ });
+ rx1.recv().unwrap();
+ let t2 = thread::spawn(move || {
+ for _ in 0..40 {
+ tx2.send(1).unwrap();
+ }
+ });
+ t1.join().ok().expect("thread panicked");
+ t2.join().ok().expect("thread panicked");
+}
+
+#[test]
+fn recv_from_outside_runtime() {
+ let (tx, rx) = channel::<i32>();
+ let t = thread::spawn(move || {
+ for _ in 0..40 {
+ assert_eq!(rx.recv().unwrap(), 1);
+ }
+ });
+ for _ in 0..40 {
+ tx.send(1).unwrap();
+ }
+ t.join().ok().expect("thread panicked");
+}
+
+#[test]
+fn no_runtime() {
+ let (tx1, rx1) = channel::<i32>();
+ let (tx2, rx2) = channel::<i32>();
+ let t1 = thread::spawn(move || {
+ assert_eq!(rx1.recv().unwrap(), 1);
+ tx2.send(2).unwrap();
+ });
+ let t2 = thread::spawn(move || {
+ tx1.send(1).unwrap();
+ assert_eq!(rx2.recv().unwrap(), 2);
+ });
+ t1.join().ok().expect("thread panicked");
+ t2.join().ok().expect("thread panicked");
+}
+
+#[test]
+fn oneshot_single_thread_close_port_first() {
+ // Simple test of closing without sending
+ let (_tx, rx) = channel::<i32>();
+ drop(rx);
+}
+
+#[test]
+fn oneshot_single_thread_close_chan_first() {
+ // Simple test of closing without sending
+ let (tx, _rx) = channel::<i32>();
+ drop(tx);
+}
+
+#[test]
+fn oneshot_single_thread_send_port_close() {
+ // Testing that the sender cleans up the payload if receiver is closed
+ let (tx, rx) = channel::<Box<i32>>();
+ drop(rx);
+ assert!(tx.send(Box::new(0)).is_err());
+}
+
+#[test]
+fn oneshot_single_thread_recv_chan_close() {
+ // Receiving on a closed chan will panic
+ let res = thread::spawn(move || {
+ let (tx, rx) = channel::<i32>();
+ drop(tx);
+ rx.recv().unwrap();
+ })
+ .join();
+ // What is our res?
+ assert!(res.is_err());
+}
+
+#[test]
+fn oneshot_single_thread_send_then_recv() {
+ let (tx, rx) = channel::<Box<i32>>();
+ tx.send(Box::new(10)).unwrap();
+ assert!(*rx.recv().unwrap() == 10);
+}
+
+#[test]
+fn oneshot_single_thread_try_send_open() {
+ let (tx, rx) = channel::<i32>();
+ assert!(tx.send(10).is_ok());
+ assert!(rx.recv().unwrap() == 10);
+}
+
+#[test]
+fn oneshot_single_thread_try_send_closed() {
+ let (tx, rx) = channel::<i32>();
+ drop(rx);
+ assert!(tx.send(10).is_err());
+}
+
+#[test]
+fn oneshot_single_thread_try_recv_open() {
+ let (tx, rx) = channel::<i32>();
+ tx.send(10).unwrap();
+ assert!(rx.recv() == Ok(10));
+}
+
+#[test]
+fn oneshot_single_thread_try_recv_closed() {
+ let (tx, rx) = channel::<i32>();
+ drop(tx);
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn oneshot_single_thread_peek_data() {
+ let (tx, rx) = channel::<i32>();
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Empty));
+ tx.send(10).unwrap();
+ assert_eq!(rx.try_recv(), Ok(10));
+}
+
+#[test]
+fn oneshot_single_thread_peek_close() {
+ let (tx, rx) = channel::<i32>();
+ drop(tx);
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
+}
+
+#[test]
+fn oneshot_single_thread_peek_open() {
+ let (_tx, rx) = channel::<i32>();
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Empty));
+}
+
+#[test]
+fn oneshot_multi_task_recv_then_send() {
+ let (tx, rx) = channel::<Box<i32>>();
+ let _t = thread::spawn(move || {
+ assert!(*rx.recv().unwrap() == 10);
+ });
+
+ tx.send(Box::new(10)).unwrap();
+}
+
+#[test]
+fn oneshot_multi_task_recv_then_close() {
+ let (tx, rx) = channel::<Box<i32>>();
+ let _t = thread::spawn(move || {
+ drop(tx);
+ });
+ let res = thread::spawn(move || {
+ assert!(*rx.recv().unwrap() == 10);
+ })
+ .join();
+ assert!(res.is_err());
+}
+
+#[test]
+fn oneshot_multi_thread_close_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = channel::<i32>();
+ let _t = thread::spawn(move || {
+ drop(rx);
+ });
+ drop(tx);
+ }
+}
+
+#[test]
+fn oneshot_multi_thread_send_close_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = channel::<i32>();
+ let _t = thread::spawn(move || {
+ drop(rx);
+ });
+ let _ = thread::spawn(move || {
+ tx.send(1).unwrap();
+ })
+ .join();
+ }
+}
+
+#[test]
+fn oneshot_multi_thread_recv_close_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = channel::<i32>();
+ thread::spawn(move || {
+ let res = thread::spawn(move || {
+ rx.recv().unwrap();
+ })
+ .join();
+ assert!(res.is_err());
+ });
+ let _t = thread::spawn(move || {
+ thread::spawn(move || {
+ drop(tx);
+ });
+ });
+ }
+}
+
+#[test]
+fn oneshot_multi_thread_send_recv_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = channel::<Box<isize>>();
+ let _t = thread::spawn(move || {
+ tx.send(Box::new(10)).unwrap();
+ });
+ assert!(*rx.recv().unwrap() == 10);
+ }
+}
+
+#[test]
+fn stream_send_recv_stress() {
+ for _ in 0..stress_factor() {
+ let (tx, rx) = channel();
+
+ send(tx, 0);
+ recv(rx, 0);
+
+ fn send(tx: Sender<Box<i32>>, i: i32) {
+ if i == 10 {
+ return;
+ }
+
+ thread::spawn(move || {
+ tx.send(Box::new(i)).unwrap();
+ send(tx, i + 1);
+ });
+ }
+
+ fn recv(rx: Receiver<Box<i32>>, i: i32) {
+ if i == 10 {
+ return;
+ }
+
+ thread::spawn(move || {
+ assert!(*rx.recv().unwrap() == i);
+ recv(rx, i + 1);
+ });
+ }
+ }
+}
+
+#[test]
+fn oneshot_single_thread_recv_timeout() {
+ let (tx, rx) = channel();
+ tx.send(()).unwrap();
+ assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(()));
+ assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Err(RecvTimeoutError::Timeout));
+ tx.send(()).unwrap();
+ assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(()));
+}
+
+#[test]
+fn stress_recv_timeout_two_threads() {
+ let (tx, rx) = channel();
+ let stress = stress_factor() + 100;
+ let timeout = Duration::from_millis(100);
+
+ thread::spawn(move || {
+ for i in 0..stress {
+ if i % 2 == 0 {
+ thread::sleep(timeout * 2);
+ }
+ tx.send(1usize).unwrap();
+ }
+ });
+
+ let mut recv_count = 0;
+ loop {
+ match rx.recv_timeout(timeout) {
+ Ok(n) => {
+ assert_eq!(n, 1usize);
+ recv_count += 1;
+ }
+ Err(RecvTimeoutError::Timeout) => continue,
+ Err(RecvTimeoutError::Disconnected) => break,
+ }
+ }
+
+ assert_eq!(recv_count, stress);
+}
+
+#[test]
+fn recv_timeout_upgrade() {
+ let (tx, rx) = channel::<()>();
+ let timeout = Duration::from_millis(1);
+ let _tx_clone = tx.clone();
+
+ let start = Instant::now();
+ assert_eq!(rx.recv_timeout(timeout), Err(RecvTimeoutError::Timeout));
+ assert!(Instant::now() >= start + timeout);
+}
+
+#[test]
+fn stress_recv_timeout_shared() {
+ let (tx, rx) = channel();
+ let stress = stress_factor() + 100;
+
+ for i in 0..stress {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(i as u64 * 10));
+ tx.send(1usize).unwrap();
+ });
+ }
+
+ drop(tx);
+
+ let mut recv_count = 0;
+ loop {
+ match rx.recv_timeout(Duration::from_millis(10)) {
+ Ok(n) => {
+ assert_eq!(n, 1usize);
+ recv_count += 1;
+ }
+ Err(RecvTimeoutError::Timeout) => continue,
+ Err(RecvTimeoutError::Disconnected) => break,
+ }
+ }
+
+ assert_eq!(recv_count, stress);
+}
+
+#[test]
+fn very_long_recv_timeout_wont_panic() {
+ let (tx, rx) = channel::<()>();
+ let join_handle = thread::spawn(move || rx.recv_timeout(Duration::from_secs(u64::MAX)));
+ thread::sleep(Duration::from_secs(1));
+ assert!(tx.send(()).is_ok());
+ assert_eq!(join_handle.join().unwrap(), Ok(()));
+}
+
+#[test]
+fn recv_a_lot() {
+ // Regression test that we don't run out of stack in scheduler context
+ let (tx, rx) = channel();
+ for _ in 0..10000 {
+ tx.send(()).unwrap();
+ }
+ for _ in 0..10000 {
+ rx.recv().unwrap();
+ }
+}
+
+#[test]
+fn shared_recv_timeout() {
+ let (tx, rx) = channel();
+ let total = 5;
+ for _ in 0..total {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ tx.send(()).unwrap();
+ });
+ }
+
+ for _ in 0..total {
+ rx.recv().unwrap();
+ }
+
+ assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Err(RecvTimeoutError::Timeout));
+ tx.send(()).unwrap();
+ assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(()));
+}
+
+#[test]
+fn shared_chan_stress() {
+ let (tx, rx) = channel();
+ let total = stress_factor() + 100;
+ for _ in 0..total {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ tx.send(()).unwrap();
+ });
+ }
+
+ for _ in 0..total {
+ rx.recv().unwrap();
+ }
+}
+
+#[test]
+fn test_nested_recv_iter() {
+ let (tx, rx) = channel::<i32>();
+ let (total_tx, total_rx) = channel::<i32>();
+
+ let _t = thread::spawn(move || {
+ let mut acc = 0;
+ for x in rx.iter() {
+ acc += x;
+ }
+ total_tx.send(acc).unwrap();
+ });
+
+ tx.send(3).unwrap();
+ tx.send(1).unwrap();
+ tx.send(2).unwrap();
+ drop(tx);
+ assert_eq!(total_rx.recv().unwrap(), 6);
+}
+
+#[test]
+fn test_recv_iter_break() {
+ let (tx, rx) = channel::<i32>();
+ let (count_tx, count_rx) = channel();
+
+ let _t = thread::spawn(move || {
+ let mut count = 0;
+ for x in rx.iter() {
+ if count >= 3 {
+ break;
+ } else {
+ count += x;
+ }
+ }
+ count_tx.send(count).unwrap();
+ });
+
+ tx.send(2).unwrap();
+ tx.send(2).unwrap();
+ tx.send(2).unwrap();
+ let _ = tx.send(2);
+ drop(tx);
+ assert_eq!(count_rx.recv().unwrap(), 4);
+}
+
+#[test]
+fn test_recv_try_iter() {
+ let (request_tx, request_rx) = channel();
+ let (response_tx, response_rx) = channel();
+
+ // Request `x`s until we have `6`.
+ let t = thread::spawn(move || {
+ let mut count = 0;
+ loop {
+ for x in response_rx.try_iter() {
+ count += x;
+ if count == 6 {
+ return count;
+ }
+ }
+ request_tx.send(()).unwrap();
+ }
+ });
+
+ for _ in request_rx.iter() {
+ if response_tx.send(2).is_err() {
+ break;
+ }
+ }
+
+ assert_eq!(t.join().unwrap(), 6);
+}
+
+#[test]
+fn test_recv_into_iter_owned() {
+ let mut iter = {
+ let (tx, rx) = channel::<i32>();
+ tx.send(1).unwrap();
+ tx.send(2).unwrap();
+
+ rx.into_iter()
+ };
+ assert_eq!(iter.next().unwrap(), 1);
+ assert_eq!(iter.next().unwrap(), 2);
+ assert_eq!(iter.next().is_none(), true);
+}
+
+#[test]
+fn test_recv_into_iter_borrowed() {
+ let (tx, rx) = channel::<i32>();
+ tx.send(1).unwrap();
+ tx.send(2).unwrap();
+ drop(tx);
+ let mut iter = (&rx).into_iter();
+ assert_eq!(iter.next().unwrap(), 1);
+ assert_eq!(iter.next().unwrap(), 2);
+ assert_eq!(iter.next().is_none(), true);
+}
+
+#[test]
+fn try_recv_states() {
+ let (tx1, rx1) = channel::<i32>();
+ let (tx2, rx2) = channel::<()>();
+ let (tx3, rx3) = channel::<()>();
+ let _t = thread::spawn(move || {
+ rx2.recv().unwrap();
+ tx1.send(1).unwrap();
+ tx3.send(()).unwrap();
+ rx2.recv().unwrap();
+ drop(tx1);
+ tx3.send(()).unwrap();
+ });
+
+ assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty));
+ tx2.send(()).unwrap();
+ rx3.recv().unwrap();
+ assert_eq!(rx1.try_recv(), Ok(1));
+ assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty));
+ tx2.send(()).unwrap();
+ rx3.recv().unwrap();
+ assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected));
+}
+
+// This bug used to result in a livelock inside the Receiver destructor
+// because the internal state of the Shared packet was corrupted
+#[test]
+fn destroy_upgraded_shared_port_when_sender_still_active() {
+ let (tx, rx) = channel();
+ let (tx2, rx2) = channel();
+ let _t = thread::spawn(move || {
+ rx.recv().unwrap(); // wait on a oneshot
+ drop(rx); // destroy a shared
+ tx2.send(()).unwrap();
+ });
+ // make sure the other thread has gone to sleep
+ for _ in 0..5000 {
+ thread::yield_now();
+ }
+
+ // upgrade to a shared chan and send a message
+ let t = tx.clone();
+ drop(tx);
+ t.send(()).unwrap();
+
+ // wait for the child thread to exit before we exit
+ rx2.recv().unwrap();
+}
+
+#[test]
+fn issue_32114() {
+ let (tx, _) = channel();
+ let _ = tx.send(123);
+ assert_eq!(tx.send(123), Err(SendError(123)));
+}
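
The recv_timeout stress tests above all follow the same drain-until-disconnect pattern. For reference only (a minimal sketch, not part of the diff), the same pattern in user code looks roughly like this:

```rust
use std::sync::mpsc::{channel, RecvTimeoutError};
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = channel();
    for i in 0..4u64 {
        let tx = tx.clone();
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(i * 10));
            tx.send(i).unwrap();
        });
    }
    // Drop the original sender so that, once every spawned sender is gone,
    // the receiver observes disconnection instead of timing out forever.
    drop(tx);

    let mut received = 0;
    loop {
        match rx.recv_timeout(Duration::from_millis(10)) {
            Ok(_) => received += 1,
            Err(RecvTimeoutError::Timeout) => continue, // nothing yet, poll again
            Err(RecvTimeoutError::Disconnected) => break, // all senders dropped
        }
    }
    assert_eq!(received, 4);
}
```
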
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
new file mode 100644
index 000000000..e0d13cd64
--- /dev/null
+++ b/library/std/src/sync/mutex.rs
@@ -0,0 +1,553 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::ops::{Deref, DerefMut};
+use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
+use crate::sys_common::mutex as sys;
+
+/// A mutual exclusion primitive useful for protecting shared data
+///
+/// This mutex will block threads waiting for the lock to become available. The
+/// mutex can be created via a [`new`] constructor. Each mutex has a type parameter
+/// which represents the data that it is protecting. The data can only be accessed
+/// through the RAII guards returned from [`lock`] and [`try_lock`], which
+/// guarantees that the data is only ever accessed when the mutex is locked.
+///
+/// # Poisoning
+///
+/// The mutexes in this module implement a strategy called "poisoning" where a
+/// mutex is considered poisoned whenever a thread panics while holding the
+/// mutex. Once a mutex is poisoned, all other threads are unable to access the
+/// data by default as it is likely tainted (some invariant is not being
+/// upheld).
+///
+/// For a mutex, this means that the [`lock`] and [`try_lock`] methods return a
+/// [`Result`] which indicates whether a mutex has been poisoned or not. Most
+/// usage of a mutex will simply [`unwrap()`] these results, propagating panics
+/// among threads to ensure that a possibly invalid invariant is not witnessed.
+///
+/// A poisoned mutex, however, does not prevent all access to the underlying
+/// data. The [`PoisonError`] type has an [`into_inner`] method which will return
+/// the guard that would have otherwise been returned on a successful lock. This
+/// allows access to the data, despite the lock being poisoned.
+///
+/// [`new`]: Self::new
+/// [`lock`]: Self::lock
+/// [`try_lock`]: Self::try_lock
+/// [`unwrap()`]: Result::unwrap
+/// [`PoisonError`]: super::PoisonError
+/// [`into_inner`]: super::PoisonError::into_inner
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::{Arc, Mutex};
+/// use std::thread;
+/// use std::sync::mpsc::channel;
+///
+/// const N: usize = 10;
+///
+/// // Spawn a few threads to increment a shared variable (non-atomically), and
+/// // let the main thread know once all increments are done.
+/// //
+/// // Here we're using an Arc to share memory among threads, and the data inside
+/// // the Arc is protected with a mutex.
+/// let data = Arc::new(Mutex::new(0));
+///
+/// let (tx, rx) = channel();
+/// for _ in 0..N {
+/// let (data, tx) = (Arc::clone(&data), tx.clone());
+/// thread::spawn(move || {
+/// // The shared state can only be accessed once the lock is held.
+/// // Our non-atomic increment is safe because we're the only thread
+/// // which can access the shared state when the lock is held.
+/// //
+/// // We unwrap() the return value to assert that we are not expecting
+/// // threads to ever fail while holding the lock.
+/// let mut data = data.lock().unwrap();
+/// *data += 1;
+/// if *data == N {
+/// tx.send(()).unwrap();
+/// }
+/// // the lock is unlocked here when `data` goes out of scope.
+/// });
+/// }
+///
+/// rx.recv().unwrap();
+/// ```
+///
+/// To recover from a poisoned mutex:
+///
+/// ```
+/// use std::sync::{Arc, Mutex};
+/// use std::thread;
+///
+/// let lock = Arc::new(Mutex::new(0_u32));
+/// let lock2 = Arc::clone(&lock);
+///
+/// let _ = thread::spawn(move || -> () {
+/// // This thread will acquire the mutex first, unwrapping the result of
+/// // `lock` because the lock has not been poisoned.
+/// let _guard = lock2.lock().unwrap();
+///
+/// // This panic while holding the lock (`_guard` is in scope) will poison
+/// // the mutex.
+/// panic!();
+/// }).join();
+///
+/// // The lock is poisoned by this point, but the returned result can be
+/// // pattern matched on to return the underlying guard on both branches.
+/// let mut guard = match lock.lock() {
+/// Ok(guard) => guard,
+/// Err(poisoned) => poisoned.into_inner(),
+/// };
+///
+/// *guard += 1;
+/// ```
+///
+/// It is sometimes necessary to manually drop the mutex guard to unlock it
+/// sooner than the end of the enclosing scope.
+///
+/// ```
+/// use std::sync::{Arc, Mutex};
+/// use std::thread;
+///
+/// const N: usize = 3;
+///
+/// let data_mutex = Arc::new(Mutex::new(vec![1, 2, 3, 4]));
+/// let res_mutex = Arc::new(Mutex::new(0));
+///
+/// let mut threads = Vec::with_capacity(N);
+/// (0..N).for_each(|_| {
+/// let data_mutex_clone = Arc::clone(&data_mutex);
+/// let res_mutex_clone = Arc::clone(&res_mutex);
+///
+/// threads.push(thread::spawn(move || {
+/// let mut data = data_mutex_clone.lock().unwrap();
+/// // This is the result of some important and long-ish work.
+/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
+/// data.push(result);
+/// drop(data);
+/// *res_mutex_clone.lock().unwrap() += result;
+/// }));
+/// });
+///
+/// let mut data = data_mutex.lock().unwrap();
+/// // This is the result of some important and long-ish work.
+/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
+/// data.push(result);
+/// // We drop the `data` explicitly because it's not necessary anymore and the
+/// // thread still has work to do. This allows other threads to start working on
+/// // the data immediately, without waiting for the rest of the unrelated work
+/// // to be done here.
+/// //
+/// // It's even more important here than in the threads because we `.join` the
+/// // threads after that. If we had not dropped the mutex guard, a thread could
+/// // be waiting forever for it, causing a deadlock.
+/// drop(data);
+/// // Here the mutex guard is not assigned to a variable and so, even if the
+/// // scope does not end after this line, the mutex is still released: there is
+/// // no deadlock.
+/// *res_mutex.lock().unwrap() += result;
+///
+/// threads.into_iter().for_each(|thread| {
+/// thread
+/// .join()
+/// .expect("The thread creating or execution failed !")
+/// });
+///
+/// assert_eq!(*res_mutex.lock().unwrap(), 800);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Mutex")]
+pub struct Mutex<T: ?Sized> {
+ inner: sys::MovableMutex,
+ poison: poison::Flag,
+ data: UnsafeCell<T>,
+}
+
+// these are the only places where `T: Send` matters; all other
+// functionality works fine on a single thread.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// [`Deref`] and [`DerefMut`] implementations.
+///
+/// This structure is created by the [`lock`] and [`try_lock`] methods on
+/// [`Mutex`].
+///
+/// [`lock`]: Mutex::lock
+/// [`try_lock`]: Mutex::try_lock
+#[must_use = "if unused the Mutex will immediately unlock"]
+#[must_not_suspend = "holding a MutexGuard across suspend \
+ points can cause deadlocks, delays, \
+ and cause Futures to not implement `Send`"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[clippy::has_significant_drop]
+pub struct MutexGuard<'a, T: ?Sized + 'a> {
+ lock: &'a Mutex<T>,
+ poison: poison::Guard,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Send for MutexGuard<'_, T> {}
+#[stable(feature = "mutexguard", since = "1.19.0")]
+unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
+
+impl<T> Mutex<T> {
+ /// Creates a new mutex in an unlocked state ready for use.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Mutex;
+ ///
+ /// let mutex = Mutex::new(0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+ #[inline]
+ pub const fn new(t: T) -> Mutex<T> {
+ Mutex {
+ inner: sys::MovableMutex::new(),
+ poison: poison::Flag::new(),
+ data: UnsafeCell::new(t),
+ }
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Acquires a mutex, blocking the current thread until it is able to do so.
+ ///
+ /// This function will block the local thread until it is available to acquire
+ /// the mutex. Upon returning, the thread is the only thread with the lock
+ /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+ /// the guard goes out of scope, the mutex will be unlocked.
+ ///
+ /// The exact behavior on locking a mutex in the thread which already holds
+ /// the lock is left unspecified. However, this function will not return on
+ /// the second call (it might panic or deadlock, for example).
+ ///
+ /// # Errors
+ ///
+ /// If another user of this mutex panicked while holding the mutex, then
+ /// this call will return an error once the mutex is acquired.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic when called if the lock is already held by
+ /// the current thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex};
+ /// use std::thread;
+ ///
+ /// let mutex = Arc::new(Mutex::new(0));
+ /// let c_mutex = Arc::clone(&mutex);
+ ///
+ /// thread::spawn(move || {
+ /// *c_mutex.lock().unwrap() = 10;
+ /// }).join().expect("thread::spawn failed");
+ /// assert_eq!(*mutex.lock().unwrap(), 10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
+ unsafe {
+ self.inner.raw_lock();
+ MutexGuard::new(self)
+ }
+ }
+
+ /// Attempts to acquire this lock.
+ ///
+ /// If the lock could not be acquired at this time, then [`Err`] is returned.
+ /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+ /// guard is dropped.
+ ///
+ /// This function does not block.
+ ///
+ /// # Errors
+ ///
+ /// If another user of this mutex panicked while holding the mutex, then
+ /// this call will return the [`Poisoned`] error if the mutex would
+ /// otherwise be acquired.
+ ///
+ /// If the mutex could not be acquired because it is already locked, then
+ /// this call will return the [`WouldBlock`] error.
+ ///
+ /// [`Poisoned`]: TryLockError::Poisoned
+ /// [`WouldBlock`]: TryLockError::WouldBlock
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex};
+ /// use std::thread;
+ ///
+ /// let mutex = Arc::new(Mutex::new(0));
+ /// let c_mutex = Arc::clone(&mutex);
+ ///
+ /// thread::spawn(move || {
+ /// let mut lock = c_mutex.try_lock();
+ /// if let Ok(ref mut mutex) = lock {
+ /// **mutex = 10;
+ /// } else {
+ /// println!("try_lock failed");
+ /// }
+ /// }).join().expect("thread::spawn failed");
+ /// assert_eq!(*mutex.lock().unwrap(), 10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
+ unsafe {
+ if self.inner.try_lock() {
+ Ok(MutexGuard::new(self)?)
+ } else {
+ Err(TryLockError::WouldBlock)
+ }
+ }
+ }
+
+ /// Immediately drops the guard, and consequently unlocks the mutex.
+ ///
+ /// This function is equivalent to calling [`drop`] on the guard but is more self-documenting.
+ /// Alternately, the guard will be automatically dropped when it goes out of scope.
+ ///
+ /// ```
+ /// #![feature(mutex_unlock)]
+ ///
+ /// use std::sync::Mutex;
+ /// let mutex = Mutex::new(0);
+ ///
+ /// let mut guard = mutex.lock().unwrap();
+ /// *guard += 20;
+ /// Mutex::unlock(guard);
+ /// ```
+ #[unstable(feature = "mutex_unlock", issue = "81872")]
+ pub fn unlock(guard: MutexGuard<'_, T>) {
+ drop(guard);
+ }
+
+ /// Determines whether the mutex is poisoned.
+ ///
+ /// If another thread is active, the mutex can still become poisoned at any
+ /// time. You should not trust a `false` value for program correctness
+ /// without additional synchronization.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Mutex};
+ /// use std::thread;
+ ///
+ /// let mutex = Arc::new(Mutex::new(0));
+ /// let c_mutex = Arc::clone(&mutex);
+ ///
+ /// let _ = thread::spawn(move || {
+ /// let _lock = c_mutex.lock().unwrap();
+ /// panic!(); // the mutex gets poisoned
+ /// }).join();
+ /// assert_eq!(mutex.is_poisoned(), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "sync_poison", since = "1.2.0")]
+ pub fn is_poisoned(&self) -> bool {
+ self.poison.get()
+ }
+
+ /// Clear the poisoned state from a mutex
+ ///
+ /// If the mutex is poisoned, it will remain poisoned until this function is called. This
+ /// allows recovering from a poisoned state and marking that it has recovered. For example, if
+ /// the value is overwritten by a known-good value, then the mutex can be marked as
+ /// un-poisoned. Or possibly, the value could be inspected to determine if it is in a
+ /// consistent state, and if so the poison is removed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(mutex_unpoison)]
+ ///
+ /// use std::sync::{Arc, Mutex};
+ /// use std::thread;
+ ///
+ /// let mutex = Arc::new(Mutex::new(0));
+ /// let c_mutex = Arc::clone(&mutex);
+ ///
+ /// let _ = thread::spawn(move || {
+ /// let _lock = c_mutex.lock().unwrap();
+ /// panic!(); // the mutex gets poisoned
+ /// }).join();
+ ///
+ /// assert_eq!(mutex.is_poisoned(), true);
+ /// let x = mutex.lock().unwrap_or_else(|mut e| {
+ /// **e.get_mut() = 1;
+ /// mutex.clear_poison();
+ /// e.into_inner()
+ /// });
+ /// assert_eq!(mutex.is_poisoned(), false);
+ /// assert_eq!(*x, 1);
+ /// ```
+ #[inline]
+ #[unstable(feature = "mutex_unpoison", issue = "96469")]
+ pub fn clear_poison(&self) {
+ self.poison.clear();
+ }
+
+ /// Consumes this mutex, returning the underlying data.
+ ///
+ /// # Errors
+ ///
+ /// If another user of this mutex panicked while holding the mutex, then
+ /// this call will return an error instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Mutex;
+ ///
+ /// let mutex = Mutex::new(0);
+ /// assert_eq!(mutex.into_inner().unwrap(), 0);
+ /// ```
+ #[stable(feature = "mutex_into_inner", since = "1.6.0")]
+ pub fn into_inner(self) -> LockResult<T>
+ where
+ T: Sized,
+ {
+ let data = self.data.into_inner();
+ poison::map_result(self.poison.borrow(), |()| data)
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+ /// take place -- the mutable borrow statically guarantees no locks exist.
+ ///
+ /// # Errors
+ ///
+ /// If another user of this mutex panicked while holding the mutex, then
+ /// this call will return an error instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Mutex;
+ ///
+ /// let mut mutex = Mutex::new(0);
+ /// *mutex.get_mut().unwrap() = 10;
+ /// assert_eq!(*mutex.lock().unwrap(), 10);
+ /// ```
+ #[stable(feature = "mutex_get_mut", since = "1.6.0")]
+ pub fn get_mut(&mut self) -> LockResult<&mut T> {
+ let data = self.data.get_mut();
+ poison::map_result(self.poison.borrow(), |()| data)
+ }
+}
+
+#[stable(feature = "mutex_from", since = "1.24.0")]
+impl<T> From<T> for Mutex<T> {
+ /// Creates a new mutex in an unlocked state ready for use.
+ /// This is equivalent to [`Mutex::new`].
+ fn from(t: T) -> Self {
+ Mutex::new(t)
+ }
+}
+
+#[stable(feature = "mutex_default", since = "1.10.0")]
+impl<T: ?Sized + Default> Default for Mutex<T> {
+ /// Creates a `Mutex<T>`, with the `Default` value for T.
+ fn default() -> Mutex<T> {
+ Mutex::new(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut d = f.debug_struct("Mutex");
+ match self.try_lock() {
+ Ok(guard) => {
+ d.field("data", &&*guard);
+ }
+ Err(TryLockError::Poisoned(err)) => {
+ d.field("data", &&**err.get_ref());
+ }
+ Err(TryLockError::WouldBlock) => {
+ struct LockedPlaceholder;
+ impl fmt::Debug for LockedPlaceholder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("<locked>")
+ }
+ }
+ d.field("data", &LockedPlaceholder);
+ }
+ }
+ d.field("poisoned", &self.poison.get());
+ d.finish_non_exhaustive()
+ }
+}
+
+impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
+ unsafe fn new(lock: &'mutex Mutex<T>) -> LockResult<MutexGuard<'mutex, T>> {
+ poison::map_result(lock.poison.guard(), |guard| MutexGuard { lock, poison: guard })
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for MutexGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Drop for MutexGuard<'_, T> {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ self.lock.poison.done(&self.poison);
+ self.lock.inner.raw_unlock();
+ }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::MovableMutex {
+ &guard.lock.inner
+}
+
+pub fn guard_poison<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
+ &guard.lock.poison
+}
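
The `Debug` impl above renders the protected data as `<locked>` when `try_lock` would block, but it carries no doc example. A minimal sketch of what that looks like from user code (illustrative only, not part of the diff):

```rust
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let m = Arc::new(Mutex::new(0_i32));
    let m2 = Arc::clone(&m);
    let (locked_tx, locked_rx) = channel();
    let (release_tx, release_rx) = channel::<()>();

    let t = thread::spawn(move || {
        let _guard = m2.lock().unwrap();
        locked_tx.send(()).unwrap(); // tell the main thread the lock is held
        release_rx.recv().unwrap();  // keep holding the lock until told to release
    });

    locked_rx.recv().unwrap();
    // `try_lock` inside the Debug impl fails with WouldBlock here, so the
    // data field is rendered as `<locked>`,
    // e.g. `Mutex { data: <locked>, poisoned: false, .. }`.
    println!("{:?}", m);

    release_tx.send(()).unwrap();
    t.join().unwrap();
    println!("{:?}", m); // now prints the actual data
}
```
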
diff --git a/library/std/src/sync/mutex/tests.rs b/library/std/src/sync/mutex/tests.rs
new file mode 100644
index 000000000..93900566f
--- /dev/null
+++ b/library/std/src/sync/mutex/tests.rs
@@ -0,0 +1,238 @@
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::mpsc::channel;
+use crate::sync::{Arc, Condvar, Mutex};
+use crate::thread;
+
+struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+
+#[derive(Eq, PartialEq, Debug)]
+struct NonCopy(i32);
+
+#[test]
+fn smoke() {
+ let m = Mutex::new(());
+ drop(m.lock().unwrap());
+ drop(m.lock().unwrap());
+}
+
+#[test]
+fn lots_and_lots() {
+ const J: u32 = 1000;
+ const K: u32 = 3;
+
+ let m = Arc::new(Mutex::new(0));
+
+ fn inc(m: &Mutex<u32>) {
+ for _ in 0..J {
+ *m.lock().unwrap() += 1;
+ }
+ }
+
+ let (tx, rx) = channel();
+ for _ in 0..K {
+ let tx2 = tx.clone();
+ let m2 = m.clone();
+ thread::spawn(move || {
+ inc(&m2);
+ tx2.send(()).unwrap();
+ });
+ let tx2 = tx.clone();
+ let m2 = m.clone();
+ thread::spawn(move || {
+ inc(&m2);
+ tx2.send(()).unwrap();
+ });
+ }
+
+ drop(tx);
+ for _ in 0..2 * K {
+ rx.recv().unwrap();
+ }
+ assert_eq!(*m.lock().unwrap(), J * K * 2);
+}
+
+#[test]
+fn try_lock() {
+ let m = Mutex::new(());
+ *m.try_lock().unwrap() = ();
+}
+
+#[test]
+fn test_into_inner() {
+ let m = Mutex::new(NonCopy(10));
+ assert_eq!(m.into_inner().unwrap(), NonCopy(10));
+}
+
+#[test]
+fn test_into_inner_drop() {
+ struct Foo(Arc<AtomicUsize>);
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+ let num_drops = Arc::new(AtomicUsize::new(0));
+ let m = Mutex::new(Foo(num_drops.clone()));
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ {
+ let _inner = m.into_inner().unwrap();
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ }
+ assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+}
+
+#[test]
+fn test_into_inner_poison() {
+ let m = Arc::new(Mutex::new(NonCopy(10)));
+ let m2 = m.clone();
+ let _ = thread::spawn(move || {
+ let _lock = m2.lock().unwrap();
+ panic!("test panic in inner thread to poison mutex");
+ })
+ .join();
+
+ assert!(m.is_poisoned());
+ match Arc::try_unwrap(m).unwrap().into_inner() {
+ Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
+ Ok(x) => panic!("into_inner of poisoned Mutex is Ok: {x:?}"),
+ }
+}
+
+#[test]
+fn test_get_mut() {
+ let mut m = Mutex::new(NonCopy(10));
+ *m.get_mut().unwrap() = NonCopy(20);
+ assert_eq!(m.into_inner().unwrap(), NonCopy(20));
+}
+
+#[test]
+fn test_get_mut_poison() {
+ let m = Arc::new(Mutex::new(NonCopy(10)));
+ let m2 = m.clone();
+ let _ = thread::spawn(move || {
+ let _lock = m2.lock().unwrap();
+ panic!("test panic in inner thread to poison mutex");
+ })
+ .join();
+
+ assert!(m.is_poisoned());
+ match Arc::try_unwrap(m).unwrap().get_mut() {
+ Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
+ Ok(x) => panic!("get_mut of poisoned Mutex is Ok: {x:?}"),
+ }
+}
+
+#[test]
+fn test_mutex_arc_condvar() {
+ let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
+ let packet2 = Packet(packet.0.clone());
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || {
+ // wait until parent gets in
+ rx.recv().unwrap();
+ let &(ref lock, ref cvar) = &*packet2.0;
+ let mut lock = lock.lock().unwrap();
+ *lock = true;
+ cvar.notify_one();
+ });
+
+ let &(ref lock, ref cvar) = &*packet.0;
+ let mut lock = lock.lock().unwrap();
+ tx.send(()).unwrap();
+ assert!(!*lock);
+ while !*lock {
+ lock = cvar.wait(lock).unwrap();
+ }
+}
+
+#[test]
+fn test_arc_condvar_poison() {
+ let packet = Packet(Arc::new((Mutex::new(1), Condvar::new())));
+ let packet2 = Packet(packet.0.clone());
+ let (tx, rx) = channel();
+
+ let _t = thread::spawn(move || -> () {
+ rx.recv().unwrap();
+ let &(ref lock, ref cvar) = &*packet2.0;
+ let _g = lock.lock().unwrap();
+ cvar.notify_one();
+ // Parent should fail when it wakes up.
+ panic!();
+ });
+
+ let &(ref lock, ref cvar) = &*packet.0;
+ let mut lock = lock.lock().unwrap();
+ tx.send(()).unwrap();
+ while *lock == 1 {
+ match cvar.wait(lock) {
+ Ok(l) => {
+ lock = l;
+ assert_eq!(*lock, 1);
+ }
+ Err(..) => break,
+ }
+ }
+}
+
+#[test]
+fn test_mutex_arc_poison() {
+ let arc = Arc::new(Mutex::new(1));
+ assert!(!arc.is_poisoned());
+ let arc2 = arc.clone();
+ let _ = thread::spawn(move || {
+ let lock = arc2.lock().unwrap();
+ assert_eq!(*lock, 2);
+ })
+ .join();
+ assert!(arc.lock().is_err());
+ assert!(arc.is_poisoned());
+}
+
+#[test]
+fn test_mutex_arc_nested() {
+ // Tests nested mutexes and access
+ // to underlying data.
+ let arc = Arc::new(Mutex::new(1));
+ let arc2 = Arc::new(Mutex::new(arc));
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || {
+ let lock = arc2.lock().unwrap();
+ let lock2 = lock.lock().unwrap();
+ assert_eq!(*lock2, 1);
+ tx.send(()).unwrap();
+ });
+ rx.recv().unwrap();
+}
+
+#[test]
+fn test_mutex_arc_access_in_unwind() {
+ let arc = Arc::new(Mutex::new(1));
+ let arc2 = arc.clone();
+ let _ = thread::spawn(move || -> () {
+ struct Unwinder {
+ i: Arc<Mutex<i32>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ *self.i.lock().unwrap() += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ })
+ .join();
+ let lock = arc.lock().unwrap();
+ assert_eq!(*lock, 2);
+}
+
+#[test]
+fn test_mutex_unsized() {
+ let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
+ {
+ let b = &mut *mutex.lock().unwrap();
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &[i32] = &[4, 2, 5];
+ assert_eq!(&*mutex.lock().unwrap(), comp);
+}
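
`test_mutex_arc_condvar` above exercises the canonical `Mutex` + `Condvar` hand-off. For reference (a minimal sketch, not part of the diff), the same pattern in user code:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    let t = thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true; // flip the flag under the lock
        cvar.notify_one();            // wake the waiting thread
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    while !*ready {
        // `wait` atomically releases the lock and reacquires it when woken,
        // so the flag is always re-checked while the lock is held.
        ready = cvar.wait(ready).unwrap();
    }
    assert!(*ready);
    t.join().unwrap();
}
```
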
diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs
new file mode 100644
index 000000000..a7feea588
--- /dev/null
+++ b/library/std/src/sync/once.rs
@@ -0,0 +1,580 @@
+//! A "once initialization" primitive
+//!
+//! This primitive is meant to be used to run one-time initialization. An
+//! example use case would be for initializing an FFI library.
+
+// A "once" is a relatively simple primitive, and it's also typically provided
+// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
+// primitives, however, tend to have surprising restrictions, such as the Unix
+// one not allowing an argument to be passed to the function.
+//
+// As a result, we end up implementing it ourselves in the standard library.
+// This also gives us the opportunity to optimize the implementation a bit which
+// should help the fast path on call sites. Consequently, let's explain how this
+// primitive works now!
+//
+// So to recap, the guarantees of a Once are that it will call the
+// initialization closure at most once, and it will never return until the one
+// that's running has finished running. This means that we need some form of
+// blocking here while the custom callback is running at the very least.
+// Additionally, we add on the restriction of **poisoning**. Whenever an
+// initialization closure panics, the Once enters a "poisoned" state which means
+// that all future calls will immediately panic as well.
+//
+// So to implement this, one might first reach for a `Mutex`, but those cannot
+// be put into a `static`. It also gets a lot harder with poisoning to figure
+// out when the mutex needs to be deallocated because it's not after the closure
+// finishes, but after the first successful closure finishes.
+//
+// All in all, this is instead implemented with atomics and lock-free
+// operations! Whee! Each `Once` has one word of atomic state, and this state is
+// CAS'd on to determine what to do. There are four possible states of a `Once`:
+//
+// * Incomplete - no initialization has run yet, and no thread is currently
+// using the Once.
+// * Poisoned - some thread has previously attempted to initialize the Once, but
+// it panicked, so the Once is now poisoned. There are no other
+// threads currently accessing this Once.
+// * Running - some thread is currently attempting to run initialization. It may
+// succeed, so all future threads need to wait for it to finish.
+// Note that this state is accompanied with a payload, described
+// below.
+// * Complete - initialization has completed and all future calls should finish
+// immediately.
+//
+// With 4 states we need 2 bits to encode this, and we use the remaining bits
+// in the word we have allocated as a queue of threads waiting for the thread
+// responsible for entering the RUNNING state. This queue is just a linked list
+// of Waiter nodes which is monotonically increasing in size. Each node is
+// allocated on the stack, and whenever the running closure finishes it will
+// consume the entire queue and notify all waiters they should try again.
+//
+// You'll find a few more details in the implementation, but that's the gist of
+// it!
+//
+// Atomic orderings:
+// When running `Once` we deal with multiple atomics:
+// `Once.state_and_queue` and an unknown number of `Waiter.signaled`.
+// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the
+// result of the `Once`, and (3) for synchronizing `Waiter` nodes.
+// - At the end of the `call_inner` function we have to make sure the result
+// of the `Once` is acquired. So every load which can be the only one to
+// load COMPLETED must have at least Acquire ordering, which means all
+// three of them.
+// - `WaiterQueue::Drop` is the only place that may store COMPLETED, and
+// must do so with Release ordering to make the result available.
+// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and
+// needs to make the nodes available with Release ordering. The load in
+// its `compare_exchange` can be Relaxed because it only has to compare
+// the atomic, not to read other data.
+// - `WaiterQueue::Drop` must see the `Waiter` nodes, so it must load
+// `state_and_queue` with Acquire ordering.
+// - There is just one store where `state_and_queue` is used only as a
+// state flag, without having to synchronize data: switching the state
+// from INCOMPLETE to RUNNING in `call_inner`. This store can be Relaxed,
+// but the read has to be Acquire because of the requirements mentioned
+// above.
+// * `Waiter.signaled` is both used as a flag, and to protect a field with
+// interior mutability in `Waiter`. `Waiter.thread` is changed in
+// `WaiterQueue::Drop` which then sets `signaled` with Release ordering.
+// After `wait` loads `signaled` with Acquire and sees it is true, it needs to
+// see the changes to drop the `Waiter` struct correctly.
+// * There is one place where the two atomics `Once.state_and_queue` and
+// `Waiter.signaled` come together, and might be reordered by the compiler or
+// processor. Because both use Acquire ordering such a reordering is not
+// allowed, so no need for SeqCst.
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cell::Cell;
+use crate::fmt;
+use crate::marker;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::ptr;
+use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use crate::thread::{self, Thread};
+
+type Masked = ();
+
+/// A synchronization primitive which can be used to run a one-time global
+/// initialization. Useful for one-time initialization for FFI or related
+/// functionality. This type can only be constructed with [`Once::new()`].
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::Once;
+///
+/// static START: Once = Once::new();
+///
+/// START.call_once(|| {
+/// // run initialization here
+/// });
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Once {
+ // `state_and_queue` is actually a pointer to a `Waiter` with extra state
+ // bits, so we add the `PhantomData` appropriately.
+ state_and_queue: AtomicPtr<Masked>,
+ _marker: marker::PhantomData<*const Waiter>,
+}
+
+// The `PhantomData` of a raw pointer removes these two auto traits, but we
+// enforce both below in the implementation so this should be safe to add.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl Sync for Once {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl Send for Once {}
+
+#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
+impl UnwindSafe for Once {}
+
+#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
+impl RefUnwindSafe for Once {}
+
+/// State yielded to [`Once::call_once_force()`]’s closure parameter. The state
+/// can be used to query the poison status of the [`Once`].
+#[stable(feature = "once_poison", since = "1.51.0")]
+#[derive(Debug)]
+pub struct OnceState {
+ poisoned: bool,
+ set_state_on_drop_to: Cell<*mut Masked>,
+}
+
+/// Initialization value for static [`Once`] values.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::{Once, ONCE_INIT};
+///
+/// static START: Once = ONCE_INIT;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(
+ since = "1.38.0",
+ note = "the `new` function is now preferred",
+ suggestion = "Once::new()"
+)]
+pub const ONCE_INIT: Once = Once::new();
+
+// Four states that a Once can be in, encoded into the lower bits of
+// `state_and_queue` in the Once structure.
+const INCOMPLETE: usize = 0x0;
+const POISONED: usize = 0x1;
+const RUNNING: usize = 0x2;
+const COMPLETE: usize = 0x3;
+
+// Mask to learn about the state. All other bits are the queue of waiters if
+// this is in the RUNNING state.
+const STATE_MASK: usize = 0x3;
+
+// Representation of a node in the linked list of waiters, used while in the
+// RUNNING state.
+// Note: `Waiter` can't hold a mutable pointer to the next thread, because then
+// `wait` would both hand out a mutable reference to its `Waiter` node, and keep
+// a shared reference to check `signaled`. Instead we hold shared references and
+// use interior mutability.
+#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
+struct Waiter {
+ thread: Cell<Option<Thread>>,
+ signaled: AtomicBool,
+ next: *const Waiter,
+}
+
+// Head of a linked list of waiters.
+// Every node is a struct on the stack of a waiting thread.
+// Will wake up the waiters when it gets dropped, i.e. also on panic.
+struct WaiterQueue<'a> {
+ state_and_queue: &'a AtomicPtr<Masked>,
+ set_state_on_drop_to: *mut Masked,
+}
+
+impl Once {
+ /// Creates a new `Once` value.
+ #[inline]
+ #[stable(feature = "once_new", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
+ #[must_use]
+ pub const fn new() -> Once {
+ Once {
+ state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)),
+ _marker: marker::PhantomData,
+ }
+ }
+
+ /// Performs an initialization routine once and only once. The given closure
+ /// will be executed if this is the first time `call_once` has been called,
+ /// and otherwise the routine will *not* be invoked.
+ ///
+ /// This method will block the calling thread if another initialization
+ /// routine is currently running.
+ ///
+ /// When this function returns, it is guaranteed that some initialization
+ /// has run and completed (it might not be the closure specified). It is also
+ /// guaranteed that any memory writes performed by the executed closure can
+ /// be reliably observed by other threads at this point (there is a
+ /// happens-before relation between the closure and code executing after the
+ /// return).
+ ///
+ /// If the given closure recursively invokes `call_once` on the same [`Once`]
+ /// instance the exact behavior is not specified, allowed outcomes are
+ /// a panic or a deadlock.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Once;
+ ///
+ /// static mut VAL: usize = 0;
+ /// static INIT: Once = Once::new();
+ ///
+ /// // Accessing a `static mut` is unsafe much of the time, but if we do so
+ /// // in a synchronized fashion (e.g., write once or read all) then we're
+ /// // good to go!
+ /// //
+ /// // This function will only call `expensive_computation` once, and will
+ /// // otherwise always return the value returned from the first invocation.
+ /// fn get_cached_val() -> usize {
+ /// unsafe {
+ /// INIT.call_once(|| {
+ /// VAL = expensive_computation();
+ /// });
+ /// VAL
+ /// }
+ /// }
+ ///
+ /// fn expensive_computation() -> usize {
+ /// // ...
+ /// # 2
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// The closure `f` will only be executed once if this is called
+ /// concurrently amongst many threads. If that closure panics, however, then
+ /// it will *poison* this [`Once`] instance, causing all future invocations of
+ /// `call_once` to also panic.
+ ///
+ /// This is similar to [poisoning with mutexes][poison].
+ ///
+ /// [poison]: struct.Mutex.html#poisoning
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[track_caller]
+ pub fn call_once<F>(&self, f: F)
+ where
+ F: FnOnce(),
+ {
+ // Fast path check
+ if self.is_completed() {
+ return;
+ }
+
+ let mut f = Some(f);
+ self.call_inner(false, &mut |_| f.take().unwrap()());
+ }
+
+ /// Performs the same function as [`call_once()`] except ignores poisoning.
+ ///
+ /// Unlike [`call_once()`], if this [`Once`] has been poisoned (i.e., a previous
+ /// call to [`call_once()`] or [`call_once_force()`] caused a panic), calling
+ /// [`call_once_force()`] will still invoke the closure `f` and will _not_
+ /// result in an immediate panic. If `f` panics, the [`Once`] will remain
+ /// in a poison state. If `f` does _not_ panic, the [`Once`] will no
+ /// longer be in a poison state and all future calls to [`call_once()`] or
+ /// [`call_once_force()`] will be no-ops.
+ ///
+ /// The closure `f` is yielded a [`OnceState`] structure which can be used
+ /// to query the poison status of the [`Once`].
+ ///
+ /// [`call_once()`]: Once::call_once
+ /// [`call_once_force()`]: Once::call_once_force
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Once;
+ /// use std::thread;
+ ///
+ /// static INIT: Once = Once::new();
+ ///
+ /// // poison the once
+ /// let handle = thread::spawn(|| {
+ /// INIT.call_once(|| panic!());
+ /// });
+ /// assert!(handle.join().is_err());
+ ///
+ /// // poisoning propagates
+ /// let handle = thread::spawn(|| {
+ /// INIT.call_once(|| {});
+ /// });
+ /// assert!(handle.join().is_err());
+ ///
+ /// // call_once_force will still run and reset the poisoned state
+ /// INIT.call_once_force(|state| {
+ /// assert!(state.is_poisoned());
+ /// });
+ ///
+ /// // once any success happens, we stop propagating the poison
+ /// INIT.call_once(|| {});
+ /// ```
+ #[stable(feature = "once_poison", since = "1.51.0")]
+ pub fn call_once_force<F>(&self, f: F)
+ where
+ F: FnOnce(&OnceState),
+ {
+ // Fast path check
+ if self.is_completed() {
+ return;
+ }
+
+ let mut f = Some(f);
+ self.call_inner(true, &mut |p| f.take().unwrap()(p));
+ }
+
+ /// Returns `true` if some [`call_once()`] call has completed
+ /// successfully. Specifically, `is_completed` will return false in
+ /// the following situations:
+ /// * [`call_once()`] was not called at all,
+ /// * [`call_once()`] was called, but has not yet completed,
+ /// * the [`Once`] instance is poisoned
+ ///
+ /// This function returning `false` does not mean that [`Once`] has not been
+ /// executed. For example, it may have been executed in the time between
+ /// when `is_completed` starts executing and when it returns, in which case
+ /// the `false` return value would be stale (but still permissible).
+ ///
+ /// [`call_once()`]: Once::call_once
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Once;
+ ///
+ /// static INIT: Once = Once::new();
+ ///
+ /// assert_eq!(INIT.is_completed(), false);
+ /// INIT.call_once(|| {
+ /// assert_eq!(INIT.is_completed(), false);
+ /// });
+ /// assert_eq!(INIT.is_completed(), true);
+ /// ```
+ ///
+ /// ```
+ /// use std::sync::Once;
+ /// use std::thread;
+ ///
+ /// static INIT: Once = Once::new();
+ ///
+ /// assert_eq!(INIT.is_completed(), false);
+ /// let handle = thread::spawn(|| {
+ /// INIT.call_once(|| panic!());
+ /// });
+ /// assert!(handle.join().is_err());
+ /// assert_eq!(INIT.is_completed(), false);
+ /// ```
+ #[stable(feature = "once_is_completed", since = "1.43.0")]
+ #[inline]
+ pub fn is_completed(&self) -> bool {
+ // An `Acquire` load is enough because that makes all the initialization
+ // operations visible to us, and, this being a fast path, weaker
+ // ordering helps with performance. This `Acquire` synchronizes with
+ // `Release` operations on the slow path.
+ self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
+ }
+
+ // This is a non-generic function to reduce the monomorphization cost of
+ // using `call_once` (this isn't exactly a trivial or small implementation).
+ //
+ // Additionally, this is tagged with `#[cold]` as it should indeed be cold
+ // and it helps let LLVM know that calls to this function should be off the
+ // fast path. Essentially, this should help generate more straight line code
+ // in LLVM.
+ //
+ // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
+ // currently no way to take an `FnOnce` and call it via virtual dispatch
+ // without some allocation overhead.
+ #[cold]
+ #[track_caller]
+ fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
+ let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
+ loop {
+ match state_and_queue.addr() {
+ COMPLETE => break,
+ POISONED if !ignore_poisoning => {
+ // Panic to propagate the poison.
+ panic!("Once instance has previously been poisoned");
+ }
+ POISONED | INCOMPLETE => {
+ // Try to register this thread as the one RUNNING.
+ let exchange_result = self.state_and_queue.compare_exchange(
+ state_and_queue,
+ ptr::invalid_mut(RUNNING),
+ Ordering::Acquire,
+ Ordering::Acquire,
+ );
+ if let Err(old) = exchange_result {
+ state_and_queue = old;
+ continue;
+ }
+ // `waiter_queue` will manage other waiting threads, and
+ // wake them up on drop.
+ let mut waiter_queue = WaiterQueue {
+ state_and_queue: &self.state_and_queue,
+ set_state_on_drop_to: ptr::invalid_mut(POISONED),
+ };
+ // Run the initialization function, letting it know if we're
+ // poisoned or not.
+ let init_state = OnceState {
+ poisoned: state_and_queue.addr() == POISONED,
+ set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
+ };
+ init(&init_state);
+ waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
+ break;
+ }
+ _ => {
+ // All other values must be RUNNING with possibly a
+ // pointer to the waiter queue in the more significant bits.
+ assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
+ wait(&self.state_and_queue, state_and_queue);
+ state_and_queue = self.state_and_queue.load(Ordering::Acquire);
+ }
+ }
+ }
+ }
+}
+
+fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
+ // Note: the following code was carefully written to avoid creating a
+ // mutable reference to `node` that gets aliased.
+ loop {
+ // Don't queue this thread if the status is no longer running,
+ // otherwise we will not be woken up.
+ if current_state.addr() & STATE_MASK != RUNNING {
+ return;
+ }
+
+ // Create the node for our current thread.
+ let node = Waiter {
+ thread: Cell::new(Some(thread::current())),
+ signaled: AtomicBool::new(false),
+ next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
+ };
+ let me = &node as *const Waiter as *const Masked as *mut Masked;
+
+ // Try to slide in the node at the head of the linked list, making sure
+ // that another thread didn't just replace the head of the linked list.
+ let exchange_result = state_and_queue.compare_exchange(
+ current_state,
+ me.with_addr(me.addr() | RUNNING),
+ Ordering::Release,
+ Ordering::Relaxed,
+ );
+ if let Err(old) = exchange_result {
+ current_state = old;
+ continue;
+ }
+
+ // We have enqueued ourselves, now let's wait.
+ // It is important not to return before being signaled, otherwise we
+ // would drop our `Waiter` node and leave a hole in the linked list
+ // (and a dangling reference). Guard against spurious wakeups by
+ // reparking ourselves until we are signaled.
+ while !node.signaled.load(Ordering::Acquire) {
+ // If the managing thread happens to signal and unpark us before we
+ // can park ourselves, the result could be this thread never gets
+ // unparked. Luckily `park` comes with the guarantee that if it got
+ // an `unpark` just before on an unparked thread it does not park.
+ thread::park();
+ }
+ break;
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Once {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Once").finish_non_exhaustive()
+ }
+}
+
+impl Drop for WaiterQueue<'_> {
+ fn drop(&mut self) {
+ // Swap out our state with however we finished.
+ let state_and_queue =
+ self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
+
+ // We should only ever see an old state which was RUNNING.
+ assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
+
+ // Walk the entire linked list of waiters and wake them up (in lifo
+ // order, last to register is first to wake up).
+ unsafe {
+ // Right after setting `node.signaled = true` the other thread may
+ // free `node` if there happens to be a spurious wakeup.
+ // So we have to take out the `thread` field and copy the pointer to
+ // `next` first.
+ let mut queue =
+ state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
+ while !queue.is_null() {
+ let next = (*queue).next;
+ let thread = (*queue).thread.take().unwrap();
+ (*queue).signaled.store(true, Ordering::Release);
+ // ^- FIXME (maybe): This is another case of issue #55005
+ // `store()` has a potentially dangling ref to `signaled`.
+ queue = next;
+ thread.unpark();
+ }
+ }
+ }
+}
+
+impl OnceState {
+ /// Returns `true` if the associated [`Once`] was poisoned prior to the
+ /// invocation of the closure passed to [`Once::call_once_force()`].
+ ///
+ /// # Examples
+ ///
+ /// A poisoned [`Once`]:
+ ///
+ /// ```
+ /// use std::sync::Once;
+ /// use std::thread;
+ ///
+ /// static INIT: Once = Once::new();
+ ///
+ /// // poison the once
+ /// let handle = thread::spawn(|| {
+ /// INIT.call_once(|| panic!());
+ /// });
+ /// assert!(handle.join().is_err());
+ ///
+ /// INIT.call_once_force(|state| {
+ /// assert!(state.is_poisoned());
+ /// });
+ /// ```
+ ///
+ /// An unpoisoned [`Once`]:
+ ///
+ /// ```
+ /// use std::sync::Once;
+ ///
+ /// static INIT: Once = Once::new();
+ ///
+ /// INIT.call_once_force(|state| {
+ /// assert!(!state.is_poisoned());
+ /// });
+ /// ```
+ #[stable(feature = "once_poison", since = "1.51.0")]
+ pub fn is_poisoned(&self) -> bool {
+ self.poisoned
+ }
+
+ /// Poison the associated [`Once`] without explicitly panicking.
+ // NOTE: This is currently only exposed for the `lazy` module
+ pub(crate) fn poison(&self) {
+ self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
+ }
+}
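
The module comment in `once.rs` above describes packing the two-bit state into the low bits of a 4-aligned `Waiter` pointer. Below is a standalone sketch of that tagging idea using plain `usize` arithmetic (illustrative only; the real implementation goes through `AtomicPtr` and the strict-provenance `addr`/`with_addr` APIs):

```rust
const STATE_MASK: usize = 0b11; // low two bits hold the state
const RUNNING: usize = 0x2;

#[repr(align(4))] // guarantees the low two bits of any `&Waiter` address are zero
struct Waiter {
    signaled: bool,
}

fn pack(node: &Waiter, state: usize) -> usize {
    debug_assert_eq!(state & !STATE_MASK, 0);
    (node as *const Waiter as usize) | state
}

fn unpack(word: usize) -> (*const Waiter, usize) {
    ((word & !STATE_MASK) as *const Waiter, word & STATE_MASK)
}

fn main() {
    let node = Waiter { signaled: false };
    assert!(!node.signaled);

    let word = pack(&node, RUNNING);
    let (queue, state) = unpack(word);
    assert_eq!(state, RUNNING);
    assert_eq!(queue, &node as *const Waiter);
}
```
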
diff --git a/library/std/src/sync/once/tests.rs b/library/std/src/sync/once/tests.rs
new file mode 100644
index 000000000..0c35597e1
--- /dev/null
+++ b/library/std/src/sync/once/tests.rs
@@ -0,0 +1,116 @@
+use super::Once;
+use crate::panic;
+use crate::sync::mpsc::channel;
+use crate::thread;
+
+#[test]
+fn smoke_once() {
+ static O: Once = Once::new();
+ let mut a = 0;
+ O.call_once(|| a += 1);
+ assert_eq!(a, 1);
+ O.call_once(|| a += 1);
+ assert_eq!(a, 1);
+}
+
+#[test]
+fn stampede_once() {
+ static O: Once = Once::new();
+ static mut RUN: bool = false;
+
+ let (tx, rx) = channel();
+ for _ in 0..10 {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ for _ in 0..4 {
+ thread::yield_now()
+ }
+ unsafe {
+ O.call_once(|| {
+ assert!(!RUN);
+ RUN = true;
+ });
+ assert!(RUN);
+ }
+ tx.send(()).unwrap();
+ });
+ }
+
+ unsafe {
+ O.call_once(|| {
+ assert!(!RUN);
+ RUN = true;
+ });
+ assert!(RUN);
+ }
+
+ for _ in 0..10 {
+ rx.recv().unwrap();
+ }
+}
+
+#[test]
+fn poison_bad() {
+ static O: Once = Once::new();
+
+ // poison the once
+ let t = panic::catch_unwind(|| {
+ O.call_once(|| panic!());
+ });
+ assert!(t.is_err());
+
+ // poisoning propagates
+ let t = panic::catch_unwind(|| {
+ O.call_once(|| {});
+ });
+ assert!(t.is_err());
+
+ // we can subvert poisoning, however
+ let mut called = false;
+ O.call_once_force(|p| {
+ called = true;
+ assert!(p.is_poisoned())
+ });
+ assert!(called);
+
+ // once any success happens, we stop propagating the poison
+ O.call_once(|| {});
+}
+
+#[test]
+fn wait_for_force_to_finish() {
+ static O: Once = Once::new();
+
+ // poison the once
+ let t = panic::catch_unwind(|| {
+ O.call_once(|| panic!());
+ });
+ assert!(t.is_err());
+
+ // make sure someone's waiting inside the once via a force
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ let t1 = thread::spawn(move || {
+ O.call_once_force(|p| {
+ assert!(p.is_poisoned());
+ tx1.send(()).unwrap();
+ rx2.recv().unwrap();
+ });
+ });
+
+ rx1.recv().unwrap();
+
+ // put another waiter on the once
+ let t2 = thread::spawn(|| {
+ let mut called = false;
+ O.call_once(|| {
+ called = true;
+ });
+ assert!(!called);
+ });
+
+ tx2.send(()).unwrap();
+
+ assert!(t1.join().is_ok());
+ assert!(t2.join().is_ok());
+}
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
new file mode 100644
index 000000000..813516040
--- /dev/null
+++ b/library/std/src/sync/once_lock.rs
@@ -0,0 +1,496 @@
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::marker::PhantomData;
+use crate::mem::MaybeUninit;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::pin::Pin;
+use crate::sync::Once;
+
+/// A synchronization primitive which can be written to only once.
+///
+/// This type is a thread-safe `OnceCell`.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::sync::OnceLock;
+///
+/// static CELL: OnceLock<String> = OnceLock::new();
+/// assert!(CELL.get().is_none());
+///
+/// std::thread::spawn(|| {
+/// let value: &String = CELL.get_or_init(|| {
+/// "Hello, World!".to_string()
+/// });
+/// assert_eq!(value, "Hello, World!");
+/// }).join().unwrap();
+///
+/// let value: Option<&String> = CELL.get();
+/// assert!(value.is_some());
+/// assert_eq!(value.unwrap().as_str(), "Hello, World!");
+/// ```
+#[unstable(feature = "once_cell", issue = "74465")]
+pub struct OnceLock<T> {
+ once: Once,
+ // Whether or not the value is initialized is tracked by `state_and_queue`.
+ value: UnsafeCell<MaybeUninit<T>>,
+ /// `PhantomData` to make sure dropck understands we're dropping T in our Drop impl.
+ ///
+ /// ```compile_fail,E0597
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// struct A<'a>(&'a str);
+ ///
+ /// impl<'a> Drop for A<'a> {
+ /// fn drop(&mut self) {}
+ /// }
+ ///
+ /// let cell = OnceLock::new();
+ /// {
+ /// let s = String::new();
+ /// let _ = cell.set(A(&s));
+ /// }
+ /// ```
+ _marker: PhantomData<T>,
+}
+
+impl<T> OnceLock<T> {
+ /// Creates a new empty cell.
+ #[unstable(feature = "once_cell", issue = "74465")]
+ #[must_use]
+ pub const fn new() -> OnceLock<T> {
+ OnceLock {
+ once: Once::new(),
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Gets the reference to the underlying value.
+ ///
+ /// Returns `None` if the cell is empty, or being initialized. This
+ /// method never blocks.
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get(&self) -> Option<&T> {
+ if self.is_initialized() {
+ // Safe b/c checked is_initialized
+ Some(unsafe { self.get_unchecked() })
+ } else {
+ None
+ }
+ }
+
+ /// Gets the mutable reference to the underlying value.
+ ///
+ /// Returns `None` if the cell is empty. This method never blocks.
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get_mut(&mut self) -> Option<&mut T> {
+ if self.is_initialized() {
+ // Safe b/c checked is_initialized and we have a unique access
+ Some(unsafe { self.get_unchecked_mut() })
+ } else {
+ None
+ }
+ }
+
+ /// Sets the contents of this cell to `value`.
+ ///
+ /// May block if another thread is currently attempting to initialize the cell. The cell is
+ /// guaranteed to contain a value when set returns, though not necessarily the one provided.
+ ///
+ /// Returns `Ok(())` if the cell's value was set by this call.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// static CELL: OnceLock<i32> = OnceLock::new();
+ ///
+ /// fn main() {
+ /// assert!(CELL.get().is_none());
+ ///
+ /// std::thread::spawn(|| {
+ /// assert_eq!(CELL.set(92), Ok(()));
+ /// }).join().unwrap();
+ ///
+ /// assert_eq!(CELL.set(62), Err(62));
+ /// assert_eq!(CELL.get(), Some(&92));
+ /// }
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn set(&self, value: T) -> Result<(), T> {
+ let mut value = Some(value);
+ self.get_or_init(|| value.take().unwrap());
+ match value {
+ None => Ok(()),
+ Some(value) => Err(value),
+ }
+ }
+
+ /// Gets the contents of the cell, initializing it with `f` if the cell
+ /// was empty.
+ ///
+ /// Many threads may call `get_or_init` concurrently with different
+ /// initializing functions, but it is guaranteed that only one function
+ /// will be executed.
+ ///
+ /// # Panics
+ ///
+ /// If `f` panics, the panic is propagated to the caller, and the cell
+ /// remains uninitialized.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`. The
+ /// exact outcome is unspecified. Current implementation deadlocks, but
+ /// this may be changed to a panic in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// let cell = OnceLock::new();
+ /// let value = cell.get_or_init(|| 92);
+ /// assert_eq!(value, &92);
+ /// let value = cell.get_or_init(|| unreachable!());
+ /// assert_eq!(value, &92);
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get_or_init<F>(&self, f: F) -> &T
+ where
+ F: FnOnce() -> T,
+ {
+ match self.get_or_try_init(|| Ok::<T, !>(f())) {
+ Ok(val) => val,
+ }
+ }
+
+ /// Gets the contents of the cell, initializing it with `f` if
+ /// the cell was empty. If the cell was empty and `f` failed, an
+ /// error is returned.
+ ///
+ /// # Panics
+ ///
+ /// If `f` panics, the panic is propagated to the caller, and
+ /// the cell remains uninitialized.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`.
+ /// The exact outcome is unspecified. The current implementation
+ /// deadlocks, but this may be changed to a panic in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// let cell = OnceLock::new();
+ /// assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));
+ /// assert!(cell.get().is_none());
+ /// let value = cell.get_or_try_init(|| -> Result<i32, ()> {
+ /// Ok(92)
+ /// });
+ /// assert_eq!(value, Ok(&92));
+ /// assert_eq!(cell.get(), Some(&92))
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ // Fast path check
+ // NOTE: We need to perform an acquire on the state in this method
+ // in order to correctly synchronize `LazyLock::force`. This is
+ // currently done by calling `self.get()`, which in turn calls
+ // `self.is_initialized()`, which in turn performs the acquire.
+ if let Some(value) = self.get() {
+ return Ok(value);
+ }
+ self.initialize(f)?;
+
+ debug_assert!(self.is_initialized());
+
+ // SAFETY: The inner value has been initialized
+ Ok(unsafe { self.get_unchecked() })
+ }
+
+ /// Internal-only API that gets the contents of the cell, initializing it
+ /// in two steps with `f` and `g` if the cell was empty.
+ ///
+ /// `f` is called to construct the value, which is then moved into the cell
+ /// and given as a (pinned) mutable reference to `g` to finish
+ /// initialization.
+ ///
+ /// This allows `g` to inspect and manipulate the value after it has been
+ /// moved into its final place in the cell, but before the cell is
+ /// considered initialized.
+ ///
+ /// # Panics
+ ///
+ /// If `f` or `g` panics, the panic is propagated to the caller, and the
+ /// cell remains uninitialized.
+ ///
+ /// With the current implementation, if `g` panics, the value from `f` will
+ /// not be dropped. This should probably be fixed before this method is ever
+ /// used with a type for which that matters.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`. The exact
+ /// outcome is unspecified. The current implementation deadlocks, but this
+ /// may be changed to a panic in the future.
+ pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
+ where
+ F: FnOnce() -> T,
+ G: FnOnce(Pin<&mut T>),
+ {
+ if let Some(value) = self.get_ref().get() {
+ // SAFETY: The inner value was already initialized, and will not be
+ // moved anymore.
+ return unsafe { Pin::new_unchecked(value) };
+ }
+
+ let slot = &self.value;
+
+ // Ignore poisoning from other threads
+ // If another thread panics, then we'll be able to run our closure
+ self.once.call_once_force(|_| {
+ let value = f();
+ // SAFETY: We use the Once (self.once) to guarantee unique access
+ // to the UnsafeCell (slot).
+ let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
+ // SAFETY: The value has been written to its final place in
+ // self.value. We will not move it anymore, which we promise here
+ // with a Pin<&mut T>.
+ g(unsafe { Pin::new_unchecked(value) });
+ });
+
+ // SAFETY: The inner value has been initialized, and will not be moved
+ // anymore.
+ unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
+ }
+
+ /// Consumes the `OnceLock`, returning the wrapped value. Returns
+ /// `None` if the cell was empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// let cell: OnceLock<String> = OnceLock::new();
+ /// assert_eq!(cell.into_inner(), None);
+ ///
+ /// let cell = OnceLock::new();
+ /// cell.set("hello".to_string()).unwrap();
+ /// assert_eq!(cell.into_inner(), Some("hello".to_string()));
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn into_inner(mut self) -> Option<T> {
+ self.take()
+ }
+
+ /// Takes the value out of this `OnceLock`, moving it back to an uninitialized state.
+ ///
+ /// Has no effect and returns `None` if the `OnceLock` hasn't been initialized.
+ ///
+ /// Safety is guaranteed by requiring a mutable reference.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// let mut cell: OnceLock<String> = OnceLock::new();
+ /// assert_eq!(cell.take(), None);
+ ///
+ /// let mut cell = OnceLock::new();
+ /// cell.set("hello".to_string()).unwrap();
+ /// assert_eq!(cell.take(), Some("hello".to_string()));
+ /// assert_eq!(cell.get(), None);
+ /// ```
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub fn take(&mut self) -> Option<T> {
+ if self.is_initialized() {
+ self.once = Once::new();
+ // SAFETY: `self.value` is initialized and contains a valid `T`.
+ // `self.once` is reset, so `is_initialized()` will be false again
+ // which prevents the value from being read twice.
+ unsafe { Some((&mut *self.value.get()).assume_init_read()) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn is_initialized(&self) -> bool {
+ self.once.is_completed()
+ }
+
+ #[cold]
+ fn initialize<F, E>(&self, f: F) -> Result<(), E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ let mut res: Result<(), E> = Ok(());
+ let slot = &self.value;
+
+ // Ignore poisoning from other threads
+ // If another thread panics, then we'll be able to run our closure
+ self.once.call_once_force(|p| {
+ match f() {
+ Ok(value) => {
+ unsafe { (&mut *slot.get()).write(value) };
+ }
+ Err(e) => {
+ res = Err(e);
+
+ // Treat the underlying `Once` as poisoned since we failed to
+ // initialize our value; a later call can then retry initialization.
+ p.poison();
+ }
+ }
+ });
+ res
+ }
+
+ /// # Safety
+ ///
+ /// The value must be initialized
+ unsafe fn get_unchecked(&self) -> &T {
+ debug_assert!(self.is_initialized());
+ (&*self.value.get()).assume_init_ref()
+ }
+
+ /// # Safety
+ ///
+ /// The value must be initialized
+ unsafe fn get_unchecked_mut(&mut self) -> &mut T {
+ debug_assert!(self.is_initialized());
+ (&mut *self.value.get()).assume_init_mut()
+ }
+}
+
+// Why do we need `T: Send`?
+// Thread A creates a `OnceLock` and shares it with
+// scoped thread B, which fills the cell; the cell is
+// then destroyed by A. That is, the destructor observes
+// a sent value.
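+//
+// A minimal sketch of that scenario (illustrative only; `scope` is `std::thread::scope`
+// and the element type is arbitrary):
+//
+//     let cell: OnceLock<Vec<u8>> = OnceLock::new(); // owned by thread A
+//     thread::scope(|s| {
+//         s.spawn(|| { let _ = cell.set(vec![1, 2, 3]); }); // thread B sends a value in
+//     });
+//     drop(cell); // A runs the destructor of the value created by B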
+#[unstable(feature = "once_cell", issue = "74465")]
+unsafe impl<T: Sync + Send> Sync for OnceLock<T> {}
+#[unstable(feature = "once_cell", issue = "74465")]
+unsafe impl<T: Send> Send for OnceLock<T> {}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceLock<T> {}
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: UnwindSafe> UnwindSafe for OnceLock<T> {}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for OnceLock<T> {
+ /// Creates a new empty cell.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// fn main() {
+ /// assert_eq!(OnceLock::<()>::new(), OnceLock::default());
+ /// }
+ /// ```
+ fn default() -> OnceLock<T> {
+ OnceLock::new()
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.get() {
+ Some(v) => f.debug_tuple("Once").field(v).finish(),
+ None => f.write_str("Once(Uninit)"),
+ }
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Clone> Clone for OnceLock<T> {
+ fn clone(&self) -> OnceLock<T> {
+ let cell = Self::new();
+ if let Some(value) = self.get() {
+ match cell.set(value.clone()) {
+ Ok(()) => (),
+ Err(_) => unreachable!(),
+ }
+ }
+ cell
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T> From<T> for OnceLock<T> {
+ /// Create a new cell with its contents set to `value`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(once_cell)]
+ ///
+ /// use std::sync::OnceLock;
+ ///
+ /// # fn main() -> Result<(), i32> {
+ /// let a = OnceLock::from(3);
+ /// let b = OnceLock::new();
+ /// b.set(3)?;
+ /// assert_eq!(a, b);
+ /// Ok(())
+ /// # }
+ /// ```
+ fn from(value: T) -> Self {
+ let cell = Self::new();
+ match cell.set(value) {
+ Ok(()) => cell,
+ Err(_) => unreachable!(),
+ }
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: PartialEq> PartialEq for OnceLock<T> {
+ fn eq(&self, other: &OnceLock<T>) -> bool {
+ self.get() == other.get()
+ }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Eq> Eq for OnceLock<T> {}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+unsafe impl<#[may_dangle] T> Drop for OnceLock<T> {
+ fn drop(&mut self) {
+ if self.is_initialized() {
+ // SAFETY: The cell is initialized and being dropped, so it can't
+ // be accessed again. We also don't touch the `T` other than
+ // dropping it, which validates our usage of #[may_dangle].
+ unsafe { (&mut *self.value.get()).assume_init_drop() };
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/sync/once_lock/tests.rs b/library/std/src/sync/once_lock/tests.rs
new file mode 100644
index 000000000..46695225b
--- /dev/null
+++ b/library/std/src/sync/once_lock/tests.rs
@@ -0,0 +1,203 @@
+use crate::{
+ panic,
+ sync::OnceLock,
+ sync::{
+ atomic::{AtomicUsize, Ordering::SeqCst},
+ mpsc::channel,
+ },
+ thread,
+};
+
+fn spawn_and_wait<R: Send + 'static>(f: impl FnOnce() -> R + Send + 'static) -> R {
+ thread::spawn(f).join().unwrap()
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn sync_once_cell() {
+ static ONCE_CELL: OnceLock<i32> = OnceLock::new();
+
+ assert!(ONCE_CELL.get().is_none());
+
+ spawn_and_wait(|| {
+ ONCE_CELL.get_or_init(|| 92);
+ assert_eq!(ONCE_CELL.get(), Some(&92));
+ });
+
+ ONCE_CELL.get_or_init(|| panic!("Kabom!"));
+ assert_eq!(ONCE_CELL.get(), Some(&92));
+}
+
+#[test]
+fn sync_once_cell_get_mut() {
+ let mut c = OnceLock::new();
+ assert!(c.get_mut().is_none());
+ c.set(90).unwrap();
+ *c.get_mut().unwrap() += 2;
+ assert_eq!(c.get_mut(), Some(&mut 92));
+}
+
+#[test]
+fn sync_once_cell_get_unchecked() {
+ let c = OnceLock::new();
+ c.set(92).unwrap();
+ unsafe {
+ assert_eq!(c.get_unchecked(), &92);
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn sync_once_cell_drop() {
+ static DROP_CNT: AtomicUsize = AtomicUsize::new(0);
+ struct Dropper;
+ impl Drop for Dropper {
+ fn drop(&mut self) {
+ DROP_CNT.fetch_add(1, SeqCst);
+ }
+ }
+
+ let x = OnceLock::new();
+ spawn_and_wait(move || {
+ x.get_or_init(|| Dropper);
+ assert_eq!(DROP_CNT.load(SeqCst), 0);
+ drop(x);
+ });
+
+ assert_eq!(DROP_CNT.load(SeqCst), 1);
+}
+
+#[test]
+fn sync_once_cell_drop_empty() {
+ let x = OnceLock::<String>::new();
+ drop(x);
+}
+
+#[test]
+fn clone() {
+ let s = OnceLock::new();
+ let c = s.clone();
+ assert!(c.get().is_none());
+
+ s.set("hello".to_string()).unwrap();
+ let c = s.clone();
+ assert_eq!(c.get().map(String::as_str), Some("hello"));
+}
+
+#[test]
+fn get_or_try_init() {
+ let cell: OnceLock<String> = OnceLock::new();
+ assert!(cell.get().is_none());
+
+ let res = panic::catch_unwind(|| cell.get_or_try_init(|| -> Result<_, ()> { panic!() }));
+ assert!(res.is_err());
+ assert!(!cell.is_initialized());
+ assert!(cell.get().is_none());
+
+ assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));
+
+ assert_eq!(cell.get_or_try_init(|| Ok::<_, ()>("hello".to_string())), Ok(&"hello".to_string()));
+ assert_eq!(cell.get(), Some(&"hello".to_string()));
+}
+
+#[test]
+fn from_impl() {
+ assert_eq!(OnceLock::from("value").get(), Some(&"value"));
+ assert_ne!(OnceLock::from("foo").get(), Some(&"bar"));
+}
+
+#[test]
+fn partialeq_impl() {
+ assert!(OnceLock::from("value") == OnceLock::from("value"));
+ assert!(OnceLock::from("foo") != OnceLock::from("bar"));
+
+ assert!(OnceLock::<String>::new() == OnceLock::new());
+ assert!(OnceLock::<String>::new() != OnceLock::from("value".to_owned()));
+}
+
+#[test]
+fn into_inner() {
+ let cell: OnceLock<String> = OnceLock::new();
+ assert_eq!(cell.into_inner(), None);
+ let cell = OnceLock::new();
+ cell.set("hello".to_string()).unwrap();
+ assert_eq!(cell.into_inner(), Some("hello".to_string()));
+}
+
+#[test]
+fn is_sync_send() {
+ fn assert_traits<T: Send + Sync>() {}
+ assert_traits::<OnceLock<String>>();
+}
+
+#[test]
+fn eval_once_macro() {
+ macro_rules! eval_once {
+ (|| -> $ty:ty {
+ $($body:tt)*
+ }) => {{
+ static ONCE_CELL: OnceLock<$ty> = OnceLock::new();
+ fn init() -> $ty {
+ $($body)*
+ }
+ ONCE_CELL.get_or_init(init)
+ }};
+ }
+
+ let fib: &'static Vec<i32> = eval_once! {
+ || -> Vec<i32> {
+ let mut res = vec![1, 1];
+ for i in 0..10 {
+ let next = res[i] + res[i + 1];
+ res.push(next);
+ }
+ res
+ }
+ };
+ assert_eq!(fib[5], 8)
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn sync_once_cell_does_not_leak_partially_constructed_boxes() {
+ static ONCE_CELL: OnceLock<String> = OnceLock::new();
+
+ let n_readers = 10;
+ let n_writers = 3;
+ const MSG: &str = "Hello, World";
+
+ let (tx, rx) = channel();
+
+ for _ in 0..n_readers {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ loop {
+ if let Some(msg) = ONCE_CELL.get() {
+ tx.send(msg).unwrap();
+ break;
+ }
+ #[cfg(target_env = "sgx")]
+ crate::thread::yield_now();
+ }
+ });
+ }
+ for _ in 0..n_writers {
+ thread::spawn(move || {
+ let _ = ONCE_CELL.set(MSG.to_owned());
+ });
+ }
+
+ for _ in 0..n_readers {
+ let msg = rx.recv().unwrap();
+ assert_eq!(msg, MSG);
+ }
+}
+
+#[test]
+fn dropck() {
+ let cell = OnceLock::new();
+ {
+ let s = String::new();
+ cell.set(&s).unwrap();
+ }
+}
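+
+// A small illustrative test for the crate-internal two-step initializer
+// `get_or_init_pin`: `f` constructs the value, then `g` may adjust it once it sits at
+// its final address inside the cell. The test name and values are only for
+// illustration.
+#[test]
+fn get_or_init_pin_two_step() {
+ let cell = Box::pin(OnceLock::new());
+ let value = cell.as_ref().get_or_init_pin(|| vec![1, 2], |mut v| v.push(3));
+ assert_eq!(&*value, &[1, 2, 3]);
+}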
diff --git a/library/std/src/sync/poison.rs b/library/std/src/sync/poison.rs
new file mode 100644
index 000000000..741312d55
--- /dev/null
+++ b/library/std/src/sync/poison.rs
@@ -0,0 +1,272 @@
+use crate::error::Error;
+use crate::fmt;
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::thread;
+
+pub struct Flag {
+ failed: AtomicBool,
+}
+
+// Note that the Ordering used to access the `failed` field of `Flag` below is
+// always `Relaxed`, and that's because this isn't actually protecting any data;
+// it's just a flag recording whether we've panicked or not.
+//
+// The place where this actually matters is when a mutex is **locked**, which is
+// where we have external synchronization ensuring that we see memory
+// reads/writes to this flag.
+//
+// As a result, if it matters, we should see the correct value for `failed` in
+// all cases.
+
+impl Flag {
+ #[inline]
+ pub const fn new() -> Flag {
+ Flag { failed: AtomicBool::new(false) }
+ }
+
+ /// Check the flag for an unguarded borrow, where we only care about existing poison.
+ #[inline]
+ pub fn borrow(&self) -> LockResult<()> {
+ if self.get() { Err(PoisonError::new(())) } else { Ok(()) }
+ }
+
+ /// Check the flag for a guarded borrow, where we may also set poison when `done`.
+ #[inline]
+ pub fn guard(&self) -> LockResult<Guard> {
+ let ret = Guard { panicking: thread::panicking() };
+ if self.get() { Err(PoisonError::new(ret)) } else { Ok(ret) }
+ }
+
+ #[inline]
+ pub fn done(&self, guard: &Guard) {
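+ // Poison only if a new panic started while the guard was held: if the thread
+ // was already panicking when the guard was created, that panic did not happen
+ // under this lock.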
+ if !guard.panicking && thread::panicking() {
+ self.failed.store(true, Ordering::Relaxed);
+ }
+ }
+
+ #[inline]
+ pub fn get(&self) -> bool {
+ self.failed.load(Ordering::Relaxed)
+ }
+
+ #[inline]
+ pub fn clear(&self) {
+ self.failed.store(false, Ordering::Relaxed)
+ }
+}
+
+pub struct Guard {
+ panicking: bool,
+}
+
+/// A type of error which can be returned whenever a lock is acquired.
+///
+/// Both [`Mutex`]es and [`RwLock`]s are poisoned whenever a thread fails while the lock
+/// is held. The precise semantics for when a lock is poisoned is documented on
+/// each lock, but once a lock is poisoned then all future acquisitions will
+/// return this error.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::{Arc, Mutex};
+/// use std::thread;
+///
+/// let mutex = Arc::new(Mutex::new(1));
+///
+/// // poison the mutex
+/// let c_mutex = Arc::clone(&mutex);
+/// let _ = thread::spawn(move || {
+/// let mut data = c_mutex.lock().unwrap();
+/// *data = 2;
+/// panic!();
+/// }).join();
+///
+/// match mutex.lock() {
+/// Ok(_) => unreachable!(),
+/// Err(p_err) => {
+/// let data = p_err.get_ref();
+/// println!("recovered: {data}");
+/// }
+/// };
+/// ```
+/// [`Mutex`]: crate::sync::Mutex
+/// [`RwLock`]: crate::sync::RwLock
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct PoisonError<T> {
+ guard: T,
+}
+
+/// An enumeration of possible errors associated with a [`TryLockResult`] which
+/// can occur while trying to acquire a lock, from the [`try_lock`] method on a
+/// [`Mutex`] or the [`try_read`] and [`try_write`] methods on an [`RwLock`].
+///
+/// [`try_lock`]: crate::sync::Mutex::try_lock
+/// [`try_read`]: crate::sync::RwLock::try_read
+/// [`try_write`]: crate::sync::RwLock::try_write
+/// [`Mutex`]: crate::sync::Mutex
+/// [`RwLock`]: crate::sync::RwLock
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum TryLockError<T> {
+ /// The lock could not be acquired because another thread failed while holding
+ /// the lock.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Poisoned(#[stable(feature = "rust1", since = "1.0.0")] PoisonError<T>),
+ /// The lock could not be acquired at this time because the operation would
+ /// otherwise block.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ WouldBlock,
+}
+
+/// A type alias for the result of a lock method which can be poisoned.
+///
+/// The [`Ok`] variant of this result indicates that the primitive was not
+/// poisoned, and the `Guard` is contained within. The [`Err`] variant indicates
+/// that the primitive was poisoned. Note that the [`Err`] variant *also* carries
+/// the associated guard, and it can be acquired through the [`into_inner`]
+/// method.
+///
+/// [`into_inner`]: PoisonError::into_inner
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>;
+
+/// A type alias for the result of a nonblocking locking method.
+///
+/// For more information, see [`LockResult`]. A `TryLockResult` doesn't
+/// necessarily hold the associated guard in the [`Err`] type as the lock might not
+/// have been acquired for other reasons.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
+
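+// A typical caller-side shape for these result types; `shared` stands in for some
+// `Mutex` value visible to the caller (illustrative only; `RwLock`'s `try_read` and
+// `try_write` are handled the same way):
+//
+//     match shared.try_lock() {
+//         Ok(guard) => { /* use the data behind `guard` */ }
+//         Err(TryLockError::WouldBlock) => { /* lock is busy; try again later */ }
+//         Err(TryLockError::Poisoned(err)) => {
+//             // Recover the guard, and with it the data, despite the poison.
+//             let _guard = err.into_inner();
+//         }
+//     }
+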
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Debug for PoisonError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PoisonError").finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Display for PoisonError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "poisoned lock: another task failed inside".fmt(f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Error for PoisonError<T> {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "poisoned lock: another task failed inside"
+ }
+}
+
+impl<T> PoisonError<T> {
+ /// Creates a `PoisonError`.
+ ///
+ /// This is generally created by methods like [`Mutex::lock`](crate::sync::Mutex::lock)
+ /// or [`RwLock::read`](crate::sync::RwLock::read).
+ #[stable(feature = "sync_poison", since = "1.2.0")]
+ pub fn new(guard: T) -> PoisonError<T> {
+ PoisonError { guard }
+ }
+
+ /// Consumes this error indicating that a lock is poisoned, returning the
+ /// underlying guard to allow access regardless.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::sync::{Arc, Mutex};
+ /// use std::thread;
+ ///
+ /// let mutex = Arc::new(Mutex::new(HashSet::new()));
+ ///
+ /// // poison the mutex
+ /// let c_mutex = Arc::clone(&mutex);
+ /// let _ = thread::spawn(move || {
+ /// let mut data = c_mutex.lock().unwrap();
+ /// data.insert(10);
+ /// panic!();
+ /// }).join();
+ ///
+ /// let p_err = mutex.lock().unwrap_err();
+ /// let data = p_err.into_inner();
+ /// println!("recovered {} items", data.len());
+ /// ```
+ #[stable(feature = "sync_poison", since = "1.2.0")]
+ pub fn into_inner(self) -> T {
+ self.guard
+ }
+
+ /// Reaches into this error indicating that a lock is poisoned, returning a
+ /// reference to the underlying guard to allow access regardless.
+ #[stable(feature = "sync_poison", since = "1.2.0")]
+ pub fn get_ref(&self) -> &T {
+ &self.guard
+ }
+
+ /// Reaches into this error indicating that a lock is poisoned, returning a
+ /// mutable reference to the underlying guard to allow access regardless.
+ #[stable(feature = "sync_poison", since = "1.2.0")]
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.guard
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> From<PoisonError<T>> for TryLockError<T> {
+ fn from(err: PoisonError<T>) -> TryLockError<T> {
+ TryLockError::Poisoned(err)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Debug for TryLockError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f),
+ TryLockError::WouldBlock => "WouldBlock".fmt(f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Display for TryLockError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ TryLockError::Poisoned(..) => "poisoned lock: another task failed inside",
+ TryLockError::WouldBlock => "try_lock failed because the operation would block",
+ }
+ .fmt(f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Error for TryLockError<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ match *self {
+ TryLockError::Poisoned(ref p) => p.description(),
+ TryLockError::WouldBlock => "try_lock failed because the operation would block",
+ }
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn Error> {
+ match *self {
+ TryLockError::Poisoned(ref p) => Some(p),
+ _ => None,
+ }
+ }
+}
+
+pub fn map_result<T, U, F>(result: LockResult<T>, f: F) -> LockResult<U>
+where
+ F: FnOnce(T) -> U,
+{
+ match result {
+ Ok(t) => Ok(f(t)),
+ Err(PoisonError { guard }) => Err(PoisonError::new(f(guard))),
+ }
+}
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
new file mode 100644
index 000000000..6e4a2cfc8
--- /dev/null
+++ b/library/std/src/sync/rwlock.rs
@@ -0,0 +1,615 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::ops::{Deref, DerefMut};
+use crate::ptr::NonNull;
+use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
+use crate::sys_common::rwlock as sys;
+
+/// A reader-writer lock
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// In comparison, a [`Mutex`] does not distinguish between readers or writers
+/// that acquire the lock, therefore blocking any threads waiting for the lock to
+/// become available. An `RwLock` will allow any number of readers to acquire the
+/// lock as long as a writer is not holding the lock.
+///
+/// The priority policy of the lock is dependent on the underlying operating
+/// system's implementation, and this type does not guarantee that any
+/// particular policy will be used. In particular, a writer which is waiting to
+/// acquire the lock in `write` might or might not block concurrent calls to
+/// `read`, e.g.:
+///
+/// <details><summary>Potential deadlock example</summary>
+///
+/// ```text
+/// // Thread 1 | // Thread 2
+/// let _rg = lock.read(); |
+/// | // will block
+/// | let _wg = lock.write();
+/// // may deadlock |
+/// let _rg = lock.read(); |
+/// ```
+/// </details>
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies [`Send`] to be shared across threads and
+/// [`Sync`] to allow concurrent access through readers. The RAII guards
+/// returned from the locking methods implement [`Deref`] (and [`DerefMut`]
+/// for the `write` methods) to allow access to the content of the lock.
+///
+/// # Poisoning
+///
+/// An `RwLock`, like [`Mutex`], will become poisoned on a panic. Note, however,
+/// that an `RwLock` may only be poisoned if a panic occurs while it is locked
+/// exclusively (write mode). If a panic occurs in any reader, then the lock
+/// will not be poisoned.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::RwLock;
+///
+/// let lock = RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+/// let r1 = lock.read().unwrap();
+/// let r2 = lock.read().unwrap();
+/// assert_eq!(*r1, 5);
+/// assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+/// let mut w = lock.write().unwrap();
+/// *w += 1;
+/// assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// ```
+///
+/// [`Mutex`]: super::Mutex
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RwLock<T: ?Sized> {
+ inner: sys::MovableRwLock,
+ poison: poison::Flag,
+ data: UnsafeCell<T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`read`] and [`try_read`] methods on
+/// [`RwLock`].
+///
+/// [`read`]: RwLock::read
+/// [`try_read`]: RwLock::try_read
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a RwLockReadGuard across suspend \
+ points can cause deadlocks, delays, \
+ and cause Futures to not implement `Send`"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[clippy::has_significant_drop]
+pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
+ // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
+ // `Ref` argument doesn't hold immutability for its whole scope, only until it drops.
+ // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
+ // is preferable over `*const T` to allow for niche optimization.
+ data: NonNull<T>,
+ inner_lock: &'a sys::MovableRwLock,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Send for RwLockReadGuard<'_, T> {}
+
+#[stable(feature = "rwlock_guard_sync", since = "1.23.0")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write`] and [`try_write`] methods
+/// on [`RwLock`].
+///
+/// [`write`]: RwLock::write
+/// [`try_write`]: RwLock::try_write
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a RwLockWriteGuard across suspend \
+ points can cause deadlocks, delays, \
+ and cause Futures to not implement `Send`"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[clippy::has_significant_drop]
+pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
+ lock: &'a RwLock<T>,
+ poison: poison::Guard,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Send for RwLockWriteGuard<'_, T> {}
+
+#[stable(feature = "rwlock_guard_sync", since = "1.23.0")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
+
+impl<T> RwLock<T> {
+ /// Creates a new instance of an `RwLock<T>` which is unlocked.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(5);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
+ #[inline]
+ pub const fn new(t: T) -> RwLock<T> {
+ RwLock {
+ inner: sys::MovableRwLock::new(),
+ poison: poison::Flag::new(),
+ data: UnsafeCell::new(t),
+ }
+ }
+}
+
+impl<T: ?Sized> RwLock<T> {
+ /// Locks this rwlock with shared read access, blocking the current thread
+ /// until it can be acquired.
+ ///
+ /// The calling thread will be blocked until there are no more writers which
+ /// hold the lock. There may be other readers currently inside the lock when
+ /// this method returns. This method does not provide any guarantees with
+ /// respect to the ordering of whether contentious readers or writers will
+ /// acquire the lock first.
+ ///
+ /// Returns an RAII guard which will release this thread's shared access
+ /// once it is dropped.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the RwLock is poisoned. An RwLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock.
+ /// The failure will occur immediately after the lock has been acquired.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic when called if the lock is already held by the current thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, RwLock};
+ /// use std::thread;
+ ///
+ /// let lock = Arc::new(RwLock::new(1));
+ /// let c_lock = Arc::clone(&lock);
+ ///
+ /// let n = lock.read().unwrap();
+ /// assert_eq!(*n, 1);
+ ///
+ /// thread::spawn(move || {
+ /// let r = c_lock.read();
+ /// assert!(r.is_ok());
+ /// }).join().unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
+ unsafe {
+ self.inner.read();
+ RwLockReadGuard::new(self)
+ }
+ }
+
+ /// Attempts to acquire this rwlock with shared read access.
+ ///
+ /// If the access could not be granted at this time, then `Err` is returned.
+ /// Otherwise, an RAII guard is returned which will release the shared access
+ /// when it is dropped.
+ ///
+ /// This function does not block.
+ ///
+ /// This function does not provide any guarantees with respect to the ordering
+ /// of whether contentious readers or writers will acquire the lock first.
+ ///
+ /// # Errors
+ ///
+ /// This function will return the [`Poisoned`] error if the RwLock is poisoned.
+ /// An RwLock is poisoned whenever a writer panics while holding an exclusive
+ /// lock. `Poisoned` will only be returned if the lock would have otherwise been
+ /// acquired.
+ ///
+ /// This function will return the [`WouldBlock`] error if the RwLock could not
+ /// be acquired because it was already locked exclusively.
+ ///
+ /// [`Poisoned`]: TryLockError::Poisoned
+ /// [`WouldBlock`]: TryLockError::WouldBlock
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(1);
+ ///
+ /// match lock.try_read() {
+ /// Ok(n) => assert_eq!(*n, 1),
+ /// Err(_) => unreachable!(),
+ /// };
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
+ unsafe {
+ if self.inner.try_read() {
+ Ok(RwLockReadGuard::new(self)?)
+ } else {
+ Err(TryLockError::WouldBlock)
+ }
+ }
+ }
+
+ /// Locks this rwlock with exclusive write access, blocking the current
+ /// thread until it can be acquired.
+ ///
+ /// This function will not return while other writers or other readers
+ /// currently have access to the lock.
+ ///
+ /// Returns an RAII guard which will drop the write access of this rwlock
+ /// when dropped.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the RwLock is poisoned. An RwLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock.
+ /// An error will be returned when the lock is acquired.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic when called if the lock is already held by the current thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(1);
+ ///
+ /// let mut n = lock.write().unwrap();
+ /// *n = 2;
+ ///
+ /// assert!(lock.try_read().is_err());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
+ unsafe {
+ self.inner.write();
+ RwLockWriteGuard::new(self)
+ }
+ }
+
+ /// Attempts to lock this rwlock with exclusive write access.
+ ///
+ /// If the lock could not be acquired at this time, then `Err` is returned.
+ /// Otherwise, an RAII guard is returned which will release the lock when
+ /// it is dropped.
+ ///
+ /// This function does not block.
+ ///
+ /// This function does not provide any guarantees with respect to the ordering
+ /// of whether contentious readers or writers will acquire the lock first.
+ ///
+ /// # Errors
+ ///
+ /// This function will return the [`Poisoned`] error if the RwLock is
+ /// poisoned. An RwLock is poisoned whenever a writer panics while holding
+ /// an exclusive lock. `Poisoned` will only be returned if the lock would have
+ /// otherwise been acquired.
+ ///
+ /// This function will return the [`WouldBlock`] error if the RwLock could not
+ /// be acquired because it was already locked exclusively.
+ ///
+ /// [`Poisoned`]: TryLockError::Poisoned
+ /// [`WouldBlock`]: TryLockError::WouldBlock
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(1);
+ ///
+ /// let n = lock.read().unwrap();
+ /// assert_eq!(*n, 1);
+ ///
+ /// assert!(lock.try_write().is_err());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
+ unsafe {
+ if self.inner.try_write() {
+ Ok(RwLockWriteGuard::new(self)?)
+ } else {
+ Err(TryLockError::WouldBlock)
+ }
+ }
+ }
+
+ /// Determines whether the lock is poisoned.
+ ///
+ /// If another thread is active, the lock can still become poisoned at any
+ /// time. You should not trust a `false` value for program correctness
+ /// without additional synchronization.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, RwLock};
+ /// use std::thread;
+ ///
+ /// let lock = Arc::new(RwLock::new(0));
+ /// let c_lock = Arc::clone(&lock);
+ ///
+ /// let _ = thread::spawn(move || {
+ /// let _lock = c_lock.write().unwrap();
+ /// panic!(); // the lock gets poisoned
+ /// }).join();
+ /// assert_eq!(lock.is_poisoned(), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "sync_poison", since = "1.2.0")]
+ pub fn is_poisoned(&self) -> bool {
+ self.poison.get()
+ }
+
+ /// Clears the poisoned state from a lock.
+ ///
+ /// If the lock is poisoned, it will remain poisoned until this function is called. This allows
+ /// recovering from a poisoned state and marking that it has recovered. For example, if the
+ /// value is overwritten by a known-good value, then the lock can be marked as un-poisoned. Or
+ /// possibly, the value could be inspected to determine if it is in a consistent state, and if
+ /// so the poison is removed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(mutex_unpoison)]
+ ///
+ /// use std::sync::{Arc, RwLock};
+ /// use std::thread;
+ ///
+ /// let lock = Arc::new(RwLock::new(0));
+ /// let c_lock = Arc::clone(&lock);
+ ///
+ /// let _ = thread::spawn(move || {
+ /// let _lock = c_lock.write().unwrap();
+ /// panic!(); // the lock gets poisoned
+ /// }).join();
+ ///
+ /// assert_eq!(lock.is_poisoned(), true);
+ /// let guard = lock.write().unwrap_or_else(|mut e| {
+ /// **e.get_mut() = 1;
+ /// lock.clear_poison();
+ /// e.into_inner()
+ /// });
+ /// assert_eq!(lock.is_poisoned(), false);
+ /// assert_eq!(*guard, 1);
+ /// ```
+ #[inline]
+ #[unstable(feature = "mutex_unpoison", issue = "96469")]
+ pub fn clear_poison(&self) {
+ self.poison.clear();
+ }
+
+ /// Consumes this `RwLock`, returning the underlying data.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the RwLock is poisoned. An RwLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock. An
+ /// error will only be returned if the lock would have otherwise been
+ /// acquired.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(String::new());
+ /// {
+ /// let mut s = lock.write().unwrap();
+ /// *s = "modified".to_owned();
+ /// }
+ /// assert_eq!(lock.into_inner().unwrap(), "modified");
+ /// ```
+ #[stable(feature = "rwlock_into_inner", since = "1.6.0")]
+ pub fn into_inner(self) -> LockResult<T>
+ where
+ T: Sized,
+ {
+ let data = self.data.into_inner();
+ poison::map_result(self.poison.borrow(), |()| data)
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+ /// take place -- the mutable borrow statically guarantees no locks exist.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the RwLock is poisoned. An RwLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock. An
+ /// error will only be returned if the lock would have otherwise been
+ /// acquired.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::RwLock;
+ ///
+ /// let mut lock = RwLock::new(0);
+ /// *lock.get_mut().unwrap() = 10;
+ /// assert_eq!(*lock.read().unwrap(), 10);
+ /// ```
+ #[stable(feature = "rwlock_get_mut", since = "1.6.0")]
+ pub fn get_mut(&mut self) -> LockResult<&mut T> {
+ let data = self.data.get_mut();
+ poison::map_result(self.poison.borrow(), |()| data)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut d = f.debug_struct("RwLock");
+ match self.try_read() {
+ Ok(guard) => {
+ d.field("data", &&*guard);
+ }
+ Err(TryLockError::Poisoned(err)) => {
+ d.field("data", &&**err.get_ref());
+ }
+ Err(TryLockError::WouldBlock) => {
+ struct LockedPlaceholder;
+ impl fmt::Debug for LockedPlaceholder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("<locked>")
+ }
+ }
+ d.field("data", &LockedPlaceholder);
+ }
+ }
+ d.field("poisoned", &self.poison.get());
+ d.finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "rw_lock_default", since = "1.10.0")]
+impl<T: Default> Default for RwLock<T> {
+ /// Creates a new `RwLock<T>`, with the `Default` value for `T`.
+ fn default() -> RwLock<T> {
+ RwLock::new(Default::default())
+ }
+}
+
+#[stable(feature = "rw_lock_from", since = "1.24.0")]
+impl<T> From<T> for RwLock<T> {
+ /// Creates a new instance of an `RwLock<T>` which is unlocked.
+ /// This is equivalent to [`RwLock::new`].
+ fn from(t: T) -> Self {
+ RwLock::new(t)
+ }
+}
+
+impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+ /// Create a new instance of `RwLockReadGuard<T>` from a `RwLock<T>`.
+ // SAFETY: if and only if `lock.inner.read()` (or `lock.inner.try_read()`) has been
+ // successfully called from the same thread before instantiating this object.
+ unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockReadGuard<'rwlock, T>> {
+ poison::map_result(lock.poison.borrow(), |()| RwLockReadGuard {
+ data: NonNull::new_unchecked(lock.data.get()),
+ inner_lock: &lock.inner,
+ })
+ }
+}
+
+impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+ /// Create a new instance of `RwLockWriteGuard<T>` from a `RwLock<T>`.
+ // SAFETY: if and only if `lock.inner.write()` (or `lock.inner.try_write()`) has been
+ // successfully called from the same thread before instantiating this object.
+ unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
+ poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard })
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+ unsafe { self.data.as_ref() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+ unsafe {
+ self.inner_lock.read_unlock();
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
+ fn drop(&mut self) {
+ self.lock.poison.done(&self.poison);
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe {
+ self.lock.inner.write_unlock();
+ }
+ }
+}
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
new file mode 100644
index 000000000..08255c985
--- /dev/null
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -0,0 +1,259 @@
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::mpsc::channel;
+use crate::sync::{Arc, RwLock, RwLockReadGuard, TryLockError};
+use crate::thread;
+use rand::{self, Rng};
+
+#[derive(Eq, PartialEq, Debug)]
+struct NonCopy(i32);
+
+#[test]
+fn smoke() {
+ let l = RwLock::new(());
+ drop(l.read().unwrap());
+ drop(l.write().unwrap());
+ drop((l.read().unwrap(), l.read().unwrap()));
+ drop(l.write().unwrap());
+}
+
+#[test]
+fn frob() {
+ const N: u32 = 10;
+ const M: usize = 1000;
+
+ let r = Arc::new(RwLock::new(()));
+
+ let (tx, rx) = channel::<()>();
+ for _ in 0..N {
+ let tx = tx.clone();
+ let r = r.clone();
+ thread::spawn(move || {
+ let mut rng = rand::thread_rng();
+ for _ in 0..M {
+ if rng.gen_bool(1.0 / (N as f64)) {
+ drop(r.write().unwrap());
+ } else {
+ drop(r.read().unwrap());
+ }
+ }
+ drop(tx);
+ });
+ }
+ drop(tx);
+ let _ = rx.recv();
+}
+
+#[test]
+fn test_rw_arc_poison_wr() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.write().unwrap();
+ panic!();
+ })
+ .join();
+ assert!(arc.read().is_err());
+}
+
+#[test]
+fn test_rw_arc_poison_ww() {
+ let arc = Arc::new(RwLock::new(1));
+ assert!(!arc.is_poisoned());
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.write().unwrap();
+ panic!();
+ })
+ .join();
+ assert!(arc.write().is_err());
+ assert!(arc.is_poisoned());
+}
+
+#[test]
+fn test_rw_arc_no_poison_rr() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.read().unwrap();
+ panic!();
+ })
+ .join();
+ let lock = arc.read().unwrap();
+ assert_eq!(*lock, 1);
+}
+#[test]
+fn test_rw_arc_no_poison_rw() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.read().unwrap();
+ panic!()
+ })
+ .join();
+ let lock = arc.write().unwrap();
+ assert_eq!(*lock, 1);
+}
+
+#[test]
+fn test_rw_arc() {
+ let arc = Arc::new(RwLock::new(0));
+ let arc2 = arc.clone();
+ let (tx, rx) = channel();
+
+ thread::spawn(move || {
+ let mut lock = arc2.write().unwrap();
+ for _ in 0..10 {
+ let tmp = *lock;
+ *lock = -1;
+ thread::yield_now();
+ *lock = tmp + 1;
+ }
+ tx.send(()).unwrap();
+ });
+
+ // Readers try to catch the writer in the act
+ let mut children = Vec::new();
+ for _ in 0..5 {
+ let arc3 = arc.clone();
+ children.push(thread::spawn(move || {
+ let lock = arc3.read().unwrap();
+ assert!(*lock >= 0);
+ }));
+ }
+
+ // Wait for children to pass their asserts
+ for r in children {
+ assert!(r.join().is_ok());
+ }
+
+ // Wait for writer to finish
+ rx.recv().unwrap();
+ let lock = arc.read().unwrap();
+ assert_eq!(*lock, 10);
+}
+
+#[test]
+fn test_rw_arc_access_in_unwind() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _ = thread::spawn(move || -> () {
+ struct Unwinder {
+ i: Arc<RwLock<isize>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ let mut lock = self.i.write().unwrap();
+ *lock += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ })
+ .join();
+ let lock = arc.read().unwrap();
+ assert_eq!(*lock, 2);
+}
+
+#[test]
+fn test_rwlock_unsized() {
+ let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+ {
+ let b = &mut *rw.write().unwrap();
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &[i32] = &[4, 2, 5];
+ assert_eq!(&*rw.read().unwrap(), comp);
+}
+
+#[test]
+fn test_rwlock_try_write() {
+ let lock = RwLock::new(0isize);
+ let read_guard = lock.read().unwrap();
+
+ let write_result = lock.try_write();
+ match write_result {
+ Err(TryLockError::WouldBlock) => (),
+ Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
+ Err(_) => assert!(false, "unexpected error"),
+ }
+
+ drop(read_guard);
+}
+
+#[test]
+fn test_into_inner() {
+ let m = RwLock::new(NonCopy(10));
+ assert_eq!(m.into_inner().unwrap(), NonCopy(10));
+}
+
+#[test]
+fn test_into_inner_drop() {
+ struct Foo(Arc<AtomicUsize>);
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+ let num_drops = Arc::new(AtomicUsize::new(0));
+ let m = RwLock::new(Foo(num_drops.clone()));
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ {
+ let _inner = m.into_inner().unwrap();
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ }
+ assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+}
+
+#[test]
+fn test_into_inner_poison() {
+ let m = Arc::new(RwLock::new(NonCopy(10)));
+ let m2 = m.clone();
+ let _ = thread::spawn(move || {
+ let _lock = m2.write().unwrap();
+ panic!("test panic in inner thread to poison RwLock");
+ })
+ .join();
+
+ assert!(m.is_poisoned());
+ match Arc::try_unwrap(m).unwrap().into_inner() {
+ Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
+ Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {x:?}"),
+ }
+}
+
+#[test]
+fn test_get_mut() {
+ let mut m = RwLock::new(NonCopy(10));
+ *m.get_mut().unwrap() = NonCopy(20);
+ assert_eq!(m.into_inner().unwrap(), NonCopy(20));
+}
+
+#[test]
+fn test_get_mut_poison() {
+ let m = Arc::new(RwLock::new(NonCopy(10)));
+ let m2 = m.clone();
+ let _ = thread::spawn(move || {
+ let _lock = m2.write().unwrap();
+ panic!("test panic in inner thread to poison RwLock");
+ })
+ .join();
+
+ assert!(m.is_poisoned());
+ match Arc::try_unwrap(m).unwrap().get_mut() {
+ Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
+ Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {x:?}"),
+ }
+}
+
+#[test]
+fn test_read_guard_covariance() {
+ fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+ let j: i32 = 5;
+ let lock = RwLock::new(&j);
+ {
+ let i = 6;
+ do_stuff(lock.read().unwrap(), &i);
+ }
+ drop(lock);
+}
diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs
new file mode 100644
index 000000000..e8e7c51cb
--- /dev/null
+++ b/library/std/src/sys/common/alloc.rs
@@ -0,0 +1,54 @@
+use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::cmp;
+use crate::ptr;
+
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(
+ target_arch = "x86",
+ target_arch = "arm",
+ target_arch = "mips",
+ target_arch = "powerpc",
+ target_arch = "powerpc64",
+ target_arch = "sparc",
+ target_arch = "asmjs",
+ target_arch = "wasm32",
+ target_arch = "hexagon",
+ all(target_arch = "riscv32", not(target_os = "espidf")),
+ all(target_arch = "xtensa", not(target_os = "espidf")),
+)))]
+pub const MIN_ALIGN: usize = 8;
+#[cfg(all(any(
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "mips64",
+ target_arch = "s390x",
+ target_arch = "sparc64",
+ target_arch = "riscv64",
+ target_arch = "wasm64",
+)))]
+pub const MIN_ALIGN: usize = 16;
+// The allocator on the esp-idf platform guarantees 4 byte alignment.
+#[cfg(all(any(
+ all(target_arch = "riscv32", target_os = "espidf"),
+ all(target_arch = "xtensa", target_os = "espidf"),
+)))]
+pub const MIN_ALIGN: usize = 4;
+
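+// How platform allocators typically use `MIN_ALIGN` (an illustrative sketch; the real
+// fast paths live in the per-platform `alloc.rs` implementations):
+//
+//     if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+//         // the default allocation path already satisfies the requested alignment
+//     } else {
+//         // fall back to an explicitly aligned allocation primitive
+//     }
+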
+pub unsafe fn realloc_fallback(
+ alloc: &System,
+ ptr: *mut u8,
+ old_layout: Layout,
+ new_size: usize,
+) -> *mut u8 {
+ // Docs for GlobalAlloc::realloc require this to be valid:
+ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+
+ let new_ptr = GlobalAlloc::alloc(alloc, new_layout);
+ if !new_ptr.is_null() {
+ let size = cmp::min(old_layout.size(), new_size);
+ ptr::copy_nonoverlapping(ptr, new_ptr, size);
+ GlobalAlloc::dealloc(alloc, ptr, old_layout);
+ }
+ new_ptr
+}
diff --git a/library/std/src/sys/common/mod.rs b/library/std/src/sys/common/mod.rs
new file mode 100644
index 000000000..ff64d2aa8
--- /dev/null
+++ b/library/std/src/sys/common/mod.rs
@@ -0,0 +1,13 @@
+// This module contains code that is shared between all platforms, mostly utility or fallback code.
+// This explicitly does not include code that is shared between only a few platforms,
+// such as when reusing an implementation from `unix` or `unsupported`.
+// In those cases the desired code should be included directly using the #[path] attribute,
+// not moved to this module.
+//
+// Currently `sys_common` contains a lot of code that should live in this module,
+// ideally `sys_common` would only contain platform-independent abstractions on top of `sys`.
+// Progress on this is tracked in #84187.
+
+#![allow(dead_code)]
+
+pub mod alloc;
diff --git a/library/std/src/sys/hermit/alloc.rs b/library/std/src/sys/hermit/alloc.rs
new file mode 100644
index 000000000..d153914e7
--- /dev/null
+++ b/library/std/src/sys/hermit/alloc.rs
@@ -0,0 +1,31 @@
+use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::ptr;
+use crate::sys::hermit::abi;
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ abi::malloc(layout.size(), layout.align())
+ }
+
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ let addr = abi::malloc(layout.size(), layout.align());
+
+ if !addr.is_null() {
+ ptr::write_bytes(addr, 0x00, layout.size());
+ }
+
+ addr
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ abi::free(ptr, layout.size(), layout.align())
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ abi::realloc(ptr, layout.size(), layout.align(), new_size)
+ }
+}
diff --git a/library/std/src/sys/hermit/args.rs b/library/std/src/sys/hermit/args.rs
new file mode 100644
index 000000000..1c7e1dd8d
--- /dev/null
+++ b/library/std/src/sys/hermit/args.rs
@@ -0,0 +1,94 @@
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::vec;
+
+/// One-time global initialization.
+pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ imp::init(argc, argv)
+}
+
+/// One-time global cleanup.
+pub unsafe fn cleanup() {
+ imp::cleanup()
+}
+
+/// Returns the command line arguments
+pub fn args() -> Args {
+ imp::args()
+}
+
+pub struct Args {
+ iter: vec::IntoIter<OsString>,
+}
+
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.iter.as_slice().fmt(f)
+ }
+}
+
+impl !Send for Args {}
+impl !Sync for Args {}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.iter.next_back()
+ }
+}
+
+mod imp {
+ use super::Args;
+ use crate::ffi::{CStr, OsString};
+ use crate::os::unix::ffi::OsStringExt;
+ use crate::ptr;
+
+ use crate::sys_common::mutex::StaticMutex;
+
+ static mut ARGC: isize = 0;
+ static mut ARGV: *const *const u8 = ptr::null();
+ static LOCK: StaticMutex = StaticMutex::new();
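+ // Every access to the two `static mut`s above is serialized through `LOCK`.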
+
+ pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ let _guard = LOCK.lock();
+ ARGC = argc;
+ ARGV = argv;
+ }
+
+ pub unsafe fn cleanup() {
+ let _guard = LOCK.lock();
+ ARGC = 0;
+ ARGV = ptr::null();
+ }
+
+ pub fn args() -> Args {
+ Args { iter: clone().into_iter() }
+ }
+
+ fn clone() -> Vec<OsString> {
+ unsafe {
+ let _guard = LOCK.lock();
+ (0..ARGC)
+ .map(|i| {
+ let cstr = CStr::from_ptr(*ARGV.offset(i) as *const i8);
+ OsStringExt::from_vec(cstr.to_bytes().to_vec())
+ })
+ .collect()
+ }
+ }
+}
diff --git a/library/std/src/sys/hermit/condvar.rs b/library/std/src/sys/hermit/condvar.rs
new file mode 100644
index 000000000..22059ca0d
--- /dev/null
+++ b/library/std/src/sys/hermit/condvar.rs
@@ -0,0 +1,90 @@
+use crate::ffi::c_void;
+use crate::ptr;
+use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+use crate::sys::hermit::abi;
+use crate::sys::locks::Mutex;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+use crate::time::Duration;
+
+// The implementation is inspired by Andrew D. Birrell's paper
+// "Implementing Condition Variables with Semaphores"
+
+pub struct Condvar {
+ counter: AtomicUsize,
+ sem1: *const c_void,
+ sem2: *const c_void,
+}
+
+pub(crate) type MovableCondvar = LazyBox<Condvar>;
+
+impl LazyInit for Condvar {
+ fn init() -> Box<Self> {
+ Box::new(Self::new())
+ }
+}
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+impl Condvar {
+ pub fn new() -> Self {
+ let mut condvar =
+ Self { counter: AtomicUsize::new(0), sem1: ptr::null(), sem2: ptr::null() };
+ unsafe {
+ let _ = abi::sem_init(&mut condvar.sem1, 0);
+ let _ = abi::sem_init(&mut condvar.sem2, 0);
+ }
+ condvar
+ }
+
+ pub unsafe fn notify_one(&self) {
+ if self.counter.load(SeqCst) > 0 {
+ self.counter.fetch_sub(1, SeqCst);
+ abi::sem_post(self.sem1);
+ abi::sem_timedwait(self.sem2, 0);
+ }
+ }
+
+ pub unsafe fn notify_all(&self) {
+ let counter = self.counter.swap(0, SeqCst);
+ for _ in 0..counter {
+ abi::sem_post(self.sem1);
+ }
+ for _ in 0..counter {
+ abi::sem_timedwait(self.sem2, 0);
+ }
+ }
+
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ self.counter.fetch_add(1, SeqCst);
+ mutex.unlock();
+ abi::sem_timedwait(self.sem1, 0);
+ abi::sem_post(self.sem2);
+ mutex.lock();
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ self.counter.fetch_add(1, SeqCst);
+ mutex.unlock();
+ let millis = dur.as_millis().min(u32::MAX as u128) as u32;
+
+ let res = if millis > 0 {
+ abi::sem_timedwait(self.sem1, millis)
+ } else {
+ abi::sem_trywait(self.sem1)
+ };
+
+ abi::sem_post(self.sem2);
+ mutex.lock();
+ res == 0
+ }
+}
+
+impl Drop for Condvar {
+ fn drop(&mut self) {
+ unsafe {
+ let _ = abi::sem_destroy(self.sem1);
+ let _ = abi::sem_destroy(self.sem2);
+ }
+ }
+}
diff --git a/library/std/src/sys/hermit/env.rs b/library/std/src/sys/hermit/env.rs
new file mode 100644
index 000000000..7a0fcb31e
--- /dev/null
+++ b/library/std/src/sys/hermit/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "";
+ pub const OS: &str = "hermit";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = "";
+ pub const DLL_EXTENSION: &str = "";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
diff --git a/library/std/src/sys/hermit/fd.rs b/library/std/src/sys/hermit/fd.rs
new file mode 100644
index 000000000..c400f5f2c
--- /dev/null
+++ b/library/std/src/sys/hermit/fd.rs
@@ -0,0 +1,87 @@
+#![unstable(reason = "not public", issue = "none", feature = "fd")]
+
+use crate::io::{self, Read};
+use crate::mem;
+use crate::sys::cvt;
+use crate::sys::hermit::abi;
+use crate::sys::unsupported;
+use crate::sys_common::AsInner;
+
+#[derive(Debug)]
+pub struct FileDesc {
+ fd: i32,
+}
+
+impl FileDesc {
+ pub fn new(fd: i32) -> FileDesc {
+ FileDesc { fd }
+ }
+
+ pub fn raw(&self) -> i32 {
+ self.fd
+ }
+
+ /// Extracts the actual file descriptor without closing it.
+ pub fn into_raw(self) -> i32 {
+ let fd = self.fd;
+ mem::forget(self);
+ fd
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let result = unsafe { abi::read(self.fd, buf.as_mut_ptr(), buf.len()) };
+ cvt(result as i32)
+ }
+
+ pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ let mut me = self;
+ (&mut me).read_to_end(buf)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ let result = unsafe { abi::write(self.fd, buf.as_ptr(), buf.len()) };
+ cvt(result as i32)
+ }
+
+ pub fn duplicate(&self) -> io::Result<FileDesc> {
+ self.duplicate_path(&[])
+ }
+ pub fn duplicate_path(&self, _path: &[u8]) -> io::Result<FileDesc> {
+ unsupported()
+ }
+
+ pub fn nonblocking(&self) -> io::Result<bool> {
+ Ok(false)
+ }
+
+ pub fn set_cloexec(&self) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn set_nonblocking(&self, _nonblocking: bool) -> io::Result<()> {
+ unsupported()
+ }
+}
+
+impl<'a> Read for &'a FileDesc {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (**self).read(buf)
+ }
+}
+
+impl AsInner<i32> for FileDesc {
+ fn as_inner(&self) -> &i32 {
+ &self.fd
+ }
+}
+
+impl Drop for FileDesc {
+ fn drop(&mut self) {
+ // Note that errors are ignored when closing a file descriptor. The
+ // reason for this is that if an error occurs we don't actually know if
+ // the file descriptor was closed or not, and if we retried (for
+ // something like EINTR), we might close another valid file descriptor
+ // (opened after we closed ours).
+ let _ = unsafe { abi::close(self.fd) };
+ }
+}
diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs
new file mode 100644
index 000000000..fa9a7fb19
--- /dev/null
+++ b/library/std/src/sys/hermit/fs.rs
@@ -0,0 +1,408 @@
+use crate::ffi::{CStr, CString, OsString};
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+use crate::io::{self, Error, ErrorKind};
+use crate::io::{IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::os::unix::ffi::OsStrExt;
+use crate::path::{Path, PathBuf};
+use crate::sys::cvt;
+use crate::sys::hermit::abi;
+use crate::sys::hermit::abi::{O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY};
+use crate::sys::hermit::fd::FileDesc;
+use crate::sys::time::SystemTime;
+use crate::sys::unsupported;
+
+pub use crate::sys_common::fs::{copy, try_exists};
+//pub use crate::sys_common::fs::remove_dir_all;
+
+fn cstr(path: &Path) -> io::Result<CString> {
+ Ok(CString::new(path.as_os_str().as_bytes())?)
+}
+
+#[derive(Debug)]
+pub struct File(FileDesc);
+
+pub struct FileAttr(!);
+
+pub struct ReadDir(!);
+
+pub struct DirEntry(!);
+
+#[derive(Clone, Debug)]
+pub struct OpenOptions {
+ // generic
+ read: bool,
+ write: bool,
+ append: bool,
+ truncate: bool,
+ create: bool,
+ create_new: bool,
+ // system-specific
+ mode: i32,
+}
+
+pub struct FilePermissions(!);
+
+pub struct FileType(!);
+
+#[derive(Debug)]
+pub struct DirBuilder {}
+
+impl FileAttr {
+ pub fn size(&self) -> u64 {
+ self.0
+ }
+
+ pub fn perm(&self) -> FilePermissions {
+ self.0
+ }
+
+ pub fn file_type(&self) -> FileType {
+ self.0
+ }
+
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ self.0
+ }
+
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ self.0
+ }
+
+ pub fn created(&self) -> io::Result<SystemTime> {
+ self.0
+ }
+}
+
+impl Clone for FileAttr {
+ fn clone(&self) -> FileAttr {
+ self.0
+ }
+}
+
+impl FilePermissions {
+ pub fn readonly(&self) -> bool {
+ self.0
+ }
+
+ pub fn set_readonly(&mut self, _readonly: bool) {
+ self.0
+ }
+}
+
+impl Clone for FilePermissions {
+ fn clone(&self) -> FilePermissions {
+ self.0
+ }
+}
+
+impl PartialEq for FilePermissions {
+ fn eq(&self, _other: &FilePermissions) -> bool {
+ self.0
+ }
+}
+
+impl Eq for FilePermissions {}
+
+impl fmt::Debug for FilePermissions {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+impl FileType {
+ pub fn is_dir(&self) -> bool {
+ self.0
+ }
+
+ pub fn is_file(&self) -> bool {
+ self.0
+ }
+
+ pub fn is_symlink(&self) -> bool {
+ self.0
+ }
+}
+
+impl Clone for FileType {
+ fn clone(&self) -> FileType {
+ self.0
+ }
+}
+
+impl Copy for FileType {}
+
+impl PartialEq for FileType {
+ fn eq(&self, _other: &FileType) -> bool {
+ self.0
+ }
+}
+
+impl Eq for FileType {}
+
+impl Hash for FileType {
+ fn hash<H: Hasher>(&self, _h: &mut H) {
+ self.0
+ }
+}
+
+impl fmt::Debug for FileType {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+impl fmt::Debug for ReadDir {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+impl Iterator for ReadDir {
+ type Item = io::Result<DirEntry>;
+
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ self.0
+ }
+}
+
+impl DirEntry {
+ pub fn path(&self) -> PathBuf {
+ self.0
+ }
+
+ pub fn file_name(&self) -> OsString {
+ self.0
+ }
+
+ pub fn metadata(&self) -> io::Result<FileAttr> {
+ self.0
+ }
+
+ pub fn file_type(&self) -> io::Result<FileType> {
+ self.0
+ }
+}
+
+impl OpenOptions {
+ pub fn new() -> OpenOptions {
+ OpenOptions {
+ // generic
+ read: false,
+ write: false,
+ append: false,
+ truncate: false,
+ create: false,
+ create_new: false,
+ // system-specific
+ mode: 0x777,
+ }
+ }
+
+ pub fn read(&mut self, read: bool) {
+ self.read = read;
+ }
+ pub fn write(&mut self, write: bool) {
+ self.write = write;
+ }
+ pub fn append(&mut self, append: bool) {
+ self.append = append;
+ }
+ pub fn truncate(&mut self, truncate: bool) {
+ self.truncate = truncate;
+ }
+ pub fn create(&mut self, create: bool) {
+ self.create = create;
+ }
+ pub fn create_new(&mut self, create_new: bool) {
+ self.create_new = create_new;
+ }
+
+ fn get_access_mode(&self) -> io::Result<i32> {
+ match (self.read, self.write, self.append) {
+ (true, false, false) => Ok(O_RDONLY),
+ (false, true, false) => Ok(O_WRONLY),
+ (true, true, false) => Ok(O_RDWR),
+ (false, _, true) => Ok(O_WRONLY | O_APPEND),
+ (true, _, true) => Ok(O_RDWR | O_APPEND),
+ (false, false, false) => {
+ Err(io::const_io_error!(ErrorKind::InvalidInput, "invalid access mode"))
+ }
+ }
+ }
+
+ fn get_creation_mode(&self) -> io::Result<i32> {
+ match (self.write, self.append) {
+ (true, false) => {}
+ (false, false) => {
+ if self.truncate || self.create || self.create_new {
+ return Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "invalid creation mode",
+ ));
+ }
+ }
+ (_, true) => {
+ if self.truncate && !self.create_new {
+ return Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "invalid creation mode",
+ ));
+ }
+ }
+ }
+
+ Ok(match (self.create, self.truncate, self.create_new) {
+ (false, false, false) => 0,
+ (true, false, false) => O_CREAT,
+ (false, true, false) => O_TRUNC,
+ (true, true, false) => O_CREAT | O_TRUNC,
+ (_, _, true) => O_CREAT | O_EXCL,
+ })
+ }
+}
+
+impl File {
+ pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
+ let path = cstr(path)?;
+ File::open_c(&path, opts)
+ }
+
+ pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
+ let mut flags = opts.get_access_mode()?;
+ flags = flags | opts.get_creation_mode()?;
+
+ let mode = if flags & O_CREAT == O_CREAT { opts.mode } else { 0 };
+
+ let fd = unsafe { cvt(abi::open(path.as_ptr(), flags, mode))? };
+ Ok(File(FileDesc::new(fd as i32)))
+ }
+
+ pub fn file_attr(&self) -> io::Result<FileAttr> {
+ Err(Error::from_raw_os_error(22))
+ }
+
+ pub fn fsync(&self) -> io::Result<()> {
+ Err(Error::from_raw_os_error(22))
+ }
+
+ pub fn datasync(&self) -> io::Result<()> {
+ self.fsync()
+ }
+
+ pub fn truncate(&self, _size: u64) -> io::Result<()> {
+ Err(Error::from_raw_os_error(22))
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ crate::io::default_read_vectored(|buf| self.read(buf), bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ crate::io::default_read_buf(|buf| self.read(buf), buf)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ crate::io::default_write_vectored(|buf| self.write(buf), bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ Ok(())
+ }
+
+ pub fn seek(&self, _pos: SeekFrom) -> io::Result<u64> {
+ Err(Error::from_raw_os_error(22))
+ }
+
+ pub fn duplicate(&self) -> io::Result<File> {
+ Err(Error::from_raw_os_error(22))
+ }
+
+ pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
+ Err(Error::from_raw_os_error(22))
+ }
+}
+
+impl DirBuilder {
+ pub fn new() -> DirBuilder {
+ DirBuilder {}
+ }
+
+ pub fn mkdir(&self, _p: &Path) -> io::Result<()> {
+ unsupported()
+ }
+}
+
+pub fn readdir(_p: &Path) -> io::Result<ReadDir> {
+ unsupported()
+}
+
+pub fn unlink(path: &Path) -> io::Result<()> {
+ let name = cstr(path)?;
+ let _ = unsafe { cvt(abi::unlink(name.as_ptr()))? };
+ Ok(())
+}
+
+pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn set_perm(_p: &Path, perm: FilePermissions) -> io::Result<()> {
+ match perm.0 {}
+}
+
+pub fn rmdir(_p: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn remove_dir_all(_path: &Path) -> io::Result<()> {
+ //unsupported()
+ Ok(())
+}
+
+pub fn readlink(_p: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn symlink(_original: &Path, _link: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn link(_original: &Path, _link: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn stat(_p: &Path) -> io::Result<FileAttr> {
+ unsupported()
+}
+
+pub fn lstat(_p: &Path) -> io::Result<FileAttr> {
+ unsupported()
+}
+
+pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
diff --git a/library/std/src/sys/hermit/memchr.rs b/library/std/src/sys/hermit/memchr.rs
new file mode 100644
index 000000000..996748219
--- /dev/null
+++ b/library/std/src/sys/hermit/memchr.rs
@@ -0,0 +1 @@
+pub use core::slice::memchr::{memchr, memrchr};
diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs
new file mode 100644
index 000000000..60b7a973c
--- /dev/null
+++ b/library/std/src/sys/hermit/mod.rs
@@ -0,0 +1,156 @@
+//! System bindings for HermitCore
+//!
+//! This module contains the facade (aka platform-specific) implementations of
+//! OS level functionality for HermitCore.
+//!
+//! This is all highly experimental and not yet intended for wide or
+//! production use; it will likely change over time.
+//!
+//! Many of the functions here are still stubs that immediately return
+//! errors. The hope is that with a portability lint we can eventually just
+//! remove them and omit the affected parts of the standard library when
+//! compiling for HermitCore. That way it's a compile-time error for
+//! something that's guaranteed to be a runtime error!
+
+#![allow(unsafe_op_in_unsafe_fn)]
+
+use crate::intrinsics;
+use crate::os::raw::c_char;
+
+pub mod alloc;
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+pub mod fd;
+pub mod fs;
+#[path = "../unsupported/io.rs"]
+pub mod io;
+pub mod memchr;
+pub mod net;
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+#[path = "../unix/path.rs"]
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+pub mod stdio;
+pub mod thread;
+pub mod thread_local_dtor;
+#[path = "../unsupported/thread_local_key.rs"]
+pub mod thread_local_key;
+pub mod time;
+
+mod condvar;
+mod mutex;
+mod rwlock;
+
+pub mod locks {
+ pub use super::condvar::*;
+ pub use super::mutex::*;
+ pub use super::rwlock::*;
+}
+
+use crate::io::ErrorKind;
+
+#[allow(unused_extern_crates)]
+pub extern crate hermit_abi as abi;
+
+pub fn unsupported<T>() -> crate::io::Result<T> {
+ Err(unsupported_err())
+}
+
+pub fn unsupported_err() -> crate::io::Error {
+ crate::io::const_io_error!(
+ crate::io::ErrorKind::Unsupported,
+ "operation not supported on HermitCore yet",
+ )
+}
+
+#[no_mangle]
+pub extern "C" fn floor(x: f64) -> f64 {
+ unsafe { intrinsics::floorf64(x) }
+}
+
+pub fn abort_internal() -> ! {
+ unsafe {
+ abi::abort();
+ }
+}
+
+// FIXME: just a workaround to test the system
+pub fn hashmap_random_keys() -> (u64, u64) {
+ (1, 2)
+}
+
+// This function is needed by the panic runtime. The symbol is named in
+// pre-link args for the target specification, so keep that in sync.
+#[cfg(not(test))]
+#[no_mangle]
+// NB. used by both libunwind and libpanic_abort
+pub extern "C" fn __rust_abort() {
+ abort_internal();
+}
+
+// SAFETY: must be called only once during runtime initialization.
+// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ let _ = net::init();
+ args::init(argc, argv);
+}
+
+// SAFETY: must be called only once during runtime cleanup.
+// NOTE: this is not guaranteed to run, for example when the program aborts.
+pub unsafe fn cleanup() {
+ args::cleanup();
+}
+
+#[cfg(not(test))]
+#[no_mangle]
+pub unsafe extern "C" fn runtime_entry(
+ argc: i32,
+ argv: *const *const c_char,
+ env: *const *const c_char,
+) -> ! {
+ use crate::sys::hermit::thread_local_dtor::run_dtors;
+ extern "C" {
+ fn main(argc: isize, argv: *const *const c_char) -> i32;
+ }
+
+ // initialize environment
+ os::init_environment(env as *const *const i8);
+
+ let result = main(argc as isize, argv);
+
+ run_dtors();
+ abi::exit(result);
+}
+
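+// The errno values handed back by hermit-abi appear to follow the common
+// Linux numbering (e.g. 2 = ENOENT, 13 = EACCES, 22 = EINVAL, 110 = ETIMEDOUT);
+// the mapping below assumes that convention.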
+pub fn decode_error_kind(errno: i32) -> ErrorKind {
+ match errno {
+ 13 => ErrorKind::PermissionDenied,
+ 98 => ErrorKind::AddrInUse,
+ 99 => ErrorKind::AddrNotAvailable,
+ 11 => ErrorKind::WouldBlock,
+ 103 => ErrorKind::ConnectionAborted,
+ 111 => ErrorKind::ConnectionRefused,
+ 104 => ErrorKind::ConnectionReset,
+ 17 => ErrorKind::AlreadyExists,
+ 4 => ErrorKind::Interrupted,
+ 22 => ErrorKind::InvalidInput,
+ 2 => ErrorKind::NotFound,
+ 107 => ErrorKind::NotConnected,
+ 1 => ErrorKind::PermissionDenied,
+ 32 => ErrorKind::BrokenPipe,
+ 110 => ErrorKind::TimedOut,
+ _ => ErrorKind::Uncategorized,
+ }
+}
+
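+/// Converts a raw hermit-abi return value into an `io::Result`: negative
+/// values encode `-errno`, non-negative values are passed through as the
+/// success value.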
+pub fn cvt(result: i32) -> crate::io::Result<usize> {
+ if result < 0 { Err(crate::io::Error::from_raw_os_error(-result)) } else { Ok(result as usize) }
+}
diff --git a/library/std/src/sys/hermit/mutex.rs b/library/std/src/sys/hermit/mutex.rs
new file mode 100644
index 000000000..eb15a04ff
--- /dev/null
+++ b/library/std/src/sys/hermit/mutex.rs
@@ -0,0 +1,216 @@
+use crate::cell::UnsafeCell;
+use crate::collections::VecDeque;
+use crate::hint;
+use crate::ops::{Deref, DerefMut, Drop};
+use crate::ptr;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sys::hermit::abi;
+
+/// This type provides a lock based on busy waiting to realize mutual exclusion.
+///
+/// # Description
+///
+/// This structure behaves a lot like a regular mutex. There are some differences:
+///
+/// - By using busy waiting, it can be used outside the runtime.
+/// - It is a so-called ticket lock and is completely fair.
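+///
+/// Internally, `queue` hands out tickets (it is incremented once per lock
+/// attempt) while `dequeue` holds the ticket currently being served: a thread
+/// spins, and eventually yields, until its ticket comes up, and dropping the
+/// guard advances `dequeue` to admit the next waiter.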
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+struct Spinlock<T: ?Sized> {
+ queue: AtomicUsize,
+ dequeue: AtomicUsize,
+ data: UnsafeCell<T>,
+}
+
+unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
+unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
+
+/// A guard through which the protected data can be accessed.
+///
+/// When the guard falls out of scope it will release the lock.
+struct SpinlockGuard<'a, T: ?Sized + 'a> {
+ dequeue: &'a AtomicUsize,
+ data: &'a mut T,
+}
+
+impl<T> Spinlock<T> {
+ pub const fn new(user_data: T) -> Spinlock<T> {
+ Spinlock {
+ queue: AtomicUsize::new(0),
+ dequeue: AtomicUsize::new(1),
+ data: UnsafeCell::new(user_data),
+ }
+ }
+
+ #[inline]
+ fn obtain_lock(&self) {
+ let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
+ let mut counter: u16 = 0;
+ while self.dequeue.load(Ordering::SeqCst) != ticket {
+ counter += 1;
+ if counter < 100 {
+ hint::spin_loop();
+ } else {
+ counter = 0;
+ unsafe {
+ abi::yield_now();
+ }
+ }
+ }
+ }
+
+ #[inline]
+ pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
+ self.obtain_lock();
+ SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
+ }
+}
+
+impl<T: ?Sized + Default> Default for Spinlock<T> {
+ fn default() -> Spinlock<T> {
+ Spinlock::new(Default::default())
+ }
+}
+
+impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &*self.data
+ }
+}
+
+impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut *self.data
+ }
+}
+
+impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
+ /// The dropping of the SpinlockGuard will release the lock it was created from.
+ fn drop(&mut self) {
+ self.dequeue.fetch_add(1, Ordering::SeqCst);
+ }
+}
+
+/// A priority queue of task IDs, ordered by task priority.
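+///
+/// Bit `i` of `prio_bitmap` is set whenever `queues[i]` is non-empty, so
+/// `pop` only has to test bits instead of inspecting every `VecDeque`.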
+struct PriorityQueue {
+ queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
+ prio_bitmap: u64,
+}
+
+impl PriorityQueue {
+ pub const fn new() -> PriorityQueue {
+ PriorityQueue {
+ queues: [
+ None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+ None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+ None, None, None,
+ ],
+ prio_bitmap: 0,
+ }
+ }
+
+ /// Add a task id by its priority to the queue
+ pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
+ let i: usize = prio.into().into();
+ self.prio_bitmap |= (1 << i) as u64;
+ if let Some(queue) = &mut self.queues[i] {
+ queue.push_back(id);
+ } else {
+ let mut queue = VecDeque::new();
+ queue.push_back(id);
+ self.queues[i] = Some(queue);
+ }
+ }
+
+ fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
+ if let Some(queue) = &mut self.queues[queue_index] {
+ let id = queue.pop_front();
+
+ if queue.is_empty() {
+ self.prio_bitmap &= !(1 << queue_index as u64);
+ }
+
+ id
+ } else {
+ None
+ }
+ }
+
+ /// Pop the task handle with the highest priority from the queue
+ pub fn pop(&mut self) -> Option<abi::Tid> {
+ for i in 0..abi::NO_PRIORITIES {
+ if self.prio_bitmap & (1 << i) != 0 {
+ return self.pop_from_queue(i);
+ }
+ }
+
+ None
+ }
+}
+
+struct MutexInner {
+ locked: bool,
+ blocked_task: PriorityQueue,
+}
+
+impl MutexInner {
+ pub const fn new() -> MutexInner {
+ MutexInner { locked: false, blocked_task: PriorityQueue::new() }
+ }
+}
+
+pub struct Mutex {
+ inner: Spinlock<MutexInner>,
+}
+
+pub type MovableMutex = Mutex;
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+impl Mutex {
+ pub const fn new() -> Mutex {
+ Mutex { inner: Spinlock::new(MutexInner::new()) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
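+ // If the mutex is already held, the current task enqueues itself (by
+ // priority) on `blocked_task`, blocks, and retries once `unlock` wakes
+ // it up again.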
+ #[inline]
+ pub unsafe fn lock(&self) {
+ loop {
+ let mut guard = self.inner.lock();
+ if !guard.locked {
+ guard.locked = true;
+ return;
+ } else {
+ let prio = abi::get_priority();
+ let id = abi::getpid();
+
+ guard.blocked_task.push(prio, id);
+ abi::block_current_task();
+ drop(guard);
+ abi::yield_now();
+ }
+ }
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ let mut guard = self.inner.lock();
+ guard.locked = false;
+ if let Some(tid) = guard.blocked_task.pop() {
+ abi::wakeup_task(tid);
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ let mut guard = self.inner.lock();
+ if !guard.locked {
+ guard.locked = true;
+ }
+ guard.locked
+ }
+}
diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs
new file mode 100644
index 000000000..745476171
--- /dev/null
+++ b/library/std/src/sys/hermit/net.rs
@@ -0,0 +1,492 @@
+use crate::fmt;
+use crate::io::{self, ErrorKind, IoSlice, IoSliceMut};
+use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
+use crate::str;
+use crate::sync::Arc;
+use crate::sys::hermit::abi;
+use crate::sys::hermit::abi::IpAddress::{Ipv4, Ipv6};
+use crate::sys::unsupported;
+use crate::sys_common::AsInner;
+use crate::time::Duration;
+
+/// Checks whether HermitCore's socket interface has already been started and,
+/// if not, starts it.
+pub fn init() -> io::Result<()> {
+ if abi::network_init() < 0 {
+ return Err(io::const_io_error!(
+ ErrorKind::Uncategorized,
+ "Unable to initialize network interface",
+ ));
+ }
+
+ Ok(())
+}
+
+#[derive(Debug, Clone)]
+pub struct Socket(abi::Handle);
+
+impl AsInner<abi::Handle> for Socket {
+ fn as_inner(&self) -> &abi::Handle {
+ &self.0
+ }
+}
+
+impl Drop for Socket {
+ fn drop(&mut self) {
+ let _ = abi::tcpstream::close(self.0);
+ }
+}
+
+// The `Arc` counts how many handles share the underlying socket.
+// Only when the last clone is dropped does `Drop` actually close
+// the socket.
+#[derive(Clone)]
+pub struct TcpStream(Arc<Socket>);
+
+impl TcpStream {
+ pub fn connect(addr: io::Result<&SocketAddr>) -> io::Result<TcpStream> {
+ let addr = addr?;
+
+ match abi::tcpstream::connect(addr.ip().to_string().as_bytes(), addr.port(), None) {
+ Ok(handle) => Ok(TcpStream(Arc::new(Socket(handle)))),
+ _ => Err(io::const_io_error!(
+ ErrorKind::Uncategorized,
+ "Unable to initiate a connection on a socket",
+ )),
+ }
+ }
+
+ pub fn connect_timeout(saddr: &SocketAddr, duration: Duration) -> io::Result<TcpStream> {
+ match abi::tcpstream::connect(
+ saddr.ip().to_string().as_bytes(),
+ saddr.port(),
+ Some(duration.as_millis() as u64),
+ ) {
+ Ok(handle) => Ok(TcpStream(Arc::new(Socket(handle)))),
+ _ => Err(io::const_io_error!(
+ ErrorKind::Uncategorized,
+ "Unable to initiate a connection on a socket",
+ )),
+ }
+ }
+
+ pub fn set_read_timeout(&self, duration: Option<Duration>) -> io::Result<()> {
+ abi::tcpstream::set_read_timeout(*self.0.as_inner(), duration.map(|d| d.as_millis() as u64))
+ .map_err(|_| {
+ io::const_io_error!(ErrorKind::Uncategorized, "Unable to set timeout value")
+ })
+ }
+
+ pub fn set_write_timeout(&self, duration: Option<Duration>) -> io::Result<()> {
+ abi::tcpstream::set_write_timeout(
+ *self.0.as_inner(),
+ duration.map(|d| d.as_millis() as u64),
+ )
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "Unable to set timeout value"))
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ let duration = abi::tcpstream::get_read_timeout(*self.0.as_inner()).map_err(|_| {
+ io::const_io_error!(ErrorKind::Uncategorized, "Unable to determine timeout value")
+ })?;
+
+ Ok(duration.map(|d| Duration::from_millis(d)))
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ let duration = abi::tcpstream::get_write_timeout(*self.0.as_inner()).map_err(|_| {
+ io::const_io_error!(ErrorKind::Uncategorized, "Unable to determine timeout value")
+ })?;
+
+ Ok(duration.map(|d| Duration::from_millis(d)))
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ abi::tcpstream::peek(*self.0.as_inner(), buf)
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "peek failed"))
+ }
+
+ pub fn read(&self, buffer: &mut [u8]) -> io::Result<usize> {
+ self.read_vectored(&mut [IoSliceMut::new(buffer)])
+ }
+
+ pub fn read_vectored(&self, ioslice: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ let mut size: usize = 0;
+
+ for i in ioslice.iter_mut() {
+ let ret = abi::tcpstream::read(*self.0.as_inner(), &mut i[0..]).map_err(|_| {
+ io::const_io_error!(ErrorKind::Uncategorized, "Unable to read on socket")
+ })?;
+
+ if ret != 0 {
+ size += ret;
+ }
+ }
+
+ Ok(size)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn write(&self, buffer: &[u8]) -> io::Result<usize> {
+ self.write_vectored(&[IoSlice::new(buffer)])
+ }
+
+ pub fn write_vectored(&self, ioslice: &[IoSlice<'_>]) -> io::Result<usize> {
+ let mut size: usize = 0;
+
+ for i in ioslice.iter() {
+ size += abi::tcpstream::write(*self.0.as_inner(), i).map_err(|_| {
+ io::const_io_error!(ErrorKind::Uncategorized, "Unable to write on socket")
+ })?;
+ }
+
+ Ok(size)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ let (ipaddr, port) = abi::tcpstream::peer_addr(*self.0.as_inner())
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "peer_addr failed"))?;
+
+ let saddr = match ipaddr {
+ Ipv4(ref addr) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from(addr.0)), port),
+ Ipv6(ref addr) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from(addr.0)), port),
+ _ => {
+ return Err(io::const_io_error!(ErrorKind::Uncategorized, "peer_addr failed"));
+ }
+ };
+
+ Ok(saddr)
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ abi::tcpstream::shutdown(*self.0.as_inner(), how as i32)
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "unable to shutdown socket"))
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpStream> {
+ Ok(self.clone())
+ }
+
+ pub fn set_linger(&self, _linger: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn set_nodelay(&self, mode: bool) -> io::Result<()> {
+ abi::tcpstream::set_nodelay(*self.0.as_inner(), mode)
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "set_nodelay failed"))
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ abi::tcpstream::nodelay(*self.0.as_inner())
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "nodelay failed"))
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ abi::tcpstream::set_tll(*self.0.as_inner(), ttl)
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "unable to set TTL"))
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ abi::tcpstream::get_tll(*self.0.as_inner())
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "unable to get TTL"))
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unsupported()
+ }
+
+ pub fn set_nonblocking(&self, mode: bool) -> io::Result<()> {
+ abi::tcpstream::set_nonblocking(*self.0.as_inner(), mode).map_err(|_| {
+ io::const_io_error!(ErrorKind::Uncategorized, "unable to set blocking mode")
+ })
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Ok(())
+ }
+}
+
+#[derive(Clone)]
+pub struct TcpListener(SocketAddr);
+
+impl TcpListener {
+ pub fn bind(addr: io::Result<&SocketAddr>) -> io::Result<TcpListener> {
+ let addr = addr?;
+
+ Ok(TcpListener(*addr))
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ Ok(self.0)
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ let (handle, ipaddr, port) = abi::tcplistener::accept(self.0.port())
+ .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "accept failed"))?;
+ let saddr = match ipaddr {
+ Ipv4(ref addr) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from(addr.0)), port),
+ Ipv6(ref addr) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from(addr.0)), port),
+ _ => {
+ return Err(io::const_io_error!(ErrorKind::Uncategorized, "accept failed"));
+ }
+ };
+
+ Ok((TcpStream(Arc::new(Socket(handle))), saddr))
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpListener> {
+ Ok(self.clone())
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unsupported()
+ }
+
+ pub fn set_only_v6(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unsupported()
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Ok(())
+ }
+}
+
+pub struct UdpSocket(abi::Handle);
+
+impl UdpSocket {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> {
+ unsupported()
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unsupported()
+ }
+
+ pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unsupported()
+ }
+
+ pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn duplicate(&self) -> io::Result<UdpSocket> {
+ unsupported()
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn set_broadcast(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ unsupported()
+ }
+
+ pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unsupported()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unsupported()
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn send(&self, _: &[u8]) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> {
+ unsupported()
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Ok(())
+ }
+}
+
+pub struct LookupHost(!);
+
+impl LookupHost {
+ pub fn port(&self) -> u16 {
+ self.0
+ }
+}
+
+impl Iterator for LookupHost {
+ type Item = SocketAddr;
+ fn next(&mut self) -> Option<SocketAddr> {
+ self.0
+ }
+}
+
+impl TryFrom<&str> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: &str) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+#[allow(nonstandard_style)]
+pub mod netc {
+ pub const AF_INET: u8 = 0;
+ pub const AF_INET6: u8 = 1;
+ pub type sa_family_t = u8;
+
+ #[derive(Copy, Clone)]
+ pub struct in_addr {
+ pub s_addr: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in {
+ pub sin_family: sa_family_t,
+ pub sin_port: u16,
+ pub sin_addr: in_addr,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct in6_addr {
+ pub s6_addr: [u8; 16],
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in6 {
+ pub sin6_family: sa_family_t,
+ pub sin6_port: u16,
+ pub sin6_addr: in6_addr,
+ pub sin6_flowinfo: u32,
+ pub sin6_scope_id: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr {}
+
+ pub type socklen_t = usize;
+}
diff --git a/library/std/src/sys/hermit/os.rs b/library/std/src/sys/hermit/os.rs
new file mode 100644
index 000000000..8f927df85
--- /dev/null
+++ b/library/std/src/sys/hermit/os.rs
@@ -0,0 +1,178 @@
+use crate::collections::HashMap;
+use crate::error::Error as StdError;
+use crate::ffi::{CStr, OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::os::unix::ffi::OsStringExt;
+use crate::path::{self, PathBuf};
+use crate::str;
+use crate::sync::Mutex;
+use crate::sys::hermit::abi;
+use crate::sys::memchr;
+use crate::sys::unsupported;
+use crate::vec;
+
+pub fn errno() -> i32 {
+ 0
+}
+
+pub fn error_string(_errno: i32) -> String {
+ "operation successful".to_string()
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn chdir(_: &path::Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported on hermit yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "not supported on hermit yet"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+
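+// `ENV` is created exactly once by `init_environment`, which `runtime_entry`
+// calls before `main`, so the accessors below simply `unwrap()` it.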
+static mut ENV: Option<Mutex<HashMap<OsString, OsString>>> = None;
+
+pub fn init_environment(env: *const *const i8) {
+ unsafe {
+ ENV = Some(Mutex::new(HashMap::new()));
+
+ if env.is_null() {
+ return;
+ }
+
+ let mut guard = ENV.as_ref().unwrap().lock().unwrap();
+ let mut environ = env;
+ while !(*environ).is_null() {
+ if let Some((key, value)) = parse(CStr::from_ptr(*environ).to_bytes()) {
+ guard.insert(key, value);
+ }
+ environ = environ.add(1);
+ }
+ }
+
+ fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
+ // Strategy (copied from glibc): Variable name and value are separated
+ // by an ASCII equals sign '='. Since a variable name must not be
+ // empty, allow variable names starting with an equals sign. Skip all
+ // malformed lines.
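+ // For example, `b"FOO=bar"` parses to `("FOO", "bar")`, and `b"=X=1"`
+ // parses to `("=X", "1")` because the leading byte is never treated as
+ // the separator.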
+ if input.is_empty() {
+ return None;
+ }
+ let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1);
+ pos.map(|p| {
+ (
+ OsStringExt::from_vec(input[..p].to_vec()),
+ OsStringExt::from_vec(input[p + 1..].to_vec()),
+ )
+ })
+ }
+}
+
+pub struct Env {
+ iter: vec::IntoIter<(OsString, OsString)>,
+}
+
+impl !Send for Env {}
+impl !Sync for Env {}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+/// Returns a vector of (variable, value) byte-vector pairs for all the
+/// environment variables of the current process.
+pub fn env() -> Env {
+ unsafe {
+ let guard = ENV.as_ref().unwrap().lock().unwrap();
+ let mut result = Vec::new();
+
+ for (key, value) in guard.iter() {
+ result.push((key.clone(), value.clone()));
+ }
+
+ return Env { iter: result.into_iter() };
+ }
+}
+
+pub fn getenv(k: &OsStr) -> Option<OsString> {
+ unsafe { ENV.as_ref().unwrap().lock().unwrap().get_mut(k).cloned() }
+}
+
+pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
+ unsafe {
+ let (k, v) = (k.to_owned(), v.to_owned());
+ ENV.as_ref().unwrap().lock().unwrap().insert(k, v);
+ }
+ Ok(())
+}
+
+pub fn unsetenv(k: &OsStr) -> io::Result<()> {
+ unsafe {
+ ENV.as_ref().unwrap().lock().unwrap().remove(k);
+ }
+ Ok(())
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no filesystem on hermit")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(code: i32) -> ! {
+ unsafe {
+ abi::exit(code);
+ }
+}
+
+pub fn getpid() -> u32 {
+ unsafe { abi::getpid() }
+}
diff --git a/library/std/src/sys/hermit/rwlock.rs b/library/std/src/sys/hermit/rwlock.rs
new file mode 100644
index 000000000..9701bab1f
--- /dev/null
+++ b/library/std/src/sys/hermit/rwlock.rs
@@ -0,0 +1,144 @@
+use crate::cell::UnsafeCell;
+use crate::sys::locks::{MovableCondvar, Mutex};
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+pub struct RwLock {
+ lock: Mutex,
+ cond: MovableCondvar,
+ state: UnsafeCell<State>,
+}
+
+pub type MovableRwLock = RwLock;
+
+enum State {
+ Unlocked,
+ Reading(usize),
+ Writing,
+}
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+// This rwlock implementation is relatively simple: a condition variable for
+// readers/writers plus a mutex protecting the internal state of the lock. A
+// current downside is that unlocking notifies *all* waiters rather than just
+// readers or just writers, which can cause "thundering herd" problems. While
+// hopefully correct, this implementation is likely to change in the future.
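+//
+// `State` records whether the lock is unlocked, held by some number of
+// readers, or held by a single writer. `inc_readers`/`inc_writers` return
+// `false` when the requested mode is currently unavailable, which is what the
+// wait loops in `read` and `write` key off.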
+
+impl RwLock {
+ pub const fn new() -> RwLock {
+ RwLock {
+ lock: Mutex::new(),
+ cond: MovableCondvar::new(),
+ state: UnsafeCell::new(State::Unlocked),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ self.lock.lock();
+ while !(*self.state.get()).inc_readers() {
+ self.cond.wait(&self.lock);
+ }
+ self.lock.unlock();
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ self.lock.lock();
+ let ok = (*self.state.get()).inc_readers();
+ self.lock.unlock();
+ return ok;
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ self.lock.lock();
+ while !(*self.state.get()).inc_writers() {
+ self.cond.wait(&self.lock);
+ }
+ self.lock.unlock();
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ self.lock.lock();
+ let ok = (*self.state.get()).inc_writers();
+ self.lock.unlock();
+ return ok;
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ self.lock.lock();
+ let notify = (*self.state.get()).dec_readers();
+ self.lock.unlock();
+ if notify {
+ // FIXME: should only wake up one of these some of the time
+ self.cond.notify_all();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ self.lock.lock();
+ (*self.state.get()).dec_writers();
+ self.lock.unlock();
+ // FIXME: should only wake up one of these some of the time
+ self.cond.notify_all();
+ }
+}
+
+impl State {
+ fn inc_readers(&mut self) -> bool {
+ match *self {
+ State::Unlocked => {
+ *self = State::Reading(1);
+ true
+ }
+ State::Reading(ref mut cnt) => {
+ *cnt += 1;
+ true
+ }
+ State::Writing => false,
+ }
+ }
+
+ fn inc_writers(&mut self) -> bool {
+ match *self {
+ State::Unlocked => {
+ *self = State::Writing;
+ true
+ }
+ State::Reading(_) | State::Writing => false,
+ }
+ }
+
+ fn dec_readers(&mut self) -> bool {
+ let zero = match *self {
+ State::Reading(ref mut cnt) => {
+ *cnt -= 1;
+ *cnt == 0
+ }
+ State::Unlocked | State::Writing => invalid(),
+ };
+ if zero {
+ *self = State::Unlocked;
+ }
+ zero
+ }
+
+ fn dec_writers(&mut self) {
+ match *self {
+ State::Writing => {}
+ State::Unlocked | State::Reading(_) => invalid(),
+ }
+ *self = State::Unlocked;
+ }
+}
+
+fn invalid() -> ! {
+ panic!("inconsistent rwlock");
+}
diff --git a/library/std/src/sys/hermit/stdio.rs b/library/std/src/sys/hermit/stdio.rs
new file mode 100644
index 000000000..514de1df6
--- /dev/null
+++ b/library/std/src/sys/hermit/stdio.rs
@@ -0,0 +1,120 @@
+use crate::io;
+use crate::io::{IoSlice, IoSliceMut};
+use crate::sys::hermit::abi;
+
+pub struct Stdin;
+pub struct Stdout;
+pub struct Stderr;
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, data: &mut [u8]) -> io::Result<usize> {
+ self.read_vectored(&mut [IoSliceMut::new(data)])
+ }
+
+ fn read_vectored(&mut self, _data: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ Ok(0)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ true
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+ let len;
+
+ unsafe { len = abi::write(1, data.as_ptr() as *const u8, data.len()) }
+
+ if len < 0 {
+ Err(io::const_io_error!(io::ErrorKind::Uncategorized, "Stdout is not able to print"))
+ } else {
+ Ok(len as usize)
+ }
+ }
+
+ fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> {
+ // `abi::write` only understands a single contiguous buffer, so the
+ // slices are written one after another.
+ let mut written = 0;
+ for buf in data {
+ let len = unsafe { abi::write(1, buf.as_ptr(), buf.len()) };
+ if len < 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "Stdout is not able to print"
+ ));
+ }
+ written += len as usize;
+ if (len as usize) < buf.len() {
+ break;
+ }
+ }
+ Ok(written)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+ let len;
+
+ unsafe { len = abi::write(2, data.as_ptr() as *const u8, data.len()) }
+
+ if len < 0 {
+ Err(io::const_io_error!(io::ErrorKind::Uncategorized, "Stderr is not able to print"))
+ } else {
+ Ok(len as usize)
+ }
+ }
+
+ fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> {
+ // `abi::write` only understands a single contiguous buffer, so the
+ // slices are written one after another.
+ let mut written = 0;
+ for buf in data {
+ let len = unsafe { abi::write(2, buf.as_ptr(), buf.len()) };
+ if len < 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "Stderr is not able to print"
+ ));
+ }
+ written += len as usize;
+ if (len as usize) < buf.len() {
+ break;
+ }
+ }
+ Ok(written)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub const STDIN_BUF_SIZE: usize = 0;
+
+pub fn is_ebadf(_err: &io::Error) -> bool {
+ true
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ Some(Stderr::new())
+}
diff --git a/library/std/src/sys/hermit/thread.rs b/library/std/src/sys/hermit/thread.rs
new file mode 100644
index 000000000..e53a1fea6
--- /dev/null
+++ b/library/std/src/sys/hermit/thread.rs
@@ -0,0 +1,112 @@
+#![allow(dead_code)]
+
+use super::unsupported;
+use crate::ffi::CStr;
+use crate::io;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::sys::hermit::abi;
+use crate::sys::hermit::thread_local_dtor::run_dtors;
+use crate::time::Duration;
+
+pub type Tid = abi::Tid;
+
+pub struct Thread {
+ tid: Tid,
+}
+
+unsafe impl Send for Thread {}
+unsafe impl Sync for Thread {}
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 1 << 20;
+
+impl Thread {
+ pub unsafe fn new_with_coreid(
+ stack: usize,
+ p: Box<dyn FnOnce()>,
+ core_id: isize,
+ ) -> io::Result<Thread> {
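+ // `p` is boxed a second time so that a single thin pointer can be
+ // passed to `spawn2` as a `usize`; `thread_start` reconstructs the
+ // `Box<Box<dyn FnOnce()>>` and calls it.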
+ let p = Box::into_raw(box p);
+ let tid = abi::spawn2(
+ thread_start,
+ p as usize,
+ abi::Priority::into(abi::NORMAL_PRIO),
+ stack,
+ core_id,
+ );
+
+ return if tid == 0 {
+ // The thread failed to start and as a result p was not consumed. Therefore, it is
+ // safe to reconstruct the box so that it gets deallocated.
+ drop(Box::from_raw(p));
+ Err(io::const_io_error!(io::ErrorKind::Uncategorized, "Unable to create thread!"))
+ } else {
+ Ok(Thread { tid: tid })
+ };
+
+ extern "C" fn thread_start(main: usize) {
+ unsafe {
+ // Finally, let's run some code.
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+
+ // run all destructors
+ run_dtors();
+ }
+ }
+ }
+
+ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ Thread::new_with_coreid(stack, p, -1 /* = no specific core */)
+ }
+
+ #[inline]
+ pub fn yield_now() {
+ unsafe {
+ abi::yield_now();
+ }
+ }
+
+ #[inline]
+ pub fn set_name(_name: &CStr) {
+ // nope
+ }
+
+ #[inline]
+ pub fn sleep(dur: Duration) {
+ unsafe {
+ abi::usleep(dur.as_micros() as u64);
+ }
+ }
+
+ pub fn join(self) {
+ unsafe {
+ let _ = abi::join(self.tid);
+ }
+ }
+
+ #[inline]
+ pub fn id(&self) -> Tid {
+ self.tid
+ }
+
+ #[inline]
+ pub fn into_id(self) -> Tid {
+ let id = self.tid;
+ mem::forget(self);
+ id
+ }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ unsupported()
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
diff --git a/library/std/src/sys/hermit/thread_local_dtor.rs b/library/std/src/sys/hermit/thread_local_dtor.rs
new file mode 100644
index 000000000..9b683fce1
--- /dev/null
+++ b/library/std/src/sys/hermit/thread_local_dtor.rs
@@ -0,0 +1,36 @@
+#![cfg(target_thread_local)]
+#![unstable(feature = "thread_local_internals", issue = "none")]
+
+// Simplify dtor registration by using a list of destructors.
+// This solution works like the macOS implementation and doesn't
+// need additional OS support.
+
+use crate::cell::Cell;
+use crate::ptr;
+
+#[thread_local]
+static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut());
+
+type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>;
+
+pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ if DTORS.get().is_null() {
+ let v: Box<List> = box Vec::new();
+ DTORS.set(Box::into_raw(v));
+ }
+
+ let list: &mut List = &mut *DTORS.get();
+ list.push((t, dtor));
+}
+
+// Every thread calls this function to run through all registered destructors.
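+// Destructors may register new destructors while they run, so the list is
+// re-taken from `DTORS` until it stays empty.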
+pub unsafe fn run_dtors() {
+ let mut ptr = DTORS.replace(ptr::null_mut());
+ while !ptr.is_null() {
+ let list = Box::from_raw(ptr);
+ for (ptr, dtor) in list.into_iter() {
+ dtor(ptr);
+ }
+ ptr = DTORS.replace(ptr::null_mut());
+ }
+}
diff --git a/library/std/src/sys/hermit/time.rs b/library/std/src/sys/hermit/time.rs
new file mode 100644
index 000000000..c17e6c8af
--- /dev/null
+++ b/library/std/src/sys/hermit/time.rs
@@ -0,0 +1,156 @@
+#![allow(dead_code)]
+
+use crate::cmp::Ordering;
+use crate::sys::hermit::abi;
+use crate::sys::hermit::abi::timespec;
+use crate::sys::hermit::abi::{CLOCK_MONOTONIC, CLOCK_REALTIME, NSEC_PER_SEC};
+use crate::time::Duration;
+use core::hash::{Hash, Hasher};
+
+#[derive(Copy, Clone, Debug)]
+struct Timespec {
+ t: timespec,
+}
+
+impl Timespec {
+ const fn zero() -> Timespec {
+ Timespec { t: timespec { tv_sec: 0, tv_nsec: 0 } }
+ }
+
+ fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
+ if self >= other {
+ Ok(if self.t.tv_nsec >= other.t.tv_nsec {
+ Duration::new(
+ (self.t.tv_sec - other.t.tv_sec) as u64,
+ (self.t.tv_nsec - other.t.tv_nsec) as u32,
+ )
+ } else {
+ Duration::new(
+ (self.t.tv_sec - 1 - other.t.tv_sec) as u64,
+ self.t.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.t.tv_nsec as u32,
+ )
+ })
+ } else {
+ match other.sub_timespec(self) {
+ Ok(d) => Err(d),
+ Err(d) => Ok(d),
+ }
+ }
+ }
+
+ fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> {
+ let mut secs = other
+ .as_secs()
+ .try_into() // <- target type would be `libc::time_t`
+ .ok()
+ .and_then(|secs| self.t.tv_sec.checked_add(secs))?;
+
+ // Nano calculations can't overflow because nanos are <1B which fit
+ // in a u32.
+ let mut nsec = other.subsec_nanos() + self.t.tv_nsec as u32;
+ if nsec >= NSEC_PER_SEC as u32 {
+ nsec -= NSEC_PER_SEC as u32;
+ secs = secs.checked_add(1)?;
+ }
+ Some(Timespec { t: timespec { tv_sec: secs, tv_nsec: nsec as _ } })
+ }
+
+ fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> {
+ let mut secs = other
+ .as_secs()
+ .try_into() // <- target type would be `libc::time_t`
+ .ok()
+ .and_then(|secs| self.t.tv_sec.checked_sub(secs))?;
+
+ // Similar to above, nanos can't overflow.
+ let mut nsec = self.t.tv_nsec as i32 - other.subsec_nanos() as i32;
+ if nsec < 0 {
+ nsec += NSEC_PER_SEC as i32;
+ secs = secs.checked_sub(1)?;
+ }
+ Some(Timespec { t: timespec { tv_sec: secs, tv_nsec: nsec as _ } })
+ }
+}
+
+impl PartialEq for Timespec {
+ fn eq(&self, other: &Timespec) -> bool {
+ self.t.tv_sec == other.t.tv_sec && self.t.tv_nsec == other.t.tv_nsec
+ }
+}
+
+impl Eq for Timespec {}
+
+impl PartialOrd for Timespec {
+ fn partial_cmp(&self, other: &Timespec) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for Timespec {
+ fn cmp(&self, other: &Timespec) -> Ordering {
+ let me = (self.t.tv_sec, self.t.tv_nsec);
+ let other = (other.t.tv_sec, other.t.tv_nsec);
+ me.cmp(&other)
+ }
+}
+
+impl Hash for Timespec {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.t.tv_sec.hash(state);
+ self.t.tv_nsec.hash(state);
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct Instant {
+ t: Timespec,
+}
+
+impl Instant {
+ pub fn now() -> Instant {
+ let mut time: Timespec = Timespec::zero();
+ let _ = unsafe { abi::clock_gettime(CLOCK_MONOTONIC, &mut time.t as *mut timespec) };
+
+ Instant { t: time }
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.t.sub_timespec(&other.t).ok()
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_add_duration(other)? })
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_sub_duration(other)? })
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub struct SystemTime {
+ t: Timespec,
+}
+
+pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
+
+impl SystemTime {
+ pub fn now() -> SystemTime {
+ let mut time: Timespec = Timespec::zero();
+ let _ = unsafe { abi::clock_gettime(CLOCK_REALTIME, &mut time.t as *mut timespec) };
+
+ SystemTime { t: time }
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ self.t.sub_timespec(&other.t)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime { t: self.t.checked_add_duration(other)? })
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime { t: self.t.checked_sub_duration(other)? })
+ }
+}
diff --git a/library/std/src/sys/itron/abi.rs b/library/std/src/sys/itron/abi.rs
new file mode 100644
index 000000000..5eb14bb7e
--- /dev/null
+++ b/library/std/src/sys/itron/abi.rs
@@ -0,0 +1,197 @@
+//! ABI for μITRON derivatives
+pub type int_t = crate::os::raw::c_int;
+pub type uint_t = crate::os::raw::c_uint;
+pub type bool_t = int_t;
+
+/// Kernel object ID
+pub type ID = int_t;
+
+/// The current task.
+pub const TSK_SELF: ID = 0;
+
+/// Relative time
+pub type RELTIM = u32;
+
+/// Timeout (a valid `RELTIM` value or `TMO_FEVR`)
+pub type TMO = u32;
+
+/// The infinite timeout value
+pub const TMO_FEVR: TMO = TMO::MAX;
+
+/// The maximum valid value of `RELTIM`
+pub const TMAX_RELTIM: RELTIM = 4_000_000_000;
+
+/// System time
+pub type SYSTIM = u64;
+
+/// Error code type
+pub type ER = int_t;
+
+/// Error code type, `ID` on success
+pub type ER_ID = int_t;
+
+/// Service call operational mode
+pub type MODE = uint_t;
+
+/// OR waiting condition for an eventflag
+pub const TWF_ORW: MODE = 0x01;
+
+/// Object attributes
+pub type ATR = uint_t;
+
+/// FIFO wait order
+pub const TA_FIFO: ATR = 0;
+/// Only one task is allowed to be in the waiting state for the eventflag
+pub const TA_WSGL: ATR = 0;
+/// The eventflag’s bit pattern is cleared when a task is released from the
+/// waiting state for that eventflag.
+pub const TA_CLR: ATR = 0x04;
+
+/// Bit pattern of an eventflag
+pub type FLGPTN = uint_t;
+
+/// Task or interrupt priority
+pub type PRI = int_t;
+
+/// The special value of `PRI` representing the current task's priority.
+pub const TPRI_SELF: PRI = 0;
+
+/// Use the priority inheritance protocol
+#[cfg(target_os = "solid_asp3")]
+pub const TA_INHERIT: ATR = 0x02;
+
+/// Activate the task on creation
+pub const TA_ACT: ATR = 0x01;
+
+/// The maximum count of a semaphore
+pub const TMAX_MAXSEM: uint_t = uint_t::MAX;
+
+/// Callback parameter
+pub type EXINF = isize;
+
+/// Task entrypoint
+pub type TASK = Option<unsafe extern "C" fn(EXINF)>;
+
+// Error codes
+pub const E_OK: ER = 0;
+pub const E_SYS: ER = -5;
+pub const E_NOSPT: ER = -9;
+pub const E_RSFN: ER = -10;
+pub const E_RSATR: ER = -11;
+pub const E_PAR: ER = -17;
+pub const E_ID: ER = -18;
+pub const E_CTX: ER = -25;
+pub const E_MACV: ER = -26;
+pub const E_OACV: ER = -27;
+pub const E_ILUSE: ER = -28;
+pub const E_NOMEM: ER = -33;
+pub const E_NOID: ER = -34;
+pub const E_NORES: ER = -35;
+pub const E_OBJ: ER = -41;
+pub const E_NOEXS: ER = -42;
+pub const E_QOVR: ER = -43;
+pub const E_RLWAI: ER = -49;
+pub const E_TMOUT: ER = -50;
+pub const E_DLT: ER = -51;
+pub const E_CLS: ER = -52;
+pub const E_RASTER: ER = -53;
+pub const E_WBLK: ER = -57;
+pub const E_BOVR: ER = -58;
+pub const E_COMM: ER = -65;
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub struct T_CSEM {
+ pub sematr: ATR,
+ pub isemcnt: uint_t,
+ pub maxsem: uint_t,
+}
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub struct T_CFLG {
+ pub flgatr: ATR,
+ pub iflgptn: FLGPTN,
+}
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub struct T_CMTX {
+ pub mtxatr: ATR,
+ pub ceilpri: PRI,
+}
+
+#[derive(Clone, Copy)]
+#[repr(C)]
+pub struct T_CTSK {
+ pub tskatr: ATR,
+ pub exinf: EXINF,
+ pub task: TASK,
+ pub itskpri: PRI,
+ pub stksz: usize,
+ pub stk: *mut u8,
+}
+
+extern "C" {
+ #[link_name = "__asp3_acre_tsk"]
+ pub fn acre_tsk(pk_ctsk: *const T_CTSK) -> ER_ID;
+ #[link_name = "__asp3_get_tid"]
+ pub fn get_tid(p_tskid: *mut ID) -> ER;
+ #[link_name = "__asp3_dly_tsk"]
+ pub fn dly_tsk(dlytim: RELTIM) -> ER;
+ #[link_name = "__asp3_ter_tsk"]
+ pub fn ter_tsk(tskid: ID) -> ER;
+ #[link_name = "__asp3_del_tsk"]
+ pub fn del_tsk(tskid: ID) -> ER;
+ #[link_name = "__asp3_get_pri"]
+ pub fn get_pri(tskid: ID, p_tskpri: *mut PRI) -> ER;
+ #[link_name = "__asp3_rot_rdq"]
+ pub fn rot_rdq(tskpri: PRI) -> ER;
+ #[link_name = "__asp3_slp_tsk"]
+ pub fn slp_tsk() -> ER;
+ #[link_name = "__asp3_tslp_tsk"]
+ pub fn tslp_tsk(tmout: TMO) -> ER;
+ #[link_name = "__asp3_wup_tsk"]
+ pub fn wup_tsk(tskid: ID) -> ER;
+ #[link_name = "__asp3_unl_cpu"]
+ pub fn unl_cpu() -> ER;
+ #[link_name = "__asp3_dis_dsp"]
+ pub fn dis_dsp() -> ER;
+ #[link_name = "__asp3_ena_dsp"]
+ pub fn ena_dsp() -> ER;
+ #[link_name = "__asp3_sns_dsp"]
+ pub fn sns_dsp() -> bool_t;
+ #[link_name = "__asp3_get_tim"]
+ pub fn get_tim(p_systim: *mut SYSTIM) -> ER;
+ #[link_name = "__asp3_acre_flg"]
+ pub fn acre_flg(pk_cflg: *const T_CFLG) -> ER_ID;
+ #[link_name = "__asp3_del_flg"]
+ pub fn del_flg(flgid: ID) -> ER;
+ #[link_name = "__asp3_set_flg"]
+ pub fn set_flg(flgid: ID, setptn: FLGPTN) -> ER;
+ #[link_name = "__asp3_clr_flg"]
+ pub fn clr_flg(flgid: ID, clrptn: FLGPTN) -> ER;
+ #[link_name = "__asp3_wai_flg"]
+ pub fn wai_flg(flgid: ID, waiptn: FLGPTN, wfmode: MODE, p_flgptn: *mut FLGPTN) -> ER;
+ #[link_name = "__asp3_twai_flg"]
+ pub fn twai_flg(
+ flgid: ID,
+ waiptn: FLGPTN,
+ wfmode: MODE,
+ p_flgptn: *mut FLGPTN,
+ tmout: TMO,
+ ) -> ER;
+ #[link_name = "__asp3_acre_mtx"]
+ pub fn acre_mtx(pk_cmtx: *const T_CMTX) -> ER_ID;
+ #[link_name = "__asp3_del_mtx"]
+ pub fn del_mtx(tskid: ID) -> ER;
+ #[link_name = "__asp3_loc_mtx"]
+ pub fn loc_mtx(mtxid: ID) -> ER;
+ #[link_name = "__asp3_ploc_mtx"]
+ pub fn ploc_mtx(mtxid: ID) -> ER;
+ #[link_name = "__asp3_tloc_mtx"]
+ pub fn tloc_mtx(mtxid: ID, tmout: TMO) -> ER;
+ #[link_name = "__asp3_unl_mtx"]
+ pub fn unl_mtx(mtxid: ID) -> ER;
+ pub fn exd_tsk() -> ER;
+}
diff --git a/library/std/src/sys/itron/condvar.rs b/library/std/src/sys/itron/condvar.rs
new file mode 100644
index 000000000..008cd8fb1
--- /dev/null
+++ b/library/std/src/sys/itron/condvar.rs
@@ -0,0 +1,297 @@
+//! POSIX condition variable implementation based on user-space wait queues.
+use super::{abi, error::expect_success_aborting, spin::SpinMutex, task, time::with_tmos_strong};
+use crate::{mem::replace, ptr::NonNull, sys::locks::Mutex, time::Duration};
+
+// The implementation is inspired by the queue-based implementation shown in
+// Andrew D. Birrell's paper "Implementing Condition Variables with Semaphores"
+
+pub struct Condvar {
+ waiters: SpinMutex<waiter_queue::WaiterQueue>,
+}
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+pub type MovableCondvar = Condvar;
+
+impl Condvar {
+ #[inline]
+ pub const fn new() -> Condvar {
+ Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ pub unsafe fn notify_one(&self) {
+ self.waiters.with_locked(|waiters| {
+ if let Some(task) = waiters.pop_front() {
+ // Unpark the task
+ match unsafe { abi::wup_tsk(task) } {
+ // The task already has a token.
+ abi::E_QOVR => {}
+ // Can't undo the effect; abort the program on failure
+ er => {
+ expect_success_aborting(er, &"wup_tsk");
+ }
+ }
+ }
+ });
+ }
+
+ pub unsafe fn notify_all(&self) {
+ self.waiters.with_locked(|waiters| {
+ while let Some(task) = waiters.pop_front() {
+ // Unpark the task
+ match unsafe { abi::wup_tsk(task) } {
+ // The task already has a token.
+ abi::E_QOVR => {}
+ // Can't undo the effect; abort the program on failure
+ er => {
+ expect_success_aborting(er, &"wup_tsk");
+ }
+ }
+ }
+ });
+ }
+
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ // Construct `Waiter`.
+ let mut waiter = waiter_queue::Waiter::new();
+ let waiter = NonNull::from(&mut waiter);
+
+ self.waiters.with_locked(|waiters| unsafe {
+ waiters.insert(waiter);
+ });
+
+ unsafe { mutex.unlock() };
+
+ // Wait until `waiter` is removed from the queue
+ loop {
+ // Park the current task
+ expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk");
+
+ if !self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) {
+ break;
+ }
+ }
+
+ unsafe { mutex.lock() };
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ // Construct and pin `Waiter`
+ let mut waiter = waiter_queue::Waiter::new();
+ let waiter = NonNull::from(&mut waiter);
+
+ self.waiters.with_locked(|waiters| unsafe {
+ waiters.insert(waiter);
+ });
+
+ unsafe { mutex.unlock() };
+
+ // Park the current task and do not wake up until the timeout elapses
+ // or the task gets woken up by `notify_*`
+ match with_tmos_strong(dur, |tmo| {
+ let er = unsafe { abi::tslp_tsk(tmo) };
+ if er == 0 {
+ // We were unparked. Are we really dequeued?
+ if self.waiters.with_locked(|waiters| unsafe { waiters.is_queued(waiter) }) {
+ // No we are not. Continue waiting.
+ return abi::E_TMOUT;
+ }
+ }
+ er
+ }) {
+ abi::E_TMOUT => {}
+ er => {
+ expect_success_aborting(er, &"tslp_tsk");
+ }
+ }
+
+ // Remove `waiter` from `self.waiters`. If `waiter` is still in
+ // `waiters`, it means we woke up because of a timeout. Otherwise,
+ // we woke up because of `notify_*`.
+ let success = self.waiters.with_locked(|waiters| unsafe { !waiters.remove(waiter) });
+
+ unsafe { mutex.lock() };
+ success
+ }
+}
+
+mod waiter_queue {
+ use super::*;
+
+ pub struct WaiterQueue {
+ head: Option<ListHead>,
+ }
+
+ #[derive(Copy, Clone)]
+ struct ListHead {
+ first: NonNull<Waiter>,
+ last: NonNull<Waiter>,
+ }
+
+ unsafe impl Send for ListHead {}
+ unsafe impl Sync for ListHead {}
+
+ pub struct Waiter {
+ // These fields are only accessed through `&[mut] WaiterQueue`.
+ /// The waiting task's ID. Will be zeroed when the task is woken up
+ /// and removed from a queue.
+ task: abi::ID,
+ priority: abi::PRI,
+ prev: Option<NonNull<Waiter>>,
+ next: Option<NonNull<Waiter>>,
+ }
+
+ unsafe impl Send for Waiter {}
+ unsafe impl Sync for Waiter {}
+
+ impl Waiter {
+ #[inline]
+ pub fn new() -> Self {
+ let task = task::current_task_id();
+ let priority = task::task_priority(abi::TSK_SELF);
+
+ // A zero `Waiter::task` indicates that the `Waiter` is not linked
+ // to any queue. Maintaining this invariant is important for
+ // correctness.
+ debug_assert_ne!(task, 0);
+
+ Self { task, priority, prev: None, next: None }
+ }
+ }
+
+ impl WaiterQueue {
+ #[inline]
+ pub const fn new() -> Self {
+ Self { head: None }
+ }
+
+ /// # Safety
+ ///
+ /// - The caller must own `*waiter_ptr`. The caller will lose the
+ /// ownership until `*waiter_ptr` is removed from `self`.
+ ///
+ /// - `*waiter_ptr` must be valid until it's removed from the queue.
+ ///
+ /// - `*waiter_ptr` must not have been previously inserted to a `WaiterQueue`.
+ ///
+ pub unsafe fn insert(&mut self, mut waiter_ptr: NonNull<Waiter>) {
+ unsafe {
+ let waiter = waiter_ptr.as_mut();
+
+ debug_assert!(waiter.prev.is_none());
+ debug_assert!(waiter.next.is_none());
+
+ if let Some(head) = &mut self.head {
+ // Find the insertion position and insert `waiter`
+ let insert_after = {
+ let mut cursor = head.last;
+ loop {
+ if waiter.priority >= cursor.as_ref().priority {
+ // `cursor` and all the waiters before it have the same or
+ // higher priority than the new waiter. Insert the new
+ // waiter right after `cursor`.
+ break Some(cursor);
+ }
+ cursor = if let Some(prev) = cursor.as_ref().prev {
+ prev
+ } else {
+ break None;
+ };
+ }
+ };
+
+ if let Some(mut insert_after) = insert_after {
+ // Insert `waiter` after `insert_after`
+ let insert_before = insert_after.as_ref().next;
+
+ waiter.prev = Some(insert_after);
+ insert_after.as_mut().next = Some(waiter_ptr);
+
+ waiter.next = insert_before;
+ if let Some(mut insert_before) = insert_before {
+ insert_before.as_mut().prev = Some(waiter_ptr);
+ } else {
+ head.last = waiter_ptr;
+ }
+ } else {
+ // Insert `waiter` to the front
+ waiter.next = Some(head.first);
+ head.first.as_mut().prev = Some(waiter_ptr);
+ head.first = waiter_ptr;
+ }
+ } else {
+ // `waiter` is the only element
+ self.head = Some(ListHead { first: waiter_ptr, last: waiter_ptr });
+ }
+ }
+ }
+
+ /// Given a `Waiter` that was previously inserted to `self`, remove
+ /// it from `self` if it's still there.
+ #[inline]
+ pub unsafe fn remove(&mut self, mut waiter_ptr: NonNull<Waiter>) -> bool {
+ unsafe {
+ let waiter = waiter_ptr.as_mut();
+ if waiter.task != 0 {
+ let head = self.head.as_mut().unwrap();
+
+ match (waiter.prev, waiter.next) {
+ (Some(mut prev), Some(mut next)) => {
+ prev.as_mut().next = Some(next);
+ next.as_mut().prev = Some(prev);
+ }
+ (None, Some(mut next)) => {
+ head.first = next;
+ next.as_mut().prev = None;
+ }
+ (Some(mut prev), None) => {
+ prev.as_mut().next = None;
+ head.last = prev;
+ }
+ (None, None) => {
+ self.head = None;
+ }
+ }
+
+ waiter.task = 0;
+
+ true
+ } else {
+ false
+ }
+ }
+ }
+
+ /// Given a `Waiter` that was previously inserted to `self`, return a
+ /// flag indicating whether it's still in `self`.
+ #[inline]
+ pub unsafe fn is_queued(&self, waiter: NonNull<Waiter>) -> bool {
+ unsafe { waiter.as_ref().task != 0 }
+ }
+
+ #[inline]
+ pub fn pop_front(&mut self) -> Option<abi::ID> {
+ unsafe {
+ let head = self.head.as_mut()?;
+ let waiter = head.first.as_mut();
+
+ // Get the ID
+ let id = replace(&mut waiter.task, 0);
+
+ // Unlink the waiter
+ if let Some(mut next) = waiter.next {
+ head.first = next;
+ next.as_mut().prev = None;
+ } else {
+ self.head = None;
+ }
+
+ Some(id)
+ }
+ }
+ }
+}
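For orientation, the sketch below shows the classic use of this primitive through the public API; it is illustrative only and not part of the patch. It exercises `std::sync::{Mutex, Condvar}`, which on this target are ultimately backed by the `slp_tsk`/`wup_tsk`-based implementation above.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        // Set the predicate under the lock, then wake one waiter
        // (ultimately a `wup_tsk` on the parked task).
        *lock.lock().unwrap() = true;
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    // Re-check the predicate after every wakeup, mirroring how `wait`
    // above re-checks `is_queued` after each `slp_tsk` return.
    while !*ready {
        ready = cvar.wait(ready).unwrap();
    }
}
```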
diff --git a/library/std/src/sys/itron/error.rs b/library/std/src/sys/itron/error.rs
new file mode 100644
index 000000000..830c60d32
--- /dev/null
+++ b/library/std/src/sys/itron/error.rs
@@ -0,0 +1,159 @@
+use crate::{fmt, io::ErrorKind};
+
+use super::abi;
+
+/// Wraps a μITRON error code.
+#[derive(Debug, Copy, Clone)]
+pub struct ItronError {
+ er: abi::ER,
+}
+
+impl ItronError {
+ /// Construct `ItronError` from the specified error code. Returns `None` if the
+ /// error code does not represent a failure or warning.
+ #[inline]
+ pub fn new(er: abi::ER) -> Option<Self> {
+ if er < 0 { Some(Self { er }) } else { None }
+ }
+
+ /// Returns `Ok(er)` if `er` represents a success or `Err(_)` otherwise.
+ #[inline]
+ pub fn err_if_negative(er: abi::ER) -> Result<abi::ER, Self> {
+ if let Some(error) = Self::new(er) { Err(error) } else { Ok(er) }
+ }
+
+ /// Get the raw error code.
+ #[inline]
+ pub fn as_raw(&self) -> abi::ER {
+ self.er
+ }
+}
+
+impl fmt::Display for ItronError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Allow the platforms to extend `error_name`
+ if let Some(name) = crate::sys::error::error_name(self.er) {
+ write!(f, "{} ({})", name, self.er)
+ } else {
+ write!(f, "{}", self.er)
+ }
+ }
+}
+
+/// Describe the specified μITRON error code. Returns `None` if it's an
+/// undefined error code.
+pub fn error_name(er: abi::ER) -> Option<&'static str> {
+ match er {
+ // Success
+ er if er >= 0 => None,
+
+ // μITRON 4.0
+ abi::E_SYS => Some("system error"),
+ abi::E_NOSPT => Some("unsupported function"),
+ abi::E_RSFN => Some("reserved function code"),
+ abi::E_RSATR => Some("reserved attribute"),
+ abi::E_PAR => Some("parameter error"),
+ abi::E_ID => Some("invalid ID number"),
+ abi::E_CTX => Some("context error"),
+ abi::E_MACV => Some("memory access violation"),
+ abi::E_OACV => Some("object access violation"),
+ abi::E_ILUSE => Some("illegal service call use"),
+ abi::E_NOMEM => Some("insufficient memory"),
+ abi::E_NOID => Some("no ID number available"),
+ abi::E_OBJ => Some("object state error"),
+ abi::E_NOEXS => Some("non-existent object"),
+ abi::E_QOVR => Some("queue overflow"),
+ abi::E_RLWAI => Some("forced release from waiting"),
+ abi::E_TMOUT => Some("polling failure or timeout"),
+ abi::E_DLT => Some("waiting object deleted"),
+ abi::E_CLS => Some("waiting object state changed"),
+ abi::E_WBLK => Some("non-blocking code accepted"),
+ abi::E_BOVR => Some("buffer overflow"),
+
+ // The TOPPERS third generation kernels
+ abi::E_NORES => Some("insufficient system resources"),
+ abi::E_RASTER => Some("termination request raised"),
+ abi::E_COMM => Some("communication failure"),
+
+ _ => None,
+ }
+}
+
+pub fn decode_error_kind(er: abi::ER) -> ErrorKind {
+ match er {
+ // Success
+ er if er >= 0 => ErrorKind::Uncategorized,
+
+ // μITRON 4.0
+ // abi::E_SYS
+ abi::E_NOSPT => ErrorKind::Unsupported, // Some("unsupported function"),
+ abi::E_RSFN => ErrorKind::InvalidInput, // Some("reserved function code"),
+ abi::E_RSATR => ErrorKind::InvalidInput, // Some("reserved attribute"),
+ abi::E_PAR => ErrorKind::InvalidInput, // Some("parameter error"),
+ abi::E_ID => ErrorKind::NotFound, // Some("invalid ID number"),
+ // abi::E_CTX
+ abi::E_MACV => ErrorKind::PermissionDenied, // Some("memory access violation"),
+ abi::E_OACV => ErrorKind::PermissionDenied, // Some("object access violation"),
+ // abi::E_ILUSE
+ abi::E_NOMEM => ErrorKind::OutOfMemory, // Some("insufficient memory"),
+ abi::E_NOID => ErrorKind::OutOfMemory, // Some("no ID number available"),
+ // abi::E_OBJ
+ abi::E_NOEXS => ErrorKind::NotFound, // Some("non-existent object"),
+ // abi::E_QOVR
+ abi::E_RLWAI => ErrorKind::Interrupted, // Some("forced release from waiting"),
+ abi::E_TMOUT => ErrorKind::TimedOut, // Some("polling failure or timeout"),
+ // abi::E_DLT
+ // abi::E_CLS
+ // abi::E_WBLK
+ // abi::E_BOVR
+
+ // The TOPPERS third generation kernels
+ abi::E_NORES => ErrorKind::OutOfMemory, // Some("insufficient system resources"),
+ // abi::E_RASTER
+ // abi::E_COMM
+ _ => ErrorKind::Uncategorized,
+ }
+}
+
+/// Similar to `ItronError::err_if_negative(er).expect()` except that, if the
+/// current thread is already panicking, it prints the message and aborts the
+/// program instead. This ensures the error message is not obscured by a
+/// double panic.
+///
+/// This is useful for diagnosing creation failures of synchronization
+/// primitives that are used by `std`'s internal mechanisms. Such failures
+/// are common when the system is misconfigured with a kernel object pool
+/// that is too small.
+#[inline]
+pub fn expect_success(er: abi::ER, msg: &&str) -> abi::ER {
+ match ItronError::err_if_negative(er) {
+ Ok(x) => x,
+ Err(e) => fail(e, msg),
+ }
+}
+
+/// Similar to `ItronError::err_if_negative(er).expect()` but aborts instead.
+///
+/// Use this where panicking is not allowed or the effect of the failure
+/// would be persistent.
+#[inline]
+pub fn expect_success_aborting(er: abi::ER, msg: &&str) -> abi::ER {
+ match ItronError::err_if_negative(er) {
+ Ok(x) => x,
+ Err(e) => fail_aborting(e, msg),
+ }
+}
+
+#[cold]
+pub fn fail(e: impl fmt::Display, msg: &&str) -> ! {
+ if crate::thread::panicking() {
+ fail_aborting(e, msg)
+ } else {
+ panic!("{} failed: {}", *msg, e)
+ }
+}
+
+#[cold]
+pub fn fail_aborting(e: impl fmt::Display, msg: &&str) -> ! {
+ rtabort!("{} failed: {}", *msg, e)
+}
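As a rough illustration of the calling convention this module establishes, the sketch below wraps one of the raw services declared in `abi`. It assumes the `abi`, `expect_success`, and `ItronError` items from this patch are in scope, the helper names are hypothetical, and it is not compilable on its own.

```rust
// Panicking path: readable message formatting and abort-on-double-panic
// are handled by `fail`.
fn sleep_microseconds(us: abi::RELTIM) {
    expect_success(unsafe { abi::dly_tsk(us) }, &"dly_tsk");
}

// Fallible path: hand the error code back to the caller instead.
fn try_sleep_microseconds(us: abi::RELTIM) -> Result<(), ItronError> {
    ItronError::err_if_negative(unsafe { abi::dly_tsk(us) }).map(drop)
}
```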
diff --git a/library/std/src/sys/itron/mutex.rs b/library/std/src/sys/itron/mutex.rs
new file mode 100644
index 000000000..715e94c3b
--- /dev/null
+++ b/library/std/src/sys/itron/mutex.rs
@@ -0,0 +1,93 @@
+//! Mutex implementation backed by μITRON mutexes. Assumes `acre_mtx` and
+//! `TA_INHERIT` are available.
+use super::{
+ abi,
+ error::{expect_success, expect_success_aborting, fail, ItronError},
+ spin::SpinIdOnceCell,
+};
+
+pub struct Mutex {
+ /// The ID of the underlying mutex object
+ mtx: SpinIdOnceCell<()>,
+}
+
+pub type MovableMutex = Mutex;
+
+/// Create a mutex object. This function never panics.
+fn new_mtx() -> Result<abi::ID, ItronError> {
+ ItronError::err_if_negative(unsafe {
+ abi::acre_mtx(&abi::T_CMTX {
+ // Priority inheritance mutex
+ mtxatr: abi::TA_INHERIT,
+ // Unused
+ ceilpri: 0,
+ })
+ })
+}
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Mutex {
+ Mutex { mtx: SpinIdOnceCell::new() }
+ }
+
+ pub unsafe fn init(&mut self) {
+ // Initialize `self.mtx` eagerly
+ let id = new_mtx().unwrap_or_else(|e| fail(e, &"acre_mtx"));
+ unsafe { self.mtx.set_unchecked((id, ())) };
+ }
+
+ /// Get the inner mutex's ID, which is lazily created.
+ fn raw(&self) -> abi::ID {
+ match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) {
+ Ok((id, ())) => id,
+ Err(e) => fail(e, &"acre_mtx"),
+ }
+ }
+
+ pub unsafe fn lock(&self) {
+ let mtx = self.raw();
+ expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx");
+ }
+
+ pub unsafe fn unlock(&self) {
+ let mtx = unsafe { self.mtx.get_unchecked().0 };
+ expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx");
+ }
+
+ pub unsafe fn try_lock(&self) -> bool {
+ let mtx = self.raw();
+ match unsafe { abi::ploc_mtx(mtx) } {
+ abi::E_TMOUT => false,
+ er => {
+ expect_success(er, &"ploc_mtx");
+ true
+ }
+ }
+ }
+}
+
+impl Drop for Mutex {
+ fn drop(&mut self) {
+ if let Some(mtx) = self.mtx.get().map(|x| x.0) {
+ expect_success_aborting(unsafe { abi::del_mtx(mtx) }, &"del_mtx");
+ }
+ }
+}
+
+pub(super) struct MutexGuard<'a>(&'a Mutex);
+
+impl<'a> MutexGuard<'a> {
+ #[inline]
+ pub(super) fn lock(x: &'a Mutex) -> Self {
+ unsafe { x.lock() };
+ Self(x)
+ }
+}
+
+impl Drop for MutexGuard<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe { self.0.unlock() };
+ }
+}
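A minimal sketch of how the crate-internal `MutexGuard` is meant to be used. It assumes the `Mutex` and `MutexGuard` types defined above are in scope (the guard is `pub(super)`, so this only applies within the `itron` module tree), the helper name is hypothetical, and it is not part of the patch.

```rust
// Run `f` with the mutex held. `MutexGuard::lock` issues `loc_mtx`, and
// the guard's `Drop` impl issues `unl_mtx`, so the lock is released on
// every exit path, including unwinding out of `f`.
fn with_mutex<R>(m: &Mutex, f: impl FnOnce() -> R) -> R {
    let _guard = MutexGuard::lock(m);
    f()
}
```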
diff --git a/library/std/src/sys/itron/spin.rs b/library/std/src/sys/itron/spin.rs
new file mode 100644
index 000000000..44d409444
--- /dev/null
+++ b/library/std/src/sys/itron/spin.rs
@@ -0,0 +1,163 @@
+use super::abi;
+use crate::{
+ cell::UnsafeCell,
+ mem::MaybeUninit,
+ sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+/// A mutex implemented by `dis_dsp` (for intra-core synchronization) and a
+/// spinlock (for inter-core synchronization).
+pub struct SpinMutex<T = ()> {
+ locked: AtomicBool,
+ data: UnsafeCell<T>,
+}
+
+impl<T> SpinMutex<T> {
+ #[inline]
+ pub const fn new(x: T) -> Self {
+ Self { locked: AtomicBool::new(false), data: UnsafeCell::new(x) }
+ }
+
+ /// Acquire a lock.
+ #[inline]
+ pub fn with_locked<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
+ struct SpinMutexGuard<'a>(&'a AtomicBool);
+
+ impl Drop for SpinMutexGuard<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ self.0.store(false, Ordering::Release);
+ unsafe { abi::ena_dsp() };
+ }
+ }
+
+ let _guard;
+ if unsafe { abi::sns_dsp() } == 0 {
+ let er = unsafe { abi::dis_dsp() };
+ debug_assert!(er >= 0);
+
+ // Wait until the current processor acquires a lock.
+ while self.locked.swap(true, Ordering::Acquire) {}
+
+ _guard = SpinMutexGuard(&self.locked);
+ }
+
+ f(unsafe { &mut *self.data.get() })
+ }
+}
+
+/// `OnceCell<(abi::ID, T)>` implemented by `dis_dsp` (for intra-core
+/// synchronization) and a spinlock (for inter-core synchronization).
+///
+/// It's assumed that `0` is not a valid ID, and all kernel
+/// object IDs fall into range `1..=usize::MAX`.
+pub struct SpinIdOnceCell<T = ()> {
+ id: AtomicUsize,
+ spin: SpinMutex<()>,
+ extra: UnsafeCell<MaybeUninit<T>>,
+}
+
+const ID_UNINIT: usize = 0;
+
+impl<T> SpinIdOnceCell<T> {
+ #[inline]
+ pub const fn new() -> Self {
+ Self {
+ id: AtomicUsize::new(ID_UNINIT),
+ extra: UnsafeCell::new(MaybeUninit::uninit()),
+ spin: SpinMutex::new(()),
+ }
+ }
+
+ #[inline]
+ pub fn get(&self) -> Option<(abi::ID, &T)> {
+ match self.id.load(Ordering::Acquire) {
+ ID_UNINIT => None,
+ id => Some((id as abi::ID, unsafe { (&*self.extra.get()).assume_init_ref() })),
+ }
+ }
+
+ #[inline]
+ pub fn get_mut(&mut self) -> Option<(abi::ID, &mut T)> {
+ match *self.id.get_mut() {
+ ID_UNINIT => None,
+ id => Some((id as abi::ID, unsafe { (&mut *self.extra.get()).assume_init_mut() })),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn get_unchecked(&self) -> (abi::ID, &T) {
+ (self.id.load(Ordering::Acquire) as abi::ID, unsafe {
+ (&*self.extra.get()).assume_init_ref()
+ })
+ }
+
+ /// Assign the content without checking if it's already initialized or
+ /// being initialized.
+ pub unsafe fn set_unchecked(&self, (id, extra): (abi::ID, T)) {
+ debug_assert!(self.get().is_none());
+
+ // Assumption: A positive `abi::ID` fits in `usize`.
+ debug_assert!(id >= 0);
+ debug_assert!(usize::try_from(id).is_ok());
+ let id = id as usize;
+
+ unsafe { *self.extra.get() = MaybeUninit::new(extra) };
+ self.id.store(id, Ordering::Release);
+ }
+
+ /// Gets the contents of the cell, initializing it with `f` if
+ /// the cell was empty. If the cell was empty and `f` failed, an
+ /// error is returned.
+ ///
+ /// Warning: `f` must not perform a blocking operation, which
+ /// includes panicking.
+ #[inline]
+ pub fn get_or_try_init<F, E>(&self, f: F) -> Result<(abi::ID, &T), E>
+ where
+ F: FnOnce() -> Result<(abi::ID, T), E>,
+ {
+ // Fast path
+ if let Some(x) = self.get() {
+ return Ok(x);
+ }
+
+ self.initialize(f)?;
+
+ debug_assert!(self.get().is_some());
+
+ // Safety: The inner value has been initialized
+ Ok(unsafe { self.get_unchecked() })
+ }
+
+ fn initialize<F, E>(&self, f: F) -> Result<(), E>
+ where
+ F: FnOnce() -> Result<(abi::ID, T), E>,
+ {
+ self.spin.with_locked(|_| {
+ if self.id.load(Ordering::Relaxed) == ID_UNINIT {
+ let (initialized_id, initialized_extra) = f()?;
+
+ // Assumption: A positive `abi::ID` fits in `usize`.
+ debug_assert!(initialized_id >= 0);
+ debug_assert!(usize::try_from(initialized_id).is_ok());
+ let initialized_id = initialized_id as usize;
+
+ // Store the initialized contents. Use the release ordering to
+ // make sure the write is visible to the callers of `get`.
+ unsafe { *self.extra.get() = MaybeUninit::new(initialized_extra) };
+ self.id.store(initialized_id, Ordering::Release);
+ }
+ Ok(())
+ })
+ }
+}
+
+impl<T> Drop for SpinIdOnceCell<T> {
+ #[inline]
+ fn drop(&mut self) {
+ if self.get_mut().is_some() {
+ unsafe { (&mut *self.extra.get()).assume_init_drop() };
+ }
+ }
+}
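The sketch below shows the intended usage pattern for `SpinMutex::with_locked`; it assumes the `SpinMutex` type above is in scope, the helper name is hypothetical, and it is illustrative only.

```rust
// Increment a counter under the spin lock. Because `with_locked`
// disables the dispatcher and spins on the flag, the closure must stay
// short and must not block or panic.
fn bump(counter: &SpinMutex<u32>) -> u32 {
    counter.with_locked(|c| {
        *c += 1;
        *c
    })
}
```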
diff --git a/library/std/src/sys/itron/task.rs b/library/std/src/sys/itron/task.rs
new file mode 100644
index 000000000..94beb50a2
--- /dev/null
+++ b/library/std/src/sys/itron/task.rs
@@ -0,0 +1,44 @@
+use super::{
+ abi,
+ error::{fail, fail_aborting, ItronError},
+};
+
+use crate::mem::MaybeUninit;
+
+/// Get the ID of the task in Running state. Panics on failure.
+#[inline]
+pub fn current_task_id() -> abi::ID {
+ try_current_task_id().unwrap_or_else(|e| fail(e, &"get_tid"))
+}
+
+/// Get the ID of the task in Running state. Aborts on failure.
+#[inline]
+pub fn current_task_id_aborting() -> abi::ID {
+ try_current_task_id().unwrap_or_else(|e| fail_aborting(e, &"get_tid"))
+}
+
+/// Get the ID of the task in Running state.
+#[inline]
+pub fn try_current_task_id() -> Result<abi::ID, ItronError> {
+ unsafe {
+ let mut out = MaybeUninit::uninit();
+ ItronError::err_if_negative(abi::get_tid(out.as_mut_ptr()))?;
+ Ok(out.assume_init())
+ }
+}
+
+/// Get the specified task's priority. Panics on failure.
+#[inline]
+pub fn task_priority(task: abi::ID) -> abi::PRI {
+ try_task_priority(task).unwrap_or_else(|e| fail(e, &"get_pri"))
+}
+
+/// Get the specified task's priority.
+#[inline]
+pub fn try_task_priority(task: abi::ID) -> Result<abi::PRI, ItronError> {
+ unsafe {
+ let mut out = MaybeUninit::uninit();
+ ItronError::err_if_negative(abi::get_pri(task, out.as_mut_ptr()))?;
+ Ok(out.assume_init())
+ }
+}
diff --git a/library/std/src/sys/itron/thread.rs b/library/std/src/sys/itron/thread.rs
new file mode 100644
index 000000000..d28f57f33
--- /dev/null
+++ b/library/std/src/sys/itron/thread.rs
@@ -0,0 +1,349 @@
+//! Thread implementation backed by μITRON tasks. Assumes `acre_tsk` and
+//! `exd_tsk` are available.
+use super::{
+ abi,
+ error::{expect_success, expect_success_aborting, ItronError},
+ task,
+ time::dur2reltims,
+};
+use crate::{
+ cell::UnsafeCell,
+ ffi::CStr,
+ hint, io,
+ mem::ManuallyDrop,
+ sync::atomic::{AtomicUsize, Ordering},
+ sys::thread_local_dtor::run_dtors,
+ time::Duration,
+};
+
+pub struct Thread {
+ inner: ManuallyDrop<Box<ThreadInner>>,
+
+ /// The ID of the underlying task.
+ task: abi::ID,
+}
+
+/// State data shared between a parent thread and child thread. It's dropped on
+/// a transition to one of the final states.
+struct ThreadInner {
+ /// This field is used on thread creation to pass a closure from
+ /// `Thread::new` to the created task.
+ start: UnsafeCell<ManuallyDrop<Box<dyn FnOnce()>>>,
+
+ /// A state machine. Each transition is annotated with `[...]` in the
+ /// source code.
+ ///
+ /// ```text
+ ///
+ ///    <P>: parent, <C>: child, (?): don't-care
+ ///
+ ///       DETACHED (-1) ---------------------> EXITED (?)
+ ///                      <C>finish/exd_tsk
+ ///          ^
+ ///          |
+ ///          | <P>detach
+ ///          |
+ ///
+ ///       INIT (0) -----------------------> FINISHED (-1)
+ ///                      <C>finish
+ ///          |                                    |
+ ///          | <P>join/slp_tsk                    | <P>join/del_tsk
+ ///          |                                    | <P>detach/del_tsk
+ ///          v                                    v
+ ///
+ ///       JOINING                              JOINED (?)
+ ///       (parent_tid)
+ ///                                            ^
+ ///             \                             /
+ ///              \ <C>finish/wup_tsk         / <P>slp_tsk-complete/ter_tsk
+ ///               \                         /  & del_tsk
+ ///                \                       /
+ ///                 '--> JOIN_FINALIZE ---'
+ ///                          (-1)
+ /// ```
+ lifecycle: AtomicUsize,
+}
+
+// Safety: The only `!Sync` field, `ThreadInner::start`, is only touched by
+// the task represented by `ThreadInner`.
+unsafe impl Sync for ThreadInner {}
+
+const LIFECYCLE_INIT: usize = 0;
+const LIFECYCLE_FINISHED: usize = usize::MAX;
+const LIFECYCLE_DETACHED: usize = usize::MAX;
+const LIFECYCLE_JOIN_FINALIZE: usize = usize::MAX;
+const LIFECYCLE_DETACHED_OR_JOINED: usize = usize::MAX;
+const LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE: usize = usize::MAX;
+// there's no single value for `JOINING`
+
+// 64KiB for 32-bit ISAs, 128KiB for 64-bit ISAs.
+pub const DEFAULT_MIN_STACK_SIZE: usize = 0x4000 * crate::mem::size_of::<usize>();
+
+impl Thread {
+ /// # Safety
+ ///
+ /// See `thread::Builder::spawn_unchecked` for safety requirements.
+ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ let inner = Box::new(ThreadInner {
+ start: UnsafeCell::new(ManuallyDrop::new(p)),
+ lifecycle: AtomicUsize::new(LIFECYCLE_INIT),
+ });
+
+ unsafe extern "C" fn trampoline(exinf: isize) {
+ // Safety: `ThreadInner` is alive at this point
+ let inner = unsafe { &*(exinf as *const ThreadInner) };
+
+ // Safety: Since `trampoline` is called only once for each
+ // `ThreadInner` and only `trampoline` touches `start`,
+ // `start` contains contents and is safe to mutably borrow.
+ let p = unsafe { ManuallyDrop::take(&mut *inner.start.get()) };
+ p();
+
+ // Fix the current thread's state just in case, so that the
+ // destructors won't abort
+ // Safety: Not really unsafe
+ let _ = unsafe { abi::unl_cpu() };
+ let _ = unsafe { abi::ena_dsp() };
+
+ // Run TLS destructors now because they are not
+ // called automatically for terminated tasks.
+ unsafe { run_dtors() };
+
+ let old_lifecycle = inner
+ .lifecycle
+ .swap(LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE, Ordering::Release);
+
+ match old_lifecycle {
+ LIFECYCLE_DETACHED => {
+ // [DETACHED → EXITED]
+ // No one will ever join, so we'll ask the collector task to
+ // delete the task.
+
+ // In this case, `inner`'s ownership has been moved to us,
+ // and we are responsible for dropping it. The acquire
+ // ordering is not necessary because the parent thread made
+ // no memory access needing synchronization since the call
+ // to `acre_tsk`.
+ // Safety: See above.
+ let _ = unsafe { Box::from_raw(inner as *const _ as *mut ThreadInner) };
+
+ // Safety: There are no pinned references to the stack
+ unsafe { terminate_and_delete_current_task() };
+ }
+ LIFECYCLE_INIT => {
+ // [INIT → FINISHED]
+ // The parent hasn't decided whether to join or detach this
+ // thread yet. Whichever option the parent chooses,
+ // it'll have to delete this task.
+ // Since the parent might drop `*inner` as soon as it sees
+ // `FINISHED`, the release ordering must be used in the
+ // above `swap` call.
+ }
+ parent_tid => {
+ // Since the parent might drop `*inner` and terminate us as
+ // soon as it sees `JOIN_FINALIZE`, the release ordering
+ // must be used in the above `swap` call.
+
+ // [JOINING → JOIN_FINALIZE]
+ // Wake up the parent task.
+ expect_success(
+ unsafe {
+ let mut er = abi::wup_tsk(parent_tid as _);
+ if er == abi::E_QOVR {
+ // `E_QOVR` indicates there's already
+ // a parking token
+ er = abi::E_OK;
+ }
+ er
+ },
+ &"wup_tsk",
+ );
+ }
+ }
+ }
+
+ let inner_ptr = (&*inner) as *const ThreadInner;
+
+ let new_task = ItronError::err_if_negative(unsafe {
+ abi::acre_tsk(&abi::T_CTSK {
+ // Activate this task immediately
+ tskatr: abi::TA_ACT,
+ exinf: inner_ptr as abi::EXINF,
+ // The entry point
+ task: Some(trampoline),
+ // Inherit the calling task's base priority
+ itskpri: abi::TPRI_SELF,
+ stksz: stack,
+ // Let the kernel allocate the stack,
+ stk: crate::ptr::null_mut(),
+ })
+ })
+ .map_err(|e| e.as_io_error())?;
+
+ Ok(Self { inner: ManuallyDrop::new(inner), task: new_task })
+ }
+
+ pub fn yield_now() {
+ expect_success(unsafe { abi::rot_rdq(abi::TPRI_SELF) }, &"rot_rdq");
+ }
+
+ pub fn set_name(_name: &CStr) {
+ // nope
+ }
+
+ pub fn sleep(dur: Duration) {
+ for timeout in dur2reltims(dur) {
+ expect_success(unsafe { abi::dly_tsk(timeout) }, &"dly_tsk");
+ }
+ }
+
+ pub fn join(mut self) {
+ let inner = &*self.inner;
+ // Get the current task ID. Panicking here would cause a resource leak,
+ // so just abort on failure.
+ let current_task = task::current_task_id_aborting();
+ debug_assert!(usize::try_from(current_task).is_ok());
+ debug_assert_ne!(current_task as usize, LIFECYCLE_INIT);
+ debug_assert_ne!(current_task as usize, LIFECYCLE_DETACHED);
+
+ let current_task = current_task as usize;
+
+ match inner.lifecycle.swap(current_task, Ordering::Acquire) {
+ LIFECYCLE_INIT => {
+ // [INIT → JOINING]
+ // The child task will transition the state to `JOIN_FINALIZE`
+ // and wake us up.
+ loop {
+ expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk");
+ // To synchronize with the child task's memory accesses to
+ // `inner` up to the point of the assignment of
+ // `JOIN_FINALIZE`, `Ordering::Acquire` must be used for the
+ // `load`.
+ if inner.lifecycle.load(Ordering::Acquire) == LIFECYCLE_JOIN_FINALIZE {
+ break;
+ }
+ }
+
+ // [JOIN_FINALIZE → JOINED]
+ }
+ LIFECYCLE_FINISHED => {
+ // [FINISHED → JOINED]
+ // To synchronize with the child task's memory accesses to
+ // `inner` up to the point of the assignment of `FINISHED`,
+ // `Ordering::Acquire` must be used for the above `swap` call.
+ }
+ _ => unsafe { hint::unreachable_unchecked() },
+ }
+
+ // Terminate and delete the task
+ // Safety: `self.task` still represents a task we own (because this
+ // method or `detach_inner` is called only once for each
+ // `Thread`). The task indicated that it's safe to delete by
+ // entering the `FINISHED` or `JOIN_FINALIZE` state.
+ unsafe { terminate_and_delete_task(self.task) };
+
+ // In either case, we are responsible for dropping `inner`.
+ // Safety: The contents of `self.inner` will not be accessed hereafter
+ let _inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+
+ // Skip the destructor (because it would attempt to detach the thread)
+ crate::mem::forget(self);
+ }
+}
+
+impl Drop for Thread {
+ fn drop(&mut self) {
+ // Detach the thread.
+ match self.inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
+ LIFECYCLE_INIT => {
+ // [INIT → DETACHED]
+ // When the time comes, the child will figure out that no
+ // one will ever join it.
+ // The ownership of `self.inner` is moved to the child thread.
+ // However, the release ordering is not necessary because we
+ // made no memory access needing synchronization since the call
+ // to `acre_tsk`.
+ }
+ LIFECYCLE_FINISHED => {
+ // [FINISHED → JOINED]
+ // The task has already decided that we should delete the task.
+ // To synchronize with the child task's memory accesses to
+ // `inner` up to the point of the assignment of `FINISHED`,
+ // the acquire ordering is required for the above `swap` call.
+
+ // Terminate and delete the task
+ // Safety: `self.task` still represents a task we own (because
+ // this method or `join_inner` is called only once for
+ // each `Thread`). The task indicated that it's safe to
+ // delete by entering the `FINISHED` state.
+ unsafe { terminate_and_delete_task(self.task) };
+
+ // We are responsible for dropping `inner`.
+ // Safety: The contents of `self.inner` will not be accessed
+ // hereafter
+ unsafe { ManuallyDrop::drop(&mut self.inner) };
+ }
+ _ => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
+
+/// Terminate and delete the specified task.
+///
+/// This function will abort if `deleted_task` refers to the calling task.
+///
+/// It is assumed that the specified task is solely managed by the caller -
+/// i.e., other threads must not "resuscitate" the specified task or delete it
+/// prematurely while this function is still in progress. It is allowed for
+/// the specified task to exit on its own.
+///
+/// # Safety
+///
+/// The task must be safe to terminate. This is in general not true
+/// because there might be pinned references to the task's stack.
+unsafe fn terminate_and_delete_task(deleted_task: abi::ID) {
+ // Terminate the task
+ // Safety: Upheld by the caller
+ match unsafe { abi::ter_tsk(deleted_task) } {
+ // Indicates the task is already dormant, ignore it
+ abi::E_OBJ => {}
+ er => {
+ expect_success_aborting(er, &"ter_tsk");
+ }
+ }
+
+ // Delete the task
+ // Safety: Upheld by the caller
+ expect_success_aborting(unsafe { abi::del_tsk(deleted_task) }, &"del_tsk");
+}
+
+/// Terminate and delete the calling task.
+///
+/// Atomicity is not required - i.e., it can be assumed that other threads won't
+/// `ter_tsk` the calling task while this function is still in progress. (This
+/// property makes it easy to implement this operation on μITRON-derived kernels
+/// that don't support `exd_tsk`.)
+///
+/// # Safety
+///
+/// The task must be safe to terminate. This is in general not true
+/// because there might be pinned references to the task's stack.
+unsafe fn terminate_and_delete_current_task() -> ! {
+ expect_success_aborting(unsafe { abi::exd_tsk() }, &"exd_tsk");
+ // Safety: `exd_tsk` never returns on success
+ unsafe { crate::hint::unreachable_unchecked() };
+}
+
+pub fn available_parallelism() -> io::Result<crate::num::NonZeroUsize> {
+ super::unsupported()
+}
diff --git a/library/std/src/sys/itron/time.rs b/library/std/src/sys/itron/time.rs
new file mode 100644
index 000000000..427ea0d80
--- /dev/null
+++ b/library/std/src/sys/itron/time.rs
@@ -0,0 +1,114 @@
+use super::{abi, error::expect_success};
+use crate::{mem::MaybeUninit, time::Duration};
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct Instant(abi::SYSTIM);
+
+impl Instant {
+ pub fn now() -> Instant {
+ // Safety: The provided pointer is valid
+ unsafe {
+ let mut out = MaybeUninit::uninit();
+ expect_success(abi::get_tim(out.as_mut_ptr()), &"get_tim");
+ Instant(out.assume_init())
+ }
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.0.checked_sub(other.0).map(|ticks| {
+ // `SYSTIM` is measured in microseconds
+ Duration::from_micros(ticks)
+ })
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ // `SYSTIM` is measured in microseconds
+ let ticks = other.as_micros();
+
+ Some(Instant(self.0.checked_add(ticks.try_into().ok()?)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ // `SYSTIM` is measured in microseconds
+ let ticks = other.as_micros();
+
+ Some(Instant(self.0.checked_sub(ticks.try_into().ok()?)?))
+ }
+}
+
+/// Split `Duration` into zero or more `RELTIM`s.
+#[inline]
+pub fn dur2reltims(dur: Duration) -> impl Iterator<Item = abi::RELTIM> {
+ // `RELTIM` is microseconds
+ let mut ticks = dur.as_micros();
+
+ crate::iter::from_fn(move || {
+ if ticks == 0 {
+ None
+ } else if ticks <= abi::TMAX_RELTIM as u128 {
+ Some(crate::mem::replace(&mut ticks, 0) as abi::RELTIM)
+ } else {
+ ticks -= abi::TMAX_RELTIM as u128;
+ Some(abi::TMAX_RELTIM)
+ }
+ })
+}
+
+/// Split `Duration` into one or more `TMO`s.
+#[inline]
+fn dur2tmos(dur: Duration) -> impl Iterator<Item = abi::TMO> {
+ // `TMO` is microseconds
+ let mut ticks = dur.as_micros();
+ let mut end = false;
+
+ crate::iter::from_fn(move || {
+ if end {
+ None
+ } else if ticks <= abi::TMAX_RELTIM as u128 {
+ end = true;
+ Some(crate::mem::replace(&mut ticks, 0) as abi::TMO)
+ } else {
+ ticks -= abi::TMAX_RELTIM as u128;
+ Some(abi::TMAX_RELTIM)
+ }
+ })
+}
+
+/// Split `Duration` into one or more API calls with timeout.
+#[inline]
+pub fn with_tmos(dur: Duration, mut f: impl FnMut(abi::TMO) -> abi::ER) -> abi::ER {
+ let mut er = abi::E_TMOUT;
+ for tmo in dur2tmos(dur) {
+ er = f(tmo);
+ if er != abi::E_TMOUT {
+ break;
+ }
+ }
+ er
+}
+
+/// Split `Duration` into one or more API calls with timeout. This function can
+/// handle spurious wakeups.
+#[inline]
+pub fn with_tmos_strong(dur: Duration, mut f: impl FnMut(abi::TMO) -> abi::ER) -> abi::ER {
+ // `TMO` and `SYSTIM` are microseconds.
+ // Clamp at `SYSTIM::MAX` for performance reasons. This shouldn't cause
+ // a problem in practice. (`u64::MAX` μs ≈ 584942 years)
+ let ticks = dur.as_micros().min(abi::SYSTIM::MAX as u128) as abi::SYSTIM;
+
+ let start = Instant::now().0;
+ let mut elapsed = 0;
+ let mut er = abi::E_TMOUT;
+ while elapsed <= ticks {
+ er = f(elapsed.min(abi::TMAX_RELTIM as abi::SYSTIM) as abi::TMO);
+ if er != abi::E_TMOUT {
+ break;
+ }
+ elapsed = Instant::now().0.wrapping_sub(start);
+ }
+
+ er
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/sys/itron/time/tests.rs b/library/std/src/sys/itron/time/tests.rs
new file mode 100644
index 000000000..d14035d9d
--- /dev/null
+++ b/library/std/src/sys/itron/time/tests.rs
@@ -0,0 +1,33 @@
+use super::*;
+
+fn reltim2dur(t: u64) -> Duration {
+ Duration::from_micros(t)
+}
+
+#[test]
+fn test_dur2reltims() {
+ assert_eq!(dur2reltims(reltim2dur(0)).collect::<Vec<_>>(), vec![]);
+ assert_eq!(dur2reltims(reltim2dur(42)).collect::<Vec<_>>(), vec![42]);
+ assert_eq!(
+ dur2reltims(reltim2dur(abi::TMAX_RELTIM as u64)).collect::<Vec<_>>(),
+ vec![abi::TMAX_RELTIM]
+ );
+ assert_eq!(
+ dur2reltims(reltim2dur(abi::TMAX_RELTIM as u64 + 10000)).collect::<Vec<_>>(),
+ vec![abi::TMAX_RELTIM, 10000]
+ );
+}
+
+#[test]
+fn test_dur2tmos() {
+ assert_eq!(dur2tmos(reltim2dur(0)).collect::<Vec<_>>(), vec![0]);
+ assert_eq!(dur2tmos(reltim2dur(42)).collect::<Vec<_>>(), vec![42]);
+ assert_eq!(
+ dur2tmos(reltim2dur(abi::TMAX_RELTIM as u64)).collect::<Vec<_>>(),
+ vec![abi::TMAX_RELTIM]
+ );
+ assert_eq!(
+ dur2tmos(reltim2dur(abi::TMAX_RELTIM as u64 + 10000)).collect::<Vec<_>>(),
+ vec![abi::TMAX_RELTIM, 10000]
+ );
+}
diff --git a/library/std/src/sys/itron/wait_flag.rs b/library/std/src/sys/itron/wait_flag.rs
new file mode 100644
index 000000000..e432edd20
--- /dev/null
+++ b/library/std/src/sys/itron/wait_flag.rs
@@ -0,0 +1,72 @@
+use crate::mem::MaybeUninit;
+use crate::time::Duration;
+
+use super::{
+ abi,
+ error::{expect_success, fail},
+ time::with_tmos,
+};
+
+const CLEAR: abi::FLGPTN = 0;
+const RAISED: abi::FLGPTN = 1;
+
+/// A thread parking primitive that is not susceptible to race conditions,
+/// but provides no atomic ordering guarantees and allows only one `raise` per wait.
+pub struct WaitFlag {
+ flag: abi::ID,
+}
+
+impl WaitFlag {
+ /// Creates a new wait flag.
+ pub fn new() -> WaitFlag {
+ let flag = expect_success(
+ unsafe {
+ abi::acre_flg(&abi::T_CFLG {
+ flgatr: abi::TA_FIFO | abi::TA_WSGL | abi::TA_CLR,
+ iflgptn: CLEAR,
+ })
+ },
+ &"acre_flg",
+ );
+
+ WaitFlag { flag }
+ }
+
+ /// Wait for the wait flag to be raised.
+ pub fn wait(&self) {
+ let mut token = MaybeUninit::uninit();
+ expect_success(
+ unsafe { abi::wai_flg(self.flag, RAISED, abi::TWF_ORW, token.as_mut_ptr()) },
+ &"wai_flg",
+ );
+ }
+
+ /// Wait for the wait flag to be raised or the timeout to occur.
+ ///
+ /// Returns whether the flag was raised (`true`) or the operation timed out (`false`).
+ pub fn wait_timeout(&self, dur: Duration) -> bool {
+ let mut token = MaybeUninit::uninit();
+ let res = with_tmos(dur, |tmout| unsafe {
+ abi::twai_flg(self.flag, RAISED, abi::TWF_ORW, token.as_mut_ptr(), tmout)
+ });
+
+ match res {
+ abi::E_OK => true,
+ abi::E_TMOUT => false,
+ error => fail(error, &"twai_flg"),
+ }
+ }
+
+ /// Raise the wait flag.
+ ///
+ /// Calls to this function should be balanced with the number of successful waits.
+ pub fn raise(&self) {
+ expect_success(unsafe { abi::set_flg(self.flag, RAISED) }, &"set_flg");
+ }
+}
+
+impl Drop for WaitFlag {
+ fn drop(&mut self) {
+ expect_success(unsafe { abi::del_flg(self.flag) }, &"del_flg");
+ }
+}
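A small sketch of the parking pattern `WaitFlag` supports, assuming the type above is in scope; the helper names are hypothetical and the snippet is illustrative only.

```rust
use std::time::Duration;

// Block until another task calls `flag.raise()`. Because `TA_CLR` is
// set, the flag is cleared atomically on wakeup, so each raise
// satisfies exactly one successful wait.
fn park(flag: &WaitFlag) {
    flag.wait();
}

// Returns `true` if the flag was raised, `false` if one second elapsed.
fn park_timeout(flag: &WaitFlag) -> bool {
    flag.wait_timeout(Duration::from_secs(1))
}
```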
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
new file mode 100644
index 000000000..167c918c9
--- /dev/null
+++ b/library/std/src/sys/mod.rs
@@ -0,0 +1,78 @@
+//! Platform-dependent abstraction layer.
+//!
+//! The `std::sys` module is the abstracted interface through which
+//! `std` talks to the underlying operating system. It has different
+//! implementations for different operating system families, today
+//! just Unix and Windows, and initial support for Redox.
+//!
+//! The centralization of platform-specific code in this module is
+//! enforced by the "platform abstraction layer" tidy script in
+//! `tools/tidy/src/pal.rs`.
+//!
+//! This module is closely related to the platform-independent system
+//! integration code in `std::sys_common`. See that module's
+//! documentation for details.
+//!
+//! In the future it would be desirable for the independent
+//! implementations of this module to be extracted to their own crates
+//! that `std` can link to, thus enabling their implementation
+//! out-of-tree via crate replacement. Though due to the complex
+//! inter-dependencies within `std` that will be a challenging goal to
+//! achieve.
+
+#![allow(missing_debug_implementations)]
+
+mod common;
+
+cfg_if::cfg_if! {
+ if #[cfg(unix)] {
+ mod unix;
+ pub use self::unix::*;
+ } else if #[cfg(windows)] {
+ mod windows;
+ pub use self::windows::*;
+ } else if #[cfg(target_os = "solid_asp3")] {
+ mod solid;
+ pub use self::solid::*;
+ } else if #[cfg(target_os = "hermit")] {
+ mod hermit;
+ pub use self::hermit::*;
+ } else if #[cfg(target_os = "wasi")] {
+ mod wasi;
+ pub use self::wasi::*;
+ } else if #[cfg(target_family = "wasm")] {
+ mod wasm;
+ pub use self::wasm::*;
+ } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
+ mod sgx;
+ pub use self::sgx::*;
+ } else {
+ mod unsupported;
+ pub use self::unsupported::*;
+ }
+}
+
+// Import essential modules from platforms used in `std::os` when documenting.
+//
+// Note that on some platforms those modules don't compile
+// (missing things in `libc` which is empty), so they are not included in `std::os` and can be
+// omitted here as well.
+
+#[cfg(doc)]
+#[cfg(not(any(
+ all(target_arch = "wasm32", not(target_os = "wasi")),
+ all(target_vendor = "fortanix", target_env = "sgx")
+)))]
+cfg_if::cfg_if! {
+ if #[cfg(not(windows))] {
+ // On non-Windows platforms (aka linux/osx/etc) pull in a "minimal"
+ // amount of windows goop which ends up compiling
+
+ #[macro_use]
+ #[path = "windows/compat.rs"]
+ pub mod compat;
+
+ #[path = "windows/c.rs"]
+ pub mod c;
+ }
+}
diff --git a/library/std/src/sys/sgx/abi/entry.S b/library/std/src/sys/sgx/abi/entry.S
new file mode 100644
index 000000000..f61bcf06f
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/entry.S
@@ -0,0 +1,372 @@
+/* This symbol is used at runtime to figure out the virtual address that the */
+/* enclave is loaded at. */
+.section absolute
+.global IMAGE_BASE
+IMAGE_BASE:
+
+.section ".note.x86_64-fortanix-unknown-sgx", "", @note
+ .align 4
+ .long 1f - 0f /* name length (not including padding) */
+ .long 3f - 2f /* desc length (not including padding) */
+ .long 1 /* type = NT_VERSION */
+0: .asciz "toolchain-version" /* name */
+1: .align 4
+2: .long 1 /* desc - toolchain version number, 32-bit LE */
+3: .align 4
+
+.section .rodata
+/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
+/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
+/* parts need to have a defined value. In particular: */
+/* */
+/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
+/* RFBM[2] is set, regardless of the value of XSTATE_BV */
+/* * XSAVE header */
+.align 64
+.Lxsave_clear:
+.org .+24
+.Lxsave_mxcsr:
+ .short 0x1f80
+
+/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
+
+/* The following symbols point at read-only data that will be filled in by the */
+/* post-linker. */
+
+/* When using this macro, don't forget to adjust the linker version script! */
+.macro globvar name:req size:req
+ .global \name
+ .protected \name
+ .align \size
+ .size \name , \size
+ \name :
+ .org .+\size
+.endm
+ /* The base address (relative to enclave start) of the heap area */
+ globvar HEAP_BASE 8
+ /* The heap size in bytes */
+ globvar HEAP_SIZE 8
+ /* Value of the RELA entry in the dynamic table */
+ globvar RELA 8
+ /* Value of the RELACOUNT entry in the dynamic table */
+ globvar RELACOUNT 8
+ /* The enclave size in bytes */
+ globvar ENCLAVE_SIZE 8
+ /* The base address (relative to enclave start) of the enclave configuration area */
+ globvar CFGDATA_BASE 8
+ /* Non-zero if debugging is enabled, zero otherwise */
+ globvar DEBUG 1
+ /* The base address (relative to enclave start) of the enclave text section */
+ globvar TEXT_BASE 8
+ /* The size in bytes of enclave text section */
+ globvar TEXT_SIZE 8
+ /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
+ globvar EH_FRM_HDR_OFFSET 8
+ /* The size in bytes of enclave .eh_frame_hdr section */
+ globvar EH_FRM_HDR_LEN 8
+ /* The base address (relative to enclave start) of the enclave .eh_frame section */
+ globvar EH_FRM_OFFSET 8
+ /* The size in bytes of enclave .eh_frame section */
+ globvar EH_FRM_LEN 8
+
+.org .Lxsave_clear+512
+.Lxsave_header:
+ .int 0, 0 /* XSTATE_BV */
+ .int 0, 0 /* XCOMP_BV */
+ .org .+48 /* reserved bits */
+
+.data
+.Laborted:
+ .byte 0
+
+/* TCS local storage section */
+.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
+.equ tcsls_flags, 0x08 /* initialized by loader */
+.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
+.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
+/* 14 unused bits */
+.equ tcsls_user_fcw, 0x0a
+.equ tcsls_user_mxcsr, 0x0c
+.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
+.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
+.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
+.equ tcsls_user_rsp, 0x28
+.equ tcsls_user_retip, 0x30
+.equ tcsls_user_rbp, 0x38
+.equ tcsls_user_r12, 0x40
+.equ tcsls_user_r13, 0x48
+.equ tcsls_user_r14, 0x50
+.equ tcsls_user_r15, 0x58
+.equ tcsls_tls_ptr, 0x60
+.equ tcsls_tcs_addr, 0x68
+
+.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
+ .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
+ .abort
+ .endif
+ mov $(1<<tcsls_flag_secondary),%e\reg
+ and %gs:tcsls_flags,%\reg
+.endm
+
+/* We place the ELF entry point in a separate section so it can be removed by
+ elf2sgxs */
+.section .text_no_sgx, "ax"
+.Lelf_entry_error_msg:
+ .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
+.Lelf_entry_error_msg_end:
+
+.global elf_entry
+.type elf_entry,function
+elf_entry:
+/* print error message */
+ movq $2,%rdi /* write to stderr (fd 2) */
+ lea .Lelf_entry_error_msg(%rip),%rsi
+ movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
+.Lelf_entry_call:
+ movq $1,%rax /* write() syscall */
+ syscall
+ test %rax,%rax
+ jle .Lelf_exit /* exit on error */
+ add %rax,%rsi
+ sub %rax,%rdx /* all chars written? */
+ jnz .Lelf_entry_call
+
+.Lelf_exit:
+ movq $60,%rax /* exit() syscall */
+ movq $1,%rdi /* exit code 1 */
+ syscall
+ ud2 /* should not be reached */
+/* end elf_entry */
+
+/* This code needs to be called *after* the enclave stack has been setup. */
+/* There are 3 places where this needs to happen, so this is put in a macro. */
+.macro entry_sanitize_final
+/* Sanitize rflags received from user */
+/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
+/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
+ pushfq
+ andq $~0x40400, (%rsp)
+ popfq
+/* check for abort */
+ bt $0,.Laborted(%rip)
+ jc .Lreentry_panic
+.endm
+
+.text
+.global sgx_entry
+.type sgx_entry,function
+sgx_entry:
+/* save user registers */
+ mov %rcx,%gs:tcsls_user_retip
+ mov %rsp,%gs:tcsls_user_rsp
+ mov %rbp,%gs:tcsls_user_rbp
+ mov %r12,%gs:tcsls_user_r12
+ mov %r13,%gs:tcsls_user_r13
+ mov %r14,%gs:tcsls_user_r14
+ mov %r15,%gs:tcsls_user_r15
+ mov %rbx,%gs:tcsls_tcs_addr
+ stmxcsr %gs:tcsls_user_mxcsr
+ fnstcw %gs:tcsls_user_fcw
+
+/* check for debug buffer pointer */
+ testb $0xff,DEBUG(%rip)
+ jz .Lskip_debug_init
+ mov %r10,%gs:tcsls_debug_panic_buf_ptr
+.Lskip_debug_init:
+/* reset cpu state */
+ mov %rdx, %r10
+ mov $-1, %rax
+ mov $-1, %rdx
+ xrstor .Lxsave_clear(%rip)
+ mov %r10, %rdx
+
+/* check if returning from usercall */
+ mov %gs:tcsls_last_rsp,%r11
+ test %r11,%r11
+ jnz .Lusercall_ret
+/* setup stack */
+ mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
+ /* here. This is fixed below under "adjust stack". */
+/* check for thread init */
+ bts $tcsls_flag_init_once,%gs:tcsls_flags
+ jc .Lskip_init
+/* adjust stack */
+ lea IMAGE_BASE(%rip),%rax
+ add %rax,%rsp
+ mov %rsp,%gs:tcsls_tos
+ entry_sanitize_final
+/* call tcs_init */
+/* store caller-saved registers in callee-saved registers */
+ mov %rdi,%rbx
+ mov %rsi,%r12
+ mov %rdx,%r13
+ mov %r8,%r14
+ mov %r9,%r15
+ load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
+ call tcs_init
+/* reload caller-saved registers */
+ mov %rbx,%rdi
+ mov %r12,%rsi
+ mov %r13,%rdx
+ mov %r14,%r8
+ mov %r15,%r9
+ jmp .Lafter_init
+.Lskip_init:
+ entry_sanitize_final
+.Lafter_init:
+/* call into main entry point */
+ load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
+ call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
+ mov %rax,%rsi /* RSI = return value */
+ /* NOP: mov %rdx,%rdx */ /* RDX = return value */
+ xor %rdi,%rdi /* RDI = normal exit */
+.Lexit:
+/* clear general purpose register state */
+ /* RAX overwritten by ENCLU */
+ /* RBX set later */
+ /* RCX overwritten by ENCLU */
+ /* RDX contains return value */
+ /* RSP set later */
+ /* RBP set later */
+ /* RDI contains exit mode */
+ /* RSI contains return value */
+ xor %r8,%r8
+ xor %r9,%r9
+ xor %r10,%r10
+ xor %r11,%r11
+ /* R12 ~ R15 set by sgx_exit */
+.Lsgx_exit:
+/* clear extended register state */
+ mov %rdx, %rcx /* save RDX */
+ mov $-1, %rax
+ mov %rax, %rdx
+ xrstor .Lxsave_clear(%rip)
+ mov %rcx, %rdx /* restore RDX */
+/* clear flags */
+ pushq $0
+ popfq
+/* restore user registers */
+ mov %gs:tcsls_user_r12,%r12
+ mov %gs:tcsls_user_r13,%r13
+ mov %gs:tcsls_user_r14,%r14
+ mov %gs:tcsls_user_r15,%r15
+ mov %gs:tcsls_user_retip,%rbx
+ mov %gs:tcsls_user_rsp,%rsp
+ mov %gs:tcsls_user_rbp,%rbp
+ fldcw %gs:tcsls_user_fcw
+ ldmxcsr %gs:tcsls_user_mxcsr
+/* exit enclave */
+ mov $0x4,%eax /* EEXIT */
+ enclu
+/* end sgx_entry */
+
+.Lreentry_panic:
+ orq $8,%rsp
+ jmp abort_reentry
+
+/* This *MUST* be called with 6 parameters, otherwise register information */
+/* might leak! */
+.global usercall
+usercall:
+ test %rcx,%rcx /* check `abort` function argument */
+ jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
+ jmp .Lusercall_save_state /* non-aborting usercall */
+.Lusercall_abort:
+/* set aborted bit */
+ movb $1,.Laborted(%rip)
+/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
+ testb $0xff,DEBUG(%rip)
+ jz .Lusercall_noreturn
+.Lusercall_save_state:
+/* save callee-saved state */
+ push %r15
+ push %r14
+ push %r13
+ push %r12
+ push %rbp
+ push %rbx
+ sub $8, %rsp
+ fstcw 4(%rsp)
+ stmxcsr (%rsp)
+ movq %rsp,%gs:tcsls_last_rsp
+.Lusercall_noreturn:
+/* clear general purpose register state */
+ /* RAX overwritten by ENCLU */
+ /* RBX set by sgx_exit */
+ /* RCX overwritten by ENCLU */
+ /* RDX contains parameter */
+ /* RSP set by sgx_exit */
+ /* RBP set by sgx_exit */
+ /* RDI contains parameter */
+ /* RSI contains parameter */
+ /* R8 contains parameter */
+ /* R9 contains parameter */
+ xor %r10,%r10
+ xor %r11,%r11
+ /* R12 ~ R15 set by sgx_exit */
+/* extended registers/flags cleared by sgx_exit */
+/* exit */
+ jmp .Lsgx_exit
+.Lusercall_ret:
+ movq $0,%gs:tcsls_last_rsp
+/* restore callee-saved state, cf. "save" above */
+ mov %r11,%rsp
+ ldmxcsr (%rsp)
+ fldcw 4(%rsp)
+ add $8, %rsp
+ entry_sanitize_final
+ pop %rbx
+ pop %rbp
+ pop %r12
+ pop %r13
+ pop %r14
+ pop %r15
+/* return */
+ mov %rsi,%rax /* RAX = return value */
+ /* NOP: mov %rdx,%rdx */ /* RDX = return value */
+ pop %r11
+ lfence
+ jmp *%r11
+
+/*
+The following functions need to be defined externally:
+```
+// Called by entry code on re-entry after exit
+extern "C" fn abort_reentry() -> !;
+
+// Called once when a TCS is first entered
+extern "C" fn tcs_init(secondary: bool);
+
+// Standard TCS entrypoint
+extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
+```
+*/
+
+.global get_tcs_addr
+get_tcs_addr:
+ mov %gs:tcsls_tcs_addr,%rax
+ pop %r11
+ lfence
+ jmp *%r11
+
+.global get_tls_ptr
+get_tls_ptr:
+ mov %gs:tcsls_tls_ptr,%rax
+ pop %r11
+ lfence
+ jmp *%r11
+
+.global set_tls_ptr
+set_tls_ptr:
+ mov %rdi,%gs:tcsls_tls_ptr
+ pop %r11
+ lfence
+ jmp *%r11
+
+.global take_debug_panic_buf_ptr
+take_debug_panic_buf_ptr:
+ xor %rax,%rax
+ xchg %gs:tcsls_debug_panic_buf_ptr,%rax
+ pop %r11
+ lfence
+ jmp *%r11
diff --git a/library/std/src/sys/sgx/abi/mem.rs b/library/std/src/sys/sgx/abi/mem.rs
new file mode 100644
index 000000000..18e6d5b3f
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/mem.rs
@@ -0,0 +1,93 @@
+use core::arch::asm;
+
+// Do not remove inline: will result in relocation failure
+#[inline(always)]
+pub(crate) unsafe fn rel_ptr<T>(offset: u64) -> *const T {
+ (image_base() + offset) as *const T
+}
+
+// Do not remove inline: will result in relocation failure
+#[inline(always)]
+pub(crate) unsafe fn rel_ptr_mut<T>(offset: u64) -> *mut T {
+ (image_base() + offset) as *mut T
+}
+
+extern "C" {
+ static ENCLAVE_SIZE: usize;
+ static HEAP_BASE: u64;
+ static HEAP_SIZE: usize;
+}
+
+/// Returns the base memory address of the heap
+pub(crate) fn heap_base() -> *const u8 {
+ unsafe { rel_ptr_mut(HEAP_BASE) }
+}
+
+/// Returns the size of the heap
+pub(crate) fn heap_size() -> usize {
+ unsafe { HEAP_SIZE }
+}
+
+// Do not remove inline: will result in relocation failure
+// For the same reason we use inline ASM here instead of an extern static to
+// locate the base
+/// Returns address at which current enclave is loaded.
+#[inline(always)]
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn image_base() -> u64 {
+ let base: u64;
+ unsafe {
+ asm!(
+ "lea IMAGE_BASE(%rip), {}",
+ lateout(reg) base,
+ options(att_syntax, nostack, preserves_flags, nomem, pure),
+ )
+ };
+ base
+}
+
+/// Returns `true` if the specified memory range is in the enclave.
+///
+/// For safety, this function also checks whether the range given overflows,
+/// returning `false` if so.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn is_enclave_range(p: *const u8, len: usize) -> bool {
+ let start = p as usize;
+
+ // Subtract one from `len` when calculating `end` in case `p + len` is
+ // exactly at the end of addressable memory (`p + len` would overflow, but
+ // the range is still valid).
+ let end = if len == 0 {
+ start
+ } else if let Some(end) = start.checked_add(len - 1) {
+ end
+ } else {
+ return false;
+ };
+
+ let base = image_base() as usize;
+ start >= base && end <= base + (unsafe { ENCLAVE_SIZE } - 1) // unsafe ok: link-time constant
+}
+
+/// Returns `true` if the specified memory range is in userspace.
+///
+/// For safety, this function also checks whether the range given overflows,
+/// returning `false` if so.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn is_user_range(p: *const u8, len: usize) -> bool {
+ let start = p as usize;
+
+ // Subtract one from `len` when calculating `end` in case `p + len` is
+ // exactly at the end of addressable memory (`p + len` would overflow, but
+ // the range is still valid).
+ let end = if len == 0 {
+ start
+ } else if let Some(end) = start.checked_add(len - 1) {
+ end
+ } else {
+ return false;
+ };
+
+ let base = image_base() as usize;
+ end < base || start > base + (unsafe { ENCLAVE_SIZE } - 1) // unsafe ok: link-time constant
+}
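The overflow handling in the two range checks above is easy to get wrong, so here is a standalone sketch of the `len - 1` trick with a couple of edge cases. The helper name is made up for illustration; it is not part of the patch.

```rust
// Compute the last *inclusive* address of a range without overflowing,
// the same way `is_enclave_range`/`is_user_range` do above.
fn end_of_range(start: usize, len: usize) -> Option<usize> {
    if len == 0 { Some(start) } else { start.checked_add(len - 1) }
}

fn main() {
    // A 16-byte range ending exactly at the top of the address space is valid.
    assert_eq!(end_of_range(usize::MAX - 15, 16), Some(usize::MAX));
    // A range that would wrap around is rejected.
    assert_eq!(end_of_range(usize::MAX, 2), None);
}
```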
diff --git a/library/std/src/sys/sgx/abi/mod.rs b/library/std/src/sys/sgx/abi/mod.rs
new file mode 100644
index 000000000..9508c3874
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/mod.rs
@@ -0,0 +1,108 @@
+#![cfg_attr(test, allow(unused))] // RT initialization logic is not compiled for test
+
+use crate::io::Write;
+use core::arch::global_asm;
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+// runtime features
+pub(super) mod panic;
+mod reloc;
+
+// library features
+pub mod mem;
+pub mod thread;
+pub mod tls;
+#[macro_use]
+pub mod usercalls;
+
+#[cfg(not(test))]
+global_asm!(include_str!("entry.S"), options(att_syntax));
+
+#[repr(C)]
+struct EntryReturn(u64, u64);
+
+#[cfg(not(test))]
+#[no_mangle]
+unsafe extern "C" fn tcs_init(secondary: bool) {
+ // Be very careful when changing this code: it runs before the binary has been
+ // relocated. Any indirect accesses to symbols will likely fail.
+ const UNINIT: usize = 0;
+ const BUSY: usize = 1;
+ const DONE: usize = 2;
+ // Three-state spin-lock
+ static RELOC_STATE: AtomicUsize = AtomicUsize::new(UNINIT);
+
+ if secondary && RELOC_STATE.load(Ordering::Relaxed) != DONE {
+ rtabort!("Entered secondary TCS before main TCS!")
+ }
+
+ // Try to atomically swap UNINIT with BUSY. The returned state can be:
+ match RELOC_STATE.compare_exchange(UNINIT, BUSY, Ordering::Acquire, Ordering::Acquire) {
+ // This thread just obtained the lock and other threads will observe BUSY
+ Ok(_) => {
+ reloc::relocate_elf_rela();
+ RELOC_STATE.store(DONE, Ordering::Release);
+ }
+ // We need to wait until the initialization is done.
+ Err(BUSY) => {
+ while RELOC_STATE.load(Ordering::Acquire) == BUSY {
+ core::hint::spin_loop();
+ }
+ }
+ // Initialization is done.
+ Err(DONE) => {}
+ _ => unreachable!(),
+ }
+}
+
+// FIXME: this item should only exist if this is linked into an executable
+// (main function exists). If this is a library, the crate author should be
+// able to specify this
+#[cfg(not(test))]
+#[no_mangle]
+extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> EntryReturn {
+ // FIXME: how to support TLS in library mode?
+ let tls = Box::new(tls::Tls::new());
+ let tls_guard = unsafe { tls.activate() };
+
+ if secondary {
+ let join_notifier = super::thread::Thread::entry();
+ drop(tls_guard);
+ drop(join_notifier);
+
+ EntryReturn(0, 0)
+ } else {
+ extern "C" {
+ fn main(argc: isize, argv: *const *const u8) -> isize;
+ }
+
+ // check entry is being called according to ABI
+ rtassert!(p3 == 0);
+ rtassert!(p4 == 0);
+ rtassert!(p5 == 0);
+
+ unsafe {
+ // The actual types of these arguments are `p1: *const Arg, p2:
+ // usize`. We can't currently customize the argument list of Rust's
+ // main function, so we pass these in as the standard pointer-sized
+ // values in `argc` and `argv`.
+ let ret = main(p2 as _, p1 as _);
+ exit_with_code(ret)
+ }
+ }
+}
+
+pub(super) fn exit_with_code(code: isize) -> ! {
+ if code != 0 {
+ if let Some(mut out) = panic::SgxPanicOutput::new() {
+ let _ = write!(out, "Exited with status code {code}");
+ }
+ }
+ usercalls::exit(code != 0);
+}
+
+#[cfg(not(test))]
+#[no_mangle]
+extern "C" fn abort_reentry() -> ! {
+ usercalls::exit(false)
+}
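
The `tcs_init` path above gates relocation behind a three-state spin lock. As a rough, non-SGX illustration of the same UNINIT/BUSY/DONE pattern (ordinary `std::thread`s stand in for secondary TCS entries; this is a sketch, not the entry path itself):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    const UNINIT: usize = 0;
    const BUSY: usize = 1;
    const DONE: usize = 2;
    static STATE: AtomicUsize = AtomicUsize::new(UNINIT);

    // Exactly one caller wins the CAS and runs `init`; everyone else spins
    // until the winner publishes DONE with Release, matching the Acquire
    // loads here.
    fn init_once(init: impl FnOnce()) {
        match STATE.compare_exchange(UNINIT, BUSY, Ordering::Acquire, Ordering::Acquire) {
            Ok(_) => {
                init();
                STATE.store(DONE, Ordering::Release);
            }
            Err(BUSY) => {
                while STATE.load(Ordering::Acquire) == BUSY {
                    std::hint::spin_loop();
                }
            }
            Err(_) => {} // DONE: already initialized
        }
    }

    fn main() {
        let handles: Vec<_> = (0..4)
            .map(|i| thread::spawn(move || init_once(|| println!("thread {i} ran init"))))
            .collect();
        for h in handles {
            h.join().unwrap();
        }
    }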
diff --git a/library/std/src/sys/sgx/abi/panic.rs b/library/std/src/sys/sgx/abi/panic.rs
new file mode 100644
index 000000000..229b3b329
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/panic.rs
@@ -0,0 +1,42 @@
+use super::usercalls::alloc::UserRef;
+use crate::cmp;
+use crate::io::{self, Write};
+use crate::mem;
+
+extern "C" {
+ fn take_debug_panic_buf_ptr() -> *mut u8;
+ static DEBUG: u8;
+}
+
+pub(crate) struct SgxPanicOutput(Option<&'static mut UserRef<[u8]>>);
+
+fn empty_user_slice() -> &'static mut UserRef<[u8]> {
+ unsafe { UserRef::from_raw_parts_mut(1 as *mut u8, 0) }
+}
+
+impl SgxPanicOutput {
+ pub(crate) fn new() -> Option<Self> {
+ if unsafe { DEBUG == 0 } { None } else { Some(SgxPanicOutput(None)) }
+ }
+
+ fn init(&mut self) -> &mut &'static mut UserRef<[u8]> {
+ self.0.get_or_insert_with(|| unsafe {
+ let ptr = take_debug_panic_buf_ptr();
+ if ptr.is_null() { empty_user_slice() } else { UserRef::from_raw_parts_mut(ptr, 1024) }
+ })
+ }
+}
+
+impl Write for SgxPanicOutput {
+ fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+ let dst = mem::replace(self.init(), empty_user_slice());
+ let written = cmp::min(src.len(), dst.len());
+ dst[..written].copy_from_enclave(&src[..written]);
+ self.0 = Some(&mut dst[written..]);
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
diff --git a/library/std/src/sys/sgx/abi/reloc.rs b/library/std/src/sys/sgx/abi/reloc.rs
new file mode 100644
index 000000000..02dff0ad2
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/reloc.rs
@@ -0,0 +1,32 @@
+use super::mem;
+use crate::slice::from_raw_parts;
+
+const R_X86_64_RELATIVE: u32 = 8;
+
+#[repr(packed)]
+struct Rela<T> {
+ offset: T,
+ info: T,
+ addend: T,
+}
+
+pub fn relocate_elf_rela() {
+ extern "C" {
+ static RELA: u64;
+ static RELACOUNT: usize;
+ }
+
+ if unsafe { RELACOUNT } == 0 {
+ return;
+ } // unsafe ok: link-time constant
+
+ let relas = unsafe {
+ from_raw_parts::<Rela<u64>>(mem::rel_ptr(RELA), RELACOUNT) // unsafe ok: link-time constant
+ };
+ for rela in relas {
+ if rela.info != (/*0 << 32 |*/R_X86_64_RELATIVE as u64) {
+ rtabort!("Invalid relocation");
+ }
+ unsafe { *mem::rel_ptr_mut::<*const ()>(rela.offset) = mem::rel_ptr(rela.addend) };
+ }
+}
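
The rule applied by `relocate_elf_rela` is simple: for every `R_X86_64_RELATIVE` entry, store `image_base + addend` at `image_base + offset`. A hypothetical in-memory simulation of that rule, with a `Vec<u64>` standing in for the image and indices standing in for byte offsets:

    const R_X86_64_RELATIVE: u64 = 8;

    struct Rela {
        offset: u64,
        info: u64,
        addend: u64,
    }

    // For each entry, write (image_base + addend) at (image_base + offset).
    fn apply_relocations(image: &mut [u64], image_base: u64, relas: &[Rela]) {
        for rela in relas {
            assert_eq!(rela.info, R_X86_64_RELATIVE, "unsupported relocation type");
            image[rela.offset as usize] = image_base + rela.addend;
        }
    }

    fn main() {
        let mut image = vec![0u64; 4];
        let relas = [Rela { offset: 2, info: R_X86_64_RELATIVE, addend: 0x40 }];
        apply_relocations(&mut image, 0x10_0000, &relas);
        assert_eq!(image[2], 0x10_0040); // slot 2 now holds a base-relative address
    }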
diff --git a/library/std/src/sys/sgx/abi/thread.rs b/library/std/src/sys/sgx/abi/thread.rs
new file mode 100644
index 000000000..ef55b821a
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/thread.rs
@@ -0,0 +1,13 @@
+use fortanix_sgx_abi::Tcs;
+
+/// Gets the ID for the current thread. The ID is guaranteed to be unique among
+/// all currently running threads in the enclave, and it is guaranteed to be
+/// constant for the lifetime of the thread. More specifically for SGX, there
+/// is a one-to-one correspondence of the ID to the address of the TCS.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn current() -> Tcs {
+ extern "C" {
+ fn get_tcs_addr() -> Tcs;
+ }
+ unsafe { get_tcs_addr() }
+}
diff --git a/library/std/src/sys/sgx/abi/tls/mod.rs b/library/std/src/sys/sgx/abi/tls/mod.rs
new file mode 100644
index 000000000..13d96e9a6
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/tls/mod.rs
@@ -0,0 +1,132 @@
+mod sync_bitset;
+
+use self::sync_bitset::*;
+use crate::cell::Cell;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+
+#[cfg(target_pointer_width = "64")]
+const USIZE_BITS: usize = 64;
+const TLS_KEYS: usize = 128; // Same as POSIX minimum
+const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;
+
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"]
+static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
+macro_rules! dup {
+ ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
+ (() $($val:tt)*) => ([$($val),*])
+}
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"]
+static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0)));
+
+extern "C" {
+ fn get_tls_ptr() -> *const u8;
+ fn set_tls_ptr(tls: *const u8);
+}
+
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct Key(NonZeroUsize);
+
+impl Key {
+ fn to_index(self) -> usize {
+ self.0.get() - 1
+ }
+
+ fn from_index(index: usize) -> Self {
+ Key(NonZeroUsize::new(index + 1).unwrap())
+ }
+
+ pub fn as_usize(self) -> usize {
+ self.0.get()
+ }
+
+ pub fn from_usize(index: usize) -> Self {
+ Key(NonZeroUsize::new(index).unwrap())
+ }
+}
+
+#[repr(C)]
+pub struct Tls {
+ data: [Cell<*mut u8>; TLS_KEYS],
+}
+
+pub struct ActiveTls<'a> {
+ tls: &'a Tls,
+}
+
+impl<'a> Drop for ActiveTls<'a> {
+ fn drop(&mut self) {
+ let value_with_destructor = |key: usize| {
+ let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
+ unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) }
+ .map(|dtor| (&self.tls.data[key], dtor))
+ };
+
+ let mut any_non_null_dtor = true;
+ while any_non_null_dtor {
+ any_non_null_dtor = false;
+ for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
+ let value = value.replace(ptr::null_mut());
+ if !value.is_null() {
+ any_non_null_dtor = true;
+ unsafe { dtor(value) }
+ }
+ }
+ }
+ }
+}
+
+impl Tls {
+ pub fn new() -> Tls {
+ Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
+ }
+
+ pub unsafe fn activate(&self) -> ActiveTls<'_> {
+ // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
+ unsafe { set_tls_ptr(self as *const Tls as _) };
+ ActiveTls { tls: self }
+ }
+
+ #[allow(unused)]
+ pub unsafe fn activate_persistent(self: Box<Self>) {
+ // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
+ unsafe { set_tls_ptr((&*self) as *const Tls as _) };
+ mem::forget(self);
+ }
+
+ unsafe fn current<'a>() -> &'a Tls {
+ // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
+ unsafe { &*(get_tls_ptr() as *const Tls) }
+ }
+
+ pub fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
+ let index = if let Some(index) = TLS_KEY_IN_USE.set() {
+ index
+ } else {
+ rtabort!("TLS limit exceeded")
+ };
+ TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
+ Key::from_index(index)
+ }
+
+ pub fn set(key: Key, value: *mut u8) {
+ let index = key.to_index();
+ rtassert!(TLS_KEY_IN_USE.get(index));
+ unsafe { Self::current() }.data[index].set(value);
+ }
+
+ pub fn get(key: Key) -> *mut u8 {
+ let index = key.to_index();
+ rtassert!(TLS_KEY_IN_USE.get(index));
+ unsafe { Self::current() }.data[index].get()
+ }
+
+ pub fn destroy(key: Key) {
+ TLS_KEY_IN_USE.clear(key.to_index());
+ }
+}
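
A standalone sketch of the `dup!` token-doubling trick used for `TLS_DESTRUCTOR` and `Tls::new`: each `*` doubles the accumulated expression, so seven of them yield 2^7 = 128 copies of a non-`Copy` initializer. The macro body is copied verbatim; the `Cell` example is only illustrative:

    macro_rules! dup {
        ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
        (() $($val:tt)*) => ([$($val),*])
    }

    use std::cell::Cell;

    fn main() {
        // `[Cell::new(0); 128]` would not compile because Cell<i32> is not Copy;
        // the doubling macro sidesteps that by repeating the expression itself.
        let cells: [Cell<i32>; 128] = dup!((* * * * * * *) (Cell::new(0)));
        assert_eq!(cells.len(), 128);
        cells[5].set(42);
        assert_eq!(cells[5].get(), 42);
    }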
diff --git a/library/std/src/sys/sgx/abi/tls/sync_bitset.rs b/library/std/src/sys/sgx/abi/tls/sync_bitset.rs
new file mode 100644
index 000000000..4eeff8f6e
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/tls/sync_bitset.rs
@@ -0,0 +1,85 @@
+#[cfg(test)]
+mod tests;
+
+use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS};
+use crate::iter::{Enumerate, Peekable};
+use crate::slice::Iter;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+
+/// A bitset that can be used synchronously.
+pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]);
+
+pub(super) const SYNC_BITSET_INIT: SyncBitset =
+ SyncBitset([AtomicUsize::new(0), AtomicUsize::new(0)]);
+
+impl SyncBitset {
+ pub fn get(&self, index: usize) -> bool {
+ let (hi, lo) = Self::split(index);
+ (self.0[hi].load(Ordering::Relaxed) & lo) != 0
+ }
+
+ /// Not atomic.
+ pub fn iter(&self) -> SyncBitsetIter<'_> {
+ SyncBitsetIter { iter: self.0.iter().enumerate().peekable(), elem_idx: 0 }
+ }
+
+ pub fn clear(&self, index: usize) {
+ let (hi, lo) = Self::split(index);
+ self.0[hi].fetch_and(!lo, Ordering::Relaxed);
+ }
+
+ /// Sets any unset bit. Not atomic. Returns `None` if all bits were
+ /// observed to be set.
+ pub fn set(&self) -> Option<usize> {
+ 'elems: for (idx, elem) in self.0.iter().enumerate() {
+ let mut current = elem.load(Ordering::Relaxed);
+ loop {
+ if 0 == !current {
+ continue 'elems;
+ }
+ let trailing_ones = (!current).trailing_zeros() as usize;
+ match elem.compare_exchange(
+ current,
+ current | (1 << trailing_ones),
+ Ordering::AcqRel,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => return Some(idx * USIZE_BITS + trailing_ones),
+ Err(previous) => current = previous,
+ }
+ }
+ }
+ None
+ }
+
+ fn split(index: usize) -> (usize, usize) {
+ (index / USIZE_BITS, 1 << (index % USIZE_BITS))
+ }
+}
+
+pub(super) struct SyncBitsetIter<'a> {
+ iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>,
+ elem_idx: usize,
+}
+
+impl<'a> Iterator for SyncBitsetIter<'a> {
+ type Item = usize;
+
+ fn next(&mut self) -> Option<usize> {
+ self.iter.peek().cloned().and_then(|(idx, elem)| {
+ let elem = elem.load(Ordering::Relaxed);
+ let low_mask = (1 << self.elem_idx) - 1;
+ let next = elem & !low_mask;
+ let next_idx = next.trailing_zeros() as usize;
+ self.elem_idx = next_idx + 1;
+ if self.elem_idx >= 64 {
+ self.elem_idx = 0;
+ self.iter.next();
+ }
+ match next_idx {
+ 64 => self.next(),
+ _ => Some(idx * USIZE_BITS + next_idx),
+ }
+ })
+ }
+}
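
Reduced to a single word, the claim loop in `SyncBitset::set` looks like the sketch below: find the lowest clear bit via `(!current).trailing_zeros()` and try to take it with a compare-exchange, retrying on contention (assumption: one `AtomicUsize` only, no bitset array):

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Lock-free "allocate a free index" on one word: claim the lowest clear
    // bit with a CAS, retrying with the freshly observed value on contention.
    fn claim_free_bit(word: &AtomicUsize) -> Option<usize> {
        let mut current = word.load(Ordering::Relaxed);
        loop {
            if (!current) == 0 {
                return None; // every bit already set
            }
            let bit = (!current).trailing_zeros() as usize;
            match word.compare_exchange(
                current,
                current | (1 << bit),
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return Some(bit),
                Err(observed) => current = observed,
            }
        }
    }

    fn main() {
        let word = AtomicUsize::new(0b1011);
        assert_eq!(claim_free_bit(&word), Some(2)); // lowest clear bit was bit 2
        assert_eq!(word.load(Ordering::Relaxed), 0b1111);
    }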
diff --git a/library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs b/library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs
new file mode 100644
index 000000000..d7eb2e139
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/tls/sync_bitset/tests.rs
@@ -0,0 +1,25 @@
+use super::*;
+
+fn test_data(bitset: [usize; 2], bit_indices: &[usize]) {
+ let set = SyncBitset([AtomicUsize::new(bitset[0]), AtomicUsize::new(bitset[1])]);
+ assert_eq!(set.iter().collect::<Vec<_>>(), bit_indices);
+ for &i in bit_indices {
+ assert!(set.get(i));
+ }
+}
+
+#[test]
+fn iter() {
+ test_data([0b0110_1001, 0], &[0, 3, 5, 6]);
+ test_data([0x8000_0000_0000_0000, 0x8000_0000_0000_0001], &[63, 64, 127]);
+ test_data([0, 0], &[]);
+}
+
+#[test]
+fn set_get_clear() {
+ let set = SYNC_BITSET_INIT;
+ let key = set.set().unwrap();
+ assert!(set.get(key));
+ set.clear(key);
+ assert!(!set.get(key));
+}
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
new file mode 100644
index 000000000..ea24fedd0
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -0,0 +1,732 @@
+#![allow(unused)]
+
+use crate::arch::asm;
+use crate::cell::UnsafeCell;
+use crate::cmp;
+use crate::convert::TryInto;
+use crate::mem;
+use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut};
+use crate::ptr::{self, NonNull};
+use crate::slice;
+use crate::slice::SliceIndex;
+
+use super::super::mem::{is_enclave_range, is_user_range};
+use fortanix_sgx_abi::*;
+
+/// A type that can be safely read from or written to userspace.
+///
+/// Non-exhaustive list of specific requirements for reading and writing:
+/// * **Type is `Copy`** (and therefore also not `Drop`). Copies will be
+/// created when copying from/to userspace. Destructors will not be called.
+/// * **No references or Rust-style owned pointers** (`Vec`, `Arc`, etc.). When
+/// reading from userspace, references into enclave memory must not be
+/// created. Also, only enclave memory is considered managed by the Rust
+/// compiler's static analysis. When reading from userspace, there can be no
+/// guarantee that the value correctly adheres to the expectations of the
+/// type. When writing to userspace, memory addresses of data in enclave
+/// memory must not be leaked for confidentiality reasons. `User` and
+/// `UserRef` are also not allowed for the same reasons.
+/// * **No fat pointers.** When reading from userspace, the size or vtable
+/// pointer could be automatically interpreted and used by the code. When
+/// writing to userspace, memory addresses of data in enclave memory (such
+/// as vtable pointers) must not be leaked for confidentiality reasons.
+///
+/// Non-exhaustive list of specific requirements for reading from userspace:
+/// * **Any bit pattern is valid** for this type (no `enum`s). There can be no
+/// guarantee that the value correctly adheres to the expectations of the
+/// type, so any value must be valid for this type.
+///
+/// Non-exhaustive list of specific requirements for writing to userspace:
+/// * **No pointers to enclave memory.** Memory addresses of data in enclave
+/// memory must not be leaked for confidentiality reasons.
+/// * **No internal padding.** Padding might contain previously-initialized
+/// secret data stored at that memory location and must not be leaked for
+/// confidentiality reasons.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub unsafe trait UserSafeSized: Copy + Sized {}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl UserSafeSized for u8 {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl<T> UserSafeSized for FifoDescriptor<T> {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl UserSafeSized for ByteBuffer {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl UserSafeSized for Usercall {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl UserSafeSized for Return {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {}
+
+/// A type that can be represented in memory as one or more `UserSafeSized`s.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub unsafe trait UserSafe {
+ /// Equivalent to `mem::align_of::<Self>`.
+ fn align_of() -> usize;
+
+ /// Construct a pointer to `Self` given a memory range in user space.
+ ///
+ /// N.B., this takes a size, not a length!
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure the memory range is in user memory, is the
+ /// correct size and is correctly aligned and points to the right type.
+ unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self;
+
+ /// Construct a pointer to `Self` given a memory range.
+ ///
+ /// N.B., this takes a size, not a length!
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure the memory range points to the correct type.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if:
+ ///
+ /// * the pointer is not aligned.
+ /// * the pointer is null.
+ /// * the pointed-to range does not fit in the address space.
+ /// * the pointed-to range is not in user memory.
+ unsafe fn from_raw_sized(ptr: *mut u8, size: usize) -> NonNull<Self> {
+ assert!(ptr.wrapping_add(size) >= ptr);
+ // SAFETY: The caller has guaranteed the pointer is valid
+ let ret = unsafe { Self::from_raw_sized_unchecked(ptr, size) };
+ unsafe {
+ Self::check_ptr(ret);
+ NonNull::new_unchecked(ret as _)
+ }
+ }
+
+ /// Checks if a pointer may point to `Self` in user memory.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure the memory range points to the correct type and
+ /// length (if this is a slice).
+ ///
+ /// # Panics
+ ///
+ /// This function panics if:
+ ///
+ /// * the pointer is not aligned.
+ /// * the pointer is null.
+ /// * the pointed-to range is not in user memory.
+ unsafe fn check_ptr(ptr: *const Self) {
+ let is_aligned = |p| -> bool { 0 == (p as usize) & (Self::align_of() - 1) };
+
+ assert!(is_aligned(ptr as *const u8));
+ assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr })));
+ assert!(!ptr.is_null());
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl<T: UserSafeSized> UserSafe for T {
+ fn align_of() -> usize {
+ mem::align_of::<T>()
+ }
+
+ unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
+ assert_eq!(size, mem::size_of::<T>());
+ ptr as _
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl<T: UserSafeSized> UserSafe for [T] {
+ fn align_of() -> usize {
+ mem::align_of::<T>()
+ }
+
+ /// # Safety
+ /// Behavior is undefined if any of these conditions are violated:
+ /// * `ptr` must be [valid] for writes of `size` many bytes, and it must be
+ /// properly aligned.
+ ///
+ /// [valid]: core::ptr#safety
+ /// # Panics
+ ///
+ /// This function panics if:
+ ///
+ /// * the element size is not a factor of the size
+ unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self {
+ let elem_size = mem::size_of::<T>();
+ assert_eq!(size % elem_size, 0);
+ let len = size / elem_size;
+ // SAFETY: The caller must uphold the safety contract for `from_raw_sized_unchecked`
+ unsafe { slice::from_raw_parts_mut(ptr as _, len) }
+ }
+}
+
+/// A reference to some type in userspace memory. `&UserRef<T>` is equivalent
+/// to `&T` in enclave memory. Access to the memory is only allowed by copying
+/// to avoid TOCTTOU issues. After copying, code should make sure to completely
+/// check the value before use.
+///
+/// It is also possible to obtain a mutable reference `&mut UserRef<T>`. Unlike
+/// regular mutable references, these are not exclusive. Userspace may always
+/// write to the backing memory at any time, so it can't be assumed that the
+/// pointed-to memory is uniquely borrowed. The two different reference types
+/// are used solely to indicate intent: a mutable reference is for writing to
+/// user memory, an immutable reference for reading from user memory.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct UserRef<T: ?Sized>(UnsafeCell<T>);
+/// An owned type in userspace memory. `User<T>` is equivalent to `Box<T>` in
+/// enclave memory. Access to the memory is only allowed by copying to avoid
+/// TOCTTOU issues. The user memory will be freed when the value is dropped.
+/// After copying, code should make sure to completely check the value before
+/// use.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct User<T: UserSafe + ?Sized>(NonNull<UserRef<T>>);
+
+trait NewUserRef<T: ?Sized> {
+ unsafe fn new_userref(v: T) -> Self;
+}
+
+impl<T: ?Sized> NewUserRef<*mut T> for NonNull<UserRef<T>> {
+ unsafe fn new_userref(v: *mut T) -> Self {
+ // SAFETY: The caller has guaranteed the pointer is valid
+ unsafe { NonNull::new_unchecked(v as _) }
+ }
+}
+
+impl<T: ?Sized> NewUserRef<NonNull<T>> for NonNull<UserRef<T>> {
+ unsafe fn new_userref(v: NonNull<T>) -> Self {
+ // SAFETY: The caller has guaranteed the pointer is valid
+ unsafe { NonNull::new_userref(v.as_ptr()) }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T: ?Sized> User<T>
+where
+ T: UserSafe,
+{
+ // This function returns memory that is practically uninitialized, but is
+ // not considered "unspecified" or "undefined" for purposes of an
+ // optimizing compiler. This is achieved by returning a pointer from
+ // outside as obtained by `super::alloc`.
+ fn new_uninit_bytes(size: usize) -> Self {
+ unsafe {
+ // Mustn't call alloc with size 0.
+ let ptr = if size > 0 {
+ // `copy_to_userspace` is more efficient when data is 8-byte aligned
+ let alignment = cmp::max(T::align_of(), 8);
+ rtunwrap!(Ok, super::alloc(size, alignment)) as _
+ } else {
+ T::align_of() as _ // dangling pointer ok for size 0
+ };
+ if let Ok(v) = crate::panic::catch_unwind(|| T::from_raw_sized(ptr, size)) {
+ User(NonNull::new_userref(v))
+ } else {
+ rtabort!("Got invalid pointer from alloc() usercall")
+ }
+ }
+ }
+
+ /// Copies `val` into freshly allocated space in user memory.
+ pub fn new_from_enclave(val: &T) -> Self {
+ unsafe {
+ let mut user = Self::new_uninit_bytes(mem::size_of_val(val));
+ user.copy_from_enclave(val);
+ user
+ }
+ }
+
+ /// Creates an owned `User<T>` from a raw pointer.
+ ///
+ /// # Safety
+ /// The caller must ensure `ptr` points to `T`, is freeable with the `free`
+ /// usercall and the alignment of `T`, and is uniquely owned.
+ ///
+ /// # Panics
+ /// This function panics if:
+ ///
+ /// * The pointer is not aligned
+ /// * The pointer is null
+ /// * The pointed-to range is not in user memory
+ pub unsafe fn from_raw(ptr: *mut T) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `from_raw`.
+ unsafe { T::check_ptr(ptr) };
+ User(unsafe { NonNull::new_userref(ptr) })
+ }
+
+ /// Converts this value into a raw pointer. The value will no longer be
+ /// automatically freed.
+ pub fn into_raw(self) -> *mut T {
+ let ret = self.0;
+ mem::forget(self);
+ ret.as_ptr() as _
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T> User<T>
+where
+ T: UserSafe,
+{
+ /// Allocate space for `T` in user memory.
+ pub fn uninitialized() -> Self {
+ Self::new_uninit_bytes(mem::size_of::<T>())
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T> User<[T]>
+where
+ [T]: UserSafe,
+{
+ /// Allocate space for a `[T]` of `n` elements in user memory.
+ pub fn uninitialized(n: usize) -> Self {
+ Self::new_uninit_bytes(n * mem::size_of::<T>())
+ }
+
+ /// Creates an owned `User<[T]>` from a raw thin pointer and a slice length.
+ ///
+ /// # Safety
+ /// The caller must ensure `ptr` points to `len` elements of `T`, is
+ /// freeable with the `free` usercall and the alignment of `T`, and is
+ /// uniquely owned.
+ ///
+ /// # Panics
+ /// This function panics if:
+ ///
+ /// * The pointer is not aligned
+ /// * The pointer is null
+ /// * The pointed-to range does not fit in the address space
+ /// * The pointed-to range is not in user memory
+ pub unsafe fn from_raw_parts(ptr: *mut T, len: usize) -> Self {
+ User(unsafe {
+ NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()))
+ })
+ }
+}
+
+/// Copies `len` bytes of data from enclave pointer `src` to userspace `dst`
+///
+/// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either:
+/// - preceded by the VERW instruction and followed by the MFENCE; LFENCE instruction sequence
+/// - or are in multiples of 8 bytes, aligned to an 8-byte boundary
+///
+/// # Panics
+/// This function panics if:
+///
+/// * The `src` pointer is null
+/// * The `dst` pointer is null
+/// * The `src` memory range is not in enclave memory
+/// * The `dst` memory range is not in user memory
+///
+/// # References
+/// - <https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00615.html>
+/// - <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/processor-mmio-stale-data-vulnerabilities.html#inpage-nav-3-2-2>
+pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ unsafe fn copy_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ unsafe {
+ let mut seg_sel: u16 = 0;
+ for off in 0..len {
+ asm!("
+ mov %ds, ({seg_sel})
+ verw ({seg_sel})
+ movb {val}, ({dst})
+ mfence
+ lfence
+ ",
+ val = in(reg_byte) *src.offset(off as isize),
+ dst = in(reg) dst.offset(off as isize),
+ seg_sel = in(reg) &mut seg_sel,
+ options(nostack, att_syntax)
+ );
+ }
+ }
+ }
+
+ unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ unsafe {
+ asm!(
+ "rep movsq (%rsi), (%rdi)",
+ inout("rcx") len / 8 => _,
+ inout("rdi") dst => _,
+ inout("rsi") src => _,
+ options(att_syntax, nostack, preserves_flags)
+ );
+ }
+ }
+ assert!(!src.is_null());
+ assert!(!dst.is_null());
+ assert!(is_enclave_range(src, len));
+ assert!(is_user_range(dst, len));
+ assert!(len < isize::MAX as usize);
+ assert!(!(src as usize).overflowing_add(len).1);
+ assert!(!(dst as usize).overflowing_add(len).1);
+
+ if len < 8 {
+ // Can't align on 8 byte boundary: copy safely byte per byte
+ unsafe {
+ copy_bytewise_to_userspace(src, dst, len);
+ }
+ } else if len % 8 == 0 && dst as usize % 8 == 0 {
+ // Copying 8-byte aligned quadwords: copy quad word per quad word
+ unsafe {
+ copy_aligned_quadwords_to_userspace(src, dst, len);
+ }
+ } else {
+ // Split copies into three parts:
+ // +--------+
+ // | small0 | Chunk smaller than 8 bytes
+ // +--------+
+ // | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
+ // +--------+
+ // | small1 | Chunk smaller than 8 bytes
+ // +--------+
+
+ unsafe {
+ // Copy small0
+ let small0_size = (8 - dst as usize % 8) as u8;
+ let small0_src = src;
+ let small0_dst = dst;
+ copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+
+ // Copy big
+ let small1_size = ((len - small0_size as usize) % 8) as u8;
+ let big_size = len - small0_size as usize - small1_size as usize;
+ let big_src = src.offset(small0_size as _);
+ let big_dst = dst.offset(small0_size as _);
+ copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+
+ // Copy small1
+ let small1_src = src.offset(big_size as isize + small0_size as isize);
+ let small1_dst = dst.offset(big_size as isize + small0_size as isize);
+ copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _);
+ }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T: ?Sized> UserRef<T>
+where
+ T: UserSafe,
+{
+ /// Creates a `&UserRef<T>` from a raw pointer.
+ ///
+ /// # Safety
+ /// The caller must ensure `ptr` points to `T`.
+ ///
+ /// # Panics
+ /// This function panics if:
+ ///
+ /// * The pointer is not aligned
+ /// * The pointer is null
+ /// * The pointed-to range is not in user memory
+ pub unsafe fn from_ptr<'a>(ptr: *const T) -> &'a Self {
+ // SAFETY: The caller must uphold the safety contract for `from_ptr`.
+ unsafe { T::check_ptr(ptr) };
+ unsafe { &*(ptr as *const Self) }
+ }
+
+ /// Creates a `&mut UserRef<T>` from a raw pointer. See the struct
+ /// documentation for the nuances regarding a `&mut UserRef<T>`.
+ ///
+ /// # Safety
+ /// The caller must ensure `ptr` points to `T`.
+ ///
+ /// # Panics
+ /// This function panics if:
+ ///
+ /// * The pointer is not aligned
+ /// * The pointer is null
+ /// * The pointed-to range is not in user memory
+ pub unsafe fn from_mut_ptr<'a>(ptr: *mut T) -> &'a mut Self {
+ // SAFETY: The caller must uphold the safety contract for `from_mut_ptr`.
+ unsafe { T::check_ptr(ptr) };
+ unsafe { &mut *(ptr as *mut Self) }
+ }
+
+ /// Copies `val` into user memory.
+ ///
+ /// # Panics
+ /// This function panics if the destination doesn't have the same size as
+ /// the source. This can happen for dynamically-sized types such as slices.
+ pub fn copy_from_enclave(&mut self, val: &T) {
+ unsafe {
+ assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get()));
+ copy_to_userspace(
+ val as *const T as *const u8,
+ self.0.get() as *mut T as *mut u8,
+ mem::size_of_val(val),
+ );
+ }
+ }
+
+ /// Copies the value from user memory and places it into `dest`.
+ ///
+ /// # Panics
+ /// This function panics if the destination doesn't have the same size as
+ /// the source. This can happen for dynamically-sized types such as slices.
+ pub fn copy_to_enclave(&self, dest: &mut T) {
+ unsafe {
+ assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get()));
+ ptr::copy(
+ self.0.get() as *const T as *const u8,
+ dest as *mut T as *mut u8,
+ mem::size_of_val(dest),
+ );
+ }
+ }
+
+ /// Obtain a raw pointer from this reference.
+ pub fn as_raw_ptr(&self) -> *const T {
+ self as *const _ as _
+ }
+
+ /// Obtain a raw pointer from this reference.
+ pub fn as_raw_mut_ptr(&mut self) -> *mut T {
+ self as *mut _ as _
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T> UserRef<T>
+where
+ T: UserSafe,
+{
+ /// Copies the value from user memory into enclave memory.
+ pub fn to_enclave(&self) -> T {
+ unsafe { ptr::read(self.0.get()) }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T> UserRef<[T]>
+where
+ [T]: UserSafe,
+{
+ /// Creates a `&UserRef<[T]>` from a raw thin pointer and a slice length.
+ ///
+ /// # Safety
+ /// The caller must ensure `ptr` points to `n` elements of `T`.
+ ///
+ /// # Panics
+ /// This function panics if:
+ ///
+ /// * The pointer is not aligned
+ /// * The pointer is null
+ /// * The pointed-to range does not fit in the address space
+ /// * The pointed-to range is not in user memory
+ pub unsafe fn from_raw_parts<'a>(ptr: *const T, len: usize) -> &'a Self {
+ // SAFETY: The caller must uphold the safety contract for `from_raw_parts`.
+ unsafe {
+ &*(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *const Self)
+ }
+ }
+
+ /// Creates a `&mut UserRef<[T]>` from a raw thin pointer and a slice length.
+ /// See the struct documentation for the nuances regarding a
+ /// `&mut UserRef<T>`.
+ ///
+ /// # Safety
+ /// The caller must ensure `ptr` points to `n` elements of `T`.
+ ///
+ /// # Panics
+ /// This function panics if:
+ ///
+ /// * The pointer is not aligned
+ /// * The pointer is null
+ /// * The pointed-to range does not fit in the address space
+ /// * The pointed-to range is not in user memory
+ pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut T, len: usize) -> &'a mut Self {
+ // SAFETY: The caller must uphold the safety contract for `from_raw_parts_mut`.
+ unsafe {
+ &mut *(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *mut Self)
+ }
+ }
+
+ /// Obtain a raw pointer to the first element of this user slice.
+ pub fn as_ptr(&self) -> *const T {
+ self.0.get() as _
+ }
+
+ /// Obtain a raw pointer to the first element of this user slice.
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ self.0.get() as _
+ }
+
+ /// Obtain the number of elements in this user slice.
+ pub fn len(&self) -> usize {
+ unsafe { (*self.0.get()).len() }
+ }
+
+ /// Copies the value from user memory and places it into `dest`. Afterwards,
+ /// `dest` will contain exactly `self.len()` elements.
+ ///
+ /// # Panics
+ /// This function panics if the destination doesn't have the same size as
+ /// the source. This can happen for dynamically-sized types such as slices.
+ pub fn copy_to_enclave_vec(&self, dest: &mut Vec<T>) {
+ if let Some(missing) = self.len().checked_sub(dest.capacity()) {
+ dest.reserve(missing)
+ }
+ // SAFETY: We reserve enough space above.
+ unsafe { dest.set_len(self.len()) };
+ self.copy_to_enclave(&mut dest[..]);
+ }
+
+ /// Copies the value from user memory into a vector in enclave memory.
+ pub fn to_enclave(&self) -> Vec<T> {
+ let mut ret = Vec::with_capacity(self.len());
+ self.copy_to_enclave_vec(&mut ret);
+ ret
+ }
+
+ /// Returns an iterator over the slice.
+ pub fn iter(&self) -> Iter<'_, T>
+ where
+ T: UserSafe, // FIXME: should be implied by [T]: UserSafe?
+ {
+ unsafe { Iter((&*self.as_raw_ptr()).iter()) }
+ }
+
+ /// Returns an iterator that allows modifying each value.
+ pub fn iter_mut(&mut self) -> IterMut<'_, T>
+ where
+ T: UserSafe, // FIXME: should be implied by [T]: UserSafe?
+ {
+ unsafe { IterMut((&mut *self.as_raw_mut_ptr()).iter_mut()) }
+ }
+}
+
+/// Immutable user slice iterator
+///
+/// This struct is created by the `iter` method on `UserRef<[T]>`.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct Iter<'a, T: 'a + UserSafe>(slice::Iter<'a, T>);
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<'a, T: UserSafe> Iterator for Iter<'a, T> {
+ type Item = &'a UserRef<T>;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ unsafe { self.0.next().map(|e| UserRef::from_ptr(e)) }
+ }
+}
+
+/// Mutable user slice iterator
+///
+/// This struct is created by the `iter_mut` method on `UserRef<[T]>`.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub struct IterMut<'a, T: 'a + UserSafe>(slice::IterMut<'a, T>);
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<'a, T: UserSafe> Iterator for IterMut<'a, T> {
+ type Item = &'a mut UserRef<T>;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ unsafe { self.0.next().map(|e| UserRef::from_mut_ptr(e)) }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T: ?Sized> Deref for User<T>
+where
+ T: UserSafe,
+{
+ type Target = UserRef<T>;
+
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*self.0.as_ptr() }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T: ?Sized> DerefMut for User<T>
+where
+ T: UserSafe,
+{
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *self.0.as_ptr() }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T: ?Sized> Drop for User<T>
+where
+ T: UserSafe,
+{
+ fn drop(&mut self) {
+ unsafe {
+ let ptr = (*self.0.as_ptr()).0.get();
+ super::free(ptr as _, mem::size_of_val(&mut *ptr), T::align_of());
+ }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<UserRef<U>> for UserRef<T> {}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T, I> Index<I> for UserRef<[T]>
+where
+ [T]: UserSafe,
+ I: SliceIndex<[T]>,
+ I::Output: UserSafe,
+{
+ type Output = UserRef<I::Output>;
+
+ #[inline]
+ fn index(&self, index: I) -> &UserRef<I::Output> {
+ unsafe {
+ if let Some(slice) = index.get(&*self.as_raw_ptr()) {
+ UserRef::from_ptr(slice)
+ } else {
+ rtabort!("index out of range for user slice");
+ }
+ }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl<T, I> IndexMut<I> for UserRef<[T]>
+where
+ [T]: UserSafe,
+ I: SliceIndex<[T]>,
+ I::Output: UserSafe,
+{
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut UserRef<I::Output> {
+ unsafe {
+ if let Some(slice) = index.get_mut(&mut *self.as_raw_mut_ptr()) {
+ UserRef::from_mut_ptr(slice)
+ } else {
+ rtabort!("index out of range for user slice");
+ }
+ }
+ }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+impl UserRef<super::raw::ByteBuffer> {
+ /// Copies the user memory range pointed to by the user `ByteBuffer` to
+ /// enclave memory.
+ ///
+ /// # Panics
+ /// This function panics if, in the user `ByteBuffer`:
+ ///
+ /// * The pointer is null
+ /// * The pointed-to range does not fit in the address space
+ /// * The pointed-to range is not in user memory
+ pub fn copy_user_buffer(&self) -> Vec<u8> {
+ unsafe {
+ let buf = self.to_enclave();
+ if buf.len > 0 {
+ User::from_raw_parts(buf.data as _, buf.len).to_enclave()
+ } else {
+ // Mustn't look at `data` or call `free` if `len` is `0`.
+ Vec::with_capacity(0)
+ }
+ }
+ }
+}
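
For the general (`else`) branch of `copy_to_userspace`, the small0/big/small1 split is pure arithmetic. A sketch with hypothetical `dst`/`len` values, showing that the three chunks add up and that the big chunk starts 8-byte aligned:

    // small0: bytewise copy up to the next 8-byte boundary of dst,
    // big: 8-byte aligned quadwords, small1: bytewise tail.
    fn split(dst: usize, len: usize) -> (usize, usize, usize) {
        let small0 = 8 - dst % 8;
        let small1 = (len - small0) % 8;
        let big = len - small0 - small1;
        (small0, big, small1)
    }

    fn main() {
        // dst ends in ...5, so 3 bytes bring it to an 8-byte boundary;
        // 16 bytes can then go as two quadwords, leaving a 2-byte tail.
        let (small0, big, small1) = split(0x1005, 21);
        assert_eq!((small0, big, small1), (3, 16, 2));
        assert_eq!(small0 + big + small1, 21);
        assert_eq!((0x1005 + small0) % 8, 0); // big chunk starts aligned
    }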
diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs
new file mode 100644
index 000000000..79d1db5e1
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs
@@ -0,0 +1,323 @@
+use crate::cmp;
+use crate::io::{Error as IoError, ErrorKind, IoSlice, IoSliceMut, Result as IoResult};
+use crate::sys::rand::rdrand64;
+use crate::time::{Duration, Instant};
+
+pub(crate) mod alloc;
+#[macro_use]
+pub(crate) mod raw;
+#[cfg(test)]
+mod tests;
+
+use self::raw::*;
+
+/// Usercall `read`. See the ABI documentation for more information.
+///
+/// This will do a single `read` usercall and scatter the read data among
+/// `bufs`. To read to a single buffer, just pass a slice of length one.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn read(fd: Fd, bufs: &mut [IoSliceMut<'_>]) -> IoResult<usize> {
+ unsafe {
+ let total_len = bufs.iter().fold(0usize, |sum, buf| sum.saturating_add(buf.len()));
+ let mut userbuf = alloc::User::<[u8]>::uninitialized(total_len);
+ let ret_len = raw::read(fd, userbuf.as_mut_ptr(), userbuf.len()).from_sgx_result()?;
+ let userbuf = &userbuf[..ret_len];
+ let mut index = 0;
+ for buf in bufs {
+ let end = cmp::min(index + buf.len(), userbuf.len());
+ if let Some(buflen) = end.checked_sub(index) {
+ userbuf[index..end].copy_to_enclave(&mut buf[..buflen]);
+ index += buf.len();
+ } else {
+ break;
+ }
+ }
+ Ok(userbuf.len())
+ }
+}
+
+/// Usercall `read_alloc`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn read_alloc(fd: Fd) -> IoResult<Vec<u8>> {
+ unsafe {
+ let userbuf = ByteBuffer { data: crate::ptr::null_mut(), len: 0 };
+ let mut userbuf = alloc::User::new_from_enclave(&userbuf);
+ raw::read_alloc(fd, userbuf.as_raw_mut_ptr()).from_sgx_result()?;
+ Ok(userbuf.copy_user_buffer())
+ }
+}
+
+/// Usercall `write`. See the ABI documentation for more information.
+///
+/// This will do a single `write` usercall and gather the written data from
+/// `bufs`. To write from a single buffer, just pass a slice of length one.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn write(fd: Fd, bufs: &[IoSlice<'_>]) -> IoResult<usize> {
+ unsafe {
+ let total_len = bufs.iter().fold(0usize, |sum, buf| sum.saturating_add(buf.len()));
+ let mut userbuf = alloc::User::<[u8]>::uninitialized(total_len);
+ let mut index = 0;
+ for buf in bufs {
+ let end = cmp::min(index + buf.len(), userbuf.len());
+ if let Some(buflen) = end.checked_sub(index) {
+ userbuf[index..end].copy_from_enclave(&buf[..buflen]);
+ index += buf.len();
+ } else {
+ break;
+ }
+ }
+ raw::write(fd, userbuf.as_ptr(), userbuf.len()).from_sgx_result()
+ }
+}
+
+/// Usercall `flush`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn flush(fd: Fd) -> IoResult<()> {
+ unsafe { raw::flush(fd).from_sgx_result() }
+}
+
+/// Usercall `close`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn close(fd: Fd) {
+ unsafe { raw::close(fd) }
+}
+
+fn string_from_bytebuffer(buf: &alloc::UserRef<ByteBuffer>, usercall: &str, arg: &str) -> String {
+ String::from_utf8(buf.copy_user_buffer())
+ .unwrap_or_else(|_| rtabort!("Usercall {usercall}: expected {arg} to be valid UTF-8"))
+}
+
+/// Usercall `bind_stream`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn bind_stream(addr: &str) -> IoResult<(Fd, String)> {
+ unsafe {
+ let addr_user = alloc::User::new_from_enclave(addr.as_bytes());
+ let mut local = alloc::User::<ByteBuffer>::uninitialized();
+ let fd = raw::bind_stream(addr_user.as_ptr(), addr_user.len(), local.as_raw_mut_ptr())
+ .from_sgx_result()?;
+ let local = string_from_bytebuffer(&local, "bind_stream", "local_addr");
+ Ok((fd, local))
+ }
+}
+
+/// Usercall `accept_stream`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn accept_stream(fd: Fd) -> IoResult<(Fd, String, String)> {
+ unsafe {
+ let mut bufs = alloc::User::<[ByteBuffer; 2]>::uninitialized();
+ let mut buf_it = alloc::UserRef::iter_mut(&mut *bufs); // FIXME: can this be done
+ // without forcing coercion?
+ let (local, peer) = (buf_it.next().unwrap(), buf_it.next().unwrap());
+ let fd = raw::accept_stream(fd, local.as_raw_mut_ptr(), peer.as_raw_mut_ptr())
+ .from_sgx_result()?;
+ let local = string_from_bytebuffer(&local, "accept_stream", "local_addr");
+ let peer = string_from_bytebuffer(&peer, "accept_stream", "peer_addr");
+ Ok((fd, local, peer))
+ }
+}
+
+/// Usercall `connect_stream`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn connect_stream(addr: &str) -> IoResult<(Fd, String, String)> {
+ unsafe {
+ let addr_user = alloc::User::new_from_enclave(addr.as_bytes());
+ let mut bufs = alloc::User::<[ByteBuffer; 2]>::uninitialized();
+ let mut buf_it = alloc::UserRef::iter_mut(&mut *bufs); // FIXME: can this be done
+ // without forcing coercion?
+ let (local, peer) = (buf_it.next().unwrap(), buf_it.next().unwrap());
+ let fd = raw::connect_stream(
+ addr_user.as_ptr(),
+ addr_user.len(),
+ local.as_raw_mut_ptr(),
+ peer.as_raw_mut_ptr(),
+ )
+ .from_sgx_result()?;
+ let local = string_from_bytebuffer(&local, "connect_stream", "local_addr");
+ let peer = string_from_bytebuffer(&peer, "connect_stream", "peer_addr");
+ Ok((fd, local, peer))
+ }
+}
+
+/// Usercall `launch_thread`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub unsafe fn launch_thread() -> IoResult<()> {
+ // SAFETY: The caller must uphold the safety contract for `launch_thread`.
+ unsafe { raw::launch_thread().from_sgx_result() }
+}
+
+/// Usercall `exit`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn exit(panic: bool) -> ! {
+ unsafe { raw::exit(panic) }
+}
+
+/// Usercall `wait`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn wait(event_mask: u64, mut timeout: u64) -> IoResult<u64> {
+ if timeout != WAIT_NO && timeout != WAIT_INDEFINITE {
+ // We don't want people to rely on accuracy of timeouts to make
+ // security decisions in an SGX enclave. That's why we add a random
+ // amount not exceeding +/- 10% to the timeout value to discourage
+ // people from relying on accuracy of timeouts while providing a way
+ // to make things work in other cases. Note that in the SGX threat
+ // model the enclave runner which is serving the wait usercall is not
+ // trusted to ensure accurate timeouts.
+ if let Ok(timeout_signed) = i64::try_from(timeout) {
+ let tenth = timeout_signed / 10;
+ let deviation = (rdrand64() as i64).checked_rem(tenth).unwrap_or(0);
+ timeout = timeout_signed.saturating_add(deviation) as _;
+ }
+ }
+ unsafe { raw::wait(event_mask, timeout).from_sgx_result() }
+}
+
+/// This function makes an effort to wait for a non-spurious event at least as
+/// long as `duration`. Note that in general there is no guarantee about accuracy
+/// of time and timeouts in SGX model. The enclave runner serving usercalls may
+/// lie about current time and/or ignore timeout values.
+///
+/// Once the event is observed, `should_wake_up` will be used to determine
+/// whether or not the event was spurious.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn wait_timeout<F>(event_mask: u64, duration: Duration, should_wake_up: F)
+where
+ F: Fn() -> bool,
+{
+ // Calls the wait usercall and checks the result. Returns true if event was
+ // returned, and false if WouldBlock/TimedOut was returned.
+ // If duration is None, it will use WAIT_NO.
+ fn wait_checked(event_mask: u64, duration: Option<Duration>) -> bool {
+ let timeout = duration.map_or(raw::WAIT_NO, |duration| {
+ cmp::min((u64::MAX - 1) as u128, duration.as_nanos()) as u64
+ });
+ match wait(event_mask, timeout) {
+ Ok(eventset) => {
+ if event_mask == 0 {
+ rtabort!("expected wait() to return Err, found Ok.");
+ }
+ rtassert!(eventset != 0 && eventset & !event_mask == 0);
+ true
+ }
+ Err(e) => {
+ rtassert!(e.kind() == ErrorKind::TimedOut || e.kind() == ErrorKind::WouldBlock);
+ false
+ }
+ }
+ }
+
+ match wait_checked(event_mask, Some(duration)) {
+ false => return, // timed out
+ true if should_wake_up() => return, // woken up
+ true => {} // spurious event
+ }
+
+ // Drain all cached events.
+ // Note that `event_mask != 0` is implied if we get here.
+ loop {
+ match wait_checked(event_mask, None) {
+ false => break, // no more cached events
+ true if should_wake_up() => return, // woken up
+ true => {} // spurious event
+ }
+ }
+
+ // Continue waiting, but take note of time spent waiting so we don't wait
+ // forever. We intentionally don't call `Instant::now()` before this point
+ // to avoid the cost of the `insecure_time` usercall in case there are no
+ // spurious wakeups.
+
+ let start = Instant::now();
+ let mut remaining = duration;
+ loop {
+ match wait_checked(event_mask, Some(remaining)) {
+ false => return, // timed out
+ true if should_wake_up() => return, // woken up
+ true => {} // spurious event
+ }
+ remaining = match duration.checked_sub(start.elapsed()) {
+ Some(remaining) => remaining,
+ None => break,
+ }
+ }
+}
+
+/// Usercall `send`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn send(event_set: u64, tcs: Option<Tcs>) -> IoResult<()> {
+ unsafe { raw::send(event_set, tcs).from_sgx_result() }
+}
+
+/// Usercall `insecure_time`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn insecure_time() -> Duration {
+ let t = unsafe { raw::insecure_time() };
+ Duration::new(t / 1_000_000_000, (t % 1_000_000_000) as _)
+}
+
+/// Usercall `alloc`. See the ABI documentation for more information.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub fn alloc(size: usize, alignment: usize) -> IoResult<*mut u8> {
+ unsafe { raw::alloc(size, alignment).from_sgx_result() }
+}
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+#[doc(inline)]
+pub use self::raw::free;
+
+fn check_os_error(err: Result) -> i32 {
+ // FIXME: not sure how to make sure all variants of Error are covered
+ if err == Error::NotFound as _
+ || err == Error::PermissionDenied as _
+ || err == Error::ConnectionRefused as _
+ || err == Error::ConnectionReset as _
+ || err == Error::ConnectionAborted as _
+ || err == Error::NotConnected as _
+ || err == Error::AddrInUse as _
+ || err == Error::AddrNotAvailable as _
+ || err == Error::BrokenPipe as _
+ || err == Error::AlreadyExists as _
+ || err == Error::WouldBlock as _
+ || err == Error::InvalidInput as _
+ || err == Error::InvalidData as _
+ || err == Error::TimedOut as _
+ || err == Error::WriteZero as _
+ || err == Error::Interrupted as _
+ || err == Error::Other as _
+ || err == Error::UnexpectedEof as _
+ || ((Error::UserRangeStart as _)..=(Error::UserRangeEnd as _)).contains(&err)
+ {
+ err
+ } else {
+ rtabort!("Usercall: returned invalid error value {err}")
+ }
+}
+
+trait FromSgxResult {
+ type Return;
+
+ fn from_sgx_result(self) -> IoResult<Self::Return>;
+}
+
+impl<T> FromSgxResult for (Result, T) {
+ type Return = T;
+
+ fn from_sgx_result(self) -> IoResult<Self::Return> {
+ if self.0 == RESULT_SUCCESS {
+ Ok(self.1)
+ } else {
+ Err(IoError::from_raw_os_error(check_os_error(self.0)))
+ }
+ }
+}
+
+impl FromSgxResult for Result {
+ type Return = ();
+
+ fn from_sgx_result(self) -> IoResult<Self::Return> {
+ if self == RESULT_SUCCESS {
+ Ok(())
+ } else {
+ Err(IoError::from_raw_os_error(check_os_error(self)))
+ }
+ }
+}
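
The timeout fuzzing in `wait` can be factored out as below. `random_i64` is a stand-in for the enclave's `rdrand64`-based source; only the arithmetic mirrors the code above:

    // Add a random deviation of at most +/- 10% so callers cannot rely on
    // exact timing of the wait usercall.
    fn fuzz_timeout(timeout: u64, random_i64: i64) -> u64 {
        if let Ok(timeout_signed) = i64::try_from(timeout) {
            let tenth = timeout_signed / 10;
            let deviation = random_i64.checked_rem(tenth).unwrap_or(0);
            timeout_signed.saturating_add(deviation) as u64
        } else {
            timeout
        }
    }

    fn main() {
        let t = fuzz_timeout(1_000_000, 123_456_789);
        // The result stays within 10% of the requested 1ms (in nanoseconds).
        assert!(t >= 900_000 && t <= 1_100_000);
        println!("fuzzed timeout: {t}");
    }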
diff --git a/library/std/src/sys/sgx/abi/usercalls/raw.rs b/library/std/src/sys/sgx/abi/usercalls/raw.rs
new file mode 100644
index 000000000..4267b96cc
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/usercalls/raw.rs
@@ -0,0 +1,251 @@
+#![allow(unused)]
+
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub use fortanix_sgx_abi::*;
+
+use crate::num::NonZeroU64;
+use crate::ptr::NonNull;
+
+#[repr(C)]
+struct UsercallReturn(u64, u64);
+
+extern "C" {
+ fn usercall(nr: NonZeroU64, p1: u64, p2: u64, abort: u64, p3: u64, p4: u64) -> UsercallReturn;
+}
+
+/// Performs the raw usercall operation as defined in the ABI calling convention.
+///
+/// # Safety
+///
+/// The caller must ensure to pass parameters appropriate for the usercall `nr`
+/// and to observe all requirements specified in the ABI.
+///
+/// # Panics
+///
+/// Panics if `nr` is `0`.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+#[inline]
+pub unsafe fn do_usercall(
+ nr: NonZeroU64,
+ p1: u64,
+ p2: u64,
+ p3: u64,
+ p4: u64,
+ abort: bool,
+) -> (u64, u64) {
+ let UsercallReturn(a, b) = unsafe { usercall(nr, p1, p2, abort as _, p3, p4) };
+ (a, b)
+}
+
+type Register = u64;
+
+trait RegisterArgument {
+ fn from_register(_: Register) -> Self;
+ fn into_register(self) -> Register;
+}
+
+trait ReturnValue {
+ fn from_registers(call: &'static str, regs: (Register, Register)) -> Self;
+}
+
+macro_rules! define_usercalls {
+ ($(fn $f:ident($($n:ident: $t:ty),*) $(-> $r:tt)*; )*) => {
+ /// Usercall numbers as per the ABI.
+ #[repr(u64)]
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+ #[allow(missing_docs, non_camel_case_types)]
+ #[non_exhaustive]
+ pub enum Usercalls {
+ #[doc(hidden)]
+ __enclave_usercalls_invalid = 0,
+ $($f,)*
+ }
+
+ $(enclave_usercalls_internal_define_usercalls!(def fn $f($($n: $t),*) $(-> $r)*);)*
+ };
+}
+
+macro_rules! define_ra {
+ (< $i:ident > $t:ty) => {
+ impl<$i> RegisterArgument for $t {
+ fn from_register(a: Register) -> Self {
+ a as _
+ }
+ fn into_register(self) -> Register {
+ self as _
+ }
+ }
+ };
+ ($i:ty as $t:ty) => {
+ impl RegisterArgument for $t {
+ fn from_register(a: Register) -> Self {
+ a as $i as _
+ }
+ fn into_register(self) -> Register {
+ self as $i as _
+ }
+ }
+ };
+ ($t:ty) => {
+ impl RegisterArgument for $t {
+ fn from_register(a: Register) -> Self {
+ a as _
+ }
+ fn into_register(self) -> Register {
+ self as _
+ }
+ }
+ };
+}
+
+define_ra!(Register);
+define_ra!(i64);
+define_ra!(u32);
+define_ra!(u32 as i32);
+define_ra!(u16);
+define_ra!(u16 as i16);
+define_ra!(u8);
+define_ra!(u8 as i8);
+define_ra!(usize);
+define_ra!(usize as isize);
+define_ra!(<T> *const T);
+define_ra!(<T> *mut T);
+
+impl RegisterArgument for bool {
+ fn from_register(a: Register) -> bool {
+ a != 0
+ }
+ fn into_register(self) -> Register {
+ self as _
+ }
+}
+
+impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> {
+ fn from_register(a: Register) -> Option<NonNull<T>> {
+ NonNull::new(a as _)
+ }
+ fn into_register(self) -> Register {
+ self.map_or(0 as _, NonNull::as_ptr) as _
+ }
+}
+
+impl ReturnValue for ! {
+ fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self {
+ rtabort!("Usercall {call}: did not expect to be re-entered");
+ }
+}
+
+impl ReturnValue for () {
+ fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
+ rtassert!(usercall_retval.0 == 0);
+ rtassert!(usercall_retval.1 == 0);
+ ()
+ }
+}
+
+impl<T: RegisterArgument> ReturnValue for T {
+ fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
+ rtassert!(usercall_retval.1 == 0);
+ T::from_register(usercall_retval.0)
+ }
+}
+
+impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) {
+ fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self {
+ (T::from_register(regs.0), U::from_register(regs.1))
+ }
+}
+
+macro_rules! return_type_is_abort {
+ (!) => {
+ true
+ };
+ ($r:ty) => {
+ false
+ };
+}
+
+// In this macro: using `$r:tt` because `$r:ty` doesn't match ! in `return_type_is_abort`
+macro_rules! enclave_usercalls_internal_define_usercalls {
+ (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty,
+ $n3:ident: $t3:ty, $n4:ident: $t4:ty) -> $r:tt) => (
+ /// This is the raw function definition, see the ABI documentation for
+ /// more information.
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ #[inline(always)]
+ pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3, $n4: $t4) -> $r {
+ ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
+ rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)),
+ RegisterArgument::into_register($n1),
+ RegisterArgument::into_register($n2),
+ RegisterArgument::into_register($n3),
+ RegisterArgument::into_register($n4),
+ return_type_is_abort!($r)
+ ) })
+ }
+ );
+ (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty, $n3:ident: $t3:ty) -> $r:tt) => (
+ /// This is the raw function definition, see the ABI documentation for
+ /// more information.
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ #[inline(always)]
+ pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3) -> $r {
+ ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
+ rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)),
+ RegisterArgument::into_register($n1),
+ RegisterArgument::into_register($n2),
+ RegisterArgument::into_register($n3),
+ 0,
+ return_type_is_abort!($r)
+ ) })
+ }
+ );
+ (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty) -> $r:tt) => (
+ /// This is the raw function definition, see the ABI documentation for
+ /// more information.
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ #[inline(always)]
+ pub unsafe fn $f($n1: $t1, $n2: $t2) -> $r {
+ ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
+ rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)),
+ RegisterArgument::into_register($n1),
+ RegisterArgument::into_register($n2),
+ 0,0,
+ return_type_is_abort!($r)
+ ) })
+ }
+ );
+ (def fn $f:ident($n1:ident: $t1:ty) -> $r:tt) => (
+ /// This is the raw function definition, see the ABI documentation for
+ /// more information.
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ #[inline(always)]
+ pub unsafe fn $f($n1: $t1) -> $r {
+ ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
+ rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)),
+ RegisterArgument::into_register($n1),
+ 0,0,0,
+ return_type_is_abort!($r)
+ ) })
+ }
+ );
+ (def fn $f:ident() -> $r:tt) => (
+ /// This is the raw function definition, see the ABI documentation for
+ /// more information.
+ #[unstable(feature = "sgx_platform", issue = "56975")]
+ #[inline(always)]
+ pub unsafe fn $f() -> $r {
+ ReturnValue::from_registers(stringify!($f), unsafe { do_usercall(
+ rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)),
+ 0,0,0,0,
+ return_type_is_abort!($r)
+ ) })
+ }
+ );
+ (def fn $f:ident($($n:ident: $t:ty),*)) => (
+ enclave_usercalls_internal_define_usercalls!(def fn $f($($n: $t),*) -> ());
+ );
+}
+
+invoke_with_usercalls!(define_usercalls);
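
Written out by hand, the `define_ra!(u32 as i32)` expansion above moves an `i32` through the 64-bit usercall register via `u32`, so the upper half stays zero and the round trip is lossless. A small sketch of that round trip:

    type Register = u64;

    // What the macro's `$i:ty as $t:ty` arm generates for i32: go through u32
    // in both directions.
    fn i32_into_register(v: i32) -> Register {
        v as u32 as Register
    }

    fn i32_from_register(r: Register) -> i32 {
        r as u32 as i32
    }

    fn main() {
        for v in [0, 1, -1, i32::MIN, i32::MAX] {
            let r = i32_into_register(v);
            assert!(r <= u32::MAX as Register); // upper half never carries sign bits
            assert_eq!(i32_from_register(r), v);
        }
    }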
diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs
new file mode 100644
index 000000000..cbf7d7d54
--- /dev/null
+++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs
@@ -0,0 +1,30 @@
+use super::alloc::copy_to_userspace;
+use super::alloc::User;
+
+#[test]
+fn test_copy_function() {
+ let mut src = [0u8; 100];
+ let mut dst = User::<[u8]>::uninitialized(100);
+
+ for i in 0..src.len() {
+ src[i] = i as _;
+ }
+
+ for size in 0..48 {
+ // For all possible alignment
+ for offset in 0..8 {
+ // overwrite complete dst
+ dst.copy_from_enclave(&[0u8; 100]);
+
+ // Copy src[0..size] to dst + offset
+ unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+
+ // Verify copy
+ for byte in 0..size {
+ unsafe {
+ assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+ }
+ }
+ }
+ }
+}
diff --git a/library/std/src/sys/sgx/alloc.rs b/library/std/src/sys/sgx/alloc.rs
new file mode 100644
index 000000000..4aea28cb8
--- /dev/null
+++ b/library/std/src/sys/sgx/alloc.rs
@@ -0,0 +1,98 @@
+use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::ptr;
+use crate::sys::sgx::abi::mem as sgx_mem;
+use core::sync::atomic::{AtomicBool, Ordering};
+
+use super::waitqueue::SpinMutex;
+
+// Using a SpinMutex because we never want to exit the enclave waiting for the
+// allocator.
+//
+// The current allocator here is the `dlmalloc` crate which we've got included
+// in the rust-lang/rust repository as a submodule. The crate is a port of
+// dlmalloc.c from C to Rust.
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx5alloc8DLMALLOCE"]
+static DLMALLOC: SpinMutex<dlmalloc::Dlmalloc<Sgx>> =
+ SpinMutex::new(dlmalloc::Dlmalloc::new_with_allocator(Sgx {}));
+
+struct Sgx;
+
+unsafe impl dlmalloc::Allocator for Sgx {
+ /// Allocates system resources.
+ fn alloc(&self, _size: usize) -> (*mut u8, usize, u32) {
+ static INIT: AtomicBool = AtomicBool::new(false);
+
+ // No ordering requirement since this function is protected by the global lock.
+ if !INIT.swap(true, Ordering::Relaxed) {
+ (sgx_mem::heap_base() as _, sgx_mem::heap_size(), 0)
+ } else {
+ (ptr::null_mut(), 0, 0)
+ }
+ }
+
+ fn remap(&self, _ptr: *mut u8, _oldsize: usize, _newsize: usize, _can_move: bool) -> *mut u8 {
+ ptr::null_mut()
+ }
+
+ fn free_part(&self, _ptr: *mut u8, _oldsize: usize, _newsize: usize) -> bool {
+ false
+ }
+
+ fn free(&self, _ptr: *mut u8, _size: usize) -> bool {
+ return false;
+ }
+
+ fn can_release_part(&self, _flags: u32) -> bool {
+ false
+ }
+
+ fn allocates_zeros(&self) -> bool {
+ false
+ }
+
+ fn page_size(&self) -> usize {
+ 0x1000
+ }
+}
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: the caller must uphold the safety contract for `malloc`
+ unsafe { DLMALLOC.lock().malloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: the caller must uphold the safety contract for `malloc`
+ unsafe { DLMALLOC.lock().calloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ // SAFETY: the caller must uphold the safety contract for `malloc`
+ unsafe { DLMALLOC.lock().free(ptr, layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY: the caller must uphold the safety contract for `malloc`
+ unsafe { DLMALLOC.lock().realloc(ptr, layout.size(), layout.align(), new_size) }
+ }
+}
+
+// The following functions are needed by libunwind. These symbols are named
+// in pre-link args for the target specification, so keep that in sync.
+#[cfg(not(test))]
+#[no_mangle]
+pub unsafe extern "C" fn __rust_c_alloc(size: usize, align: usize) -> *mut u8 {
+ unsafe { crate::alloc::alloc(Layout::from_size_align_unchecked(size, align)) }
+}
+
+#[cfg(not(test))]
+#[no_mangle]
+pub unsafe extern "C" fn __rust_c_dealloc(ptr: *mut u8, size: usize, align: usize) {
+ unsafe { crate::alloc::dealloc(ptr, Layout::from_size_align_unchecked(size, align)) }
+}
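
The allocator above hands the entire enclave heap to dlmalloc exactly once: the first call to `Sgx::alloc` returns `(heap_base, heap_size)` and every later call returns a null pointer, so dlmalloc keeps subdividing the region it already owns. A stand-alone sketch of that one-shot hand-out, with hypothetical `FakeRegion`/`take_region` names and a `Vec`-backed buffer standing in for the enclave heap:

    use std::ptr;
    use std::sync::atomic::{AtomicBool, Ordering};

    struct FakeRegion {
        base: *mut u8,
        size: usize,
    }

    static TAKEN: AtomicBool = AtomicBool::new(false);

    fn take_region(region: &FakeRegion) -> (*mut u8, usize) {
        // Relaxed is enough when callers already serialize through a lock,
        // as the dlmalloc global lock does in the code above.
        if !TAKEN.swap(true, Ordering::Relaxed) {
            (region.base, region.size)
        } else {
            (ptr::null_mut(), 0)
        }
    }

    fn main() {
        let mut backing = vec![0u8; 4096];
        let region = FakeRegion { base: backing.as_mut_ptr(), size: backing.len() };
        let first = take_region(&region);
        let second = take_region(&region);
        assert_eq!(first.1, 4096);      // first request gets the whole region
        assert!(second.0.is_null());    // later requests get nothing
    }
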
diff --git a/library/std/src/sys/sgx/args.rs b/library/std/src/sys/sgx/args.rs
new file mode 100644
index 000000000..ef4176c4a
--- /dev/null
+++ b/library/std/src/sys/sgx/args.rs
@@ -0,0 +1,59 @@
+use super::abi::usercalls::{alloc, raw::ByteBuffer};
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::slice;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sys::os_str::Buf;
+use crate::sys_common::FromInner;
+
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"]
+static ARGS: AtomicUsize = AtomicUsize::new(0);
+type ArgsStore = Vec<OsString>;
+
+#[cfg_attr(test, allow(dead_code))]
+pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ if argc != 0 {
+ let args = unsafe { alloc::User::<[ByteBuffer]>::from_raw_parts(argv as _, argc as _) };
+ let args = args
+ .iter()
+ .map(|a| OsString::from_inner(Buf { inner: a.copy_user_buffer() }))
+ .collect::<ArgsStore>();
+ ARGS.store(Box::into_raw(Box::new(args)) as _, Ordering::Relaxed);
+ }
+}
+
+pub fn args() -> Args {
+ let args = unsafe { (ARGS.load(Ordering::Relaxed) as *const ArgsStore).as_ref() };
+ if let Some(args) = args { Args(args.iter()) } else { Args([].iter()) }
+}
+
+pub struct Args(slice::Iter<'static, OsString>);
+
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.as_slice().fmt(f)
+ }
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ self.0.next().cloned()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ self.0.len()
+ }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.0.next_back().cloned()
+ }
+}
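
Command-line arguments are published by leaking a `Box` and storing its address in an `AtomicUsize`, then read back as a shared `'static` reference. A minimal stand-alone sketch of that publish/read pattern (the `Store`, `publish`, and `read` names are illustrative, not the library's API):

    use std::sync::atomic::{AtomicUsize, Ordering};

    type Store = Vec<String>;
    static SLOT: AtomicUsize = AtomicUsize::new(0);

    fn publish(values: Store) {
        // Leak the Box so the data lives for the rest of the program.
        SLOT.store(Box::into_raw(Box::new(values)) as usize, Ordering::Relaxed);
    }

    fn read() -> Option<&'static Store> {
        // A stored value of 0 means `publish` was never called.
        unsafe { (SLOT.load(Ordering::Relaxed) as *const Store).as_ref() }
    }

    fn main() {
        publish(vec!["enclave".into(), "--flag".into()]);
        assert_eq!(read().map(|v| v.len()), Some(2));
    }
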
diff --git a/library/std/src/sys/sgx/condvar.rs b/library/std/src/sys/sgx/condvar.rs
new file mode 100644
index 000000000..36534e0ef
--- /dev/null
+++ b/library/std/src/sys/sgx/condvar.rs
@@ -0,0 +1,45 @@
+use crate::sys::locks::Mutex;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+use crate::time::Duration;
+
+use super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
+
+pub struct Condvar {
+ inner: SpinMutex<WaitVariable<()>>,
+}
+
+pub(crate) type MovableCondvar = LazyBox<Condvar>;
+
+impl LazyInit for Condvar {
+ fn init() -> Box<Self> {
+ Box::new(Self::new())
+ }
+}
+
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar { inner: SpinMutex::new(WaitVariable::new(())) }
+ }
+
+ #[inline]
+ pub unsafe fn notify_one(&self) {
+ let _ = WaitQueue::notify_one(self.inner.lock());
+ }
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {
+ let _ = WaitQueue::notify_all(self.inner.lock());
+ }
+
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ let guard = self.inner.lock();
+ WaitQueue::wait(guard, || unsafe { mutex.unlock() });
+ unsafe { mutex.lock() }
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ let success = WaitQueue::wait_timeout(&self.inner, dur, || unsafe { mutex.unlock() });
+ unsafe { mutex.lock() };
+ success
+ }
+}
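
At the public API level, the unlock-wait-relock sequence implemented by the platform `Condvar::wait` above is what makes the usual `std::sync::Condvar` loop work; a small usage example:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    fn main() {
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = Arc::clone(&pair);

        let handle = thread::spawn(move || {
            let (lock, cvar) = &*pair2;
            *lock.lock().unwrap() = true;
            cvar.notify_one();
        });

        let (lock, cvar) = &*pair;
        let mut ready = lock.lock().unwrap();
        while !*ready {
            // `wait` atomically releases the mutex and blocks, mirroring the
            // unlock-then-wait sequence in the platform `Condvar::wait` above.
            ready = cvar.wait(ready).unwrap();
        }
        drop(ready);
        handle.join().unwrap();
    }
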
diff --git a/library/std/src/sys/sgx/env.rs b/library/std/src/sys/sgx/env.rs
new file mode 100644
index 000000000..8043b7c52
--- /dev/null
+++ b/library/std/src/sys/sgx/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "";
+ pub const OS: &str = "";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = ".sgxs";
+ pub const DLL_EXTENSION: &str = "sgxs";
+ pub const EXE_SUFFIX: &str = ".sgxs";
+ pub const EXE_EXTENSION: &str = "sgxs";
+}
diff --git a/library/std/src/sys/sgx/fd.rs b/library/std/src/sys/sgx/fd.rs
new file mode 100644
index 000000000..e5dc5b5ad
--- /dev/null
+++ b/library/std/src/sys/sgx/fd.rs
@@ -0,0 +1,84 @@
+use fortanix_sgx_abi::Fd;
+
+use super::abi::usercalls;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::mem;
+use crate::sys::{AsInner, FromInner, IntoInner};
+
+#[derive(Debug)]
+pub struct FileDesc {
+ fd: Fd,
+}
+
+impl FileDesc {
+ pub fn new(fd: Fd) -> FileDesc {
+ FileDesc { fd }
+ }
+
+ pub fn raw(&self) -> Fd {
+ self.fd
+ }
+
+ /// Extracts the actual file descriptor without closing it.
+ pub fn into_raw(self) -> Fd {
+ let fd = self.fd;
+ mem::forget(self);
+ fd
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ usercalls::read(self.fd, &mut [IoSliceMut::new(buf)])
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ usercalls::read(self.fd, bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ usercalls::write(self.fd, &[IoSlice::new(buf)])
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ usercalls::write(self.fd, bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ usercalls::flush(self.fd)
+ }
+}
+
+impl AsInner<Fd> for FileDesc {
+ fn as_inner(&self) -> &Fd {
+ &self.fd
+ }
+}
+
+impl IntoInner<Fd> for FileDesc {
+ fn into_inner(self) -> Fd {
+ let fd = self.fd;
+ mem::forget(self);
+ fd
+ }
+}
+
+impl FromInner<Fd> for FileDesc {
+ fn from_inner(fd: Fd) -> FileDesc {
+ FileDesc { fd }
+ }
+}
+
+impl Drop for FileDesc {
+ fn drop(&mut self) {
+ usercalls::close(self.fd)
+ }
+}
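
`FileDesc::into_raw` relies on `mem::forget` to hand out the raw descriptor without running `Drop`, so the usercall `close` is never issued twice. A stand-alone sketch of that pattern with a hypothetical `Handle` type:

    use std::mem;

    struct Handle(u32);

    impl Handle {
        fn into_raw(self) -> u32 {
            let raw = self.0;
            mem::forget(self); // skip Drop; the caller now owns the raw handle
            raw
        }
    }

    impl Drop for Handle {
        fn drop(&mut self) {
            println!("closing {}", self.0);
        }
    }

    fn main() {
        let h = Handle(7);
        let raw = h.into_raw(); // nothing is printed: Drop was skipped
        assert_eq!(raw, 7);
    }
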
diff --git a/library/std/src/sys/sgx/memchr.rs b/library/std/src/sys/sgx/memchr.rs
new file mode 100644
index 000000000..996748219
--- /dev/null
+++ b/library/std/src/sys/sgx/memchr.rs
@@ -0,0 +1 @@
+pub use core::slice::memchr::{memchr, memrchr};
diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs
new file mode 100644
index 000000000..696400670
--- /dev/null
+++ b/library/std/src/sys/sgx/mod.rs
@@ -0,0 +1,167 @@
+//! System bindings for the Fortanix SGX platform
+//!
+//! This module contains the facade (aka platform-specific) implementations of
+//! OS level functionality for Fortanix SGX.
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::io::ErrorKind;
+use crate::sync::atomic::{AtomicBool, Ordering};
+
+pub mod abi;
+mod waitqueue;
+
+pub mod alloc;
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+pub mod fd;
+#[path = "../unsupported/fs.rs"]
+pub mod fs;
+#[path = "../unsupported/io.rs"]
+pub mod io;
+pub mod memchr;
+pub mod net;
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+pub mod stdio;
+pub mod thread;
+pub mod thread_local_key;
+pub mod time;
+
+mod condvar;
+mod mutex;
+mod rwlock;
+
+pub mod locks {
+ pub use super::condvar::*;
+ pub use super::mutex::*;
+ pub use super::rwlock::*;
+}
+
+// SAFETY: must be called only once during runtime initialization.
+// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ unsafe {
+ args::init(argc, argv);
+ }
+}
+
+// SAFETY: must be called only once during runtime cleanup.
+// NOTE: this is not guaranteed to run, for example when the program aborts.
+pub unsafe fn cleanup() {}
+
+/// This function is used to implement functionality that simply doesn't exist.
+/// Programs relying on this functionality will need to deal with the error.
+pub fn unsupported<T>() -> crate::io::Result<T> {
+ Err(unsupported_err())
+}
+
+pub fn unsupported_err() -> crate::io::Error {
+ crate::io::const_io_error!(ErrorKind::Unsupported, "operation not supported on SGX yet")
+}
+
+/// This function is used to implement various functions that don't exist,
+/// but whose absence might not be a reason for error. If no error is
+/// returned, the program might very well be able to function normally; that is
+/// what happens while `SGX_INEFFECTIVE_ERROR` is `false` (its initial value).
+/// If it is `true`, an error is returned instead, as with `unsupported`.
+pub fn sgx_ineffective<T>(v: T) -> crate::io::Result<T> {
+ static SGX_INEFFECTIVE_ERROR: AtomicBool = AtomicBool::new(false);
+ if SGX_INEFFECTIVE_ERROR.load(Ordering::Relaxed) {
+ Err(crate::io::const_io_error!(
+ ErrorKind::Uncategorized,
+ "operation can't be trusted to have any effect on SGX",
+ ))
+ } else {
+ Ok(v)
+ }
+}
+
+pub fn decode_error_kind(code: i32) -> ErrorKind {
+ use fortanix_sgx_abi::Error;
+
+ // FIXME: not sure how to make sure all variants of Error are covered
+ if code == Error::NotFound as _ {
+ ErrorKind::NotFound
+ } else if code == Error::PermissionDenied as _ {
+ ErrorKind::PermissionDenied
+ } else if code == Error::ConnectionRefused as _ {
+ ErrorKind::ConnectionRefused
+ } else if code == Error::ConnectionReset as _ {
+ ErrorKind::ConnectionReset
+ } else if code == Error::ConnectionAborted as _ {
+ ErrorKind::ConnectionAborted
+ } else if code == Error::NotConnected as _ {
+ ErrorKind::NotConnected
+ } else if code == Error::AddrInUse as _ {
+ ErrorKind::AddrInUse
+ } else if code == Error::AddrNotAvailable as _ {
+ ErrorKind::AddrNotAvailable
+ } else if code == Error::BrokenPipe as _ {
+ ErrorKind::BrokenPipe
+ } else if code == Error::AlreadyExists as _ {
+ ErrorKind::AlreadyExists
+ } else if code == Error::WouldBlock as _ {
+ ErrorKind::WouldBlock
+ } else if code == Error::InvalidInput as _ {
+ ErrorKind::InvalidInput
+ } else if code == Error::InvalidData as _ {
+ ErrorKind::InvalidData
+ } else if code == Error::TimedOut as _ {
+ ErrorKind::TimedOut
+ } else if code == Error::WriteZero as _ {
+ ErrorKind::WriteZero
+ } else if code == Error::Interrupted as _ {
+ ErrorKind::Interrupted
+ } else if code == Error::Other as _ {
+ ErrorKind::Uncategorized
+ } else if code == Error::UnexpectedEof as _ {
+ ErrorKind::UnexpectedEof
+ } else {
+ ErrorKind::Uncategorized
+ }
+}
+
+pub fn abort_internal() -> ! {
+ abi::usercalls::exit(true)
+}
+
+// This function is needed by the panic runtime. The symbol is named in
+// pre-link args for the target specification, so keep that in sync.
+#[cfg(not(test))]
+#[no_mangle]
+// NB. used by both libunwind and libpanic_abort
+pub extern "C" fn __rust_abort() {
+ abort_internal();
+}
+
+pub mod rand {
+ pub fn rdrand64() -> u64 {
+ unsafe {
+ let mut ret: u64 = 0;
+ for _ in 0..10 {
+ if crate::arch::x86_64::_rdrand64_step(&mut ret) == 1 {
+ return ret;
+ }
+ }
+ rtabort!("Failed to obtain random data");
+ }
+ }
+}
+
+pub fn hashmap_random_keys() -> (u64, u64) {
+ (self::rand::rdrand64(), self::rand::rdrand64())
+}
+
+pub use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+pub trait TryIntoInner<Inner>: Sized {
+ fn try_into_inner(self) -> Result<Inner, Self>;
+}
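
`rand::rdrand64` retries the RDRAND intrinsic a bounded number of times and aborts if it never succeeds. A generic sketch of that bounded-retry shape, with a closure standing in for the hardware source (the `retry_up_to` name is illustrative; the real code calls `rtabort!` rather than panicking):

    fn retry_up_to<T>(attempts: u32, mut source: impl FnMut() -> Option<T>) -> T {
        for _ in 0..attempts {
            if let Some(v) = source() {
                return v;
            }
        }
        panic!("source failed after {attempts} attempts");
    }

    fn main() {
        let mut calls = 0u32;
        // A source that only succeeds on the third attempt.
        let v = retry_up_to(10, || {
            calls += 1;
            if calls < 3 { None } else { Some(42u64) }
        });
        assert_eq!(v, 42);
    }
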
diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs
new file mode 100644
index 000000000..513cd77fd
--- /dev/null
+++ b/library/std/src/sys/sgx/mutex.rs
@@ -0,0 +1,62 @@
+use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+pub struct Mutex {
+ inner: SpinMutex<WaitVariable<bool>>,
+}
+
+// not movable: see UnsafeList implementation
+pub(crate) type MovableMutex = LazyBox<Mutex>;
+
+impl LazyInit for Mutex {
+ fn init() -> Box<Self> {
+ Box::new(Self::new())
+ }
+}
+
+// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28
+impl Mutex {
+ pub const fn new() -> Mutex {
+ Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ let mut guard = self.inner.lock();
+ if *guard.lock_var() {
+ // Another thread has the lock, wait
+ WaitQueue::wait(guard, || {})
+ // Another thread has passed the lock to us
+ } else {
+ // We are just now obtaining the lock
+ *guard.lock_var_mut() = true;
+ }
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ let guard = self.inner.lock();
+ if let Err(mut guard) = WaitQueue::notify_one(guard) {
+ // No other waiters, unlock
+ *guard.lock_var_mut() = false;
+ } else {
+ // There was a thread waiting, just pass the lock
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ let mut guard = try_lock_or_false!(self.inner);
+ if *guard.lock_var() {
+ // Another thread has the lock
+ false
+ } else {
+ // We are just now obtaining the lock
+ *guard.lock_var_mut() = true;
+ true
+ }
+ }
+}
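
`Mutex::unlock` above either clears the locked flag or, if a waiter is queued, leaves the flag set and passes ownership straight to that waiter, so a woken thread never races to re-acquire. A simplified stand-alone model of that hand-off (a `VecDeque` of thread ids stands in for the wait queue; all names are illustrative):

    use std::collections::VecDeque;

    struct HandOffLock {
        locked: bool,
        waiters: VecDeque<u32>, // waiting thread ids, for illustration
    }

    impl HandOffLock {
        fn unlock(&mut self) -> Option<u32> {
            match self.waiters.pop_front() {
                // A waiter exists: keep `locked == true` and pass ownership on.
                Some(next) => Some(next),
                // Nobody is waiting: actually release the lock.
                None => {
                    self.locked = false;
                    None
                }
            }
        }
    }

    fn main() {
        let mut lock = HandOffLock { locked: true, waiters: VecDeque::from([7]) };
        assert_eq!(lock.unlock(), Some(7)); // handed to thread 7, still locked
        assert!(lock.locked);
        assert_eq!(lock.unlock(), None);    // no waiters: released
        assert!(!lock.locked);
    }
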
diff --git a/library/std/src/sys/sgx/net.rs b/library/std/src/sys/sgx/net.rs
new file mode 100644
index 000000000..4c4cd7d1d
--- /dev/null
+++ b/library/std/src/sys/sgx/net.rs
@@ -0,0 +1,541 @@
+use crate::error;
+use crate::fmt;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr, ToSocketAddrs};
+use crate::sync::Arc;
+use crate::sys::fd::FileDesc;
+use crate::sys::{sgx_ineffective, unsupported, AsInner, FromInner, IntoInner, TryIntoInner};
+use crate::time::Duration;
+
+use super::abi::usercalls;
+
+const DEFAULT_FAKE_TTL: u32 = 64;
+
+#[derive(Debug, Clone)]
+pub struct Socket {
+ inner: Arc<FileDesc>,
+ local_addr: Option<String>,
+}
+
+impl Socket {
+ fn new(fd: usercalls::raw::Fd, local_addr: String) -> Socket {
+ Socket { inner: Arc::new(FileDesc::new(fd)), local_addr: Some(local_addr) }
+ }
+}
+
+impl AsInner<FileDesc> for Socket {
+ fn as_inner(&self) -> &FileDesc {
+ &self.inner
+ }
+}
+
+impl TryIntoInner<FileDesc> for Socket {
+ fn try_into_inner(self) -> Result<FileDesc, Socket> {
+ let Socket { inner, local_addr } = self;
+ Arc::try_unwrap(inner).map_err(|inner| Socket { inner, local_addr })
+ }
+}
+
+impl FromInner<(FileDesc, Option<String>)> for Socket {
+ fn from_inner((inner, local_addr): (FileDesc, Option<String>)) -> Socket {
+ Socket { inner: Arc::new(inner), local_addr }
+ }
+}
+
+#[derive(Clone)]
+pub struct TcpStream {
+ inner: Socket,
+ peer_addr: Option<String>,
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut res = f.debug_struct("TcpStream");
+
+ if let Some(ref addr) = self.inner.local_addr {
+ res.field("addr", addr);
+ }
+
+ if let Some(ref peer) = self.peer_addr {
+ res.field("peer", peer);
+ }
+
+ res.field("fd", &self.inner.inner.as_inner()).finish()
+ }
+}
+
+fn io_err_to_addr(result: io::Result<&SocketAddr>) -> io::Result<String> {
+ match result {
+ Ok(saddr) => Ok(saddr.to_string()),
+ // need to downcast twice because io::Error::into_inner doesn't return the original
+ // value if the conversion fails
+ Err(e) => {
+ if e.get_ref().and_then(|e| e.downcast_ref::<NonIpSockAddr>()).is_some() {
+ Ok(e.into_inner().unwrap().downcast::<NonIpSockAddr>().unwrap().host)
+ } else {
+ Err(e)
+ }
+ }
+ }
+}
+
+fn addr_to_sockaddr(addr: &Option<String>) -> io::Result<SocketAddr> {
+ addr.as_ref()
+ .ok_or(io::ErrorKind::AddrNotAvailable)?
+ .to_socket_addrs()
+ // unwrap OK: if an iterator is returned, we're guaranteed to get exactly one entry
+ .map(|mut it| it.next().unwrap())
+}
+
+impl TcpStream {
+ pub fn connect(addr: io::Result<&SocketAddr>) -> io::Result<TcpStream> {
+ let addr = io_err_to_addr(addr)?;
+ let (fd, local_addr, peer_addr) = usercalls::connect_stream(&addr)?;
+ Ok(TcpStream { inner: Socket::new(fd, local_addr), peer_addr: Some(peer_addr) })
+ }
+
+ pub fn connect_timeout(addr: &SocketAddr, dur: Duration) -> io::Result<TcpStream> {
+ if dur == Duration::default() {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+ Self::connect(Ok(addr)) // FIXME: ignoring timeout
+ }
+
+ pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ match dur {
+ Some(dur) if dur == Duration::default() => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+ _ => sgx_ineffective(()),
+ }
+ }
+
+ pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ match dur {
+ Some(dur) if dur == Duration::default() => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+ _ => sgx_ineffective(()),
+ }
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ sgx_ineffective(None)
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ sgx_ineffective(None)
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ Ok(0)
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.inner.read(buf)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.inner.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.inner.inner.is_read_vectored()
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.inner.inner.write(buf)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.inner.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.inner.inner.is_write_vectored()
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ addr_to_sockaddr(&self.peer_addr)
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ addr_to_sockaddr(&self.inner.local_addr)
+ }
+
+ pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpStream> {
+ Ok(self.clone())
+ }
+
+ pub fn set_linger(&self, _: Option<Duration>) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ sgx_ineffective(None)
+ }
+
+ pub fn set_nodelay(&self, _: bool) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ sgx_ineffective(false)
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ sgx_ineffective(DEFAULT_FAKE_TTL)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ Ok(None)
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+}
+
+impl AsInner<Socket> for TcpStream {
+ fn as_inner(&self) -> &Socket {
+ &self.inner
+ }
+}
+
+// `Inner` includes `peer_addr` so that a `TcpStream` may be correctly
+// reconstructed if `Socket::try_into_inner` fails.
+impl IntoInner<(Socket, Option<String>)> for TcpStream {
+ fn into_inner(self) -> (Socket, Option<String>) {
+ (self.inner, self.peer_addr)
+ }
+}
+
+impl FromInner<(Socket, Option<String>)> for TcpStream {
+ fn from_inner((inner, peer_addr): (Socket, Option<String>)) -> TcpStream {
+ TcpStream { inner, peer_addr }
+ }
+}
+
+#[derive(Clone)]
+pub struct TcpListener {
+ inner: Socket,
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut res = f.debug_struct("TcpListener");
+
+ if let Some(ref addr) = self.inner.local_addr {
+ res.field("addr", addr);
+ }
+
+ res.field("fd", &self.inner.inner.as_inner()).finish()
+ }
+}
+
+impl TcpListener {
+ pub fn bind(addr: io::Result<&SocketAddr>) -> io::Result<TcpListener> {
+ let addr = io_err_to_addr(addr)?;
+ let (fd, local_addr) = usercalls::bind_stream(&addr)?;
+ Ok(TcpListener { inner: Socket::new(fd, local_addr) })
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ addr_to_sockaddr(&self.inner.local_addr)
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ let (fd, local_addr, peer_addr) = usercalls::accept_stream(self.inner.inner.raw())?;
+ let peer_addr = Some(peer_addr);
+ let ret_peer = addr_to_sockaddr(&peer_addr).unwrap_or_else(|_| ([0; 4], 0).into());
+ Ok((TcpStream { inner: Socket::new(fd, local_addr), peer_addr }, ret_peer))
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpListener> {
+ Ok(self.clone())
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ sgx_ineffective(DEFAULT_FAKE_TTL)
+ }
+
+ pub fn set_only_v6(&self, _: bool) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ sgx_ineffective(false)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ Ok(None)
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ sgx_ineffective(())
+ }
+}
+
+impl AsInner<Socket> for TcpListener {
+ fn as_inner(&self) -> &Socket {
+ &self.inner
+ }
+}
+
+impl IntoInner<Socket> for TcpListener {
+ fn into_inner(self) -> Socket {
+ self.inner
+ }
+}
+
+impl FromInner<Socket> for TcpListener {
+ fn from_inner(inner: Socket) -> TcpListener {
+ TcpListener { inner }
+ }
+}
+
+pub struct UdpSocket(!);
+
+impl UdpSocket {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> {
+ unsupported()
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0
+ }
+
+ pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0
+ }
+
+ pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<UdpSocket> {
+ self.0
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn set_broadcast(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn send(&self, _: &[u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct NonIpSockAddr {
+ host: String,
+}
+
+impl error::Error for NonIpSockAddr {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "Failed to convert address to SocketAddr"
+ }
+}
+
+impl fmt::Display for NonIpSockAddr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Failed to convert address to SocketAddr: {}", self.host)
+ }
+}
+
+pub struct LookupHost(!);
+
+impl LookupHost {
+ fn new(host: String) -> io::Result<LookupHost> {
+ Err(io::Error::new(io::ErrorKind::Uncategorized, NonIpSockAddr { host }))
+ }
+
+ pub fn port(&self) -> u16 {
+ self.0
+ }
+}
+
+impl Iterator for LookupHost {
+ type Item = SocketAddr;
+ fn next(&mut self) -> Option<SocketAddr> {
+ self.0
+ }
+}
+
+impl TryFrom<&str> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(v: &str) -> io::Result<LookupHost> {
+ LookupHost::new(v.to_owned())
+ }
+}
+
+impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from((host, port): (&'a str, u16)) -> io::Result<LookupHost> {
+ LookupHost::new(format!("{host}:{port}"))
+ }
+}
+
+#[allow(bad_style)]
+pub mod netc {
+ pub const AF_INET: u8 = 0;
+ pub const AF_INET6: u8 = 1;
+ pub type sa_family_t = u8;
+
+ #[derive(Copy, Clone)]
+ pub struct in_addr {
+ pub s_addr: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in {
+ pub sin_family: sa_family_t,
+ pub sin_port: u16,
+ pub sin_addr: in_addr,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct in6_addr {
+ pub s6_addr: [u8; 16],
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in6 {
+ pub sin6_family: sa_family_t,
+ pub sin6_port: u16,
+ pub sin6_addr: in6_addr,
+ pub sin6_flowinfo: u32,
+ pub sin6_scope_id: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr {}
+}
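
`io_err_to_addr` smuggles a non-IP host string through an `io::Error` and recovers it by downcasting twice: once by reference to check the type, then by value to take ownership. A stand-alone sketch of that round trip with a hypothetical `NotAnAddr` error type:

    use std::error::Error;
    use std::fmt;
    use std::io;

    #[derive(Debug)]
    struct NotAnAddr {
        host: String,
    }

    impl fmt::Display for NotAnAddr {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "not an IP address: {}", self.host)
        }
    }

    impl Error for NotAnAddr {}

    fn recover_host(err: io::Error) -> Option<String> {
        // Check the inner type by reference first...
        if err.get_ref().and_then(|e| e.downcast_ref::<NotAnAddr>()).is_some() {
            // ...then take ownership; unwraps are fine because we just checked.
            Some(err.into_inner().unwrap().downcast::<NotAnAddr>().unwrap().host)
        } else {
            None
        }
    }

    fn main() {
        let err = io::Error::new(io::ErrorKind::Other, NotAnAddr { host: "example.com:443".into() });
        assert_eq!(recover_host(err), Some("example.com:443".to_string()));
    }
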
diff --git a/library/std/src/sys/sgx/os.rs b/library/std/src/sys/sgx/os.rs
new file mode 100644
index 000000000..5da0257f3
--- /dev/null
+++ b/library/std/src/sys/sgx/os.rs
@@ -0,0 +1,140 @@
+use fortanix_sgx_abi::{Error, RESULT_SUCCESS};
+
+use crate::collections::HashMap;
+use crate::error::Error as StdError;
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::path::{self, PathBuf};
+use crate::str;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::Mutex;
+use crate::sync::Once;
+use crate::sys::{decode_error_kind, sgx_ineffective, unsupported};
+use crate::vec;
+
+pub fn errno() -> i32 {
+ RESULT_SUCCESS
+}
+
+pub fn error_string(errno: i32) -> String {
+ if errno == RESULT_SUCCESS {
+ "operation successful".into()
+ } else if ((Error::UserRangeStart as _)..=(Error::UserRangeEnd as _)).contains(&errno) {
+ format!("user-specified error {errno:08x}")
+ } else {
+ decode_error_kind(errno).as_str().into()
+ }
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn chdir(_: &path::Path) -> io::Result<()> {
+ sgx_ineffective(())
+}
+
+pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported in SGX yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "not supported in SGX yet"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx2os3ENVE"]
+static ENV: AtomicUsize = AtomicUsize::new(0);
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx2os8ENV_INITE"]
+static ENV_INIT: Once = Once::new();
+type EnvStore = Mutex<HashMap<OsString, OsString>>;
+
+fn get_env_store() -> Option<&'static EnvStore> {
+ unsafe { (ENV.load(Ordering::Relaxed) as *const EnvStore).as_ref() }
+}
+
+fn create_env_store() -> &'static EnvStore {
+ ENV_INIT.call_once(|| {
+ ENV.store(Box::into_raw(Box::new(EnvStore::default())) as _, Ordering::Relaxed)
+ });
+ unsafe { &*(ENV.load(Ordering::Relaxed) as *const EnvStore) }
+}
+
+pub type Env = vec::IntoIter<(OsString, OsString)>;
+
+pub fn env() -> Env {
+ let clone_to_vec = |map: &HashMap<OsString, OsString>| -> Vec<_> {
+ map.iter().map(|(k, v)| (k.clone(), v.clone())).collect()
+ };
+
+ get_env_store().map(|env| clone_to_vec(&env.lock().unwrap())).unwrap_or_default().into_iter()
+}
+
+pub fn getenv(k: &OsStr) -> Option<OsString> {
+ get_env_store().and_then(|s| s.lock().unwrap().get(k).cloned())
+}
+
+pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
+ let (k, v) = (k.to_owned(), v.to_owned());
+ create_env_store().lock().unwrap().insert(k, v);
+ Ok(())
+}
+
+pub fn unsetenv(k: &OsStr) -> io::Result<()> {
+ if let Some(env) = get_env_store() {
+ env.lock().unwrap().remove(k);
+ }
+ Ok(())
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no filesystem in SGX")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(code: i32) -> ! {
+ super::abi::exit_with_code(code as _)
+}
+
+pub fn getpid() -> u32 {
+ panic!("no pids in SGX")
+}
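
Unlike the argument store, the environment store can be created lazily by racing threads, so `create_env_store` funnels creation through a `Once` before leaking the `Box`. A stand-alone sketch of that get-or-create pattern (the `Store`/`store` names are illustrative):

    use std::collections::HashMap;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{Mutex, Once};

    type Store = Mutex<HashMap<String, String>>;

    static SLOT: AtomicUsize = AtomicUsize::new(0);
    static INIT: Once = Once::new();

    fn store() -> &'static Store {
        // `call_once` serializes racing creators; the winner leaks the Box.
        INIT.call_once(|| {
            SLOT.store(Box::into_raw(Box::new(Store::default())) as usize, Ordering::Relaxed)
        });
        unsafe { &*(SLOT.load(Ordering::Relaxed) as *const Store) }
    }

    fn main() {
        store().lock().unwrap().insert("RUST_LOG".into(), "debug".into());
        assert_eq!(
            store().lock().unwrap().get("RUST_LOG").map(String::as_str),
            Some("debug")
        );
    }
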
diff --git a/library/std/src/sys/sgx/path.rs b/library/std/src/sys/sgx/path.rs
new file mode 100644
index 000000000..c805c15e7
--- /dev/null
+++ b/library/std/src/sys/sgx/path.rs
@@ -0,0 +1,25 @@
+use crate::ffi::OsStr;
+use crate::io;
+use crate::path::{Path, PathBuf, Prefix};
+use crate::sys::unsupported;
+
+#[inline]
+pub fn is_sep_byte(b: u8) -> bool {
+ b == b'/'
+}
+
+#[inline]
+pub fn is_verbatim_sep(b: u8) -> bool {
+ b == b'/'
+}
+
+pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> {
+ None
+}
+
+pub const MAIN_SEP_STR: &str = "/";
+pub const MAIN_SEP: char = '/';
+
+pub(crate) fn absolute(_path: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
diff --git a/library/std/src/sys/sgx/rwlock.rs b/library/std/src/sys/sgx/rwlock.rs
new file mode 100644
index 000000000..a97fb9ab0
--- /dev/null
+++ b/library/std/src/sys/sgx/rwlock.rs
@@ -0,0 +1,212 @@
+#[cfg(test)]
+mod tests;
+
+use crate::num::NonZeroUsize;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+use super::waitqueue::{
+ try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
+};
+use crate::mem;
+
+pub struct RwLock {
+ readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
+ writer: SpinMutex<WaitVariable<bool>>,
+}
+
+pub(crate) type MovableRwLock = LazyBox<RwLock>;
+
+impl LazyInit for RwLock {
+ fn init() -> Box<Self> {
+ Box::new(Self::new())
+ }
+}
+
+// Check at compile time that RwLock size matches C definition (see test_c_rwlock_initializer below)
+//
+// # Safety
+// Never called, as it is a compile time check.
+#[allow(dead_code)]
+unsafe fn rw_lock_size_assert(r: RwLock) {
+ unsafe { mem::transmute::<RwLock, [u8; 144]>(r) };
+}
+
+impl RwLock {
+ pub const fn new() -> RwLock {
+ RwLock {
+ readers: SpinMutex::new(WaitVariable::new(None)),
+ writer: SpinMutex::new(WaitVariable::new(false)),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ let mut rguard = self.readers.lock();
+ let wguard = self.writer.lock();
+ if *wguard.lock_var() || !wguard.queue_empty() {
+ // Another thread has or is waiting for the write lock, wait
+ drop(wguard);
+ WaitQueue::wait(rguard, || {});
+ // Another thread has passed the lock to us
+ } else {
+ // No waiting writers, acquire the read lock
+ *rguard.lock_var_mut() =
+ NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ let mut rguard = try_lock_or_false!(self.readers);
+ let wguard = try_lock_or_false!(self.writer);
+ if *wguard.lock_var() || !wguard.queue_empty() {
+ // Another thread has or is waiting for the write lock
+ false
+ } else {
+ // No waiting writers, acquire the read lock
+ *rguard.lock_var_mut() =
+ NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
+ true
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ let rguard = self.readers.lock();
+ let mut wguard = self.writer.lock();
+ if *wguard.lock_var() || rguard.lock_var().is_some() {
+ // Another thread has the lock, wait
+ drop(rguard);
+ WaitQueue::wait(wguard, || {});
+ // Another thread has passed the lock to us
+ } else {
+ // We are just now obtaining the lock
+ *wguard.lock_var_mut() = true;
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ let rguard = try_lock_or_false!(self.readers);
+ let mut wguard = try_lock_or_false!(self.writer);
+ if *wguard.lock_var() || rguard.lock_var().is_some() {
+ // Another thread has the lock
+ false
+ } else {
+ // We are just now obtaining the lock
+ *wguard.lock_var_mut() = true;
+ true
+ }
+ }
+
+ #[inline]
+ unsafe fn __read_unlock(
+ &self,
+ mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>,
+ wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
+ ) {
+ *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1);
+ if rguard.lock_var().is_some() {
+ // There are other active readers
+ } else {
+ if let Ok(mut wguard) = WaitQueue::notify_one(wguard) {
+ // A writer was waiting, pass the lock
+ *wguard.lock_var_mut() = true;
+ wguard.drop_after(rguard);
+ } else {
+ // No writers were waiting, the lock is released
+ rtassert!(rguard.queue_empty());
+ }
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ let rguard = self.readers.lock();
+ let wguard = self.writer.lock();
+ unsafe { self.__read_unlock(rguard, wguard) };
+ }
+
+ #[inline]
+ unsafe fn __write_unlock(
+ &self,
+ rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>,
+ wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
+ ) {
+ match WaitQueue::notify_one(wguard) {
+ Err(mut wguard) => {
+ // No writers waiting, release the write lock
+ *wguard.lock_var_mut() = false;
+ if let Ok(mut rguard) = WaitQueue::notify_all(rguard) {
+ // One or more readers were waiting, pass the lock to them
+ if let NotifiedTcs::All { count } = rguard.notified_tcs() {
+ *rguard.lock_var_mut() = Some(count)
+ } else {
+ unreachable!() // called notify_all
+ }
+ rguard.drop_after(wguard);
+ } else {
+ // No readers waiting, the lock is released
+ }
+ }
+ Ok(wguard) => {
+ // There was a thread waiting for write, just pass the lock
+ wguard.drop_after(rguard);
+ }
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ let rguard = self.readers.lock();
+ let wguard = self.writer.lock();
+ unsafe { self.__write_unlock(rguard, wguard) };
+ }
+
+ // only used by __rust_rwlock_unlock below
+ #[inline]
+ #[cfg_attr(test, allow(dead_code))]
+ unsafe fn unlock(&self) {
+ let rguard = self.readers.lock();
+ let wguard = self.writer.lock();
+ if *wguard.lock_var() {
+ unsafe { self.__write_unlock(rguard, wguard) };
+ } else {
+ unsafe { self.__read_unlock(rguard, wguard) };
+ }
+ }
+}
+
+// The following functions are needed by libunwind. These symbols are named
+// in pre-link args for the target specification, so keep that in sync.
+#[cfg(not(test))]
+const EINVAL: i32 = 22;
+
+#[cfg(not(test))]
+#[no_mangle]
+pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 {
+ if p.is_null() {
+ return EINVAL;
+ }
+ unsafe { (*p).read() };
+ return 0;
+}
+
+#[cfg(not(test))]
+#[no_mangle]
+pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
+ if p.is_null() {
+ return EINVAL;
+ }
+ unsafe { (*p).write() };
+ return 0;
+}
+#[cfg(not(test))]
+#[no_mangle]
+pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
+ if p.is_null() {
+ return EINVAL;
+ }
+ unsafe { (*p).unlock() };
+ return 0;
+}
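
The reader side of the `RwLock` above encodes its count as `Option<NonZeroUsize>`, so `None` doubles as the read-unlocked state and no separate flag is needed. A small stand-alone sketch of that representation (function names are illustrative):

    use std::num::NonZeroUsize;

    fn add_reader(count: Option<NonZeroUsize>) -> Option<NonZeroUsize> {
        NonZeroUsize::new(count.map_or(0, |n| n.get()) + 1)
    }

    fn remove_reader(count: Option<NonZeroUsize>) -> Option<NonZeroUsize> {
        // Dropping to zero yields None, i.e. "read-unlocked".
        NonZeroUsize::new(count.expect("no readers to remove").get() - 1)
    }

    fn main() {
        let mut readers = None;
        readers = add_reader(readers);
        readers = add_reader(readers);
        assert_eq!(readers.map(NonZeroUsize::get), Some(2));
        readers = remove_reader(readers);
        readers = remove_reader(readers);
        assert_eq!(readers, None); // back to "read-unlocked"
    }
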
diff --git a/library/std/src/sys/sgx/rwlock/tests.rs b/library/std/src/sys/sgx/rwlock/tests.rs
new file mode 100644
index 000000000..479996115
--- /dev/null
+++ b/library/std/src/sys/sgx/rwlock/tests.rs
@@ -0,0 +1,31 @@
+use super::*;
+
+// Verify that the byte pattern libunwind uses to initialize an RwLock is
+// equivalent to the value of RwLock::new(). If the value changes,
+// `src/UnwindRustSgx.h` in libunwind needs to be changed too.
+#[test]
+fn test_c_rwlock_initializer() {
+ #[rustfmt::skip]
+ const C_RWLOCK_INIT: &[u8] = &[
+ /* 0x00 */ 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x10 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x20 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x30 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x40 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x50 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x60 */ 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x70 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ /* 0x80 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ ];
+
+ // For the test to work, we need the padding/unused bytes in RwLock to be
+ // initialized as 0. In practice, this is the case with statics.
+ static RUST_RWLOCK_INIT: RwLock = RwLock::new();
+
+ unsafe {
+ // If the assertion fails, that is not necessarily an issue with the value
+ // of C_RWLOCK_INIT. It might just be an issue with the way padding
+ // bytes are initialized in the test code.
+ assert_eq!(&crate::mem::transmute_copy::<_, [u8; 144]>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
+ };
+}
diff --git a/library/std/src/sys/sgx/stdio.rs b/library/std/src/sys/sgx/stdio.rs
new file mode 100644
index 000000000..2e680e740
--- /dev/null
+++ b/library/std/src/sys/sgx/stdio.rs
@@ -0,0 +1,88 @@
+use fortanix_sgx_abi as abi;
+
+use crate::io;
+#[cfg(not(test))]
+use crate::slice;
+#[cfg(not(test))]
+use crate::str;
+use crate::sys::fd::FileDesc;
+
+pub struct Stdin(());
+pub struct Stdout(());
+pub struct Stderr(());
+
+fn with_std_fd<F: FnOnce(&FileDesc) -> R, R>(fd: abi::Fd, f: F) -> R {
+ let fd = FileDesc::new(fd);
+ let ret = f(&fd);
+ fd.into_raw();
+ ret
+}
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin(())
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ with_std_fd(abi::FD_STDIN, |fd| fd.read(buf))
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout(())
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ with_std_fd(abi::FD_STDOUT, |fd| fd.write(buf))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ with_std_fd(abi::FD_STDOUT, |fd| fd.flush())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr(())
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ with_std_fd(abi::FD_STDERR, |fd| fd.write(buf))
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ with_std_fd(abi::FD_STDERR, |fd| fd.flush())
+ }
+}
+
+pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
+
+pub fn is_ebadf(err: &io::Error) -> bool {
+ // FIXME: Rust normally maps Unix EBADF to `Uncategorized`
+ err.raw_os_error() == Some(abi::Error::BrokenPipe as _)
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ super::abi::panic::SgxPanicOutput::new()
+}
+
+// This function is needed by libunwind. The symbol is named in pre-link args
+// for the target specification, so keep that in sync.
+#[cfg(not(test))]
+#[no_mangle]
+pub unsafe extern "C" fn __rust_print_err(m: *mut u8, s: i32) {
+ if s < 0 {
+ return;
+ }
+ let buf = unsafe { slice::from_raw_parts(m as *const u8, s as _) };
+ if let Ok(s) = str::from_utf8(&buf[..buf.iter().position(|&b| b == 0).unwrap_or(buf.len())]) {
+ eprint!("{s}");
+ }
+}
diff --git a/library/std/src/sys/sgx/thread.rs b/library/std/src/sys/sgx/thread.rs
new file mode 100644
index 000000000..d745a6196
--- /dev/null
+++ b/library/std/src/sys/sgx/thread.rs
@@ -0,0 +1,152 @@
+#![cfg_attr(test, allow(dead_code))] // why is this necessary?
+use super::unsupported;
+use crate::ffi::CStr;
+use crate::io;
+use crate::num::NonZeroUsize;
+use crate::time::Duration;
+
+use super::abi::usercalls;
+
+pub struct Thread(task_queue::JoinHandle);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+pub use self::task_queue::JoinNotifier;
+
+mod task_queue {
+ use super::wait_notify;
+ use crate::sync::{Mutex, MutexGuard, Once};
+
+ pub type JoinHandle = wait_notify::Waiter;
+
+ pub struct JoinNotifier(Option<wait_notify::Notifier>);
+
+ impl Drop for JoinNotifier {
+ fn drop(&mut self) {
+ self.0.take().unwrap().notify();
+ }
+ }
+
+ pub(super) struct Task {
+ p: Box<dyn FnOnce()>,
+ done: JoinNotifier,
+ }
+
+ impl Task {
+ pub(super) fn new(p: Box<dyn FnOnce()>) -> (Task, JoinHandle) {
+ let (done, recv) = wait_notify::new();
+ let done = JoinNotifier(Some(done));
+ (Task { p, done }, recv)
+ }
+
+ pub(super) fn run(self) -> JoinNotifier {
+ (self.p)();
+ self.done
+ }
+ }
+
+ #[cfg_attr(test, linkage = "available_externally")]
+ #[export_name = "_ZN16__rust_internals3std3sys3sgx6thread15TASK_QUEUE_INITE"]
+ static TASK_QUEUE_INIT: Once = Once::new();
+ #[cfg_attr(test, linkage = "available_externally")]
+ #[export_name = "_ZN16__rust_internals3std3sys3sgx6thread10TASK_QUEUEE"]
+ static mut TASK_QUEUE: Option<Mutex<Vec<Task>>> = None;
+
+ pub(super) fn lock() -> MutexGuard<'static, Vec<Task>> {
+ unsafe {
+ TASK_QUEUE_INIT.call_once(|| TASK_QUEUE = Some(Default::default()));
+ TASK_QUEUE.as_ref().unwrap().lock().unwrap()
+ }
+ }
+}
+
+/// This module provides a synchronization primitive that does not use thread
+/// local variables. This is needed for signaling that a thread has finished
+/// execution. The signal is sent once all TLS destructors have finished, at
+/// which point no new thread locals should be created.
+pub mod wait_notify {
+ use super::super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
+ use crate::sync::Arc;
+
+ pub struct Notifier(Arc<SpinMutex<WaitVariable<bool>>>);
+
+ impl Notifier {
+ /// Notify the waiter. The waiter is either notified right away (if
+ /// currently blocked in `Waiter::wait()`) or later when it calls the
+ /// `Waiter::wait()` method.
+ pub fn notify(self) {
+ let mut guard = self.0.lock();
+ *guard.lock_var_mut() = true;
+ let _ = WaitQueue::notify_one(guard);
+ }
+ }
+
+ pub struct Waiter(Arc<SpinMutex<WaitVariable<bool>>>);
+
+ impl Waiter {
+ /// Wait for a notification. If `Notifier::notify()` has already been
+ /// called, this will return immediately, otherwise the current thread
+ /// is blocked until notified.
+ pub fn wait(self) {
+ let guard = self.0.lock();
+ if *guard.lock_var() {
+ return;
+ }
+ WaitQueue::wait(guard, || {});
+ }
+ }
+
+ pub fn new() -> (Notifier, Waiter) {
+ let inner = Arc::new(SpinMutex::new(WaitVariable::new(false)));
+ (Notifier(inner.clone()), Waiter(inner))
+ }
+}
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(_stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ let mut queue_lock = task_queue::lock();
+ unsafe { usercalls::launch_thread()? };
+ let (task, handle) = task_queue::Task::new(p);
+ queue_lock.push(task);
+ Ok(Thread(handle))
+ }
+
+ pub(super) fn entry() -> JoinNotifier {
+ let mut pending_tasks = task_queue::lock();
+ let task = rtunwrap!(Some, pending_tasks.pop());
+ drop(pending_tasks); // make sure to not hold the task queue lock longer than necessary
+ task.run()
+ }
+
+ pub fn yield_now() {
+ let wait_error = rtunwrap!(Err, usercalls::wait(0, usercalls::raw::WAIT_NO));
+ rtassert!(wait_error.kind() == io::ErrorKind::WouldBlock);
+ }
+
+ pub fn set_name(_name: &CStr) {
+ // FIXME: could store this pointer in TLS somewhere
+ }
+
+ pub fn sleep(dur: Duration) {
+ usercalls::wait_timeout(0, dur, || true);
+ }
+
+ pub fn join(self) {
+ self.0.wait();
+ }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ unsupported()
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
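
`Thread::new` holds the task-queue lock while it launches a new TCS and pushes the boxed closure, so the entering thread cannot observe an empty queue; `Thread::entry` then pops the task and runs it. A simplified stand-alone model of that hand-off using an OS thread in place of a TCS launch (details are illustrative):

    use std::collections::VecDeque;
    use std::sync::{Arc, Mutex};
    use std::thread;

    type Task = Box<dyn FnOnce() + Send>;

    fn main() {
        let queue: Arc<Mutex<VecDeque<Task>>> = Arc::new(Mutex::new(VecDeque::new()));

        // "spawn": enqueue the work, then launch a worker. (The real code holds
        // the queue lock across the launch instead; pushing first gives the
        // same ordering guarantee in this sketch.)
        queue.lock().unwrap().push_back(Box::new(|| println!("running inside the new thread")));

        let q = Arc::clone(&queue);
        let worker = thread::spawn(move || {
            // "entry": take exactly one pending task and run it.
            let task = q.lock().unwrap().pop_front().expect("a task was queued before launch");
            task();
        });

        worker.join().unwrap();
    }
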
diff --git a/library/std/src/sys/sgx/thread_local_key.rs b/library/std/src/sys/sgx/thread_local_key.rs
new file mode 100644
index 000000000..b21784475
--- /dev/null
+++ b/library/std/src/sys/sgx/thread_local_key.rs
@@ -0,0 +1,28 @@
+use super::abi::tls::{Key as AbiKey, Tls};
+
+pub type Key = usize;
+
+#[inline]
+pub unsafe fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
+ Tls::create(dtor).as_usize()
+}
+
+#[inline]
+pub unsafe fn set(key: Key, value: *mut u8) {
+ Tls::set(AbiKey::from_usize(key), value)
+}
+
+#[inline]
+pub unsafe fn get(key: Key) -> *mut u8 {
+ Tls::get(AbiKey::from_usize(key))
+}
+
+#[inline]
+pub unsafe fn destroy(key: Key) {
+ Tls::destroy(AbiKey::from_usize(key))
+}
+
+#[inline]
+pub fn requires_synchronized_create() -> bool {
+ false
+}
diff --git a/library/std/src/sys/sgx/time.rs b/library/std/src/sys/sgx/time.rs
new file mode 100644
index 000000000..db4cf2804
--- /dev/null
+++ b/library/std/src/sys/sgx/time.rs
@@ -0,0 +1,46 @@
+use super::abi::usercalls;
+use crate::time::Duration;
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct Instant(Duration);
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct SystemTime(Duration);
+
+pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
+
+impl Instant {
+ pub fn now() -> Instant {
+ Instant(usercalls::insecure_time())
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.0.checked_sub(other.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant(self.0.checked_add(*other)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant(self.0.checked_sub(*other)?))
+ }
+}
+
+impl SystemTime {
+ pub fn now() -> SystemTime {
+ SystemTime(usercalls::insecure_time())
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_add(*other)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_sub(*other)?))
+ }
+}
diff --git a/library/std/src/sys/sgx/waitqueue/mod.rs b/library/std/src/sys/sgx/waitqueue/mod.rs
new file mode 100644
index 000000000..61bb11d9a
--- /dev/null
+++ b/library/std/src/sys/sgx/waitqueue/mod.rs
@@ -0,0 +1,240 @@
+//! A simple queue implementation for synchronization primitives.
+//!
+//! This queue is used to implement condition variables and mutexes.
+//!
+//! Users of this API are expected to use the `WaitVariable<T>` type. Since
+//! that type is not `Sync`, it needs to be protected by e.g., a `SpinMutex` to
+//! allow shared access.
+//!
+//! Since userspace may send spurious wake-ups, the wakeup event state is
+//! recorded in the enclave. The wakeup event state is protected by a spinlock.
+//! The queue and associated wait state are stored in a `WaitVariable`.
+
+#[cfg(test)]
+mod tests;
+
+mod spin_mutex;
+mod unsafe_list;
+
+use crate::num::NonZeroUsize;
+use crate::ops::{Deref, DerefMut};
+use crate::time::Duration;
+
+use super::abi::thread;
+use super::abi::usercalls;
+use fortanix_sgx_abi::{Tcs, EV_UNPARK, WAIT_INDEFINITE};
+
+pub use self::spin_mutex::{try_lock_or_false, SpinMutex, SpinMutexGuard};
+use self::unsafe_list::{UnsafeList, UnsafeListEntry};
+
+/// A queue entry in a `WaitQueue`.
+struct WaitEntry {
+ /// TCS address of the thread that is waiting
+ tcs: Tcs,
+ /// Whether this thread has been notified to be awoken
+ wake: bool,
+}
+
+/// Data stored alongside a `WaitQueue`. Combining them ensures accesses to the
+/// queue and the data are synchronized, since the type itself is not `Sync`.
+///
+/// Consumers of this API should use a synchronization primitive for shared
+/// access, such as `SpinMutex`.
+#[derive(Default)]
+pub struct WaitVariable<T> {
+ queue: WaitQueue,
+ lock: T,
+}
+
+impl<T> WaitVariable<T> {
+ pub const fn new(var: T) -> Self {
+ WaitVariable { queue: WaitQueue::new(), lock: var }
+ }
+
+ pub fn queue_empty(&self) -> bool {
+ self.queue.is_empty()
+ }
+
+ pub fn lock_var(&self) -> &T {
+ &self.lock
+ }
+
+ pub fn lock_var_mut(&mut self) -> &mut T {
+ &mut self.lock
+ }
+}
+
+#[derive(Copy, Clone)]
+pub enum NotifiedTcs {
+ Single(Tcs),
+ All { count: NonZeroUsize },
+}
+
+/// An RAII guard that will notify a set of target threads as well as unlock
+/// a mutex on drop.
+pub struct WaitGuard<'a, T: 'a> {
+ mutex_guard: Option<SpinMutexGuard<'a, WaitVariable<T>>>,
+ notified_tcs: NotifiedTcs,
+}
+
+/// A queue of threads that are waiting on some synchronization primitive.
+///
+/// `UnsafeList` entries are allocated on the waiting thread's stack. This
+/// avoids any global locking that might happen in the heap allocator. This is
+/// safe because the waiting thread will not return from that stack frame until
+/// after it is notified. The notifying thread takes care to drop any
+/// references to the list entries before sending the wakeup event.
+pub struct WaitQueue {
+ // We use an inner Mutex here to protect the data in the face of spurious
+ // wakeups.
+ inner: UnsafeList<SpinMutex<WaitEntry>>,
+}
+unsafe impl Send for WaitQueue {}
+
+impl Default for WaitQueue {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<'a, T> WaitGuard<'a, T> {
+ /// Returns which TCSes will be notified when this guard drops.
+ pub fn notified_tcs(&self) -> NotifiedTcs {
+ self.notified_tcs
+ }
+
+ /// Drop this `WaitGuard`, after dropping another `guard`.
+ pub fn drop_after<U>(self, guard: U) {
+ drop(guard);
+ drop(self);
+ }
+}
+
+impl<'a, T> Deref for WaitGuard<'a, T> {
+ type Target = SpinMutexGuard<'a, WaitVariable<T>>;
+
+ fn deref(&self) -> &Self::Target {
+ self.mutex_guard.as_ref().unwrap()
+ }
+}
+
+impl<'a, T> DerefMut for WaitGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.mutex_guard.as_mut().unwrap()
+ }
+}
+
+impl<'a, T> Drop for WaitGuard<'a, T> {
+ fn drop(&mut self) {
+ drop(self.mutex_guard.take());
+ let target_tcs = match self.notified_tcs {
+ NotifiedTcs::Single(tcs) => Some(tcs),
+ NotifiedTcs::All { .. } => None,
+ };
+ rtunwrap!(Ok, usercalls::send(EV_UNPARK, target_tcs));
+ }
+}
+
+impl WaitQueue {
+ pub const fn new() -> Self {
+ WaitQueue { inner: UnsafeList::new() }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+
+ /// Adds the calling thread to the `WaitVariable`'s wait queue, then waits
+ /// until a wakeup event.
+ ///
+ /// This function does not return until this thread has been awoken.
+ pub fn wait<T, F: FnOnce()>(mut guard: SpinMutexGuard<'_, WaitVariable<T>>, before_wait: F) {
+ // very unsafe: check requirements of UnsafeList::push
+ unsafe {
+ let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
+ tcs: thread::current(),
+ wake: false,
+ }));
+ let entry = guard.queue.inner.push(&mut entry);
+ drop(guard);
+ before_wait();
+ while !entry.lock().wake {
+ // don't panic, this would invalidate `entry` during unwinding
+ let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE));
+ rtassert!(eventset & EV_UNPARK == EV_UNPARK);
+ }
+ }
+ }
+
+ /// Adds the calling thread to the `WaitVariable`'s wait queue, then waits
+ /// until a wakeup event or a timeout. Returns true if the event was observed;
+ /// otherwise, the calling thread is removed from the wait queue.
+ pub fn wait_timeout<T, F: FnOnce()>(
+ lock: &SpinMutex<WaitVariable<T>>,
+ timeout: Duration,
+ before_wait: F,
+ ) -> bool {
+ // very unsafe: check requirements of UnsafeList::push
+ unsafe {
+ let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
+ tcs: thread::current(),
+ wake: false,
+ }));
+ let entry_lock = lock.lock().queue.inner.push(&mut entry);
+ before_wait();
+ usercalls::wait_timeout(EV_UNPARK, timeout, || entry_lock.lock().wake);
+ // acquire the wait queue's lock first to avoid deadlock.
+ let mut guard = lock.lock();
+ let success = entry_lock.lock().wake;
+ if !success {
+ // nobody is waking us up, so remove our entry from the wait queue.
+ guard.queue.inner.remove(&mut entry);
+ }
+ success
+ }
+ }
+
+ /// Either find the next waiter on the wait queue, or return the mutex
+ /// guard unchanged.
+ ///
+ /// If a waiter is found, a `WaitGuard` is returned which will notify the
+ /// waiter when it is dropped.
+ pub fn notify_one<T>(
+ mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
+ ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
+ unsafe {
+ if let Some(entry) = guard.queue.inner.pop() {
+ let mut entry_guard = entry.lock();
+ let tcs = entry_guard.tcs;
+ entry_guard.wake = true;
+ drop(entry);
+ Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::Single(tcs) })
+ } else {
+ Err(guard)
+ }
+ }
+ }
+
+ /// Either find any and all waiters on the wait queue, or return the mutex
+ /// guard unchanged.
+ ///
+ /// If at least one waiter is found, a `WaitGuard` is returned which will
+ /// notify all waiters when it is dropped.
+ pub fn notify_all<T>(
+ mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
+ ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
+ unsafe {
+ let mut count = 0;
+ while let Some(entry) = guard.queue.inner.pop() {
+ count += 1;
+ let mut entry_guard = entry.lock();
+ entry_guard.wake = true;
+ }
+ if let Some(count) = NonZeroUsize::new(count) {
+ Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } })
+ } else {
+ Err(guard)
+ }
+ }
+ }
+}
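
`WaitGuard` defers the actual `EV_UNPARK` usercall to its `Drop` impl, so waiters are only woken once the guard, and with it the protected state, has been released. A stand-alone sketch of that notify-on-drop shape with a hypothetical `NotifyOnDrop` type:

    struct NotifyOnDrop {
        target: Option<u32>, // Some(tcs) for one thread, None for "everyone"
    }

    impl Drop for NotifyOnDrop {
        fn drop(&mut self) {
            // The real code sends EV_UNPARK via a usercall here; printing
            // stands in for that side effect.
            match self.target {
                Some(tcs) => println!("unpark thread {tcs}"),
                None => println!("unpark all waiting threads"),
            }
        }
    }

    fn notify_one(tcs: u32) -> NotifyOnDrop {
        NotifyOnDrop { target: Some(tcs) }
    }

    fn main() {
        let guard = notify_one(3);
        println!("still holding the lock, nothing woken yet");
        drop(guard); // the wakeup happens exactly here
    }
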
diff --git a/library/std/src/sys/sgx/waitqueue/spin_mutex.rs b/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
new file mode 100644
index 000000000..f6e851cca
--- /dev/null
+++ b/library/std/src/sys/sgx/waitqueue/spin_mutex.rs
@@ -0,0 +1,80 @@
+//! Trivial spinlock-based implementation of `sync::Mutex`.
+// FIXME: Perhaps use Intel TSX to avoid locking?
+
+#[cfg(test)]
+mod tests;
+
+use crate::cell::UnsafeCell;
+use crate::hint;
+use crate::ops::{Deref, DerefMut};
+use crate::sync::atomic::{AtomicBool, Ordering};
+
+#[derive(Default)]
+pub struct SpinMutex<T> {
+ value: UnsafeCell<T>,
+ lock: AtomicBool,
+}
+
+unsafe impl<T: Send> Send for SpinMutex<T> {}
+unsafe impl<T: Send> Sync for SpinMutex<T> {}
+
+pub struct SpinMutexGuard<'a, T: 'a> {
+ mutex: &'a SpinMutex<T>,
+}
+
+impl<'a, T> !Send for SpinMutexGuard<'a, T> {}
+unsafe impl<'a, T: Sync> Sync for SpinMutexGuard<'a, T> {}
+
+impl<T> SpinMutex<T> {
+ pub const fn new(value: T) -> Self {
+ SpinMutex { value: UnsafeCell::new(value), lock: AtomicBool::new(false) }
+ }
+
+ #[inline(always)]
+ pub fn lock(&self) -> SpinMutexGuard<'_, T> {
+ loop {
+ match self.try_lock() {
+ None => {
+ while self.lock.load(Ordering::Relaxed) {
+ hint::spin_loop()
+ }
+ }
+ Some(guard) => return guard,
+ }
+ }
+ }
+
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<SpinMutexGuard<'_, T>> {
+ if self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Acquire).is_ok() {
+ Some(SpinMutexGuard { mutex: self })
+ } else {
+ None
+ }
+ }
+}
+
+/// Lock the Mutex or return false.
+pub macro try_lock_or_false($e:expr) {
+ if let Some(v) = $e.try_lock() { v } else { return false }
+}
+
+impl<'a, T> Deref for SpinMutexGuard<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.mutex.value.get() }
+ }
+}
+
+impl<'a, T> DerefMut for SpinMutexGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.mutex.value.get() }
+ }
+}
+
+impl<'a, T> Drop for SpinMutexGuard<'a, T> {
+ fn drop(&mut self) {
+ self.mutex.lock.store(false, Ordering::Release)
+ }
+}
diff --git a/library/std/src/sys/sgx/waitqueue/spin_mutex/tests.rs b/library/std/src/sys/sgx/waitqueue/spin_mutex/tests.rs
new file mode 100644
index 000000000..4c5994bea
--- /dev/null
+++ b/library/std/src/sys/sgx/waitqueue/spin_mutex/tests.rs
@@ -0,0 +1,23 @@
+#![allow(deprecated)]
+
+use super::*;
+use crate::sync::Arc;
+use crate::thread;
+use crate::time::Duration;
+
+#[test]
+fn sleep() {
+ let mutex = Arc::new(SpinMutex::<i32>::default());
+ let mutex2 = mutex.clone();
+ let guard = mutex.lock();
+ let t1 = thread::spawn(move || {
+ *mutex2.lock() = 1;
+ });
+
+ thread::sleep(Duration::from_millis(50));
+
+ assert_eq!(*guard, 0);
+ drop(guard);
+ t1.join().unwrap();
+ assert_eq!(*mutex.lock(), 1);
+}
diff --git a/library/std/src/sys/sgx/waitqueue/tests.rs b/library/std/src/sys/sgx/waitqueue/tests.rs
new file mode 100644
index 000000000..bf91fdd08
--- /dev/null
+++ b/library/std/src/sys/sgx/waitqueue/tests.rs
@@ -0,0 +1,20 @@
+use super::*;
+use crate::sync::Arc;
+use crate::thread;
+
+#[test]
+fn queue() {
+ let wq = Arc::new(SpinMutex::<WaitVariable<()>>::default());
+ let wq2 = wq.clone();
+
+ let locked = wq.lock();
+
+ let t1 = thread::spawn(move || {
+ // if we obtain the lock, the main thread should be waiting
+ assert!(WaitQueue::notify_one(wq2.lock()).is_ok());
+ });
+
+ WaitQueue::wait(locked, || {});
+
+ t1.join().unwrap();
+}
diff --git a/library/std/src/sys/sgx/waitqueue/unsafe_list.rs b/library/std/src/sys/sgx/waitqueue/unsafe_list.rs
new file mode 100644
index 000000000..c736cab57
--- /dev/null
+++ b/library/std/src/sys/sgx/waitqueue/unsafe_list.rs
@@ -0,0 +1,156 @@
+//! A doubly-linked list where callers are in charge of memory allocation
+//! of the nodes in the list.
+
+#[cfg(test)]
+mod tests;
+
+use crate::mem;
+use crate::ptr::NonNull;
+
+pub struct UnsafeListEntry<T> {
+ next: NonNull<UnsafeListEntry<T>>,
+ prev: NonNull<UnsafeListEntry<T>>,
+ value: Option<T>,
+}
+
+impl<T> UnsafeListEntry<T> {
+ fn dummy() -> Self {
+ UnsafeListEntry { next: NonNull::dangling(), prev: NonNull::dangling(), value: None }
+ }
+
+ pub fn new(value: T) -> Self {
+ UnsafeListEntry { value: Some(value), ..Self::dummy() }
+ }
+}
+
+// WARNING: self-referential struct!
+pub struct UnsafeList<T> {
+ head_tail: NonNull<UnsafeListEntry<T>>,
+ head_tail_entry: Option<UnsafeListEntry<T>>,
+}
+
+impl<T> UnsafeList<T> {
+ pub const fn new() -> Self {
+ unsafe { UnsafeList { head_tail: NonNull::new_unchecked(1 as _), head_tail_entry: None } }
+ }
+
+ /// # Safety
+ unsafe fn init(&mut self) {
+ if self.head_tail_entry.is_none() {
+ self.head_tail_entry = Some(UnsafeListEntry::dummy());
+ // SAFETY: `head_tail_entry` must be non-null, which it is because we assign it above.
+ self.head_tail =
+ unsafe { NonNull::new_unchecked(self.head_tail_entry.as_mut().unwrap()) };
+ // SAFETY: `self.head_tail` must meet all requirements for a mutable reference.
+ unsafe { self.head_tail.as_mut() }.next = self.head_tail;
+ unsafe { self.head_tail.as_mut() }.prev = self.head_tail;
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ if self.head_tail_entry.is_some() {
+ let first = unsafe { self.head_tail.as_ref() }.next;
+ if first == self.head_tail {
+ // ,-------> /---------\ next ---,
+ // | |head_tail| |
+ // `--- prev \---------/ <-------`
+ // SAFETY: `self.head_tail` must meet all requirements for a reference.
+ unsafe { rtassert!(self.head_tail.as_ref().prev == first) };
+ true
+ } else {
+ false
+ }
+ } else {
+ true
+ }
+ }
+
+ /// Pushes an entry onto the back of the list.
+ ///
+ /// # Safety
+ ///
+ /// The entry must remain allocated until the entry is removed from the
+ /// list AND the caller who popped is done using the entry. Special
+ /// care must be taken in the caller of `push` to ensure unwinding does
+ /// not destroy the stack frame containing the entry.
+ pub unsafe fn push<'a>(&mut self, entry: &'a mut UnsafeListEntry<T>) -> &'a T {
+ unsafe { self.init() };
+
+ // BEFORE:
+ // /---------\ next ---> /---------\
+ // ... |prev_tail| |head_tail| ...
+ // \---------/ <--- prev \---------/
+ //
+ // AFTER:
+ // /---------\ next ---> /-----\ next ---> /---------\
+ // ... |prev_tail| |entry| |head_tail| ...
+ // \---------/ <--- prev \-----/ <--- prev \---------/
+ let mut entry = unsafe { NonNull::new_unchecked(entry) };
+ let mut prev_tail = mem::replace(&mut unsafe { self.head_tail.as_mut() }.prev, entry);
+ // SAFETY: `entry` must meet all requirements for a mutable reference.
+ unsafe { entry.as_mut() }.prev = prev_tail;
+ unsafe { entry.as_mut() }.next = self.head_tail;
+ // SAFETY: `prev_tail` must meet all requirements for a mutable reference.
+ unsafe { prev_tail.as_mut() }.next = entry;
+ // unwrap ok: always `Some` on non-dummy entries
+ unsafe { (*entry.as_ptr()).value.as_ref() }.unwrap()
+ }
+
+ /// Pops an entry from the front of the list.
+ ///
+ /// # Safety
+ ///
+ /// The caller must make sure to synchronize ending the borrow of the
+ /// return value and deallocation of the containing entry.
+ pub unsafe fn pop<'a>(&mut self) -> Option<&'a T> {
+ unsafe { self.init() };
+
+ if self.is_empty() {
+ None
+ } else {
+ // BEFORE:
+ // /---------\ next ---> /-----\ next ---> /------\
+ // ... |head_tail| |first| |second| ...
+ // \---------/ <--- prev \-----/ <--- prev \------/
+ //
+ // AFTER:
+ // /---------\ next ---> /------\
+ // ... |head_tail| |second| ...
+ // \---------/ <--- prev \------/
+ let mut first = unsafe { self.head_tail.as_mut() }.next;
+ let mut second = unsafe { first.as_mut() }.next;
+ unsafe { self.head_tail.as_mut() }.next = second;
+ unsafe { second.as_mut() }.prev = self.head_tail;
+ unsafe { first.as_mut() }.next = NonNull::dangling();
+ unsafe { first.as_mut() }.prev = NonNull::dangling();
+ // unwrap ok: always `Some` on non-dummy entries
+ Some(unsafe { (*first.as_ptr()).value.as_ref() }.unwrap())
+ }
+ }
+
+ /// Removes an entry from the list.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `entry` has been pushed onto `self`
+ /// prior to this call and has not moved since then.
+ pub unsafe fn remove(&mut self, entry: &mut UnsafeListEntry<T>) {
+ rtassert!(!self.is_empty());
+ // BEFORE:
+ // /----\ next ---> /-----\ next ---> /----\
+ // ... |prev| |entry| |next| ...
+ // \----/ <--- prev \-----/ <--- prev \----/
+ //
+ // AFTER:
+ // /----\ next ---> /----\
+ // ... |prev| |next| ...
+ // \----/ <--- prev \----/
+ let mut prev = entry.prev;
+ let mut next = entry.next;
+        // SAFETY: `prev` and `next` must meet all requirements for a mutable reference.
+ unsafe { prev.as_mut() }.next = next;
+ unsafe { next.as_mut() }.prev = prev;
+ entry.next = NonNull::dangling();
+ entry.prev = NonNull::dangling();
+ }
+}
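
A hedged sketch of the intended calling pattern for this intrusive list, with the caller providing node storage on its own stack frame (the function and variable names are illustrative and do not appear in the patch):

    fn push_then_pop() {
        let mut entry = UnsafeListEntry::new(42u32);
        let mut list = UnsafeList::new();
        // SAFETY: `entry` and `list` live in the same stack frame, so the
        // entry cannot be deallocated while it is still linked, and the
        // reference returned by `push` is only used before the entry is
        // popped off again.
        unsafe {
            let value = list.push(&mut entry);
            assert_eq!(*value, 42);
            assert_eq!(list.pop(), Some(&42));
        }
    }
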
diff --git a/library/std/src/sys/sgx/waitqueue/unsafe_list/tests.rs b/library/std/src/sys/sgx/waitqueue/unsafe_list/tests.rs
new file mode 100644
index 000000000..c653dee17
--- /dev/null
+++ b/library/std/src/sys/sgx/waitqueue/unsafe_list/tests.rs
@@ -0,0 +1,105 @@
+use super::*;
+use crate::cell::Cell;
+
+/// # Safety
+/// List must be valid.
+unsafe fn assert_empty<T>(list: &mut UnsafeList<T>) {
+ assert!(unsafe { list.pop() }.is_none(), "assertion failed: list is not empty");
+}
+
+#[test]
+fn init_empty() {
+ unsafe {
+ assert_empty(&mut UnsafeList::<i32>::new());
+ }
+}
+
+#[test]
+fn push_pop() {
+ unsafe {
+ let mut node = UnsafeListEntry::new(1234);
+ let mut list = UnsafeList::new();
+ assert_eq!(list.push(&mut node), &1234);
+ assert_eq!(list.pop().unwrap(), &1234);
+ assert_empty(&mut list);
+ }
+}
+
+#[test]
+fn push_remove() {
+ unsafe {
+ let mut node = UnsafeListEntry::new(1234);
+ let mut list = UnsafeList::new();
+ assert_eq!(list.push(&mut node), &1234);
+ list.remove(&mut node);
+ assert_empty(&mut list);
+ }
+}
+
+#[test]
+fn push_remove_pop() {
+ unsafe {
+ let mut node1 = UnsafeListEntry::new(11);
+ let mut node2 = UnsafeListEntry::new(12);
+ let mut node3 = UnsafeListEntry::new(13);
+ let mut node4 = UnsafeListEntry::new(14);
+ let mut node5 = UnsafeListEntry::new(15);
+ let mut list = UnsafeList::new();
+ assert_eq!(list.push(&mut node1), &11);
+ assert_eq!(list.push(&mut node2), &12);
+ assert_eq!(list.push(&mut node3), &13);
+ assert_eq!(list.push(&mut node4), &14);
+ assert_eq!(list.push(&mut node5), &15);
+
+ list.remove(&mut node1);
+ assert_eq!(list.pop().unwrap(), &12);
+ list.remove(&mut node3);
+ assert_eq!(list.pop().unwrap(), &14);
+ list.remove(&mut node5);
+ assert_empty(&mut list);
+
+ assert_eq!(list.push(&mut node1), &11);
+ assert_eq!(list.pop().unwrap(), &11);
+ assert_empty(&mut list);
+
+ assert_eq!(list.push(&mut node3), &13);
+ assert_eq!(list.push(&mut node4), &14);
+ list.remove(&mut node3);
+ list.remove(&mut node4);
+ assert_empty(&mut list);
+ }
+}
+
+#[test]
+fn complex_pushes_pops() {
+ unsafe {
+ let mut node1 = UnsafeListEntry::new(1234);
+ let mut node2 = UnsafeListEntry::new(4567);
+ let mut node3 = UnsafeListEntry::new(9999);
+ let mut node4 = UnsafeListEntry::new(8642);
+ let mut list = UnsafeList::new();
+ list.push(&mut node1);
+ list.push(&mut node2);
+ assert_eq!(list.pop().unwrap(), &1234);
+ list.push(&mut node3);
+ assert_eq!(list.pop().unwrap(), &4567);
+ assert_eq!(list.pop().unwrap(), &9999);
+ assert_empty(&mut list);
+ list.push(&mut node4);
+ assert_eq!(list.pop().unwrap(), &8642);
+ assert_empty(&mut list);
+ }
+}
+
+#[test]
+fn cell() {
+ unsafe {
+ let mut node = UnsafeListEntry::new(Cell::new(0));
+ let mut list = UnsafeList::new();
+ let noderef = list.push(&mut node);
+ assert_eq!(noderef.get(), 0);
+ list.pop().unwrap().set(1);
+ assert_empty(&mut list);
+ assert_eq!(noderef.get(), 1);
+ }
+}
diff --git a/library/std/src/sys/solid/abi/fs.rs b/library/std/src/sys/solid/abi/fs.rs
new file mode 100644
index 000000000..32800bd9a
--- /dev/null
+++ b/library/std/src/sys/solid/abi/fs.rs
@@ -0,0 +1,53 @@
+//! `solid_fs.h`
+use crate::os::raw::{c_char, c_int, c_uchar};
+pub use libc::{
+ blksize_t, dev_t, ino_t, off_t, stat, time_t, O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR,
+ O_TRUNC, O_WRONLY, SEEK_CUR, SEEK_END, SEEK_SET, S_IEXEC, S_IFBLK, S_IFCHR, S_IFDIR, S_IFIFO,
+ S_IFMT, S_IFREG, S_IREAD, S_IWRITE,
+};
+
+pub const O_ACCMODE: c_int = 0x3;
+
+pub const SOLID_MAX_PATH: usize = 256;
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct dirent {
+ pub d_ino: ino_t,
+ pub d_type: c_uchar,
+ pub d_name: [c_char; 256usize],
+}
+
+pub const DT_UNKNOWN: c_uchar = 0;
+pub const DT_FIFO: c_uchar = 1;
+pub const DT_CHR: c_uchar = 2;
+pub const DT_DIR: c_uchar = 4;
+pub const DT_BLK: c_uchar = 6;
+pub const DT_REG: c_uchar = 8;
+pub const DT_LNK: c_uchar = 10;
+pub const DT_SOCK: c_uchar = 12;
+pub const DT_WHT: c_uchar = 14;
+
+pub type S_DIR = c_int;
+
+extern "C" {
+ pub fn SOLID_FS_Open(fd: *mut c_int, path: *const c_char, mode: c_int) -> c_int;
+ pub fn SOLID_FS_Close(fd: c_int) -> c_int;
+ pub fn SOLID_FS_Read(fd: c_int, buf: *mut u8, size: usize, result: *mut usize) -> c_int;
+ pub fn SOLID_FS_Write(fd: c_int, buf: *const u8, size: usize, result: *mut usize) -> c_int;
+ pub fn SOLID_FS_Lseek(fd: c_int, offset: off_t, whence: c_int) -> c_int;
+ pub fn SOLID_FS_Sync(fd: c_int) -> c_int;
+ pub fn SOLID_FS_Ftell(fd: c_int, result: *mut off_t) -> c_int;
+ pub fn SOLID_FS_Feof(fd: c_int, result: *mut c_int) -> c_int;
+ pub fn SOLID_FS_Fsize(fd: c_int, result: *mut usize) -> c_int;
+ pub fn SOLID_FS_Truncate(path: *const c_char, size: off_t) -> c_int;
+ pub fn SOLID_FS_OpenDir(path: *const c_char, pDir: *mut S_DIR) -> c_int;
+ pub fn SOLID_FS_CloseDir(dir: S_DIR) -> c_int;
+ pub fn SOLID_FS_ReadDir(dir: S_DIR, dirp: *mut dirent) -> c_int;
+ pub fn SOLID_FS_Stat(path: *const c_char, buf: *mut stat) -> c_int;
+ pub fn SOLID_FS_Unlink(path: *const c_char) -> c_int;
+ pub fn SOLID_FS_Rename(oldpath: *const c_char, newpath: *const c_char) -> c_int;
+ pub fn SOLID_FS_Chmod(path: *const c_char, mode: c_int) -> c_int;
+ pub fn SOLID_FS_Utime(path: *const c_char, time: time_t) -> c_int;
+ pub fn SOLID_FS_Mkdir(path: *const c_char) -> c_int;
+}
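
These `SOLID_FS_*` entry points follow an out-parameter convention: the return value is a SOLID error code, and results come back through pointers. A hedged sketch of calling one directly (the wrapper name is made up; real code goes through `sys::solid::fs`):

    fn file_size(fd: c_int) -> Result<usize, c_int> {
        let mut size = core::mem::MaybeUninit::<usize>::uninit();
        // SAFETY: `size` is a valid, writable out-pointer for the whole call.
        let ret = unsafe { SOLID_FS_Fsize(fd, size.as_mut_ptr()) };
        // SAFETY: a non-negative return value means the callee filled `size`.
        if ret < 0 { Err(ret) } else { Ok(unsafe { size.assume_init() }) }
    }
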
diff --git a/library/std/src/sys/solid/abi/mod.rs b/library/std/src/sys/solid/abi/mod.rs
new file mode 100644
index 000000000..8440d572c
--- /dev/null
+++ b/library/std/src/sys/solid/abi/mod.rs
@@ -0,0 +1,65 @@
+use crate::os::raw::c_int;
+
+mod fs;
+pub mod sockets;
+pub use self::fs::*;
+
+// `solid_types.h`
+pub use super::itron::abi::{ER, ER_ID, E_TMOUT, ID};
+
+pub const SOLID_ERR_NOTFOUND: ER = -1000;
+pub const SOLID_ERR_NOTSUPPORTED: ER = -1001;
+pub const SOLID_ERR_EBADF: ER = -1002;
+pub const SOLID_ERR_INVALIDCONTENT: ER = -1003;
+pub const SOLID_ERR_NOTUSED: ER = -1004;
+pub const SOLID_ERR_ALREADYUSED: ER = -1005;
+pub const SOLID_ERR_OUTOFBOUND: ER = -1006;
+pub const SOLID_ERR_BADSEQUENCE: ER = -1007;
+pub const SOLID_ERR_UNKNOWNDEVICE: ER = -1008;
+pub const SOLID_ERR_BUSY: ER = -1009;
+pub const SOLID_ERR_TIMEOUT: ER = -1010;
+pub const SOLID_ERR_INVALIDACCESS: ER = -1011;
+pub const SOLID_ERR_NOTREADY: ER = -1012;
+
+// `solid_rtc.h`
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct SOLID_RTC_TIME {
+ pub tm_sec: c_int,
+ pub tm_min: c_int,
+ pub tm_hour: c_int,
+ pub tm_mday: c_int,
+ pub tm_mon: c_int,
+ pub tm_year: c_int,
+ pub tm_wday: c_int,
+}
+
+extern "C" {
+ pub fn SOLID_RTC_ReadTime(time: *mut SOLID_RTC_TIME) -> c_int;
+}
+
+// `solid_log.h`
+extern "C" {
+ pub fn SOLID_LOG_write(s: *const u8, l: usize);
+}
+
+// `solid_mem.h`
+extern "C" {
+ pub fn SOLID_TLS_AddDestructor(id: i32, dtor: unsafe extern "C" fn(*mut u8));
+}
+
+// `solid_rng.h`
+extern "C" {
+ pub fn SOLID_RNG_SampleRandomBytes(buffer: *mut u8, length: usize) -> c_int;
+}
+
+// `rwlock.h`
+extern "C" {
+ pub fn rwl_loc_rdl(id: ID) -> ER;
+ pub fn rwl_loc_wrl(id: ID) -> ER;
+ pub fn rwl_ploc_rdl(id: ID) -> ER;
+ pub fn rwl_ploc_wrl(id: ID) -> ER;
+ pub fn rwl_unl_rwl(id: ID) -> ER;
+ pub fn rwl_acre_rwl() -> ER_ID;
+ pub fn rwl_del_rwl(id: ID) -> ER;
+}
diff --git a/library/std/src/sys/solid/abi/sockets.rs b/library/std/src/sys/solid/abi/sockets.rs
new file mode 100644
index 000000000..eb06a6dd9
--- /dev/null
+++ b/library/std/src/sys/solid/abi/sockets.rs
@@ -0,0 +1,277 @@
+use crate::os::raw::{c_char, c_uint, c_void};
+pub use libc::{c_int, c_long, size_t, ssize_t, suseconds_t, time_t, timeval};
+
+pub const SOLID_NET_ERR_BASE: c_int = -2000;
+pub const EINPROGRESS: c_int = SOLID_NET_ERR_BASE - libc::EINPROGRESS;
+
+pub const AF_INET6: i32 = 10;
+pub const AF_INET: i32 = 2;
+pub const IPPROTO_IP: i32 = 0;
+pub const IPPROTO_IPV6: i32 = 41;
+pub const IPPROTO_TCP: i32 = 6;
+pub const IPV6_ADD_MEMBERSHIP: i32 = 12;
+pub const IPV6_DROP_MEMBERSHIP: i32 = 13;
+pub const IPV6_MULTICAST_LOOP: i32 = 19;
+pub const IPV6_V6ONLY: i32 = 27;
+pub const IP_TTL: i32 = 2;
+pub const IP_MULTICAST_TTL: i32 = 5;
+pub const IP_MULTICAST_LOOP: i32 = 7;
+pub const IP_ADD_MEMBERSHIP: i32 = 3;
+pub const IP_DROP_MEMBERSHIP: i32 = 4;
+pub const SHUT_RD: i32 = 0;
+pub const SHUT_RDWR: i32 = 2;
+pub const SHUT_WR: i32 = 1;
+pub const SOCK_DGRAM: i32 = 2;
+pub const SOCK_STREAM: i32 = 1;
+pub const SOL_SOCKET: i32 = 4095;
+pub const SO_BROADCAST: i32 = 32;
+pub const SO_ERROR: i32 = 4103;
+pub const SO_RCVTIMEO: i32 = 4102;
+pub const SO_REUSEADDR: i32 = 4;
+pub const SO_SNDTIMEO: i32 = 4101;
+pub const SO_LINGER: i32 = 128;
+pub const TCP_NODELAY: i32 = 1;
+pub const MSG_PEEK: c_int = 1;
+pub const FIONBIO: c_long = 0x8008667eu32 as c_long;
+pub const EAI_NONAME: i32 = -2200;
+pub const EAI_SERVICE: i32 = -2201;
+pub const EAI_FAIL: i32 = -2202;
+pub const EAI_MEMORY: i32 = -2203;
+pub const EAI_FAMILY: i32 = -2204;
+
+pub type sa_family_t = u8;
+pub type socklen_t = u32;
+pub type in_addr_t = u32;
+pub type in_port_t = u16;
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct in_addr {
+ pub s_addr: in_addr_t,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct in6_addr {
+ pub s6_addr: [u8; 16],
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct ip_mreq {
+ pub imr_multiaddr: in_addr,
+ pub imr_interface: in_addr,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct ipv6_mreq {
+ pub ipv6mr_multiaddr: in6_addr,
+ pub ipv6mr_interface: c_uint,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct msghdr {
+ pub msg_name: *mut c_void,
+ pub msg_namelen: socklen_t,
+ pub msg_iov: *mut iovec,
+ pub msg_iovlen: c_int,
+ pub msg_control: *mut c_void,
+ pub msg_controllen: socklen_t,
+ pub msg_flags: c_int,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sockaddr {
+ pub sa_len: u8,
+ pub sa_family: sa_family_t,
+ pub sa_data: [c_char; 14usize],
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sockaddr_in {
+ pub sin_len: u8,
+ pub sin_family: sa_family_t,
+ pub sin_port: in_port_t,
+ pub sin_addr: in_addr,
+ pub sin_zero: [c_char; 8usize],
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct sockaddr_in6 {
+ pub sin6_len: u8,
+ pub sin6_family: sa_family_t,
+ pub sin6_port: in_port_t,
+ pub sin6_flowinfo: u32,
+ pub sin6_addr: in6_addr,
+ pub sin6_scope_id: u32,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct sockaddr_storage {
+ pub s2_len: u8,
+ pub ss_family: sa_family_t,
+ pub s2_data1: [c_char; 2usize],
+ pub s2_data2: [u32; 3usize],
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct addrinfo {
+ pub ai_flags: c_int,
+ pub ai_family: c_int,
+ pub ai_socktype: c_int,
+ pub ai_protocol: c_int,
+ pub ai_addrlen: socklen_t,
+ pub ai_addr: *mut sockaddr,
+ pub ai_canonname: *mut c_char,
+ pub ai_next: *mut addrinfo,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct linger {
+ pub l_onoff: c_int,
+ pub l_linger: c_int,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct iovec {
+ pub iov_base: *mut c_void,
+ pub iov_len: usize,
+}
+
+/// This value can be chosen by an application.
+pub const SOLID_NET_FD_SETSIZE: usize = 1;
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct fd_set {
+ pub num_fds: usize,
+ pub fds: [c_int; SOLID_NET_FD_SETSIZE],
+}
+
+extern "C" {
+ #[link_name = "SOLID_NET_StrError"]
+ pub fn strerror(errnum: c_int) -> *const c_char;
+
+ pub fn SOLID_NET_GetLastError() -> c_int;
+
+ #[link_name = "SOLID_NET_Accept"]
+ pub fn accept(s: c_int, addr: *mut sockaddr, addrlen: *mut socklen_t) -> c_int;
+
+ #[link_name = "SOLID_NET_Bind"]
+ pub fn bind(s: c_int, name: *const sockaddr, namelen: socklen_t) -> c_int;
+
+ #[link_name = "SOLID_NET_Connect"]
+ pub fn connect(s: c_int, name: *const sockaddr, namelen: socklen_t) -> c_int;
+
+ #[link_name = "SOLID_NET_Close"]
+ pub fn close(s: c_int) -> c_int;
+
+ #[link_name = "SOLID_NET_Dup"]
+ pub fn dup(s: c_int) -> c_int;
+
+ #[link_name = "SOLID_NET_GetPeerName"]
+ pub fn getpeername(s: c_int, name: *mut sockaddr, namelen: *mut socklen_t) -> c_int;
+
+ #[link_name = "SOLID_NET_GetSockName"]
+ pub fn getsockname(s: c_int, name: *mut sockaddr, namelen: *mut socklen_t) -> c_int;
+
+ #[link_name = "SOLID_NET_GetSockOpt"]
+ pub fn getsockopt(
+ s: c_int,
+ level: c_int,
+ optname: c_int,
+ optval: *mut c_void,
+ optlen: *mut socklen_t,
+ ) -> c_int;
+
+ #[link_name = "SOLID_NET_SetSockOpt"]
+ pub fn setsockopt(
+ s: c_int,
+ level: c_int,
+ optname: c_int,
+ optval: *const c_void,
+ optlen: socklen_t,
+ ) -> c_int;
+
+ #[link_name = "SOLID_NET_Ioctl"]
+ pub fn ioctl(s: c_int, cmd: c_long, argp: *mut c_void) -> c_int;
+
+ #[link_name = "SOLID_NET_Listen"]
+ pub fn listen(s: c_int, backlog: c_int) -> c_int;
+
+ #[link_name = "SOLID_NET_Recv"]
+ pub fn recv(s: c_int, mem: *mut c_void, len: size_t, flags: c_int) -> ssize_t;
+
+ #[link_name = "SOLID_NET_Read"]
+ pub fn read(s: c_int, mem: *mut c_void, len: size_t) -> ssize_t;
+
+ #[link_name = "SOLID_NET_Readv"]
+ pub fn readv(s: c_int, bufs: *const iovec, bufcnt: c_int) -> ssize_t;
+
+ #[link_name = "SOLID_NET_RecvFrom"]
+ pub fn recvfrom(
+ s: c_int,
+ mem: *mut c_void,
+ len: size_t,
+ flags: c_int,
+ from: *mut sockaddr,
+ fromlen: *mut socklen_t,
+ ) -> ssize_t;
+
+ #[link_name = "SOLID_NET_Send"]
+ pub fn send(s: c_int, mem: *const c_void, len: size_t, flags: c_int) -> ssize_t;
+
+ #[link_name = "SOLID_NET_SendMsg"]
+ pub fn sendmsg(s: c_int, message: *const msghdr, flags: c_int) -> ssize_t;
+
+ #[link_name = "SOLID_NET_SendTo"]
+ pub fn sendto(
+ s: c_int,
+ mem: *const c_void,
+ len: size_t,
+ flags: c_int,
+ to: *const sockaddr,
+ tolen: socklen_t,
+ ) -> ssize_t;
+
+ #[link_name = "SOLID_NET_Shutdown"]
+ pub fn shutdown(s: c_int, how: c_int) -> c_int;
+
+ #[link_name = "SOLID_NET_Socket"]
+ pub fn socket(domain: c_int, type_: c_int, protocol: c_int) -> c_int;
+
+ #[link_name = "SOLID_NET_Write"]
+ pub fn write(s: c_int, mem: *const c_void, len: size_t) -> ssize_t;
+
+ #[link_name = "SOLID_NET_Writev"]
+ pub fn writev(s: c_int, bufs: *const iovec, bufcnt: c_int) -> ssize_t;
+
+ #[link_name = "SOLID_NET_FreeAddrInfo"]
+ pub fn freeaddrinfo(ai: *mut addrinfo);
+
+ #[link_name = "SOLID_NET_GetAddrInfo"]
+ pub fn getaddrinfo(
+ nodename: *const c_char,
+ servname: *const c_char,
+ hints: *const addrinfo,
+ res: *mut *mut addrinfo,
+ ) -> c_int;
+
+ #[link_name = "SOLID_NET_Select"]
+ pub fn select(
+ maxfdp1: c_int,
+ readset: *mut fd_set,
+ writeset: *mut fd_set,
+ exceptset: *mut fd_set,
+ timeout: *mut timeval,
+ ) -> c_int;
+}
diff --git a/library/std/src/sys/solid/alloc.rs b/library/std/src/sys/solid/alloc.rs
new file mode 100644
index 000000000..d013bd876
--- /dev/null
+++ b/library/std/src/sys/solid/alloc.rs
@@ -0,0 +1,32 @@
+use crate::{
+ alloc::{GlobalAlloc, Layout, System},
+ sys::common::alloc::{realloc_fallback, MIN_ALIGN},
+};
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ unsafe { libc::malloc(layout.size()) as *mut u8 }
+ } else {
+ unsafe { libc::memalign(layout.align(), layout.size()) as *mut u8 }
+ }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ unsafe { libc::free(ptr as *mut libc::c_void) }
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ unsafe {
+ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+ } else {
+ realloc_fallback(self, ptr, layout, new_size)
+ }
+ }
+ }
+}
diff --git a/library/std/src/sys/solid/env.rs b/library/std/src/sys/solid/env.rs
new file mode 100644
index 000000000..6855c113b
--- /dev/null
+++ b/library/std/src/sys/solid/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "itron";
+ pub const OS: &str = "solid";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
diff --git a/library/std/src/sys/solid/error.rs b/library/std/src/sys/solid/error.rs
new file mode 100644
index 000000000..547b4f3a9
--- /dev/null
+++ b/library/std/src/sys/solid/error.rs
@@ -0,0 +1,55 @@
+use super::{abi, itron, net};
+use crate::io::ErrorKind;
+
+pub use self::itron::error::{expect_success, ItronError as SolidError};
+
+/// Describes the specified SOLID error code. Returns `None` if it's an
+/// undefined error code.
+///
+/// The SOLID error codes are a superset of μITRON error codes.
+pub fn error_name(er: abi::ER) -> Option<&'static str> {
+ match er {
+ // Success
+ er if er >= 0 => None,
+ er if er < abi::sockets::SOLID_NET_ERR_BASE => net::error_name(er),
+
+ abi::SOLID_ERR_NOTFOUND => Some("not found"),
+ abi::SOLID_ERR_NOTSUPPORTED => Some("not supported"),
+ abi::SOLID_ERR_EBADF => Some("bad flags"),
+ abi::SOLID_ERR_INVALIDCONTENT => Some("invalid content"),
+ abi::SOLID_ERR_NOTUSED => Some("not used"),
+ abi::SOLID_ERR_ALREADYUSED => Some("already used"),
+ abi::SOLID_ERR_OUTOFBOUND => Some("out of bounds"),
+ abi::SOLID_ERR_BADSEQUENCE => Some("bad sequence"),
+ abi::SOLID_ERR_UNKNOWNDEVICE => Some("unknown device"),
+ abi::SOLID_ERR_BUSY => Some("busy"),
+ abi::SOLID_ERR_TIMEOUT => Some("operation timed out"),
+ abi::SOLID_ERR_INVALIDACCESS => Some("invalid access"),
+ abi::SOLID_ERR_NOTREADY => Some("not ready"),
+
+ _ => itron::error::error_name(er),
+ }
+}
+
+pub fn decode_error_kind(er: abi::ER) -> ErrorKind {
+ match er {
+ // Success
+ er if er >= 0 => ErrorKind::Uncategorized,
+ er if er < abi::sockets::SOLID_NET_ERR_BASE => net::decode_error_kind(er),
+
+ abi::SOLID_ERR_NOTFOUND => ErrorKind::NotFound,
+ abi::SOLID_ERR_NOTSUPPORTED => ErrorKind::Unsupported,
+ abi::SOLID_ERR_EBADF => ErrorKind::InvalidInput,
+ abi::SOLID_ERR_INVALIDCONTENT => ErrorKind::InvalidData,
+ // abi::SOLID_ERR_NOTUSED
+ // abi::SOLID_ERR_ALREADYUSED
+ abi::SOLID_ERR_OUTOFBOUND => ErrorKind::InvalidInput,
+ // abi::SOLID_ERR_BADSEQUENCE
+ abi::SOLID_ERR_UNKNOWNDEVICE => ErrorKind::NotFound,
+ // abi::SOLID_ERR_BUSY
+ abi::SOLID_ERR_TIMEOUT => ErrorKind::TimedOut,
+ // abi::SOLID_ERR_INVALIDACCESS
+ // abi::SOLID_ERR_NOTREADY
+ _ => itron::error::decode_error_kind(er),
+ }
+}
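
A hedged sketch of how a raw return code would typically be surfaced by combining the two helpers above; `to_io_error` is a hypothetical adapter and assumes `abi::ER` is the 32-bit μITRON error type used on this target:

    fn to_io_error(er: abi::ER) -> crate::io::Error {
        match error_name(er) {
            // Attach the descriptive name when the code is recognized.
            Some(name) => crate::io::Error::new(decode_error_kind(er), name),
            // Otherwise fall back to the raw OS error number.
            None => crate::io::Error::from_raw_os_error(er),
        }
    }
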
diff --git a/library/std/src/sys/solid/fs.rs b/library/std/src/sys/solid/fs.rs
new file mode 100644
index 000000000..a2cbee4dc
--- /dev/null
+++ b/library/std/src/sys/solid/fs.rs
@@ -0,0 +1,574 @@
+use super::{abi, error};
+use crate::{
+ ffi::{CStr, CString, OsStr, OsString},
+ fmt,
+ io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom},
+ mem::MaybeUninit,
+ os::raw::{c_int, c_short},
+ os::solid::ffi::OsStrExt,
+ path::{Path, PathBuf},
+ sync::Arc,
+ sys::time::SystemTime,
+ sys::unsupported,
+};
+
+pub use crate::sys_common::fs::try_exists;
+
+/// A file descriptor.
+#[derive(Clone, Copy)]
+#[rustc_layout_scalar_valid_range_start(0)]
+// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
+// 32-bit c_int. Below is -2, in two's complement, but that only works out
+// because c_int is 32 bits.
+#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
+struct FileDesc {
+ fd: c_int,
+}
+
+impl FileDesc {
+ #[inline]
+ fn new(fd: c_int) -> FileDesc {
+ assert_ne!(fd, -1i32);
+ // Safety: we just asserted that the value is in the valid range and
+ // isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
+ unsafe { FileDesc { fd } }
+ }
+
+ #[inline]
+ fn raw(&self) -> c_int {
+ self.fd
+ }
+}
+
+pub struct File {
+ fd: FileDesc,
+}
+
+#[derive(Clone)]
+pub struct FileAttr {
+ stat: abi::stat,
+}
+
+// All `DirEntry`s will have a reference to this struct.
+struct InnerReadDir {
+ dirp: abi::S_DIR,
+ root: PathBuf,
+}
+
+pub struct ReadDir {
+ inner: Arc<InnerReadDir>,
+}
+
+pub struct DirEntry {
+ entry: abi::dirent,
+ inner: Arc<InnerReadDir>,
+}
+
+#[derive(Clone, Debug)]
+pub struct OpenOptions {
+ // generic
+ read: bool,
+ write: bool,
+ append: bool,
+ truncate: bool,
+ create: bool,
+ create_new: bool,
+ // system-specific
+ custom_flags: i32,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct FilePermissions(c_short);
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct FileType(c_short);
+
+#[derive(Debug)]
+pub struct DirBuilder {}
+
+impl FileAttr {
+ pub fn size(&self) -> u64 {
+ self.stat.st_size as u64
+ }
+
+ pub fn perm(&self) -> FilePermissions {
+ FilePermissions(self.stat.st_mode)
+ }
+
+ pub fn file_type(&self) -> FileType {
+ FileType(self.stat.st_mode)
+ }
+
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from_time_t(self.stat.st_mtime))
+ }
+
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from_time_t(self.stat.st_atime))
+ }
+
+ pub fn created(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from_time_t(self.stat.st_ctime))
+ }
+}
+
+impl FilePermissions {
+ pub fn readonly(&self) -> bool {
+ (self.0 & abi::S_IWRITE) == 0
+ }
+
+ pub fn set_readonly(&mut self, readonly: bool) {
+ if readonly {
+ self.0 &= !abi::S_IWRITE;
+ } else {
+ self.0 |= abi::S_IWRITE;
+ }
+ }
+}
+
+impl FileType {
+ pub fn is_dir(&self) -> bool {
+ self.is(abi::S_IFDIR)
+ }
+ pub fn is_file(&self) -> bool {
+ self.is(abi::S_IFREG)
+ }
+ pub fn is_symlink(&self) -> bool {
+ false
+ }
+
+ pub fn is(&self, mode: c_short) -> bool {
+ self.0 & abi::S_IFMT == mode
+ }
+}
+
+pub fn readdir(p: &Path) -> io::Result<ReadDir> {
+ unsafe {
+ let mut dir = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_OpenDir(
+ cstr(p)?.as_ptr(),
+ dir.as_mut_ptr(),
+ ))
+ .map_err(|e| e.as_io_error())?;
+ let inner = Arc::new(InnerReadDir { dirp: dir.assume_init(), root: p.to_owned() });
+ Ok(ReadDir { inner })
+ }
+}
+
+impl fmt::Debug for ReadDir {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This will only be called from std::fs::ReadDir, which will add a "ReadDir()" frame.
+        // Thus the result will be, e.g., 'ReadDir("/home")'.
+ fmt::Debug::fmt(&*self.inner.root, f)
+ }
+}
+
+impl Iterator for ReadDir {
+ type Item = io::Result<DirEntry>;
+
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ unsafe {
+ let mut out_dirent = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_ReadDir(
+ self.inner.dirp,
+ out_dirent.as_mut_ptr(),
+ ))
+ .ok()?;
+ Some(Ok(DirEntry { entry: out_dirent.assume_init(), inner: Arc::clone(&self.inner) }))
+ }
+ }
+}
+
+impl Drop for InnerReadDir {
+ fn drop(&mut self) {
+ unsafe { abi::SOLID_FS_CloseDir(self.dirp) };
+ }
+}
+
+impl DirEntry {
+ pub fn path(&self) -> PathBuf {
+ self.inner.root.join(OsStr::from_bytes(
+ unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) }.to_bytes(),
+ ))
+ }
+
+ pub fn file_name(&self) -> OsString {
+ OsStr::from_bytes(unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) }.to_bytes())
+ .to_os_string()
+ }
+
+ pub fn metadata(&self) -> io::Result<FileAttr> {
+ lstat(&self.path())
+ }
+
+ pub fn file_type(&self) -> io::Result<FileType> {
+ match self.entry.d_type {
+ abi::DT_CHR => Ok(FileType(abi::S_IFCHR)),
+ abi::DT_FIFO => Ok(FileType(abi::S_IFIFO)),
+ abi::DT_REG => Ok(FileType(abi::S_IFREG)),
+ abi::DT_DIR => Ok(FileType(abi::S_IFDIR)),
+ abi::DT_BLK => Ok(FileType(abi::S_IFBLK)),
+ _ => lstat(&self.path()).map(|m| m.file_type()),
+ }
+ }
+}
+
+impl OpenOptions {
+ pub fn new() -> OpenOptions {
+ OpenOptions {
+ // generic
+ read: false,
+ write: false,
+ append: false,
+ truncate: false,
+ create: false,
+ create_new: false,
+ // system-specific
+ custom_flags: 0,
+ }
+ }
+
+ pub fn read(&mut self, read: bool) {
+ self.read = read;
+ }
+ pub fn write(&mut self, write: bool) {
+ self.write = write;
+ }
+ pub fn append(&mut self, append: bool) {
+ self.append = append;
+ }
+ pub fn truncate(&mut self, truncate: bool) {
+ self.truncate = truncate;
+ }
+ pub fn create(&mut self, create: bool) {
+ self.create = create;
+ }
+ pub fn create_new(&mut self, create_new: bool) {
+ self.create_new = create_new;
+ }
+
+ pub fn custom_flags(&mut self, flags: i32) {
+ self.custom_flags = flags;
+ }
+ pub fn mode(&mut self, _mode: u32) {}
+
+ fn get_access_mode(&self) -> io::Result<c_int> {
+ match (self.read, self.write, self.append) {
+ (true, false, false) => Ok(abi::O_RDONLY),
+ (false, true, false) => Ok(abi::O_WRONLY),
+ (true, true, false) => Ok(abi::O_RDWR),
+ (false, _, true) => Ok(abi::O_WRONLY | abi::O_APPEND),
+ (true, _, true) => Ok(abi::O_RDWR | abi::O_APPEND),
+ (false, false, false) => Err(io::Error::from_raw_os_error(libc::EINVAL)),
+ }
+ }
+
+ fn get_creation_mode(&self) -> io::Result<c_int> {
+ match (self.write, self.append) {
+ (true, false) => {}
+ (false, false) => {
+ if self.truncate || self.create || self.create_new {
+ return Err(io::Error::from_raw_os_error(libc::EINVAL));
+ }
+ }
+ (_, true) => {
+ if self.truncate && !self.create_new {
+ return Err(io::Error::from_raw_os_error(libc::EINVAL));
+ }
+ }
+ }
+
+ Ok(match (self.create, self.truncate, self.create_new) {
+ (false, false, false) => 0,
+ (true, false, false) => abi::O_CREAT,
+ (false, true, false) => abi::O_TRUNC,
+ (true, true, false) => abi::O_CREAT | abi::O_TRUNC,
+ (_, _, true) => abi::O_CREAT | abi::O_EXCL,
+ })
+ }
+}
+
+fn cstr(path: &Path) -> io::Result<CString> {
+ let path = path.as_os_str().as_bytes();
+
+ if !path.starts_with(br"\") {
+ // Relative paths aren't supported
+ return Err(crate::io::const_io_error!(
+ crate::io::ErrorKind::Unsupported,
+ "relative path is not supported on this platform",
+ ));
+ }
+
+ // Apply the thread-safety wrapper
+ const SAFE_PREFIX: &[u8] = br"\TS";
+ let wrapped_path = [SAFE_PREFIX, &path, &[0]].concat();
+
+ CString::from_vec_with_nul(wrapped_path).map_err(|_| {
+ crate::io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "path provided contains a nul byte",
+ )
+ })
+}
+
+impl File {
+ pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
+ let flags = opts.get_access_mode()?
+ | opts.get_creation_mode()?
+ | (opts.custom_flags as c_int & !abi::O_ACCMODE);
+ unsafe {
+ let mut fd = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_Open(
+ fd.as_mut_ptr(),
+ cstr(path)?.as_ptr(),
+ flags,
+ ))
+ .map_err(|e| e.as_io_error())?;
+ Ok(File { fd: FileDesc::new(fd.assume_init()) })
+ }
+ }
+
+ pub fn file_attr(&self) -> io::Result<FileAttr> {
+ unsupported()
+ }
+
+ pub fn fsync(&self) -> io::Result<()> {
+ self.flush()
+ }
+
+ pub fn datasync(&self) -> io::Result<()> {
+ self.flush()
+ }
+
+ pub fn truncate(&self, _size: u64) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ unsafe {
+ let mut out_num_bytes = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_Read(
+ self.fd.raw(),
+ buf.as_mut_ptr(),
+ buf.len(),
+ out_num_bytes.as_mut_ptr(),
+ ))
+ .map_err(|e| e.as_io_error())?;
+ Ok(out_num_bytes.assume_init())
+ }
+ }
+
+ pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ unsafe {
+ let len = buf.remaining();
+ let mut out_num_bytes = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_Read(
+ self.fd.raw(),
+ buf.unfilled_mut().as_mut_ptr() as *mut u8,
+ len,
+ out_num_bytes.as_mut_ptr(),
+ ))
+ .map_err(|e| e.as_io_error())?;
+
+ // Safety: `out_num_bytes` is filled by the successful call to
+ // `SOLID_FS_Read`
+ let num_bytes_read = out_num_bytes.assume_init();
+
+ // Safety: `num_bytes_read` bytes were written to the unfilled
+ // portion of the buffer
+ buf.assume_init(num_bytes_read);
+
+ buf.add_filled(num_bytes_read);
+
+ Ok(())
+ }
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ crate::io::default_read_vectored(|buf| self.read(buf), bufs)
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ unsafe {
+ let mut out_num_bytes = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_Write(
+ self.fd.raw(),
+ buf.as_ptr(),
+ buf.len(),
+ out_num_bytes.as_mut_ptr(),
+ ))
+ .map_err(|e| e.as_io_error())?;
+ Ok(out_num_bytes.assume_init())
+ }
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ crate::io::default_write_vectored(|buf| self.write(buf), bufs)
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ error::SolidError::err_if_negative(unsafe { abi::SOLID_FS_Sync(self.fd.raw()) })
+ .map_err(|e| e.as_io_error())?;
+ Ok(())
+ }
+
+ pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
+ let (whence, pos) = match pos {
+            // Casting to `i64` is fine: values that are too large end up
+            // negative, which causes an error in `SOLID_FS_Lseek`.
+ SeekFrom::Start(off) => (abi::SEEK_SET, off as i64),
+ SeekFrom::End(off) => (abi::SEEK_END, off),
+ SeekFrom::Current(off) => (abi::SEEK_CUR, off),
+ };
+ error::SolidError::err_if_negative(unsafe {
+ abi::SOLID_FS_Lseek(self.fd.raw(), pos, whence)
+ })
+ .map_err(|e| e.as_io_error())?;
+
+ // Get the new offset
+ unsafe {
+ let mut out_offset = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_Ftell(
+ self.fd.raw(),
+ out_offset.as_mut_ptr(),
+ ))
+ .map_err(|e| e.as_io_error())?;
+ Ok(out_offset.assume_init() as u64)
+ }
+ }
+
+ pub fn duplicate(&self) -> io::Result<File> {
+ unsupported()
+ }
+
+ pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
+ unsupported()
+ }
+}
+
+impl Drop for File {
+ fn drop(&mut self) {
+ unsafe { abi::SOLID_FS_Close(self.fd.raw()) };
+ }
+}
+
+impl DirBuilder {
+ pub fn new() -> DirBuilder {
+ DirBuilder {}
+ }
+
+ pub fn mkdir(&self, p: &Path) -> io::Result<()> {
+ error::SolidError::err_if_negative(unsafe { abi::SOLID_FS_Mkdir(cstr(p)?.as_ptr()) })
+ .map_err(|e| e.as_io_error())?;
+ Ok(())
+ }
+}
+
+impl fmt::Debug for File {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("File").field("fd", &self.fd.raw()).finish()
+ }
+}
+
+pub fn unlink(p: &Path) -> io::Result<()> {
+ if stat(p)?.file_type().is_dir() {
+ Err(io::const_io_error!(io::ErrorKind::IsADirectory, "is a directory"))
+ } else {
+ error::SolidError::err_if_negative(unsafe { abi::SOLID_FS_Unlink(cstr(p)?.as_ptr()) })
+ .map_err(|e| e.as_io_error())?;
+ Ok(())
+ }
+}
+
+pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
+ error::SolidError::err_if_negative(unsafe {
+ abi::SOLID_FS_Rename(cstr(old)?.as_ptr(), cstr(new)?.as_ptr())
+ })
+ .map_err(|e| e.as_io_error())?;
+ Ok(())
+}
+
+pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
+ error::SolidError::err_if_negative(unsafe {
+ abi::SOLID_FS_Chmod(cstr(p)?.as_ptr(), perm.0.into())
+ })
+ .map_err(|e| e.as_io_error())?;
+ Ok(())
+}
+
+pub fn rmdir(p: &Path) -> io::Result<()> {
+ if stat(p)?.file_type().is_dir() {
+ error::SolidError::err_if_negative(unsafe { abi::SOLID_FS_Unlink(cstr(p)?.as_ptr()) })
+ .map_err(|e| e.as_io_error())?;
+ Ok(())
+ } else {
+ Err(io::const_io_error!(io::ErrorKind::NotADirectory, "not a directory"))
+ }
+}
+
+pub fn remove_dir_all(path: &Path) -> io::Result<()> {
+ for child in readdir(path)? {
+ let child = child?;
+ let child_type = child.file_type()?;
+ if child_type.is_dir() {
+ remove_dir_all(&child.path())?;
+ } else {
+ unlink(&child.path())?;
+ }
+ }
+ rmdir(path)
+}
+
+pub fn readlink(p: &Path) -> io::Result<PathBuf> {
+ // This target doesn't support symlinks
+ stat(p)?;
+ Err(io::const_io_error!(io::ErrorKind::InvalidInput, "not a symbolic link"))
+}
+
+pub fn symlink(_original: &Path, _link: &Path) -> io::Result<()> {
+ // This target doesn't support symlinks
+ unsupported()
+}
+
+pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> {
+ // This target doesn't support symlinks
+ unsupported()
+}
+
+pub fn stat(p: &Path) -> io::Result<FileAttr> {
+ // This target doesn't support symlinks
+ lstat(p)
+}
+
+pub fn lstat(p: &Path) -> io::Result<FileAttr> {
+ unsafe {
+ let mut out_stat = MaybeUninit::uninit();
+ error::SolidError::err_if_negative(abi::SOLID_FS_Stat(
+ cstr(p)?.as_ptr(),
+ out_stat.as_mut_ptr(),
+ ))
+ .map_err(|e| e.as_io_error())?;
+ Ok(FileAttr { stat: out_stat.assume_init() })
+ }
+}
+
+pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ use crate::fs::File;
+
+ let mut reader = File::open(from)?;
+ let mut writer = File::create(to)?;
+
+ io::copy(&mut reader, &mut writer)
+}
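
Because `cstr` above rejects anything that does not start with `\` and silently prepends the `\TS` (thread-safe) wrapper, user code must pass absolute backslash-separated paths. A minimal sketch using the public `std::fs` API; the `\fat0` volume name is purely illustrative:

    fn append_line(line: &str) -> crate::io::Result<()> {
        use crate::fs::OpenOptions;
        use crate::io::Write;

        // Opens (or creates) `\TS\fat0\log.txt` under the hood; a relative
        // path such as "log.txt" would fail with `ErrorKind::Unsupported`.
        let mut file = OpenOptions::new().create(true).append(true).open(r"\fat0\log.txt")?;
        file.write_all(line.as_bytes())
    }
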
diff --git a/library/std/src/sys/solid/io.rs b/library/std/src/sys/solid/io.rs
new file mode 100644
index 000000000..9eb17a10d
--- /dev/null
+++ b/library/std/src/sys/solid/io.rs
@@ -0,0 +1,77 @@
+use crate::marker::PhantomData;
+use crate::slice;
+
+use super::abi::sockets::iovec;
+use libc::c_void;
+
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct IoSlice<'a> {
+ vec: iovec,
+ _p: PhantomData<&'a [u8]>,
+}
+
+impl<'a> IoSlice<'a> {
+ #[inline]
+ pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
+ IoSlice {
+ vec: iovec { iov_base: buf.as_ptr() as *mut u8 as *mut c_void, iov_len: buf.len() },
+ _p: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if self.vec.iov_len < n {
+ panic!("advancing IoSlice beyond its length");
+ }
+
+ unsafe {
+ self.vec.iov_len -= n;
+ self.vec.iov_base = self.vec.iov_base.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) }
+ }
+}
+
+#[repr(transparent)]
+pub struct IoSliceMut<'a> {
+ vec: iovec,
+ _p: PhantomData<&'a mut [u8]>,
+}
+
+impl<'a> IoSliceMut<'a> {
+ #[inline]
+ pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+ IoSliceMut {
+ vec: iovec { iov_base: buf.as_mut_ptr() as *mut c_void, iov_len: buf.len() },
+ _p: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if self.vec.iov_len < n {
+ panic!("advancing IoSliceMut beyond its length");
+ }
+
+ unsafe {
+ self.vec.iov_len -= n;
+ self.vec.iov_base = self.vec.iov_base.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) }
+ }
+
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut [u8] {
+ unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) }
+ }
+}
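
Both `advance` implementations above shrink the I/O slice in place. A small illustration of the expected behaviour (the helper is hypothetical):

    fn skip_prefix<'a>(mut slice: IoSlice<'a>, n: usize) -> IoSlice<'a> {
        // Panics if `n` exceeds the current length, matching the check above.
        slice.advance(n);
        slice
    }
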
diff --git a/library/std/src/sys/solid/memchr.rs b/library/std/src/sys/solid/memchr.rs
new file mode 100644
index 000000000..452b7a3de
--- /dev/null
+++ b/library/std/src/sys/solid/memchr.rs
@@ -0,0 +1,21 @@
+pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ let p = unsafe {
+ libc::memchr(
+ haystack.as_ptr() as *const libc::c_void,
+ needle as libc::c_int,
+ haystack.len(),
+ )
+ };
+ if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) }
+}
+
+pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ let p = unsafe {
+ libc::memrchr(
+ haystack.as_ptr() as *const libc::c_void,
+ needle as libc::c_int,
+ haystack.len(),
+ )
+ };
+ if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) }
+}
diff --git a/library/std/src/sys/solid/mod.rs b/library/std/src/sys/solid/mod.rs
new file mode 100644
index 000000000..778a589d1
--- /dev/null
+++ b/library/std/src/sys/solid/mod.rs
@@ -0,0 +1,92 @@
+#![allow(dead_code)]
+#![allow(missing_docs, nonstandard_style)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+mod abi;
+
+#[path = "../itron"]
+mod itron {
+ pub(super) mod abi;
+ pub mod condvar;
+ pub(super) mod error;
+ pub mod mutex;
+ pub(super) mod spin;
+ pub(super) mod task;
+ pub mod thread;
+ pub(super) mod time;
+ use super::unsupported;
+ pub mod wait_flag;
+}
+
+pub mod alloc;
+#[path = "../unsupported/args.rs"]
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+// `error` is `pub(crate)` so that it can be accessed by `itron/error.rs` as
+// `crate::sys::error`
+pub(crate) mod error;
+pub mod fs;
+pub mod io;
+pub mod net;
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+pub mod stdio;
+pub use self::itron::thread;
+pub mod memchr;
+pub mod thread_local_dtor;
+pub mod thread_local_key;
+pub mod time;
+pub use self::itron::wait_flag;
+
+mod rwlock;
+
+pub mod locks {
+ pub use super::itron::condvar::*;
+ pub use super::itron::mutex::*;
+ pub use super::rwlock::*;
+}
+
+// SAFETY: must be called only once during runtime initialization.
+// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+
+// SAFETY: must be called only once during runtime cleanup.
+pub unsafe fn cleanup() {}
+
+pub fn unsupported<T>() -> crate::io::Result<T> {
+ Err(unsupported_err())
+}
+
+pub fn unsupported_err() -> crate::io::Error {
+ crate::io::const_io_error!(
+ crate::io::ErrorKind::Unsupported,
+ "operation not supported on this platform",
+ )
+}
+
+pub fn decode_error_kind(code: i32) -> crate::io::ErrorKind {
+ error::decode_error_kind(code)
+}
+
+#[inline]
+pub fn abort_internal() -> ! {
+ unsafe { libc::abort() }
+}
+
+pub fn hashmap_random_keys() -> (u64, u64) {
+ unsafe {
+ let mut out = crate::mem::MaybeUninit::<[u64; 2]>::uninit();
+ let result = abi::SOLID_RNG_SampleRandomBytes(out.as_mut_ptr() as *mut u8, 16);
+ assert_eq!(result, 0, "SOLID_RNG_SampleRandomBytes failed: {result}");
+ let [x1, x2] = out.assume_init();
+ (x1, x2)
+ }
+}
diff --git a/library/std/src/sys/solid/net.rs b/library/std/src/sys/solid/net.rs
new file mode 100644
index 000000000..1b98ef993
--- /dev/null
+++ b/library/std/src/sys/solid/net.rs
@@ -0,0 +1,469 @@
+use super::abi;
+use crate::{
+ cmp,
+ ffi::CStr,
+ io::{self, ErrorKind, IoSlice, IoSliceMut},
+ mem,
+ net::{Shutdown, SocketAddr},
+ ptr, str,
+ sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr},
+ sys_common::{AsInner, FromInner, IntoInner},
+ time::Duration,
+};
+
+use self::netc::{sockaddr, socklen_t, MSG_PEEK};
+use libc::{c_int, c_void, size_t};
+
+pub mod netc {
+ pub use super::super::abi::sockets::*;
+}
+
+pub type wrlen_t = size_t;
+
+const READ_LIMIT: usize = libc::ssize_t::MAX as usize;
+
+const fn max_iov() -> usize {
+ // Judging by the source code, it's unlimited, but specify a lower
+ // value just in case.
+ 1024
+}
+
+/// A file descriptor.
+#[rustc_layout_scalar_valid_range_start(0)]
+// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
+// 32-bit c_int. Below is -2, in two's complement, but that only works out
+// because c_int is 32 bits.
+#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
+struct FileDesc {
+ fd: c_int,
+}
+
+impl FileDesc {
+ #[inline]
+ fn new(fd: c_int) -> FileDesc {
+ assert_ne!(fd, -1i32);
+ // Safety: we just asserted that the value is in the valid range and
+ // isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
+ unsafe { FileDesc { fd } }
+ }
+
+ #[inline]
+ fn raw(&self) -> c_int {
+ self.fd
+ }
+
+ /// Extracts the actual file descriptor without closing it.
+ #[inline]
+ fn into_raw(self) -> c_int {
+ let fd = self.fd;
+ mem::forget(self);
+ fd
+ }
+
+ fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ netc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT))
+ })?;
+ Ok(ret as usize)
+ }
+
+ fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ netc::readv(
+ self.fd,
+ bufs.as_ptr() as *const netc::iovec,
+ cmp::min(bufs.len(), max_iov()) as c_int,
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ netc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT))
+ })?;
+ Ok(ret as usize)
+ }
+
+ fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ netc::writev(
+ self.fd,
+ bufs.as_ptr() as *const netc::iovec,
+ cmp::min(bufs.len(), max_iov()) as c_int,
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn duplicate(&self) -> io::Result<FileDesc> {
+ cvt(unsafe { netc::dup(self.fd) }).map(Self::new)
+ }
+}
+
+impl AsInner<c_int> for FileDesc {
+ fn as_inner(&self) -> &c_int {
+ &self.fd
+ }
+}
+
+impl Drop for FileDesc {
+ fn drop(&mut self) {
+ unsafe { netc::close(self.fd) };
+ }
+}
+
+#[doc(hidden)]
+pub trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+macro_rules! impl_is_minus_one {
+ ($($t:ident)*) => ($(impl IsMinusOne for $t {
+ fn is_minus_one(&self) -> bool {
+ *self == -1
+ }
+ })*)
+}
+
+impl_is_minus_one! { i8 i16 i32 i64 isize }
+
+pub fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> {
+ if t.is_minus_one() { Err(last_error()) } else { Ok(t) }
+}
+
+/// A variant of `cvt` for `getaddrinfo`, which returns 0 on success.
+pub fn cvt_gai(err: c_int) -> io::Result<()> {
+ if err == 0 {
+ Ok(())
+ } else {
+ let msg: &dyn crate::fmt::Display = match err {
+ netc::EAI_NONAME => &"name or service not known",
+ netc::EAI_SERVICE => &"service not supported",
+ netc::EAI_FAIL => &"non-recoverable failure in name resolution",
+ netc::EAI_MEMORY => &"memory allocation failure",
+ netc::EAI_FAMILY => &"family not supported",
+ _ => &err,
+ };
+ Err(io::Error::new(
+ io::ErrorKind::Uncategorized,
+ &format!("failed to lookup address information: {msg}")[..],
+ ))
+ }
+}
+
+/// Just to provide the same interface as sys/unix/net.rs
+pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
+where
+ T: IsMinusOne,
+ F: FnMut() -> T,
+{
+ cvt(f())
+}
+
+/// Returns the last error from the network subsystem.
+fn last_error() -> io::Error {
+ io::Error::from_raw_os_error(unsafe { netc::SOLID_NET_GetLastError() })
+}
+
+pub(super) fn error_name(er: abi::ER) -> Option<&'static str> {
+ unsafe { CStr::from_ptr(netc::strerror(er)) }.to_str().ok()
+}
+
+pub(super) fn decode_error_kind(er: abi::ER) -> ErrorKind {
+ let errno = netc::SOLID_NET_ERR_BASE - er;
+ match errno as libc::c_int {
+ libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
+ libc::ECONNRESET => ErrorKind::ConnectionReset,
+ libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
+ libc::EPIPE => ErrorKind::BrokenPipe,
+ libc::ENOTCONN => ErrorKind::NotConnected,
+ libc::ECONNABORTED => ErrorKind::ConnectionAborted,
+ libc::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable,
+ libc::EADDRINUSE => ErrorKind::AddrInUse,
+ libc::ENOENT => ErrorKind::NotFound,
+ libc::EINTR => ErrorKind::Interrupted,
+ libc::EINVAL => ErrorKind::InvalidInput,
+ libc::ETIMEDOUT => ErrorKind::TimedOut,
+ libc::EEXIST => ErrorKind::AlreadyExists,
+ libc::ENOSYS => ErrorKind::Unsupported,
+ libc::ENOMEM => ErrorKind::OutOfMemory,
+ libc::EAGAIN => ErrorKind::WouldBlock,
+
+ _ => ErrorKind::Uncategorized,
+ }
+}
+
+pub fn init() {}
+
+pub struct Socket(FileDesc);
+
+impl Socket {
+ pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
+ let fam = match *addr {
+ SocketAddr::V4(..) => netc::AF_INET,
+ SocketAddr::V6(..) => netc::AF_INET6,
+ };
+ Socket::new_raw(fam, ty)
+ }
+
+ pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
+ unsafe {
+ let fd = cvt(netc::socket(fam, ty, 0))?;
+ let fd = FileDesc::new(fd);
+ let socket = Socket(fd);
+
+ Ok(socket)
+ }
+ }
+
+ pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
+ self.set_nonblocking(true)?;
+ let r = unsafe {
+ let (addr, len) = addr.into_inner();
+ cvt(netc::connect(self.0.raw(), addr.as_ptr(), len))
+ };
+ self.set_nonblocking(false)?;
+
+ match r {
+ Ok(_) => return Ok(()),
+ // there's no ErrorKind for EINPROGRESS
+ Err(ref e) if e.raw_os_error() == Some(netc::EINPROGRESS) => {}
+ Err(e) => return Err(e),
+ }
+
+ if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+
+ let mut timeout =
+ netc::timeval { tv_sec: timeout.as_secs() as _, tv_usec: timeout.subsec_micros() as _ };
+ if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
+ timeout.tv_usec = 1;
+ }
+
+ let fds = netc::fd_set { num_fds: 1, fds: [self.0.raw()] };
+
+ let mut writefds = fds;
+ let mut errorfds = fds;
+
+ let n = unsafe {
+ cvt(netc::select(
+ self.0.raw() + 1,
+ ptr::null_mut(),
+ &mut writefds,
+ &mut errorfds,
+ &mut timeout,
+ ))?
+ };
+
+ match n {
+ 0 => Err(io::const_io_error!(io::ErrorKind::TimedOut, "connection timed out")),
+ _ => {
+ let can_write = writefds.num_fds != 0;
+ if !can_write {
+ if let Some(e) = self.take_error()? {
+ return Err(e);
+ }
+ }
+ Ok(())
+ }
+ }
+ }
+
+ pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> {
+ let fd = cvt_r(|| unsafe { netc::accept(self.0.raw(), storage, len) })?;
+ let fd = FileDesc::new(fd);
+ Ok(Socket(fd))
+ }
+
+ pub fn duplicate(&self) -> io::Result<Socket> {
+ self.0.duplicate().map(Socket)
+ }
+
+ fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ netc::recv(self.0.raw(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags)
+ })?;
+ Ok(ret as usize)
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, 0)
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, MSG_PEEK)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+
+ fn recv_from_with_flags(
+ &self,
+ buf: &mut [u8],
+ flags: c_int,
+ ) -> io::Result<(usize, SocketAddr)> {
+ let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
+ let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
+
+ let n = cvt(unsafe {
+ netc::recvfrom(
+ self.0.raw(),
+ buf.as_mut_ptr() as *mut c_void,
+ buf.len(),
+ flags,
+ &mut storage as *mut _ as *mut _,
+ &mut addrlen,
+ )
+ })?;
+ Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, 0)
+ }
+
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, MSG_PEEK)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ pub fn set_timeout(&self, dur: Option<Duration>, kind: c_int) -> io::Result<()> {
+ let timeout = match dur {
+ Some(dur) => {
+ if dur.as_secs() == 0 && dur.subsec_nanos() == 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+
+ let secs = if dur.as_secs() > netc::c_long::MAX as u64 {
+ netc::c_long::MAX
+ } else {
+ dur.as_secs() as netc::c_long
+ };
+ let mut timeout = netc::timeval { tv_sec: secs, tv_usec: dur.subsec_micros() as _ };
+ if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
+ timeout.tv_usec = 1;
+ }
+ timeout
+ }
+ None => netc::timeval { tv_sec: 0, tv_usec: 0 },
+ };
+ setsockopt(self, netc::SOL_SOCKET, kind, timeout)
+ }
+
+ pub fn timeout(&self, kind: c_int) -> io::Result<Option<Duration>> {
+ let raw: netc::timeval = getsockopt(self, netc::SOL_SOCKET, kind)?;
+ if raw.tv_sec == 0 && raw.tv_usec == 0 {
+ Ok(None)
+ } else {
+ let sec = raw.tv_sec as u64;
+ let nsec = (raw.tv_usec as u32) * 1000;
+ Ok(Some(Duration::new(sec, nsec)))
+ }
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let how = match how {
+ Shutdown::Write => netc::SHUT_WR,
+ Shutdown::Read => netc::SHUT_RD,
+ Shutdown::Both => netc::SHUT_RDWR,
+ };
+ cvt(unsafe { netc::shutdown(self.0.raw(), how) })?;
+ Ok(())
+ }
+
+ pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
+ let linger = netc::linger {
+ l_onoff: linger.is_some() as netc::c_int,
+ l_linger: linger.unwrap_or_default().as_secs() as netc::c_int,
+ };
+
+ setsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER, linger)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ let val: netc::linger = getsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER)?;
+
+ Ok((val.l_onoff != 0).then(|| Duration::from_secs(val.l_linger as u64)))
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ setsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY, nodelay as c_int)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ let raw: c_int = getsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY)?;
+ Ok(raw != 0)
+ }
+
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ let mut nonblocking = nonblocking as c_int;
+ cvt(unsafe {
+ netc::ioctl(*self.as_inner(), netc::FIONBIO, (&mut nonblocking) as *mut c_int as _)
+ })
+ .map(drop)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ let raw: c_int = getsockopt(self, netc::SOL_SOCKET, netc::SO_ERROR)?;
+ if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
+ }
+
+ // This method is used by sys_common code to abstract over targets.
+ pub fn as_raw(&self) -> c_int {
+ *self.as_inner()
+ }
+}
+
+impl AsInner<c_int> for Socket {
+ fn as_inner(&self) -> &c_int {
+ self.0.as_inner()
+ }
+}
+
+impl FromInner<c_int> for Socket {
+ fn from_inner(fd: c_int) -> Socket {
+ Socket(FileDesc::new(fd))
+ }
+}
+
+impl IntoInner<c_int> for Socket {
+ fn into_inner(self) -> c_int {
+ self.0.into_raw()
+ }
+}
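
A hedged sketch of the `cvt`/`last_error` convention that the rest of this module relies on; `shutdown_both` is a made-up helper used only for illustration:

    fn shutdown_both(fd: c_int) -> io::Result<()> {
        // SOLID_NET calls return -1 on failure; `cvt` turns that into the
        // error reported by `SOLID_NET_GetLastError`.
        cvt(unsafe { netc::shutdown(fd, netc::SHUT_RDWR) })?;
        Ok(())
    }
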
diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs
new file mode 100644
index 000000000..b5649d6e0
--- /dev/null
+++ b/library/std/src/sys/solid/os.rs
@@ -0,0 +1,193 @@
+use super::unsupported;
+use crate::error::Error as StdError;
+use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::os::{
+ raw::{c_char, c_int},
+ solid::ffi::{OsStrExt, OsStringExt},
+};
+use crate::path::{self, PathBuf};
+use crate::sys_common::rwlock::StaticRwLock;
+use crate::vec;
+
+use super::{error, itron, memchr};
+
+// `solid` directly maps `errno`s to μITRON error codes.
+impl itron::error::ItronError {
+ #[inline]
+ pub(crate) fn as_io_error(self) -> crate::io::Error {
+ crate::io::Error::from_raw_os_error(self.as_raw())
+ }
+}
+
+pub fn errno() -> i32 {
+ 0
+}
+
+pub fn error_string(errno: i32) -> String {
+ if let Some(name) = error::error_name(errno) { name.to_owned() } else { format!("{errno}") }
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn chdir(_: &path::Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub struct SplitPaths<'a>(&'a !);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ *self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported on this platform yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "not supported on this platform yet"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+static ENV_LOCK: StaticRwLock = StaticRwLock::new();
+
+pub struct Env {
+ iter: vec::IntoIter<(OsString, OsString)>,
+}
+
+impl !Send for Env {}
+impl !Sync for Env {}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+/// Returns a vector of (variable, value) byte-vector pairs for all the
+/// environment variables of the current process.
+pub fn env() -> Env {
+ extern "C" {
+ static mut environ: *const *const c_char;
+ }
+
+ unsafe {
+ let _guard = ENV_LOCK.read();
+ let mut result = Vec::new();
+ if !environ.is_null() {
+ while !(*environ).is_null() {
+ if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) {
+ result.push(key_value);
+ }
+ environ = environ.add(1);
+ }
+ }
+ return Env { iter: result.into_iter() };
+ }
+
+ fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
+ // Strategy (copied from glibc): Variable name and value are separated
+ // by an ASCII equals sign '='. Since a variable name must not be
+ // empty, allow variable names starting with an equals sign. Skip all
+ // malformed lines.
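+        // For example, `b"=FOO=bar"` parses as the key `"=FOO"` with the value `"bar"`,
+        // while `b"FOO"` (no separator past the first byte) is skipped.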
+ if input.is_empty() {
+ return None;
+ }
+ let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1);
+ pos.map(|p| {
+ (
+ OsStringExt::from_vec(input[..p].to_vec()),
+ OsStringExt::from_vec(input[p + 1..].to_vec()),
+ )
+ })
+ }
+}
+
+pub fn getenv(k: &OsStr) -> Option<OsString> {
+ // environment variables with a nul byte can't be set, so their value is
+ // always None as well
+ let k = CString::new(k.as_bytes()).ok()?;
+ unsafe {
+ let _guard = ENV_LOCK.read();
+ let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
+ }
+ }
+}
+
+pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
+ let k = CString::new(k.as_bytes())?;
+ let v = CString::new(v.as_bytes())?;
+
+ unsafe {
+ let _guard = ENV_LOCK.write();
+ cvt_env(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
+ }
+}
+
+pub fn unsetenv(n: &OsStr) -> io::Result<()> {
+ let nbuf = CString::new(n.as_bytes())?;
+
+ unsafe {
+ let _guard = ENV_LOCK.write();
+ cvt_env(libc::unsetenv(nbuf.as_ptr())).map(drop)
+ }
+}
+
+/// In kmclib, `setenv` and `unsetenv` don't always set `errno`, so this
+/// function just returns a generic error.
+fn cvt_env(t: c_int) -> io::Result<c_int> {
+ if t == -1 { Err(io::const_io_error!(io::ErrorKind::Uncategorized, "failure")) } else { Ok(t) }
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no standard temporary directory on this platform")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(code: i32) -> ! {
+ rtabort!("exit({}) called", code);
+}
+
+pub fn getpid() -> u32 {
+ panic!("no pids on this platform")
+}
diff --git a/library/std/src/sys/solid/path.rs b/library/std/src/sys/solid/path.rs
new file mode 100644
index 000000000..7045c9be2
--- /dev/null
+++ b/library/std/src/sys/solid/path.rs
@@ -0,0 +1,25 @@
+use crate::ffi::OsStr;
+use crate::io;
+use crate::path::{Path, PathBuf, Prefix};
+use crate::sys::unsupported;
+
+#[inline]
+pub fn is_sep_byte(b: u8) -> bool {
+ b == b'\\'
+}
+
+#[inline]
+pub fn is_verbatim_sep(b: u8) -> bool {
+ b == b'\\'
+}
+
+pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> {
+ None
+}
+
+pub const MAIN_SEP_STR: &str = "\\";
+pub const MAIN_SEP: char = '\\';
+
+pub(crate) fn absolute(_path: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
diff --git a/library/std/src/sys/solid/rwlock.rs b/library/std/src/sys/solid/rwlock.rs
new file mode 100644
index 000000000..0a770cf03
--- /dev/null
+++ b/library/std/src/sys/solid/rwlock.rs
@@ -0,0 +1,95 @@
+//! A readers-writer lock implementation backed by the SOLID kernel extension.
+use super::{
+ abi,
+ itron::{
+ error::{expect_success, expect_success_aborting, fail, ItronError},
+ spin::SpinIdOnceCell,
+ },
+};
+
+pub struct RwLock {
+    /// The ID of the underlying readers-writer lock object
+ rwl: SpinIdOnceCell<()>,
+}
+
+pub type MovableRwLock = RwLock;
+
+// Safety: the lock state lives in the kernel object identified by `rwl`, so sharing it across threads is sound
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+fn new_rwl() -> Result<abi::ID, ItronError> {
+ ItronError::err_if_negative(unsafe { abi::rwl_acre_rwl() })
+}
+
+impl RwLock {
+ #[inline]
+ pub const fn new() -> RwLock {
+ RwLock { rwl: SpinIdOnceCell::new() }
+ }
+
+    /// Gets the ID of the inner readers-writer lock object, creating it lazily on first use.
+ fn raw(&self) -> abi::ID {
+ match self.rwl.get_or_try_init(|| new_rwl().map(|id| (id, ()))) {
+ Ok((id, ())) => id,
+ Err(e) => fail(e, &"rwl_acre_rwl"),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ let rwl = self.raw();
+ expect_success(unsafe { abi::rwl_loc_rdl(rwl) }, &"rwl_loc_rdl");
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ let rwl = self.raw();
+ match unsafe { abi::rwl_ploc_rdl(rwl) } {
+ abi::E_TMOUT => false,
+ er => {
+ expect_success(er, &"rwl_ploc_rdl");
+ true
+ }
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ let rwl = self.raw();
+ expect_success(unsafe { abi::rwl_loc_wrl(rwl) }, &"rwl_loc_wrl");
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ let rwl = self.raw();
+ match unsafe { abi::rwl_ploc_wrl(rwl) } {
+ abi::E_TMOUT => false,
+ er => {
+ expect_success(er, &"rwl_ploc_wrl");
+ true
+ }
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ let rwl = self.raw();
+ expect_success_aborting(unsafe { abi::rwl_unl_rwl(rwl) }, &"rwl_unl_rwl");
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ let rwl = self.raw();
+ expect_success_aborting(unsafe { abi::rwl_unl_rwl(rwl) }, &"rwl_unl_rwl");
+ }
+}
+
+impl Drop for RwLock {
+ #[inline]
+ fn drop(&mut self) {
+ if let Some(rwl) = self.rwl.get().map(|x| x.0) {
+ expect_success_aborting(unsafe { abi::rwl_del_rwl(rwl) }, &"rwl_del_rwl");
+ }
+ }
+}
diff --git a/library/std/src/sys/solid/stdio.rs b/library/std/src/sys/solid/stdio.rs
new file mode 100644
index 000000000..50f017696
--- /dev/null
+++ b/library/std/src/sys/solid/stdio.rs
@@ -0,0 +1,80 @@
+use super::abi;
+use crate::io;
+
+pub struct Stdin;
+pub struct Stdout;
+pub struct Stderr;
+struct PanicOutput;
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
+ Ok(0)
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ unsafe { abi::SOLID_LOG_write(buf.as_ptr(), buf.len()) };
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ unsafe { abi::SOLID_LOG_write(buf.as_ptr(), buf.len()) };
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl PanicOutput {
+ pub const fn new() -> PanicOutput {
+ PanicOutput
+ }
+}
+
+impl io::Write for PanicOutput {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ unsafe { abi::SOLID_LOG_write(buf.as_ptr(), buf.len()) };
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub const STDIN_BUF_SIZE: usize = 0;
+
+pub fn is_ebadf(_err: &io::Error) -> bool {
+ true
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ Some(PanicOutput::new())
+}
diff --git a/library/std/src/sys/solid/thread_local_dtor.rs b/library/std/src/sys/solid/thread_local_dtor.rs
new file mode 100644
index 000000000..973564570
--- /dev/null
+++ b/library/std/src/sys/solid/thread_local_dtor.rs
@@ -0,0 +1,50 @@
+#![cfg(target_thread_local)]
+#![unstable(feature = "thread_local_internals", issue = "none")]
+
+// Simplify dtor registration by using a list of destructors.
+
+use super::{abi, itron::task};
+use crate::cell::Cell;
+use crate::ptr;
+
+#[thread_local]
+static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut());
+
+type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>;
+
+pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ if DTORS.get().is_null() {
+ let tid = task::current_task_id_aborting();
+ let v: Box<List> = box Vec::new();
+ DTORS.set(Box::into_raw(v));
+
+ // Register `tls_dtor` to make sure the TLS destructors are called
+        // for tasks created by means other than `std::thread`
+ unsafe { abi::SOLID_TLS_AddDestructor(tid as i32, tls_dtor) };
+ }
+
+ let list: &mut List = unsafe { &mut *DTORS.get() };
+ list.push((t, dtor));
+}
+
+pub unsafe fn run_dtors() {
+ let ptr = DTORS.get();
+ if !ptr.is_null() {
+ // Swap the destructor list, call all registered destructors,
+ // and repeat this until the list becomes permanently empty.
+ while let Some(list) = Some(crate::mem::replace(unsafe { &mut *ptr }, Vec::new()))
+ .filter(|list| !list.is_empty())
+ {
+ for (ptr, dtor) in list.into_iter() {
+ unsafe { dtor(ptr) };
+ }
+ }
+
+ // Drop the destructor list
+ unsafe { Box::from_raw(DTORS.replace(ptr::null_mut())) };
+ }
+}
+
+unsafe extern "C" fn tls_dtor(_unused: *mut u8) {
+ unsafe { run_dtors() };
+}
diff --git a/library/std/src/sys/solid/thread_local_key.rs b/library/std/src/sys/solid/thread_local_key.rs
new file mode 100644
index 000000000..b17521f70
--- /dev/null
+++ b/library/std/src/sys/solid/thread_local_key.rs
@@ -0,0 +1,26 @@
+pub type Key = usize;
+
+#[inline]
+pub unsafe fn create(_dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
+ panic!("should not be used on the solid target");
+}
+
+#[inline]
+pub unsafe fn set(_key: Key, _value: *mut u8) {
+ panic!("should not be used on the solid target");
+}
+
+#[inline]
+pub unsafe fn get(_key: Key) -> *mut u8 {
+ panic!("should not be used on the solid target");
+}
+
+#[inline]
+pub unsafe fn destroy(_key: Key) {
+ panic!("should not be used on the solid target");
+}
+
+#[inline]
+pub fn requires_synchronized_create() -> bool {
+ panic!("should not be used on the solid target");
+}
diff --git a/library/std/src/sys/solid/time.rs b/library/std/src/sys/solid/time.rs
new file mode 100644
index 000000000..ce31cb45a
--- /dev/null
+++ b/library/std/src/sys/solid/time.rs
@@ -0,0 +1,56 @@
+use super::{abi, error::expect_success};
+use crate::{mem::MaybeUninit, time::Duration};
+
+pub use super::itron::time::Instant;
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct SystemTime(abi::time_t);
+
+pub const UNIX_EPOCH: SystemTime = SystemTime(0);
+
+impl SystemTime {
+ pub fn now() -> SystemTime {
+ let rtc = unsafe {
+ let mut out = MaybeUninit::zeroed();
+ expect_success(abi::SOLID_RTC_ReadTime(out.as_mut_ptr()), &"SOLID_RTC_ReadTime");
+ out.assume_init()
+ };
+ let t = unsafe {
+ libc::mktime(&mut libc::tm {
+ tm_sec: rtc.tm_sec,
+ tm_min: rtc.tm_min,
+ tm_hour: rtc.tm_hour,
+ tm_mday: rtc.tm_mday,
+ tm_mon: rtc.tm_mon - 1,
+ tm_year: rtc.tm_year,
+ tm_wday: rtc.tm_wday,
+ tm_yday: 0,
+ tm_isdst: 0,
+ tm_gmtoff: 0,
+ tm_zone: crate::ptr::null_mut(),
+ })
+ };
+ assert_ne!(t, -1, "mktime failed");
+ SystemTime(t)
+ }
+
+ pub(super) fn from_time_t(t: abi::time_t) -> Self {
+ Self(t)
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ if self.0 >= other.0 {
+ Ok(Duration::from_secs((self.0 as u64).wrapping_sub(other.0 as u64)))
+ } else {
+ Err(Duration::from_secs((other.0 as u64).wrapping_sub(self.0 as u64)))
+ }
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_add(other.as_secs().try_into().ok()?)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_sub(other.as_secs().try_into().ok()?)?))
+ }
+}
diff --git a/library/std/src/sys/unix/alloc.rs b/library/std/src/sys/unix/alloc.rs
new file mode 100644
index 000000000..9d6567c9f
--- /dev/null
+++ b/library/std/src/sys/unix/alloc.rs
@@ -0,0 +1,101 @@
+use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::ptr;
+use crate::sys::common::alloc::{realloc_fallback, MIN_ALIGN};
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // jemalloc provides alignment less than MIN_ALIGN for small allocations.
+ // So only rely on MIN_ALIGN if size >= align.
+ // Also see <https://github.com/rust-lang/rust/issues/45955> and
+ // <https://github.com/rust-lang/rust/issues/62251#issuecomment-507580914>.
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::malloc(layout.size()) as *mut u8
+ } else {
+ #[cfg(target_os = "macos")]
+ {
+ if layout.align() > (1 << 31) {
+ return ptr::null_mut();
+ }
+ }
+ aligned_malloc(&layout)
+ }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // See the comment above in `alloc` for why this check looks the way it does.
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::calloc(layout.size(), 1) as *mut u8
+ } else {
+ let ptr = self.alloc(layout);
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
+ }
+ ptr
+ }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ libc::free(ptr as *mut libc::c_void)
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+ } else {
+ realloc_fallback(self, ptr, layout, new_size)
+ }
+ }
+}
+
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "android",
+ target_os = "illumos",
+ target_os = "redox",
+ target_os = "solaris",
+ target_os = "espidf",
+ target_os = "horizon"
+ ))] {
+ #[inline]
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+            // On Android we currently target API level 9, which unfortunately
+            // doesn't have the `posix_memalign` API used below. Instead we use
+            // `memalign`, which on some systems unfortunately has the property
+            // that the memory it returns cannot be deallocated by `free`!
+ //
+ // Upon closer inspection, however, this appears to work just fine with
+ // Android, so for this platform we should be fine to call `memalign`
+ // (which is present in API level 9). Some helpful references could
+ // possibly be chromium using memalign [1], attempts at documenting that
+ // memalign + free is ok [2] [3], or the current source of chromium
+ // which still uses memalign on android [4].
+ //
+ // [1]: https://codereview.chromium.org/10796020/
+ // [2]: https://code.google.com/p/android/issues/detail?id=35391
+ // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
+ // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
+ // /memory/aligned_memory.cc
+ libc::memalign(layout.align(), layout.size()) as *mut u8
+ }
+ } else if #[cfg(target_os = "wasi")] {
+ #[inline]
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ libc::aligned_alloc(layout.align(), layout.size()) as *mut u8
+ }
+ } else {
+ #[inline]
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ let mut out = ptr::null_mut();
+ // posix_memalign requires that the alignment be a multiple of `sizeof(void*)`.
+ // Since these are all powers of 2, we can just use max.
+ let align = layout.align().max(crate::mem::size_of::<usize>());
+ let ret = libc::posix_memalign(&mut out, align, layout.size());
+ if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/android.rs b/library/std/src/sys/unix/android.rs
new file mode 100644
index 000000000..73ff10ab8
--- /dev/null
+++ b/library/std/src/sys/unix/android.rs
@@ -0,0 +1,81 @@
+//! Android ABI-compatibility module
+//!
+//! The ABI of Android has changed quite a bit over time, and libstd attempts to
+//! be both forwards and backwards compatible as much as possible. We want to
+//! always work with the most recent version of Android, but we also want to
+//! work with older versions of Android whenever projects need to.
+//!
+//! Our current minimum supported Android version is `android-9`, i.e., Android
+//! with API level 9. In theory we then want to work on that and all future
+//! versions of Android!
+//!
+//! Some of the detection here is done at runtime via `dlopen` and
+//! introspection. Other times no detection is performed at all and we just
+//! provide a fallback implementation as some versions of Android we support
+//! don't have the function.
+//!
+//! You'll find more details below about why each compatibility shim is needed.
+
+#![cfg(target_os = "android")]
+
+use libc::{c_int, sighandler_t};
+
+use super::weak::weak;
+
+// The `log2` and `log2f` functions apparently appeared in android-18, or at
+// least you can see they're not present in the android-17 header [1] and they
+// are present in android-18 [2].
+//
+// [1]: https://chromium.googlesource.com/android_tools/+/20ee6d20/ndk/platforms
+// /android-17/arch-arm/usr/include/math.h
+// [2]: https://chromium.googlesource.com/android_tools/+/20ee6d20/ndk/platforms
+// /android-18/arch-arm/usr/include/math.h
+//
+// Note that these shims are likely less precise than directly calling `log2`,
+// but hopefully that should be enough for now...
+//
+// Note that mathematically, for any arbitrary `y`:
+//
+// log_2(x) = log_y(x) / log_y(2)
+// = log_y(x) / (1 / log_2(y))
+// = log_y(x) * log_2(y)
+//
+// Hence because `ln` (log_e) is available on all Android we just choose `y = e`
+// and get:
+//
+// log_2(x) = ln(x) * log_2(e)
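+//
+// For example, log_2(8) = ln(8) * log_2(e) ≈ 2.0794 * 1.4427 ≈ 3.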
+
+#[cfg(not(test))]
+pub fn log2f32(f: f32) -> f32 {
+ f.ln() * crate::f32::consts::LOG2_E
+}
+
+#[cfg(not(test))]
+pub fn log2f64(f: f64) -> f64 {
+ f.ln() * crate::f64::consts::LOG2_E
+}
+
+// Back in the day [1] the `signal` function was just an inline wrapper
+// around `bsd_signal`, but starting in API level android-20 the `signal`
+// symbol was introduced [2]. Finally, in android-21 the API `bsd_signal` was
+// removed [3].
+//
+// Basically this means that if we want to be binary compatible with multiple
+// Android releases (oldest being 9 and newest being 21) then we need to check
+// for both symbols and not actually link against either.
+//
+// [1]: https://chromium.googlesource.com/android_tools/+/20ee6d20/ndk/platforms
+// /android-18/arch-arm/usr/include/signal.h
+// [2]: https://chromium.googlesource.com/android_tools/+/fbd420/ndk_experimental
+// /platforms/android-20/arch-arm
+// /usr/include/signal.h
+// [3]: https://chromium.googlesource.com/android_tools/+/20ee6d/ndk/platforms
+// /android-21/arch-arm/usr/include/signal.h
+pub unsafe fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t {
+ weak!(fn signal(c_int, sighandler_t) -> sighandler_t);
+ weak!(fn bsd_signal(c_int, sighandler_t) -> sighandler_t);
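+    // `weak!` resolves these symbols at runtime rather than linking against them
+    // directly, so it is fine if only one of the two is actually present.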
+
+ let f = signal.get().or_else(|| bsd_signal.get());
+ let f = f.expect("neither `signal` nor `bsd_signal` symbols found");
+ f(signum, handler)
+}
diff --git a/library/std/src/sys/unix/args.rs b/library/std/src/sys/unix/args.rs
new file mode 100644
index 000000000..a342f0f5e
--- /dev/null
+++ b/library/std/src/sys/unix/args.rs
@@ -0,0 +1,261 @@
+//! Global initialization and retrieval of command line arguments.
+//!
+//! On some platforms these are stored during runtime startup,
+//! and on some they are retrieved from the system on demand.
+
+#![allow(dead_code)] // runtime init functions not used during testing
+
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::vec;
+
+/// One-time global initialization.
+pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ imp::init(argc, argv)
+}
+
+/// Returns the command line arguments
+pub fn args() -> Args {
+ imp::args()
+}
+
+pub struct Args {
+ iter: vec::IntoIter<OsString>,
+}
+
+impl !Send for Args {}
+impl !Sync for Args {}
+
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.iter.as_slice().fmt(f)
+ }
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.iter.next_back()
+ }
+}
+
+#[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "emscripten",
+ target_os = "haiku",
+ target_os = "l4re",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "vxworks",
+ target_os = "horizon"
+))]
+mod imp {
+ use super::Args;
+ use crate::ffi::{CStr, OsString};
+ use crate::os::unix::prelude::*;
+ use crate::ptr;
+ use crate::sync::atomic::{AtomicIsize, AtomicPtr, Ordering};
+
+ // The system-provided argc and argv, which we store in static memory
+    // here so that we can defer the work of parsing them until it's actually
+ // needed.
+ //
+ // Note that we never mutate argv/argc, the argv array, or the argv
+ // strings, which allows the code in this file to be very simple.
+ static ARGC: AtomicIsize = AtomicIsize::new(0);
+ static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut());
+
+ unsafe fn really_init(argc: isize, argv: *const *const u8) {
+ // These don't need to be ordered with each other or other stores,
+        // because they only hold the unmodified system-provided argv/argc.
+ ARGC.store(argc, Ordering::Relaxed);
+ ARGV.store(argv as *mut _, Ordering::Relaxed);
+ }
+
+ #[inline(always)]
+ pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
+ // On Linux-GNU, we rely on `ARGV_INIT_ARRAY` below to initialize
+ // `ARGC` and `ARGV`. But in Miri that does not actually happen so we
+ // still initialize here.
+ #[cfg(any(miri, not(all(target_os = "linux", target_env = "gnu"))))]
+ really_init(_argc, _argv);
+ }
+
+ /// glibc passes argc, argv, and envp to functions in .init_array, as a non-standard extension.
+ /// This allows `std::env::args` to work even in a `cdylib`, as it does on macOS and Windows.
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ #[used]
+ #[link_section = ".init_array.00099"]
+ static ARGV_INIT_ARRAY: extern "C" fn(
+ crate::os::raw::c_int,
+ *const *const u8,
+ *const *const u8,
+ ) = {
+ extern "C" fn init_wrapper(
+ argc: crate::os::raw::c_int,
+ argv: *const *const u8,
+ _envp: *const *const u8,
+ ) {
+ unsafe {
+ really_init(argc as isize, argv);
+ }
+ }
+ init_wrapper
+ };
+
+ pub fn args() -> Args {
+ Args { iter: clone().into_iter() }
+ }
+
+ fn clone() -> Vec<OsString> {
+ unsafe {
+ // Load ARGC and ARGV, which hold the unmodified system-provided
+ // argc/argv, so we can read the pointed-to memory without atomics
+ // or synchronization.
+ //
+ // If either ARGC or ARGV is still zero or null, then either there
+ // really are no arguments, or someone is asking for `args()`
+ // before initialization has completed, and we return an empty
+ // list.
+ let argv = ARGV.load(Ordering::Relaxed);
+ let argc = if argv.is_null() { 0 } else { ARGC.load(Ordering::Relaxed) };
+ (0..argc)
+ .map(|i| {
+ let cstr = CStr::from_ptr(*argv.offset(i) as *const libc::c_char);
+ OsStringExt::from_vec(cstr.to_bytes().to_vec())
+ })
+ .collect()
+ }
+ }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+mod imp {
+ use super::Args;
+ use crate::ffi::CStr;
+
+ pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+
+ #[cfg(target_os = "macos")]
+ pub fn args() -> Args {
+ use crate::os::unix::prelude::*;
+ extern "C" {
+ // These functions are in crt_externs.h.
+ fn _NSGetArgc() -> *mut libc::c_int;
+ fn _NSGetArgv() -> *mut *mut *mut libc::c_char;
+ }
+
+ let vec = unsafe {
+ let (argc, argv) =
+ (*_NSGetArgc() as isize, *_NSGetArgv() as *const *const libc::c_char);
+ (0..argc as isize)
+ .map(|i| {
+ let bytes = CStr::from_ptr(*argv.offset(i)).to_bytes().to_vec();
+ OsStringExt::from_vec(bytes)
+ })
+ .collect::<Vec<_>>()
+ };
+ Args { iter: vec.into_iter() }
+ }
+
+    // Since _NSGetArgc and _NSGetArgv aren't mentioned in the iOS docs
+    // and use underscores in their names, they're most probably
+    // considered private and should therefore be avoided.
+    // Here is another way to get the arguments, using the Objective-C
+    // runtime.
+ //
+ // In general it looks like:
+ // res = Vec::new()
+ // let args = [[NSProcessInfo processInfo] arguments]
+ // for i in (0..[args count])
+ // res.push([args objectAtIndex:i])
+ // res
+ #[cfg(any(target_os = "ios", target_os = "watchos"))]
+ pub fn args() -> Args {
+ use crate::ffi::OsString;
+ use crate::mem;
+ use crate::str;
+
+ extern "C" {
+ fn sel_registerName(name: *const libc::c_uchar) -> Sel;
+ fn objc_getClass(class_name: *const libc::c_uchar) -> NsId;
+ }
+
+ #[cfg(target_arch = "aarch64")]
+ extern "C" {
+ fn objc_msgSend(obj: NsId, sel: Sel) -> NsId;
+ #[allow(clashing_extern_declarations)]
+ #[link_name = "objc_msgSend"]
+ fn objc_msgSend_ul(obj: NsId, sel: Sel, i: libc::c_ulong) -> NsId;
+ }
+
+ #[cfg(not(target_arch = "aarch64"))]
+ extern "C" {
+ fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId;
+ #[allow(clashing_extern_declarations)]
+ #[link_name = "objc_msgSend"]
+ fn objc_msgSend_ul(obj: NsId, sel: Sel, ...) -> NsId;
+ }
+
+ type Sel = *const libc::c_void;
+ type NsId = *const libc::c_void;
+
+ let mut res = Vec::new();
+
+ unsafe {
+ let process_info_sel = sel_registerName("processInfo\0".as_ptr());
+ let arguments_sel = sel_registerName("arguments\0".as_ptr());
+ let utf8_sel = sel_registerName("UTF8String\0".as_ptr());
+ let count_sel = sel_registerName("count\0".as_ptr());
+ let object_at_sel = sel_registerName("objectAtIndex:\0".as_ptr());
+
+ let klass = objc_getClass("NSProcessInfo\0".as_ptr());
+ let info = objc_msgSend(klass, process_info_sel);
+ let args = objc_msgSend(info, arguments_sel);
+
+ let cnt: usize = mem::transmute(objc_msgSend(args, count_sel));
+ for i in 0..cnt {
+ let tmp = objc_msgSend_ul(args, object_at_sel, i as libc::c_ulong);
+ let utf_c_str: *const libc::c_char = mem::transmute(objc_msgSend(tmp, utf8_sel));
+ let bytes = CStr::from_ptr(utf_c_str).to_bytes();
+ res.push(OsString::from(str::from_utf8(bytes).unwrap()))
+ }
+ }
+
+ Args { iter: res.into_iter() }
+ }
+}
+
+#[cfg(target_os = "espidf")]
+mod imp {
+ use super::Args;
+
+ #[inline(always)]
+ pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+
+ pub fn args() -> Args {
+ Args { iter: Vec::new().into_iter() }
+ }
+}
diff --git a/library/std/src/sys/unix/cmath.rs b/library/std/src/sys/unix/cmath.rs
new file mode 100644
index 000000000..2bf80d7a4
--- /dev/null
+++ b/library/std/src/sys/unix/cmath.rs
@@ -0,0 +1,33 @@
+#![cfg(not(test))]
+
+// These symbols are all defined by `libm`,
+// or by `compiler-builtins` on unsupported platforms.
+
+extern "C" {
+ pub fn acos(n: f64) -> f64;
+ pub fn acosf(n: f32) -> f32;
+ pub fn asin(n: f64) -> f64;
+ pub fn asinf(n: f32) -> f32;
+ pub fn atan(n: f64) -> f64;
+ pub fn atan2(a: f64, b: f64) -> f64;
+ pub fn atan2f(a: f32, b: f32) -> f32;
+ pub fn atanf(n: f32) -> f32;
+ pub fn cbrt(n: f64) -> f64;
+ pub fn cbrtf(n: f32) -> f32;
+ pub fn cosh(n: f64) -> f64;
+ pub fn coshf(n: f32) -> f32;
+ pub fn expm1(n: f64) -> f64;
+ pub fn expm1f(n: f32) -> f32;
+ pub fn fdim(a: f64, b: f64) -> f64;
+ pub fn fdimf(a: f32, b: f32) -> f32;
+ pub fn hypot(x: f64, y: f64) -> f64;
+ pub fn hypotf(x: f32, y: f32) -> f32;
+ pub fn log1p(n: f64) -> f64;
+ pub fn log1pf(n: f32) -> f32;
+ pub fn sinh(n: f64) -> f64;
+ pub fn sinhf(n: f32) -> f32;
+ pub fn tan(n: f64) -> f64;
+ pub fn tanf(n: f32) -> f32;
+ pub fn tanh(n: f64) -> f64;
+ pub fn tanhf(n: f32) -> f32;
+}
diff --git a/library/std/src/sys/unix/env.rs b/library/std/src/sys/unix/env.rs
new file mode 100644
index 000000000..c9ba661c8
--- /dev/null
+++ b/library/std/src/sys/unix/env.rs
@@ -0,0 +1,219 @@
+#[cfg(target_os = "linux")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "linux";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "macos")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "macos";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".dylib";
+ pub const DLL_EXTENSION: &str = "dylib";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "ios")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "ios";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".dylib";
+ pub const DLL_EXTENSION: &str = "dylib";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "watchos")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "watchos";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".dylib";
+ pub const DLL_EXTENSION: &str = "dylib";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "freebsd")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "freebsd";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "dragonfly")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "dragonfly";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "netbsd")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "netbsd";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "openbsd")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "openbsd";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "android")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "android";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "solaris")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "solaris";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "illumos")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "illumos";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "haiku")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "haiku";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "horizon")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "horizon";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = ".elf";
+ pub const EXE_EXTENSION: &str = "elf";
+}
+
+#[cfg(all(target_os = "emscripten", target_arch = "asmjs"))]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "emscripten";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = ".js";
+ pub const EXE_EXTENSION: &str = "js";
+}
+
+#[cfg(all(target_os = "emscripten", target_arch = "wasm32"))]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "emscripten";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = ".js";
+ pub const EXE_EXTENSION: &str = "js";
+}
+
+#[cfg(target_os = "fuchsia")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "fuchsia";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "l4re")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "l4re";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "redox")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "redox";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "vxworks")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "vxworks";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
+#[cfg(target_os = "espidf")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "espidf";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
new file mode 100644
index 000000000..30812dabb
--- /dev/null
+++ b/library/std/src/sys/unix/fd.rs
@@ -0,0 +1,330 @@
+#![unstable(reason = "not public", issue = "none", feature = "fd")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::cmp;
+use crate::io::{self, IoSlice, IoSliceMut, Read, ReadBuf};
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+use crate::sys::cvt;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "l4re"
+))]
+use libc::off64_t;
+#[cfg(not(any(
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "l4re",
+ target_os = "android"
+)))]
+use libc::off_t as off64_t;
+
+#[derive(Debug)]
+pub struct FileDesc(OwnedFd);
+
+// The maximum read limit on most POSIX-like systems is `SSIZE_MAX`,
+// with the man page quoting that if the count of bytes to read is
+// greater than `SSIZE_MAX` the result is "unspecified".
+//
+// On macOS, however, apparently the 64-bit libc is either buggy or
+// intentionally showing odd behavior by rejecting any read with a size
+// larger than or equal to INT_MAX. To handle both of these the read
+// size is capped on both platforms.
+#[cfg(target_os = "macos")]
+const READ_LIMIT: usize = libc::c_int::MAX as usize - 1;
+#[cfg(not(target_os = "macos"))]
+const READ_LIMIT: usize = libc::ssize_t::MAX as usize;
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "watchos",
+))]
+const fn max_iov() -> usize {
+ libc::IOV_MAX as usize
+}
+
+#[cfg(any(target_os = "android", target_os = "emscripten", target_os = "linux"))]
+const fn max_iov() -> usize {
+ libc::UIO_MAXIOV as usize
+}
+
+#[cfg(not(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "emscripten",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "horizon",
+ target_os = "watchos",
+)))]
+const fn max_iov() -> usize {
+ 16 // The minimum value required by POSIX.
+}
+
+impl FileDesc {
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ libc::read(
+ self.as_raw_fd(),
+ buf.as_mut_ptr() as *mut libc::c_void,
+ cmp::min(buf.len(), READ_LIMIT),
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ #[cfg(not(any(target_os = "espidf", target_os = "horizon")))]
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ libc::readv(
+ self.as_raw_fd(),
+ bufs.as_ptr() as *const libc::iovec,
+ cmp::min(bufs.len(), max_iov()) as libc::c_int,
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ #[cfg(any(target_os = "espidf", target_os = "horizon"))]
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ return crate::io::default_read_vectored(|b| self.read(b), bufs);
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ cfg!(not(any(target_os = "espidf", target_os = "horizon")))
+ }
+
+ pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ let mut me = self;
+ (&mut me).read_to_end(buf)
+ }
+
+ pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
+ #[cfg(not(any(target_os = "linux", target_os = "android")))]
+ use libc::pread as pread64;
+ #[cfg(any(target_os = "linux", target_os = "android"))]
+ use libc::pread64;
+
+ unsafe {
+ cvt(pread64(
+ self.as_raw_fd(),
+ buf.as_mut_ptr() as *mut libc::c_void,
+ cmp::min(buf.len(), READ_LIMIT),
+ offset as off64_t,
+ ))
+ .map(|n| n as usize)
+ }
+ }
+
+ pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ let ret = cvt(unsafe {
+ libc::read(
+ self.as_raw_fd(),
+ buf.unfilled_mut().as_mut_ptr() as *mut libc::c_void,
+ cmp::min(buf.remaining(), READ_LIMIT),
+ )
+ })?;
+
+        // SAFETY: `read` wrote `ret` bytes into the unfilled part of the buffer, so those bytes are now initialized
+ unsafe {
+ buf.assume_init(ret as usize);
+ }
+ buf.add_filled(ret as usize);
+ Ok(())
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ libc::write(
+ self.as_raw_fd(),
+ buf.as_ptr() as *const libc::c_void,
+ cmp::min(buf.len(), READ_LIMIT),
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ #[cfg(not(any(target_os = "espidf", target_os = "horizon")))]
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ libc::writev(
+ self.as_raw_fd(),
+ bufs.as_ptr() as *const libc::iovec,
+ cmp::min(bufs.len(), max_iov()) as libc::c_int,
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ #[cfg(any(target_os = "espidf", target_os = "horizon"))]
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ return crate::io::default_write_vectored(|b| self.write(b), bufs);
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ cfg!(not(any(target_os = "espidf", target_os = "horizon")))
+ }
+
+ pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ #[cfg(not(any(target_os = "linux", target_os = "android")))]
+ use libc::pwrite as pwrite64;
+ #[cfg(any(target_os = "linux", target_os = "android"))]
+ use libc::pwrite64;
+
+ unsafe {
+ cvt(pwrite64(
+ self.as_raw_fd(),
+ buf.as_ptr() as *const libc::c_void,
+ cmp::min(buf.len(), READ_LIMIT),
+ offset as off64_t,
+ ))
+ .map(|n| n as usize)
+ }
+ }
+
+ #[cfg(target_os = "linux")]
+ pub fn get_cloexec(&self) -> io::Result<bool> {
+ unsafe { Ok((cvt(libc::fcntl(self.as_raw_fd(), libc::F_GETFD))? & libc::FD_CLOEXEC) != 0) }
+ }
+
+ #[cfg(not(any(
+ target_env = "newlib",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "l4re",
+ target_os = "linux",
+ target_os = "haiku",
+ target_os = "redox",
+ target_os = "vxworks"
+ )))]
+ pub fn set_cloexec(&self) -> io::Result<()> {
+ unsafe {
+ cvt(libc::ioctl(self.as_raw_fd(), libc::FIOCLEX))?;
+ Ok(())
+ }
+ }
+ #[cfg(any(
+ all(target_env = "newlib", not(any(target_os = "espidf", target_os = "horizon"))),
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "l4re",
+ target_os = "linux",
+ target_os = "haiku",
+ target_os = "redox",
+ target_os = "vxworks"
+ ))]
+ pub fn set_cloexec(&self) -> io::Result<()> {
+ unsafe {
+ let previous = cvt(libc::fcntl(self.as_raw_fd(), libc::F_GETFD))?;
+ let new = previous | libc::FD_CLOEXEC;
+ if new != previous {
+ cvt(libc::fcntl(self.as_raw_fd(), libc::F_SETFD, new))?;
+ }
+ Ok(())
+ }
+ }
+ #[cfg(any(target_os = "espidf", target_os = "horizon"))]
+ pub fn set_cloexec(&self) -> io::Result<()> {
+        // FD_CLOEXEC is not supported on ESP-IDF and Horizon OS, but there's no need for it,
+ // because neither supports spawning processes.
+ Ok(())
+ }
+
+ #[cfg(target_os = "linux")]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ unsafe {
+ let v = nonblocking as libc::c_int;
+ cvt(libc::ioctl(self.as_raw_fd(), libc::FIONBIO, &v))?;
+ Ok(())
+ }
+ }
+
+ #[cfg(not(target_os = "linux"))]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ unsafe {
+ let previous = cvt(libc::fcntl(self.as_raw_fd(), libc::F_GETFL))?;
+ let new = if nonblocking {
+ previous | libc::O_NONBLOCK
+ } else {
+ previous & !libc::O_NONBLOCK
+ };
+ if new != previous {
+ cvt(libc::fcntl(self.as_raw_fd(), libc::F_SETFL, new))?;
+ }
+ Ok(())
+ }
+ }
+
+ #[inline]
+ pub fn duplicate(&self) -> io::Result<FileDesc> {
+ Ok(Self(self.0.try_clone()?))
+ }
+}
+
+impl<'a> Read for &'a FileDesc {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (**self).read(buf)
+ }
+}
+
+impl AsInner<OwnedFd> for FileDesc {
+ fn as_inner(&self) -> &OwnedFd {
+ &self.0
+ }
+}
+
+impl IntoInner<OwnedFd> for FileDesc {
+ fn into_inner(self) -> OwnedFd {
+ self.0
+ }
+}
+
+impl FromInner<OwnedFd> for FileDesc {
+ fn from_inner(owned_fd: OwnedFd) -> Self {
+ Self(owned_fd)
+ }
+}
+
+impl AsFd for FileDesc {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+}
+
+impl AsRawFd for FileDesc {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for FileDesc {
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+}
+
+impl FromRawFd for FileDesc {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ Self(FromRawFd::from_raw_fd(raw_fd))
+ }
+}
diff --git a/library/std/src/sys/unix/fd/tests.rs b/library/std/src/sys/unix/fd/tests.rs
new file mode 100644
index 000000000..5d17e4678
--- /dev/null
+++ b/library/std/src/sys/unix/fd/tests.rs
@@ -0,0 +1,10 @@
+use super::{FileDesc, IoSlice};
+use crate::os::unix::io::FromRawFd;
+use core::mem::ManuallyDrop;
+
+#[test]
+fn limit_vector_count() {
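+    // 1500 buffers exceeds IOV_MAX on common platforms; this succeeds only because
+    // `write_vectored` caps the slice count at `max_iov()` instead of returning an error.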
+ let stdout = ManuallyDrop::new(unsafe { FileDesc::from_raw_fd(1) });
+ let bufs = (0..1500).map(|_| IoSlice::new(&[])).collect::<Vec<_>>();
+ assert!(stdout.write_vectored(&bufs).is_ok());
+}
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
new file mode 100644
index 000000000..b5cc8038c
--- /dev/null
+++ b/library/std/src/sys/unix/fs.rs
@@ -0,0 +1,1878 @@
+use crate::os::unix::prelude::*;
+
+use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::fmt;
+use crate::io::{self, Error, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::mem;
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd};
+use crate::path::{Path, PathBuf};
+use crate::ptr;
+use crate::sync::Arc;
+use crate::sys::fd::FileDesc;
+use crate::sys::time::SystemTime;
+use crate::sys::{cvt, cvt_r};
+use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
+
+#[cfg(any(
+ all(target_os = "linux", target_env = "gnu"),
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+))]
+use crate::sys::weak::syscall;
+#[cfg(any(target_os = "android", target_os = "macos"))]
+use crate::sys::weak::weak;
+
+use libc::{c_int, mode_t};
+
+#[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ all(target_os = "linux", target_env = "gnu")
+))]
+use libc::c_char;
+#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))]
+use libc::dirfd;
+#[cfg(any(target_os = "linux", target_os = "emscripten"))]
+use libc::fstatat64;
+#[cfg(any(
+ target_os = "android",
+ target_os = "solaris",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "illumos"
+))]
+use libc::readdir as readdir64;
+#[cfg(target_os = "linux")]
+use libc::readdir64;
+#[cfg(any(target_os = "emscripten", target_os = "l4re"))]
+use libc::readdir64_r;
+#[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "l4re",
+ target_os = "fuchsia",
+ target_os = "redox"
+)))]
+use libc::readdir_r as readdir64_r;
+#[cfg(target_os = "android")]
+use libc::{
+ dirent as dirent64, fstat as fstat64, fstatat as fstatat64, ftruncate64, lseek64,
+ lstat as lstat64, off64_t, open as open64, stat as stat64,
+};
+#[cfg(not(any(
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "l4re",
+ target_os = "android"
+)))]
+use libc::{
+ dirent as dirent64, fstat as fstat64, ftruncate as ftruncate64, lseek as lseek64,
+ lstat as lstat64, off_t as off64_t, open as open64, stat as stat64,
+};
+#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "l4re"))]
+use libc::{dirent64, fstat64, ftruncate64, lseek64, lstat64, off64_t, open64, stat64};
+
+pub use crate::sys_common::fs::try_exists;
+
+pub struct File(FileDesc);
+
+// FIXME: This should be available on Linux with all `target_env`.
+// But currently only glibc exposes `statx` fn and structs.
+// We don't want to import unverified raw C structs here directly.
+// https://github.com/rust-lang/rust/pull/67774
+macro_rules! cfg_has_statx {
+ ({ $($then_tt:tt)* } else { $($else_tt:tt)* }) => {
+ cfg_if::cfg_if! {
+ if #[cfg(all(target_os = "linux", target_env = "gnu"))] {
+ $($then_tt)*
+ } else {
+ $($else_tt)*
+ }
+ }
+ };
+ ($($block_inner:tt)*) => {
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ {
+ $($block_inner)*
+ }
+ };
+}
+
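+// The first macro arm above expands one of two alternatives depending on whether the
+// target is linux-gnu (used for the item definitions below); the second, bare arm
+// guards a block so that statx-specific code compiles only where `statx` is available.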
+cfg_has_statx! {{
+ #[derive(Clone)]
+ pub struct FileAttr {
+ stat: stat64,
+ statx_extra_fields: Option<StatxExtraFields>,
+ }
+
+ #[derive(Clone)]
+ struct StatxExtraFields {
+ // This is needed to check if btime is supported by the filesystem.
+ stx_mask: u32,
+ stx_btime: libc::statx_timestamp,
+ // With statx, we can overcome 32-bit `time_t` too.
+ #[cfg(target_pointer_width = "32")]
+ stx_atime: libc::statx_timestamp,
+ #[cfg(target_pointer_width = "32")]
+ stx_ctime: libc::statx_timestamp,
+ #[cfg(target_pointer_width = "32")]
+ stx_mtime: libc::statx_timestamp,
+
+ }
+
+    // We prefer `statx` on Linux when available, since it provides the file creation time
+    // as well as 64-bit timestamps of all kinds.
+    // The default `stat64` contains no creation time and may have a 32-bit `time_t`.
+ unsafe fn try_statx(
+ fd: c_int,
+ path: *const c_char,
+ flags: i32,
+ mask: u32,
+ ) -> Option<io::Result<FileAttr>> {
+ use crate::sync::atomic::{AtomicU8, Ordering};
+
+        // Linux kernels prior to 4.11 and glibc versions prior to 2.28 don't support `statx`.
+        // We store the availability in a global to avoid unnecessary syscalls.
+ // 0: Unknown
+ // 1: Not available
+ // 2: Available
+ static STATX_STATE: AtomicU8 = AtomicU8::new(0);
+ syscall! {
+ fn statx(
+ fd: c_int,
+ pathname: *const c_char,
+ flags: c_int,
+ mask: libc::c_uint,
+ statxbuf: *mut libc::statx
+ ) -> c_int
+ }
+
+ match STATX_STATE.load(Ordering::Relaxed) {
+ 0 => {
+                // Calling `statx` with null pointers is a trick to check whether the syscall
+                // is available. According to the manual, it is expected to fail with EFAULT.
+                // We do this mainly for performance, since it is hundreds of times
+                // faster than a normal successful call.
+ let err = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut()))
+ .err()
+ .and_then(|e| e.raw_os_error());
+                // We don't check `err == Some(libc::ENOSYS)` because the syscall may be restricted
+                // and return `EPERM` instead. Listing all possible errors does not seem like a good idea.
+ // See: https://github.com/rust-lang/rust/issues/65662
+ if err != Some(libc::EFAULT) {
+ STATX_STATE.store(1, Ordering::Relaxed);
+ return None;
+ }
+ STATX_STATE.store(2, Ordering::Relaxed);
+ }
+ 1 => return None,
+ _ => {}
+ }
+
+ let mut buf: libc::statx = mem::zeroed();
+ if let Err(err) = cvt(statx(fd, path, flags, mask, &mut buf)) {
+ return Some(Err(err));
+ }
+
+ // We cannot fill `stat64` exhaustively because of private padding fields.
+ let mut stat: stat64 = mem::zeroed();
+ // `c_ulong` on gnu-mips, `dev_t` otherwise
+ stat.st_dev = libc::makedev(buf.stx_dev_major, buf.stx_dev_minor) as _;
+ stat.st_ino = buf.stx_ino as libc::ino64_t;
+ stat.st_nlink = buf.stx_nlink as libc::nlink_t;
+ stat.st_mode = buf.stx_mode as libc::mode_t;
+ stat.st_uid = buf.stx_uid as libc::uid_t;
+ stat.st_gid = buf.stx_gid as libc::gid_t;
+ stat.st_rdev = libc::makedev(buf.stx_rdev_major, buf.stx_rdev_minor) as _;
+ stat.st_size = buf.stx_size as off64_t;
+ stat.st_blksize = buf.stx_blksize as libc::blksize_t;
+ stat.st_blocks = buf.stx_blocks as libc::blkcnt64_t;
+ stat.st_atime = buf.stx_atime.tv_sec as libc::time_t;
+ // `i64` on gnu-x86_64-x32, `c_ulong` otherwise.
+ stat.st_atime_nsec = buf.stx_atime.tv_nsec as _;
+ stat.st_mtime = buf.stx_mtime.tv_sec as libc::time_t;
+ stat.st_mtime_nsec = buf.stx_mtime.tv_nsec as _;
+ stat.st_ctime = buf.stx_ctime.tv_sec as libc::time_t;
+ stat.st_ctime_nsec = buf.stx_ctime.tv_nsec as _;
+
+ let extra = StatxExtraFields {
+ stx_mask: buf.stx_mask,
+ stx_btime: buf.stx_btime,
+ // Store full times to avoid 32-bit `time_t` truncation.
+ #[cfg(target_pointer_width = "32")]
+ stx_atime: buf.stx_atime,
+ #[cfg(target_pointer_width = "32")]
+ stx_ctime: buf.stx_ctime,
+ #[cfg(target_pointer_width = "32")]
+ stx_mtime: buf.stx_mtime,
+ };
+
+ Some(Ok(FileAttr { stat, statx_extra_fields: Some(extra) }))
+ }
+
+} else {
+ #[derive(Clone)]
+ pub struct FileAttr {
+ stat: stat64,
+ }
+}}
+
+// All `DirEntry`s hold a reference to this struct.
+struct InnerReadDir {
+ dirp: Dir,
+ root: PathBuf,
+}
+
+pub struct ReadDir {
+ inner: Arc<InnerReadDir>,
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox",
+ )))]
+ end_of_stream: bool,
+}
+
+struct Dir(*mut libc::DIR);
+
+unsafe impl Send for Dir {}
+unsafe impl Sync for Dir {}
+
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox"
+))]
+pub struct DirEntry {
+ dir: Arc<InnerReadDir>,
+ entry: dirent64_min,
+ // We need to store an owned copy of the entry name on platforms that use
+ // readdir() (not readdir_r()), because a) struct dirent may use a flexible
+ // array to store the name, b) it lives only until the next readdir() call.
+ name: CString,
+}
+
+// Define a minimal subset of fields we need from `dirent64`, especially since
+// we're not using the immediate `d_name` on these targets. Keeping this as an
+// `entry` field in `DirEntry` helps reduce the `cfg` boilerplate elsewhere.
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox"
+))]
+struct dirent64_min {
+ d_ino: u64,
+ #[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
+ d_type: u8,
+}
+
+#[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox"
+)))]
+pub struct DirEntry {
+ dir: Arc<InnerReadDir>,
+ // The full entry includes a fixed-length `d_name`.
+ entry: dirent64,
+}
+
+#[derive(Clone, Debug)]
+pub struct OpenOptions {
+ // generic
+ read: bool,
+ write: bool,
+ append: bool,
+ truncate: bool,
+ create: bool,
+ create_new: bool,
+ // system-specific
+ custom_flags: i32,
+ mode: mode_t,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct FilePermissions {
+ mode: mode_t,
+}
+
+#[derive(Copy, Clone)]
+pub struct FileTimes([libc::timespec; 2]);
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct FileType {
+ mode: mode_t,
+}
+
+#[derive(Debug)]
+pub struct DirBuilder {
+ mode: mode_t,
+}
+
+cfg_has_statx! {{
+ impl FileAttr {
+ fn from_stat64(stat: stat64) -> Self {
+ Self { stat, statx_extra_fields: None }
+ }
+
+ #[cfg(target_pointer_width = "32")]
+ pub fn stx_mtime(&self) -> Option<&libc::statx_timestamp> {
+ if let Some(ext) = &self.statx_extra_fields {
+ if (ext.stx_mask & libc::STATX_MTIME) != 0 {
+ return Some(&ext.stx_mtime);
+ }
+ }
+ None
+ }
+
+ #[cfg(target_pointer_width = "32")]
+ pub fn stx_atime(&self) -> Option<&libc::statx_timestamp> {
+ if let Some(ext) = &self.statx_extra_fields {
+ if (ext.stx_mask & libc::STATX_ATIME) != 0 {
+ return Some(&ext.stx_atime);
+ }
+ }
+ None
+ }
+
+ #[cfg(target_pointer_width = "32")]
+ pub fn stx_ctime(&self) -> Option<&libc::statx_timestamp> {
+ if let Some(ext) = &self.statx_extra_fields {
+ if (ext.stx_mask & libc::STATX_CTIME) != 0 {
+ return Some(&ext.stx_ctime);
+ }
+ }
+ None
+ }
+ }
+} else {
+ impl FileAttr {
+ fn from_stat64(stat: stat64) -> Self {
+ Self { stat }
+ }
+ }
+}}
+
+impl FileAttr {
+ pub fn size(&self) -> u64 {
+ self.stat.st_size as u64
+ }
+ pub fn perm(&self) -> FilePermissions {
+ FilePermissions { mode: (self.stat.st_mode as mode_t) }
+ }
+
+ pub fn file_type(&self) -> FileType {
+ FileType { mode: self.stat.st_mode as mode_t }
+ }
+}
+
+#[cfg(target_os = "netbsd")]
+impl FileAttr {
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_mtime as i64, self.stat.st_mtimensec as i64))
+ }
+
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_atime as i64, self.stat.st_atimensec as i64))
+ }
+
+ pub fn created(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_birthtime as i64, self.stat.st_birthtimensec as i64))
+ }
+}
+
+#[cfg(not(target_os = "netbsd"))]
+impl FileAttr {
+ #[cfg(not(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon")))]
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ #[cfg(target_pointer_width = "32")]
+ cfg_has_statx! {
+ if let Some(mtime) = self.stx_mtime() {
+ return Ok(SystemTime::new(mtime.tv_sec, mtime.tv_nsec as i64));
+ }
+ }
+
+ Ok(SystemTime::new(self.stat.st_mtime as i64, self.stat.st_mtime_nsec as i64))
+ }
+
+ #[cfg(any(target_os = "vxworks", target_os = "espidf"))]
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_mtime as i64, 0))
+ }
+
+ #[cfg(target_os = "horizon")]
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from(self.stat.st_mtim))
+ }
+
+ #[cfg(not(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon")))]
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ #[cfg(target_pointer_width = "32")]
+ cfg_has_statx! {
+ if let Some(atime) = self.stx_atime() {
+ return Ok(SystemTime::new(atime.tv_sec, atime.tv_nsec as i64));
+ }
+ }
+
+ Ok(SystemTime::new(self.stat.st_atime as i64, self.stat.st_atime_nsec as i64))
+ }
+
+ #[cfg(any(target_os = "vxworks", target_os = "espidf"))]
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_atime as i64, 0))
+ }
+
+ #[cfg(target_os = "horizon")]
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from(self.stat.st_atim))
+ }
+
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ ))]
+ pub fn created(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::new(self.stat.st_birthtime as i64, self.stat.st_birthtime_nsec as i64))
+ }
+
+ #[cfg(not(any(
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ )))]
+ pub fn created(&self) -> io::Result<SystemTime> {
+ cfg_has_statx! {
+ if let Some(ext) = &self.statx_extra_fields {
+ return if (ext.stx_mask & libc::STATX_BTIME) != 0 {
+ Ok(SystemTime::new(ext.stx_btime.tv_sec, ext.stx_btime.tv_nsec as i64))
+ } else {
+ Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "creation time is not available for the filesystem",
+ ))
+ };
+ }
+ }
+
+ Err(io::const_io_error!(
+ io::ErrorKind::Unsupported,
+ "creation time is not available on this platform \
+ currently",
+ ))
+ }
+}
+
+impl AsInner<stat64> for FileAttr {
+ fn as_inner(&self) -> &stat64 {
+ &self.stat
+ }
+}
+
+impl FilePermissions {
+ pub fn readonly(&self) -> bool {
+ // check if any class (owner, group, others) has write permission
+ self.mode & 0o222 == 0
+ }
+
+ pub fn set_readonly(&mut self, readonly: bool) {
+ if readonly {
+ // remove write permission for all classes; equivalent to `chmod a-w <file>`
+ self.mode &= !0o222;
+ } else {
+ // add write permission for all classes; equivalent to `chmod a+w <file>`
+ self.mode |= 0o222;
+ }
+ }
+ pub fn mode(&self) -> u32 {
+ self.mode as u32
+ }
+}
+
+impl FileTimes {
+ pub fn set_accessed(&mut self, t: SystemTime) {
+ self.0[0] = t.t.to_timespec().expect("Invalid system time");
+ }
+
+ pub fn set_modified(&mut self, t: SystemTime) {
+ self.0[1] = t.t.to_timespec().expect("Invalid system time");
+ }
+}
+
+struct TimespecDebugAdapter<'a>(&'a libc::timespec);
+
+impl fmt::Debug for TimespecDebugAdapter<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("timespec")
+ .field("tv_sec", &self.0.tv_sec)
+ .field("tv_nsec", &self.0.tv_nsec)
+ .finish()
+ }
+}
+
+impl fmt::Debug for FileTimes {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FileTimes")
+ .field("accessed", &TimespecDebugAdapter(&self.0[0]))
+ .field("modified", &TimespecDebugAdapter(&self.0[1]))
+ .finish()
+ }
+}
+
+impl Default for FileTimes {
+ fn default() -> Self {
+ // Redox doesn't appear to support `UTIME_OMIT`, so we stub it out here, and always return
+ // an error in `set_times`.
+ // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore
+ // the same as for Redox.
+ #[cfg(any(target_os = "redox", target_os = "espidf"))]
+ let omit = libc::timespec { tv_sec: 0, tv_nsec: 0 };
+ #[cfg(not(any(target_os = "redox", target_os = "espidf")))]
+ let omit = libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ };
+ Self([omit; 2])
+ }
+}
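+
+// Illustrative note (not part of the original change): on platforms with `UTIME_OMIT`, a
+// default-constructed FileTimes therefore asks futimens() to leave both timestamps unchanged;
+// only the slots explicitly overwritten via set_accessed()/set_modified() are updated.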
+
+impl FileType {
+ pub fn is_dir(&self) -> bool {
+ self.is(libc::S_IFDIR)
+ }
+ pub fn is_file(&self) -> bool {
+ self.is(libc::S_IFREG)
+ }
+ pub fn is_symlink(&self) -> bool {
+ self.is(libc::S_IFLNK)
+ }
+
+ pub fn is(&self, mode: mode_t) -> bool {
+ self.mode & libc::S_IFMT == mode
+ }
+}
+
+impl FromInner<u32> for FilePermissions {
+ fn from_inner(mode: u32) -> FilePermissions {
+ FilePermissions { mode: mode as mode_t }
+ }
+}
+
+impl fmt::Debug for ReadDir {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This will only be called from std::fs::ReadDir, which will add a "ReadDir()" frame.
+ // Thus the result will be e.g. 'ReadDir("/home")'
+ fmt::Debug::fmt(&*self.inner.root, f)
+ }
+}
+
+impl Iterator for ReadDir {
+ type Item = io::Result<DirEntry>;
+
+ #[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "illumos"
+ ))]
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ unsafe {
+ loop {
+ // As of POSIX.1-2017, readdir() is not required to be thread safe; only
+ // readdir_r() is. However, readdir_r() cannot correctly handle platforms
+ // with unlimited or variable NAME_MAX. Many modern platforms guarantee
+ // thread safety for readdir() as long as an individual DIR* is not accessed
+ // concurrently, which is sufficient for Rust.
+ super::os::set_errno(0);
+ let entry_ptr = readdir64(self.inner.dirp.0);
+ if entry_ptr.is_null() {
+ // null can mean either the end is reached or an error occurred.
+ // So we had to clear errno beforehand to check for an error now.
+ return match super::os::errno() {
+ 0 => None,
+ e => Some(Err(Error::from_raw_os_error(e))),
+ };
+ }
+
+ // Only d_reclen bytes of *entry_ptr are valid, so we can't just copy the
+ // whole thing (#93384). Instead, copy everything except the name.
+ let mut copy: dirent64 = mem::zeroed();
+ // Can't dereference entry_ptr, so use the local entry to get
+ // offsetof(struct dirent, d_name)
+ let copy_bytes = &mut copy as *mut _ as *mut u8;
+ let copy_name = &mut copy.d_name as *mut _ as *mut u8;
+ let name_offset = copy_name.offset_from(copy_bytes) as usize;
+ let entry_bytes = entry_ptr as *const u8;
+ let entry_name = entry_bytes.add(name_offset);
+ ptr::copy_nonoverlapping(entry_bytes, copy_bytes, name_offset);
+
+ let entry = dirent64_min {
+ d_ino: copy.d_ino as u64,
+ #[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
+ d_type: copy.d_type as u8,
+ };
+
+ let ret = DirEntry {
+ entry,
+ // d_name is guaranteed to be null-terminated.
+ name: CStr::from_ptr(entry_name as *const _).to_owned(),
+ dir: Arc::clone(&self.inner),
+ };
+ if ret.name_bytes() != b"." && ret.name_bytes() != b".." {
+ return Some(Ok(ret));
+ }
+ }
+ }
+ }
+
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "illumos"
+ )))]
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ if self.end_of_stream {
+ return None;
+ }
+
+ unsafe {
+ let mut ret = DirEntry { entry: mem::zeroed(), dir: Arc::clone(&self.inner) };
+ let mut entry_ptr = ptr::null_mut();
+ loop {
+ let err = readdir64_r(self.inner.dirp.0, &mut ret.entry, &mut entry_ptr);
+ if err != 0 {
+ if entry_ptr.is_null() {
+ // We encountered an error (which will be returned in this iteration), but
+ // we also reached the end of the directory stream. The `end_of_stream`
+ // flag is enabled to make sure that we return `None` in the next iteration
+ // (instead of looping forever)
+ self.end_of_stream = true;
+ }
+ return Some(Err(Error::from_raw_os_error(err)));
+ }
+ if entry_ptr.is_null() {
+ return None;
+ }
+ if ret.name_bytes() != b"." && ret.name_bytes() != b".." {
+ return Some(Ok(ret));
+ }
+ }
+ }
+ }
+}
+
+impl Drop for Dir {
+ fn drop(&mut self) {
+ let r = unsafe { libc::closedir(self.0) };
+ debug_assert_eq!(r, 0);
+ }
+}
+
+impl DirEntry {
+ pub fn path(&self) -> PathBuf {
+ self.dir.root.join(self.file_name_os_str())
+ }
+
+ pub fn file_name(&self) -> OsString {
+ self.file_name_os_str().to_os_string()
+ }
+
+ #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))]
+ pub fn metadata(&self) -> io::Result<FileAttr> {
+ let fd = cvt(unsafe { dirfd(self.dir.dirp.0) })?;
+ let name = self.name_cstr().as_ptr();
+
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ fd,
+ name,
+ libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
+ }
+
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { fstatat64(fd, name, &mut stat, libc::AT_SYMLINK_NOFOLLOW) })?;
+ Ok(FileAttr::from_stat64(stat))
+ }
+
+ #[cfg(not(any(target_os = "linux", target_os = "emscripten", target_os = "android")))]
+ pub fn metadata(&self) -> io::Result<FileAttr> {
+ lstat(&self.path())
+ }
+
+ #[cfg(any(
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "haiku",
+ target_os = "vxworks"
+ ))]
+ pub fn file_type(&self) -> io::Result<FileType> {
+ self.metadata().map(|m| m.file_type())
+ }
+
+ #[cfg(not(any(
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "haiku",
+ target_os = "vxworks"
+ )))]
+ pub fn file_type(&self) -> io::Result<FileType> {
+ match self.entry.d_type {
+ libc::DT_CHR => Ok(FileType { mode: libc::S_IFCHR }),
+ libc::DT_FIFO => Ok(FileType { mode: libc::S_IFIFO }),
+ libc::DT_LNK => Ok(FileType { mode: libc::S_IFLNK }),
+ libc::DT_REG => Ok(FileType { mode: libc::S_IFREG }),
+ libc::DT_SOCK => Ok(FileType { mode: libc::S_IFSOCK }),
+ libc::DT_DIR => Ok(FileType { mode: libc::S_IFDIR }),
+ libc::DT_BLK => Ok(FileType { mode: libc::S_IFBLK }),
+ _ => self.metadata().map(|m| m.file_type()),
+ }
+ }
+
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "android",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "haiku",
+ target_os = "l4re",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "vxworks",
+ target_os = "espidf",
+ target_os = "horizon"
+ ))]
+ pub fn ino(&self) -> u64 {
+ self.entry.d_ino as u64
+ }
+
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "netbsd",
+ target_os = "dragonfly"
+ ))]
+ pub fn ino(&self) -> u64 {
+ self.entry.d_fileno as u64
+ }
+
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "freebsd",
+ target_os = "dragonfly"
+ ))]
+ fn name_bytes(&self) -> &[u8] {
+ use crate::slice;
+ unsafe {
+ slice::from_raw_parts(
+ self.entry.d_name.as_ptr() as *const u8,
+ self.entry.d_namlen as usize,
+ )
+ }
+ }
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "freebsd",
+ target_os = "dragonfly"
+ )))]
+ fn name_bytes(&self) -> &[u8] {
+ self.name_cstr().to_bytes()
+ }
+
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox"
+ )))]
+ fn name_cstr(&self) -> &CStr {
+ unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) }
+ }
+ #[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox"
+ ))]
+ fn name_cstr(&self) -> &CStr {
+ &self.name
+ }
+
+ pub fn file_name_os_str(&self) -> &OsStr {
+ OsStr::from_bytes(self.name_bytes())
+ }
+}
+
+impl OpenOptions {
+ pub fn new() -> OpenOptions {
+ OpenOptions {
+ // generic
+ read: false,
+ write: false,
+ append: false,
+ truncate: false,
+ create: false,
+ create_new: false,
+ // system-specific
+ custom_flags: 0,
+ mode: 0o666,
+ }
+ }
+
+ pub fn read(&mut self, read: bool) {
+ self.read = read;
+ }
+ pub fn write(&mut self, write: bool) {
+ self.write = write;
+ }
+ pub fn append(&mut self, append: bool) {
+ self.append = append;
+ }
+ pub fn truncate(&mut self, truncate: bool) {
+ self.truncate = truncate;
+ }
+ pub fn create(&mut self, create: bool) {
+ self.create = create;
+ }
+ pub fn create_new(&mut self, create_new: bool) {
+ self.create_new = create_new;
+ }
+
+ pub fn custom_flags(&mut self, flags: i32) {
+ self.custom_flags = flags;
+ }
+ pub fn mode(&mut self, mode: u32) {
+ self.mode = mode as mode_t;
+ }
+
+ fn get_access_mode(&self) -> io::Result<c_int> {
+ match (self.read, self.write, self.append) {
+ (true, false, false) => Ok(libc::O_RDONLY),
+ (false, true, false) => Ok(libc::O_WRONLY),
+ (true, true, false) => Ok(libc::O_RDWR),
+ (false, _, true) => Ok(libc::O_WRONLY | libc::O_APPEND),
+ (true, _, true) => Ok(libc::O_RDWR | libc::O_APPEND),
+ (false, false, false) => Err(Error::from_raw_os_error(libc::EINVAL)),
+ }
+ }
+
+ fn get_creation_mode(&self) -> io::Result<c_int> {
+ match (self.write, self.append) {
+ (true, false) => {}
+ (false, false) => {
+ if self.truncate || self.create || self.create_new {
+ return Err(Error::from_raw_os_error(libc::EINVAL));
+ }
+ }
+ (_, true) => {
+ if self.truncate && !self.create_new {
+ return Err(Error::from_raw_os_error(libc::EINVAL));
+ }
+ }
+ }
+
+ Ok(match (self.create, self.truncate, self.create_new) {
+ (false, false, false) => 0,
+ (true, false, false) => libc::O_CREAT,
+ (false, true, false) => libc::O_TRUNC,
+ (true, true, false) => libc::O_CREAT | libc::O_TRUNC,
+ (_, _, true) => libc::O_CREAT | libc::O_EXCL,
+ })
+ }
+}
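+
+// Illustrative sketch (not part of the original change): how a common builder configuration
+// maps onto open(2) flags through the two helpers above.
+//
+//     let mut opts = OpenOptions::new();
+//     opts.write(true);
+//     opts.create(true);
+//     opts.truncate(true);
+//     // get_access_mode()   -> Ok(O_WRONLY)
+//     // get_creation_mode() -> Ok(O_CREAT | O_TRUNC)
+//     // open_c() then ORs in O_CLOEXEC and any custom_flags before calling open64().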
+
+impl File {
+ pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
+ let path = cstr(path)?;
+ File::open_c(&path, opts)
+ }
+
+ pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
+ let flags = libc::O_CLOEXEC
+ | opts.get_access_mode()?
+ | opts.get_creation_mode()?
+ | (opts.custom_flags as c_int & !libc::O_ACCMODE);
+ // The third argument of `open64` is documented to have type `mode_t`. On
+ // some platforms (like macOS, where `open64` is actually `open`), `mode_t` is `u16`.
+ // However, since this is a variadic function, C integer promotion rules mean that on
+ // the ABI level, this still gets passed as `c_int` (aka `i32` on Unix platforms).
+ let fd = cvt_r(|| unsafe { open64(path.as_ptr(), flags, opts.mode as c_int) })?;
+ Ok(File(unsafe { FileDesc::from_raw_fd(fd) }))
+ }
+
+ pub fn file_attr(&self) -> io::Result<FileAttr> {
+ let fd = self.as_raw_fd();
+
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ fd,
+ b"\0" as *const _ as *const c_char,
+ libc::AT_EMPTY_PATH | libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
+ }
+
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { fstat64(fd, &mut stat) })?;
+ Ok(FileAttr::from_stat64(stat))
+ }
+
+ pub fn fsync(&self) -> io::Result<()> {
+ cvt_r(|| unsafe { os_fsync(self.as_raw_fd()) })?;
+ return Ok(());
+
+ #[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+ unsafe fn os_fsync(fd: c_int) -> c_int {
+ libc::fcntl(fd, libc::F_FULLFSYNC)
+ }
+ #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "watchos")))]
+ unsafe fn os_fsync(fd: c_int) -> c_int {
+ libc::fsync(fd)
+ }
+ }
+
+ pub fn datasync(&self) -> io::Result<()> {
+ cvt_r(|| unsafe { os_datasync(self.as_raw_fd()) })?;
+ return Ok(());
+
+ #[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+ unsafe fn os_datasync(fd: c_int) -> c_int {
+ libc::fcntl(fd, libc::F_FULLFSYNC)
+ }
+ #[cfg(any(
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "android",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ unsafe fn os_datasync(fd: c_int) -> c_int {
+ libc::fdatasync(fd)
+ }
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "freebsd",
+ target_os = "ios",
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "watchos",
+ )))]
+ unsafe fn os_datasync(fd: c_int) -> c_int {
+ libc::fsync(fd)
+ }
+ }
+
+ pub fn truncate(&self, size: u64) -> io::Result<()> {
+ let size: off64_t =
+ size.try_into().map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
+ cvt_r(|| unsafe { ftruncate64(self.as_raw_fd(), size) }).map(drop)
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+
+ pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
+ self.0.read_at(buf, offset)
+ }
+
+ pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ self.0.read_buf(buf)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ self.0.write_at(buf, offset)
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ Ok(())
+ }
+
+ pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
+ let (whence, pos) = match pos {
+ // Casting to `i64` is fine; too-large values will end up
+ // negative, which will cause an error in `lseek64`.
+ SeekFrom::Start(off) => (libc::SEEK_SET, off as i64),
+ SeekFrom::End(off) => (libc::SEEK_END, off),
+ SeekFrom::Current(off) => (libc::SEEK_CUR, off),
+ };
+ let n = cvt(unsafe { lseek64(self.as_raw_fd(), pos as off64_t, whence) })?;
+ Ok(n as u64)
+ }
+
+ pub fn duplicate(&self) -> io::Result<File> {
+ self.0.duplicate().map(File)
+ }
+
+ pub fn set_permissions(&self, perm: FilePermissions) -> io::Result<()> {
+ cvt_r(|| unsafe { libc::fchmod(self.as_raw_fd(), perm.mode) })?;
+ Ok(())
+ }
+
+ pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ cfg_if::cfg_if! {
+ if #[cfg(any(target_os = "redox", target_os = "espidf"))] {
+ // Redox doesn't appear to support `UTIME_OMIT`.
+ // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore
+ // the same as for Redox.
+ drop(times);
+ Err(io::const_io_error!(
+ io::ErrorKind::Unsupported,
+ "setting file times not supported",
+ ))
+ } else if #[cfg(any(target_os = "android", target_os = "macos"))] {
+ // futimens requires macOS 10.13, and Android API level 19
+ cvt(unsafe {
+ weak!(fn futimens(c_int, *const libc::timespec) -> c_int);
+ match futimens.get() {
+ Some(futimens) => futimens(self.as_raw_fd(), times.0.as_ptr()),
+ #[cfg(target_os = "macos")]
+ None => {
+ fn ts_to_tv(ts: &libc::timespec) -> libc::timeval {
+ libc::timeval {
+ tv_sec: ts.tv_sec,
+ tv_usec: (ts.tv_nsec / 1000) as _
+ }
+ }
+ let timevals = [ts_to_tv(&times.0[0]), ts_to_tv(&times.0[1])];
+ libc::futimes(self.as_raw_fd(), timevals.as_ptr())
+ }
+ // futimes requires even newer Android.
+ #[cfg(target_os = "android")]
+ None => return Err(io::const_io_error!(
+ io::ErrorKind::Unsupported,
+ "setting file times requires Android API level >= 19",
+ )),
+ }
+ })?;
+ Ok(())
+ } else {
+ cvt(unsafe { libc::futimens(self.as_raw_fd(), times.0.as_ptr()) })?;
+ Ok(())
+ }
+ }
+ }
+}
+
+impl DirBuilder {
+ pub fn new() -> DirBuilder {
+ DirBuilder { mode: 0o777 }
+ }
+
+ pub fn mkdir(&self, p: &Path) -> io::Result<()> {
+ let p = cstr(p)?;
+ cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) })?;
+ Ok(())
+ }
+
+ pub fn set_mode(&mut self, mode: u32) {
+ self.mode = mode as mode_t;
+ }
+}
+
+fn cstr(path: &Path) -> io::Result<CString> {
+ Ok(CString::new(path.as_os_str().as_bytes())?)
+}
+
+impl AsInner<FileDesc> for File {
+ fn as_inner(&self) -> &FileDesc {
+ &self.0
+ }
+}
+
+impl AsInnerMut<FileDesc> for File {
+ fn as_inner_mut(&mut self) -> &mut FileDesc {
+ &mut self.0
+ }
+}
+
+impl IntoInner<FileDesc> for File {
+ fn into_inner(self) -> FileDesc {
+ self.0
+ }
+}
+
+impl FromInner<FileDesc> for File {
+ fn from_inner(file_desc: FileDesc) -> Self {
+ Self(file_desc)
+ }
+}
+
+impl AsFd for File {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+}
+
+impl AsRawFd for File {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for File {
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+}
+
+impl FromRawFd for File {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ Self(FromRawFd::from_raw_fd(raw_fd))
+ }
+}
+
+impl fmt::Debug for File {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ #[cfg(any(target_os = "linux", target_os = "netbsd"))]
+ fn get_path(fd: c_int) -> Option<PathBuf> {
+ let mut p = PathBuf::from("/proc/self/fd");
+ p.push(&fd.to_string());
+ readlink(&p).ok()
+ }
+
+ #[cfg(target_os = "macos")]
+ fn get_path(fd: c_int) -> Option<PathBuf> {
+ // FIXME: The use of PATH_MAX is generally not encouraged, but it
+ // is inevitable in this case because macOS defines `fcntl` with
+ // `F_GETPATH` in terms of `MAXPATHLEN`, and there are no
+ // alternatives. If a better method is invented, it should be used
+ // instead.
+ let mut buf = vec![0; libc::PATH_MAX as usize];
+ let n = unsafe { libc::fcntl(fd, libc::F_GETPATH, buf.as_ptr()) };
+ if n == -1 {
+ return None;
+ }
+ let l = buf.iter().position(|&c| c == 0).unwrap();
+ buf.truncate(l as usize);
+ buf.shrink_to_fit();
+ Some(PathBuf::from(OsString::from_vec(buf)))
+ }
+
+ #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))]
+ fn get_path(fd: c_int) -> Option<PathBuf> {
+ let info = Box::<libc::kinfo_file>::new_zeroed();
+ let mut info = unsafe { info.assume_init() };
+ info.kf_structsize = mem::size_of::<libc::kinfo_file>() as libc::c_int;
+ let n = unsafe { libc::fcntl(fd, libc::F_KINFO, &mut *info) };
+ if n == -1 {
+ return None;
+ }
+ let buf = unsafe { CStr::from_ptr(info.kf_path.as_mut_ptr()).to_bytes().to_vec() };
+ Some(PathBuf::from(OsString::from_vec(buf)))
+ }
+
+ #[cfg(target_os = "vxworks")]
+ fn get_path(fd: c_int) -> Option<PathBuf> {
+ let mut buf = vec![0; libc::PATH_MAX as usize];
+ let n = unsafe { libc::ioctl(fd, libc::FIOGETNAME, buf.as_ptr()) };
+ if n == -1 {
+ return None;
+ }
+ let l = buf.iter().position(|&c| c == 0).unwrap();
+ buf.truncate(l as usize);
+ Some(PathBuf::from(OsString::from_vec(buf)))
+ }
+
+ #[cfg(not(any(
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "vxworks",
+ all(target_os = "freebsd", target_arch = "x86_64"),
+ target_os = "netbsd"
+ )))]
+ fn get_path(_fd: c_int) -> Option<PathBuf> {
+ // FIXME(#24570): implement this for other Unix platforms
+ None
+ }
+
+ #[cfg(any(target_os = "linux", target_os = "macos", target_os = "vxworks"))]
+ fn get_mode(fd: c_int) -> Option<(bool, bool)> {
+ let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
+ if mode == -1 {
+ return None;
+ }
+ match mode & libc::O_ACCMODE {
+ libc::O_RDONLY => Some((true, false)),
+ libc::O_RDWR => Some((true, true)),
+ libc::O_WRONLY => Some((false, true)),
+ _ => None,
+ }
+ }
+
+ #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "vxworks")))]
+ fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
+ // FIXME(#24570): implement this for other Unix platforms
+ None
+ }
+
+ let fd = self.as_raw_fd();
+ let mut b = f.debug_struct("File");
+ b.field("fd", &fd);
+ if let Some(path) = get_path(fd) {
+ b.field("path", &path);
+ }
+ if let Some((read, write)) = get_mode(fd) {
+ b.field("read", &read).field("write", &write);
+ }
+ b.finish()
+ }
+}
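+
+// Illustrative note (not part of the original change): on a platform where both helpers
+// succeed, the output looks roughly like
+//
+//     File { fd: 3, path: "/etc/hostname", read: true, write: false }
+//
+// and degrades to just `File { fd: 3 }` where neither the path nor the mode can be queried.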
+
+pub fn readdir(p: &Path) -> io::Result<ReadDir> {
+ let root = p.to_path_buf();
+ let p = cstr(p)?;
+ unsafe {
+ let ptr = libc::opendir(p.as_ptr());
+ if ptr.is_null() {
+ Err(Error::last_os_error())
+ } else {
+ let inner = InnerReadDir { dirp: Dir(ptr), root };
+ Ok(ReadDir {
+ inner: Arc::new(inner),
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox",
+ )))]
+ end_of_stream: false,
+ })
+ }
+ }
+}
+
+pub fn unlink(p: &Path) -> io::Result<()> {
+ let p = cstr(p)?;
+ cvt(unsafe { libc::unlink(p.as_ptr()) })?;
+ Ok(())
+}
+
+pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
+ let old = cstr(old)?;
+ let new = cstr(new)?;
+ cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })?;
+ Ok(())
+}
+
+pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
+ let p = cstr(p)?;
+ cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) })?;
+ Ok(())
+}
+
+pub fn rmdir(p: &Path) -> io::Result<()> {
+ let p = cstr(p)?;
+ cvt(unsafe { libc::rmdir(p.as_ptr()) })?;
+ Ok(())
+}
+
+pub fn readlink(p: &Path) -> io::Result<PathBuf> {
+ let c_path = cstr(p)?;
+ let p = c_path.as_ptr();
+
+ let mut buf = Vec::with_capacity(256);
+
+ loop {
+ let buf_read =
+ cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })? as usize;
+
+ unsafe {
+ buf.set_len(buf_read);
+ }
+
+ if buf_read != buf.capacity() {
+ buf.shrink_to_fit();
+
+ return Ok(PathBuf::from(OsString::from_vec(buf)));
+ }
+
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity. The length is guaranteed to be
+ // the same as the capacity due to the if statement above.
+ buf.reserve(1);
+ }
+}
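+
+// Illustrative note (not part of the original change): the loop above only retries when
+// readlink() filled the buffer completely. For a 300-byte link target, the first call fills
+// all 256 bytes, `reserve(1)` then grows the buffer (Vec grows geometrically, in practice to
+// 512), and the second call returns 300, which is less than the new capacity, ending the loop.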
+
+pub fn symlink(original: &Path, link: &Path) -> io::Result<()> {
+ let original = cstr(original)?;
+ let link = cstr(link)?;
+ cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) })?;
+ Ok(())
+}
+
+pub fn link(original: &Path, link: &Path) -> io::Result<()> {
+ let original = cstr(original)?;
+ let link = cstr(link)?;
+ cfg_if::cfg_if! {
+ if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon"))] {
+ // VxWorks, Redox, ESP-IDF and Horizon lack `linkat`, so use `link` instead. POSIX leaves
+ // it implementation-defined whether `link` follows symlinks, so rely on the
+ // `symlink_hard_link` test in library/std/src/fs/tests.rs to check the behavior.
+ // Android has `linkat` on newer versions, but we happen to know `link`
+ // always has the correct behavior, so it's here as well.
+ cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
+ } else if #[cfg(target_os = "macos")] {
+ // On MacOS, older versions (<=10.9) lack support for linkat while newer
+ // versions have it. We want to use linkat if it is available, so we use weak!
+ // to check. `linkat` is preferable to `link` because it gives us a flag to
+ // specify how symlinks should be handled. We pass 0 as the flags argument,
+ // meaning it shouldn't follow symlinks.
+ weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int);
+
+ if let Some(f) = linkat.get() {
+ cvt(unsafe { f(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
+ } else {
+ cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
+ };
+ } else {
+ // Where we can, use `linkat` instead of `link`; see the comment above
+ // this one for details on why.
+ cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
+ }
+ }
+ Ok(())
+}
+
+pub fn stat(p: &Path) -> io::Result<FileAttr> {
+ let p = cstr(p)?;
+
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ libc::AT_FDCWD,
+ p.as_ptr(),
+ libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
+ }
+
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?;
+ Ok(FileAttr::from_stat64(stat))
+}
+
+pub fn lstat(p: &Path) -> io::Result<FileAttr> {
+ let p = cstr(p)?;
+
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ libc::AT_FDCWD,
+ p.as_ptr(),
+ libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
+ }
+
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?;
+ Ok(FileAttr::from_stat64(stat))
+}
+
+pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
+ let path = CString::new(p.as_os_str().as_bytes())?;
+ let buf;
+ unsafe {
+ let r = libc::realpath(path.as_ptr(), ptr::null_mut());
+ if r.is_null() {
+ return Err(io::Error::last_os_error());
+ }
+ buf = CStr::from_ptr(r).to_bytes().to_vec();
+ libc::free(r as *mut _);
+ }
+ Ok(PathBuf::from(OsString::from_vec(buf)))
+}
+
+fn open_from(from: &Path) -> io::Result<(crate::fs::File, crate::fs::Metadata)> {
+ use crate::fs::File;
+ use crate::sys_common::fs::NOT_FILE_ERROR;
+
+ let reader = File::open(from)?;
+ let metadata = reader.metadata()?;
+ if !metadata.is_file() {
+ return Err(NOT_FILE_ERROR);
+ }
+ Ok((reader, metadata))
+}
+
+#[cfg(target_os = "espidf")]
+fn open_to_and_set_permissions(
+ to: &Path,
+ reader_metadata: crate::fs::Metadata,
+) -> io::Result<(crate::fs::File, crate::fs::Metadata)> {
+ use crate::fs::OpenOptions;
+ let writer = OpenOptions::new().open(to)?;
+ let writer_metadata = writer.metadata()?;
+ Ok((writer, writer_metadata))
+}
+
+#[cfg(not(target_os = "espidf"))]
+fn open_to_and_set_permissions(
+ to: &Path,
+ reader_metadata: crate::fs::Metadata,
+) -> io::Result<(crate::fs::File, crate::fs::Metadata)> {
+ use crate::fs::OpenOptions;
+ use crate::os::unix::fs::{OpenOptionsExt, PermissionsExt};
+
+ let perm = reader_metadata.permissions();
+ let writer = OpenOptions::new()
+ // create the file with the correct mode right away
+ .mode(perm.mode())
+ .write(true)
+ .create(true)
+ .truncate(true)
+ .open(to)?;
+ let writer_metadata = writer.metadata()?;
+ if writer_metadata.is_file() {
+ // Set the correct file permissions, in case the file already existed.
+ // Don't set the permissions on already existing non-files like
+ // pipes/FIFOs or device nodes.
+ writer.set_permissions(perm)?;
+ }
+ Ok((writer, writer_metadata))
+}
+
+#[cfg(not(any(
+ target_os = "linux",
+ target_os = "android",
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+)))]
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ let (mut reader, reader_metadata) = open_from(from)?;
+ let (mut writer, _) = open_to_and_set_permissions(to, reader_metadata)?;
+
+ io::copy(&mut reader, &mut writer)
+}
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ let (mut reader, reader_metadata) = open_from(from)?;
+ let max_len = u64::MAX;
+ let (mut writer, _) = open_to_and_set_permissions(to, reader_metadata)?;
+
+ use super::kernel_copy::{copy_regular_files, CopyResult};
+
+ match copy_regular_files(reader.as_raw_fd(), writer.as_raw_fd(), max_len) {
+ CopyResult::Ended(bytes) => Ok(bytes),
+ CopyResult::Error(e, _) => Err(e),
+ CopyResult::Fallback(written) => match io::copy::generic_copy(&mut reader, &mut writer) {
+ Ok(bytes) => Ok(bytes + written),
+ Err(e) => Err(e),
+ },
+ }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ use crate::sync::atomic::{AtomicBool, Ordering};
+
+ const COPYFILE_ACL: u32 = 1 << 0;
+ const COPYFILE_STAT: u32 = 1 << 1;
+ const COPYFILE_XATTR: u32 = 1 << 2;
+ const COPYFILE_DATA: u32 = 1 << 3;
+
+ const COPYFILE_SECURITY: u32 = COPYFILE_STAT | COPYFILE_ACL;
+ const COPYFILE_METADATA: u32 = COPYFILE_SECURITY | COPYFILE_XATTR;
+ const COPYFILE_ALL: u32 = COPYFILE_METADATA | COPYFILE_DATA;
+
+ const COPYFILE_STATE_COPIED: u32 = 8;
+
+ #[allow(non_camel_case_types)]
+ type copyfile_state_t = *mut libc::c_void;
+ #[allow(non_camel_case_types)]
+ type copyfile_flags_t = u32;
+
+ extern "C" {
+ fn fcopyfile(
+ from: libc::c_int,
+ to: libc::c_int,
+ state: copyfile_state_t,
+ flags: copyfile_flags_t,
+ ) -> libc::c_int;
+ fn copyfile_state_alloc() -> copyfile_state_t;
+ fn copyfile_state_free(state: copyfile_state_t) -> libc::c_int;
+ fn copyfile_state_get(
+ state: copyfile_state_t,
+ flag: u32,
+ dst: *mut libc::c_void,
+ ) -> libc::c_int;
+ }
+
+ struct FreeOnDrop(copyfile_state_t);
+ impl Drop for FreeOnDrop {
+ fn drop(&mut self) {
+ // The code below ensures that `FreeOnDrop` is never a null pointer
+ unsafe {
+ // `copyfile_state_free` returns -1 if the `to` or `from` files
+ // cannot be closed. However, this is not considered an error.
+ copyfile_state_free(self.0);
+ }
+ }
+ }
+
+ // macOS versions prior to 10.12 don't support `fclonefileat`.
+ // We store the availability in a global to avoid unnecessary syscalls.
+ static HAS_FCLONEFILEAT: AtomicBool = AtomicBool::new(true);
+ syscall! {
+ fn fclonefileat(
+ srcfd: libc::c_int,
+ dst_dirfd: libc::c_int,
+ dst: *const c_char,
+ flags: libc::c_int
+ ) -> libc::c_int
+ }
+
+ let (reader, reader_metadata) = open_from(from)?;
+
+ // Opportunistically attempt to create a copy-on-write clone of `from`
+ // using `fclonefileat`.
+ if HAS_FCLONEFILEAT.load(Ordering::Relaxed) {
+ let to = cstr(to)?;
+ let clonefile_result =
+ cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) });
+ match clonefile_result {
+ Ok(_) => return Ok(reader_metadata.len()),
+ Err(err) => match err.raw_os_error() {
+ // `fclonefileat` will fail on non-APFS volumes, if the
+ // destination already exists, or if the source and destination
+ // are on different devices. In all these cases `fcopyfile`
+ // should succeed.
+ Some(libc::ENOTSUP) | Some(libc::EEXIST) | Some(libc::EXDEV) => (),
+ Some(libc::ENOSYS) => HAS_FCLONEFILEAT.store(false, Ordering::Relaxed),
+ _ => return Err(err),
+ },
+ }
+ }
+
+ // Fall back to using `fcopyfile` if `fclonefileat` does not succeed.
+ let (writer, writer_metadata) = open_to_and_set_permissions(to, reader_metadata)?;
+
+ // We ensure that `FreeOnDrop` never contains a null pointer so it is
+ // always safe to call `copyfile_state_free`
+ let state = unsafe {
+ let state = copyfile_state_alloc();
+ if state.is_null() {
+ return Err(crate::io::Error::last_os_error());
+ }
+ FreeOnDrop(state)
+ };
+
+ let flags = if writer_metadata.is_file() { COPYFILE_ALL } else { COPYFILE_DATA };
+
+ cvt(unsafe { fcopyfile(reader.as_raw_fd(), writer.as_raw_fd(), state.0, flags) })?;
+
+ let mut bytes_copied: libc::off_t = 0;
+ cvt(unsafe {
+ copyfile_state_get(
+ state.0,
+ COPYFILE_STATE_COPIED,
+ &mut bytes_copied as *mut libc::off_t as *mut libc::c_void,
+ )
+ })?;
+ Ok(bytes_copied as u64)
+}
+
+pub fn chown(path: &Path, uid: u32, gid: u32) -> io::Result<()> {
+ let path = cstr(path)?;
+ cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?;
+ Ok(())
+}
+
+pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> {
+ cvt(unsafe { libc::fchown(fd, uid as libc::uid_t, gid as libc::gid_t) })?;
+ Ok(())
+}
+
+pub fn lchown(path: &Path, uid: u32, gid: u32) -> io::Result<()> {
+ let path = cstr(path)?;
+ cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?;
+ Ok(())
+}
+
+#[cfg(not(any(target_os = "fuchsia", target_os = "vxworks")))]
+pub fn chroot(dir: &Path) -> io::Result<()> {
+ let dir = cstr(dir)?;
+ cvt(unsafe { libc::chroot(dir.as_ptr()) })?;
+ Ok(())
+}
+
+pub use remove_dir_impl::remove_dir_all;
+
+// Fallback for REDOX, ESP-IDF, Horizon, and Miri
+#[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon", miri))]
+mod remove_dir_impl {
+ pub use crate::sys_common::fs::remove_dir_all;
+}
+
+// Modern implementation using openat(), unlinkat() and fdopendir()
+#[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon", miri)))]
+mod remove_dir_impl {
+ use super::{cstr, lstat, Dir, DirEntry, InnerReadDir, ReadDir};
+ use crate::ffi::CStr;
+ use crate::io;
+ use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
+ use crate::os::unix::prelude::{OwnedFd, RawFd};
+ use crate::path::{Path, PathBuf};
+ use crate::sync::Arc;
+ use crate::sys::{cvt, cvt_r};
+
+ #[cfg(not(all(target_os = "macos", not(target_arch = "aarch64")),))]
+ use libc::{fdopendir, openat, unlinkat};
+ #[cfg(all(target_os = "macos", not(target_arch = "aarch64")))]
+ use macos_weak::{fdopendir, openat, unlinkat};
+
+ #[cfg(all(target_os = "macos", not(target_arch = "aarch64")))]
+ mod macos_weak {
+ use crate::sys::weak::weak;
+ use libc::{c_char, c_int, DIR};
+
+ fn get_openat_fn() -> Option<unsafe extern "C" fn(c_int, *const c_char, c_int) -> c_int> {
+ weak!(fn openat(c_int, *const c_char, c_int) -> c_int);
+ openat.get()
+ }
+
+ pub fn has_openat() -> bool {
+ get_openat_fn().is_some()
+ }
+
+ pub unsafe fn openat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int {
+ get_openat_fn().map(|openat| openat(dirfd, pathname, flags)).unwrap_or_else(|| {
+ crate::sys::unix::os::set_errno(libc::ENOSYS);
+ -1
+ })
+ }
+
+ pub unsafe fn fdopendir(fd: c_int) -> *mut DIR {
+ #[cfg(all(target_os = "macos", target_arch = "x86"))]
+ weak!(fn fdopendir(c_int) -> *mut DIR, "fdopendir$INODE64$UNIX2003");
+ #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
+ weak!(fn fdopendir(c_int) -> *mut DIR, "fdopendir$INODE64");
+ fdopendir.get().map(|fdopendir| fdopendir(fd)).unwrap_or_else(|| {
+ crate::sys::unix::os::set_errno(libc::ENOSYS);
+ crate::ptr::null_mut()
+ })
+ }
+
+ pub unsafe fn unlinkat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int {
+ weak!(fn unlinkat(c_int, *const c_char, c_int) -> c_int);
+ unlinkat.get().map(|unlinkat| unlinkat(dirfd, pathname, flags)).unwrap_or_else(|| {
+ crate::sys::unix::os::set_errno(libc::ENOSYS);
+ -1
+ })
+ }
+ }
+
+ pub fn openat_nofollow_dironly(parent_fd: Option<RawFd>, p: &CStr) -> io::Result<OwnedFd> {
+ let fd = cvt_r(|| unsafe {
+ openat(
+ parent_fd.unwrap_or(libc::AT_FDCWD),
+ p.as_ptr(),
+ libc::O_CLOEXEC | libc::O_RDONLY | libc::O_NOFOLLOW | libc::O_DIRECTORY,
+ )
+ })?;
+ Ok(unsafe { OwnedFd::from_raw_fd(fd) })
+ }
+
+ fn fdreaddir(dir_fd: OwnedFd) -> io::Result<(ReadDir, RawFd)> {
+ let ptr = unsafe { fdopendir(dir_fd.as_raw_fd()) };
+ if ptr.is_null() {
+ return Err(io::Error::last_os_error());
+ }
+ let dirp = Dir(ptr);
+ // file descriptor is automatically closed by libc::closedir() now, so give up ownership
+ let new_parent_fd = dir_fd.into_raw_fd();
+ // a valid root is not needed because we do not call any functions involving the full path
+ // of the `DirEntry`s.
+ let dummy_root = PathBuf::new();
+ Ok((
+ ReadDir {
+ inner: Arc::new(InnerReadDir { dirp, root: dummy_root }),
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox",
+ )))]
+ end_of_stream: false,
+ },
+ new_parent_fd,
+ ))
+ }
+
+ #[cfg(any(
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "haiku",
+ target_os = "vxworks",
+ ))]
+ fn is_dir(_ent: &DirEntry) -> Option<bool> {
+ None
+ }
+
+ #[cfg(not(any(
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "haiku",
+ target_os = "vxworks",
+ )))]
+ fn is_dir(ent: &DirEntry) -> Option<bool> {
+ match ent.entry.d_type {
+ libc::DT_UNKNOWN => None,
+ libc::DT_DIR => Some(true),
+ _ => Some(false),
+ }
+ }
+
+ fn remove_dir_all_recursive(parent_fd: Option<RawFd>, path: &CStr) -> io::Result<()> {
+ // try opening as directory
+ let fd = match openat_nofollow_dironly(parent_fd, &path) {
+ Err(err) if matches!(err.raw_os_error(), Some(libc::ENOTDIR | libc::ELOOP)) => {
+ // not a directory - don't traverse further
+ // (for symlinks, older Linux kernels may return ELOOP instead of ENOTDIR)
+ return match parent_fd {
+ // unlink...
+ Some(parent_fd) => {
+ cvt(unsafe { unlinkat(parent_fd, path.as_ptr(), 0) }).map(drop)
+ }
+ // ...unless this was supposed to be the deletion root directory
+ None => Err(err),
+ };
+ }
+ result => result?,
+ };
+
+ // open the directory passing ownership of the fd
+ let (dir, fd) = fdreaddir(fd)?;
+ for child in dir {
+ let child = child?;
+ let child_name = child.name_cstr();
+ match is_dir(&child) {
+ Some(true) => {
+ remove_dir_all_recursive(Some(fd), child_name)?;
+ }
+ Some(false) => {
+ cvt(unsafe { unlinkat(fd, child_name.as_ptr(), 0) })?;
+ }
+ None => {
+ // POSIX specifies that calling unlink()/unlinkat(..., 0) on a directory can succeed
+ // if the process has the appropriate privileges. This, however, can cause orphaned
+ // directories requiring an fsck, e.g. on Solaris and Illumos. So we try recursing
+ // into it first instead of trying to unlink() it.
+ remove_dir_all_recursive(Some(fd), child_name)?;
+ }
+ }
+ }
+
+ // unlink the directory after removing its contents
+ cvt(unsafe {
+ unlinkat(parent_fd.unwrap_or(libc::AT_FDCWD), path.as_ptr(), libc::AT_REMOVEDIR)
+ })?;
+ Ok(())
+ }
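+
+ // Illustrative note (not part of the original change): for a small tree
+ //
+ //     d/
+ //       f        (regular file)
+ //       sub/
+ //         g
+ //
+ // remove_dir_all_recursive(None, "d") opens "d", unlinks "f" via unlinkat(fd, "f", 0),
+ // recurses into "sub" through the already-open directory fd (so the traversal cannot be
+ // redirected by swapping a component for a symlink), and removes each directory with
+ // AT_REMOVEDIR on the way back up.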
+
+ fn remove_dir_all_modern(p: &Path) -> io::Result<()> {
+ // We cannot just call remove_dir_all_recursive() here because that would not delete a passed
+ // symlink. No need to worry about races, because remove_dir_all_recursive() does not recurse
+ // into symlinks.
+ let attr = lstat(p)?;
+ if attr.file_type().is_symlink() {
+ crate::fs::remove_file(p)
+ } else {
+ remove_dir_all_recursive(None, &cstr(p)?)
+ }
+ }
+
+ #[cfg(not(all(target_os = "macos", not(target_arch = "aarch64"))))]
+ pub fn remove_dir_all(p: &Path) -> io::Result<()> {
+ remove_dir_all_modern(p)
+ }
+
+ #[cfg(all(target_os = "macos", not(target_arch = "aarch64")))]
+ pub fn remove_dir_all(p: &Path) -> io::Result<()> {
+ if macos_weak::has_openat() {
+ // openat() is available with macOS 10.10+, just like unlinkat() and fdopendir()
+ remove_dir_all_modern(p)
+ } else {
+ // fall back to classic implementation
+ crate::sys_common::fs::remove_dir_all(p)
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/futex.rs b/library/std/src/sys/unix/futex.rs
new file mode 100644
index 000000000..8d5b54021
--- /dev/null
+++ b/library/std/src/sys/unix/futex.rs
@@ -0,0 +1,303 @@
+#![cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_os = "emscripten", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+))]
+
+use crate::sync::atomic::AtomicU32;
+use crate::time::Duration;
+
+/// Wait for a futex_wake operation to wake us.
+///
+/// Returns directly if the futex doesn't hold the expected value.
+///
+/// Returns false on timeout, and true in all other cases.
+#[cfg(any(target_os = "linux", target_os = "android", target_os = "freebsd"))]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ use super::time::Timespec;
+ use crate::ptr::null;
+ use crate::sync::atomic::Ordering::Relaxed;
+
+ // Calculate the timeout as an absolute timespec.
+ //
+ // Overflows are rounded up to an infinite timeout (None).
+ let timespec = timeout
+ .and_then(|d| Timespec::now(libc::CLOCK_MONOTONIC).checked_add_duration(&d))
+ .and_then(|t| t.to_timespec());
+
+ loop {
+ // No need to wait if the value already changed.
+ if futex.load(Relaxed) != expected {
+ return true;
+ }
+
+ let r = unsafe {
+ cfg_if::cfg_if! {
+ if #[cfg(target_os = "freebsd")] {
+ // FreeBSD doesn't have futex(), but it has
+ // _umtx_op(UMTX_OP_WAIT_UINT_PRIVATE), which is nearly
+ // identical. It supports absolute timeouts through a flag
+ // in the _umtx_time struct.
+ let umtx_timeout = timespec.map(|t| libc::_umtx_time {
+ _timeout: t,
+ _flags: libc::UMTX_ABSTIME,
+ _clockid: libc::CLOCK_MONOTONIC as u32,
+ });
+ let umtx_timeout_ptr = umtx_timeout.as_ref().map_or(null(), |t| t as *const _);
+ let umtx_timeout_size = umtx_timeout.as_ref().map_or(0, |t| crate::mem::size_of_val(t));
+ libc::_umtx_op(
+ futex as *const AtomicU32 as *mut _,
+ libc::UMTX_OP_WAIT_UINT_PRIVATE,
+ expected as libc::c_ulong,
+ crate::ptr::invalid_mut(umtx_timeout_size),
+ umtx_timeout_ptr as *mut _,
+ )
+ } else if #[cfg(any(target_os = "linux", target_os = "android"))] {
+ // Use FUTEX_WAIT_BITSET rather than FUTEX_WAIT to be able to give an
+ // absolute time rather than a relative time.
+ libc::syscall(
+ libc::SYS_futex,
+ futex as *const AtomicU32,
+ libc::FUTEX_WAIT_BITSET | libc::FUTEX_PRIVATE_FLAG,
+ expected,
+ timespec.as_ref().map_or(null(), |t| t as *const libc::timespec),
+ null::<u32>(), // This argument is unused for FUTEX_WAIT_BITSET.
+ !0u32, // A full bitmask, to make it behave like a regular FUTEX_WAIT.
+ )
+ } else {
+ compile_error!("unknown target_os");
+ }
+ }
+ };
+
+ match (r < 0).then(super::os::errno) {
+ Some(libc::ETIMEDOUT) => return false,
+ Some(libc::EINTR) => continue,
+ _ => return true,
+ }
+ }
+}
+
+/// Wake up one thread that's blocked on futex_wait on this futex.
+///
+/// Returns true if this actually woke up such a thread,
+/// or false if no thread was waiting on this futex.
+///
+/// On some platforms, this always returns false.
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ let ptr = futex as *const AtomicU32;
+ let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG;
+ unsafe { libc::syscall(libc::SYS_futex, ptr, op, 1) > 0 }
+}
+
+/// Wake up all threads that are waiting on futex_wait on this futex.
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ let ptr = futex as *const AtomicU32;
+ let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG;
+ unsafe {
+ libc::syscall(libc::SYS_futex, ptr, op, i32::MAX);
+ }
+}
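+
+// Illustrative sketch (not part of the original change): a minimal one-shot event built on
+// the primitives above. Because futex_wait() returns immediately when the value no longer
+// matches `expected`, a waiter cannot miss a notification that lands just before it would
+// have gone to sleep.
+//
+//     use crate::sync::atomic::{AtomicU32, Ordering};
+//
+//     struct Event(AtomicU32); // 0 = not signalled, 1 = signalled
+//
+//     impl Event {
+//         fn wait(&self) {
+//             while self.0.load(Ordering::Acquire) == 0 {
+//                 // Sleeps only while the stored value is still 0.
+//                 futex_wait(&self.0, 0, None);
+//             }
+//         }
+//         fn signal(&self) {
+//             self.0.store(1, Ordering::Release);
+//             futex_wake_all(&self.0);
+//         }
+//     }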
+
+// FreeBSD doesn't tell us how many threads are woken up, so this always returns false.
+#[cfg(target_os = "freebsd")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ use crate::ptr::null_mut;
+ unsafe {
+ libc::_umtx_op(
+ futex as *const AtomicU32 as *mut _,
+ libc::UMTX_OP_WAKE_PRIVATE,
+ 1,
+ null_mut(),
+ null_mut(),
+ )
+ };
+ false
+}
+
+#[cfg(target_os = "freebsd")]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ use crate::ptr::null_mut;
+ unsafe {
+ libc::_umtx_op(
+ futex as *const AtomicU32 as *mut _,
+ libc::UMTX_OP_WAKE_PRIVATE,
+ i32::MAX as libc::c_ulong,
+ null_mut(),
+ null_mut(),
+ )
+ };
+}
+
+#[cfg(target_os = "openbsd")]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ use super::time::Timespec;
+ use crate::ptr::{null, null_mut};
+
+ // Overflows are rounded up to an infinite timeout (None).
+ let timespec = timeout
+ .and_then(|d| Timespec::zero().checked_add_duration(&d))
+ .and_then(|t| t.to_timespec());
+
+ let r = unsafe {
+ libc::futex(
+ futex as *const AtomicU32 as *mut u32,
+ libc::FUTEX_WAIT,
+ expected as i32,
+ timespec.as_ref().map_or(null(), |t| t as *const libc::timespec),
+ null_mut(),
+ )
+ };
+
+ r == 0 || super::os::errno() != libc::ETIMEDOUT
+}
+
+#[cfg(target_os = "openbsd")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ use crate::ptr::{null, null_mut};
+ unsafe {
+ libc::futex(futex as *const AtomicU32 as *mut u32, libc::FUTEX_WAKE, 1, null(), null_mut())
+ > 0
+ }
+}
+
+#[cfg(target_os = "openbsd")]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ use crate::ptr::{null, null_mut};
+ unsafe {
+ libc::futex(
+ futex as *const AtomicU32 as *mut u32,
+ libc::FUTEX_WAKE,
+ i32::MAX,
+ null(),
+ null_mut(),
+ );
+ }
+}
+
+#[cfg(target_os = "dragonfly")]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ // A timeout of 0 means infinite.
+ // We round smaller timeouts up to 1 millisecond.
+ // Overflows are rounded up to an infinite timeout.
+ let timeout_ms =
+ timeout.and_then(|d| Some(i32::try_from(d.as_millis()).ok()?.max(1))).unwrap_or(0);
+
+ let r = unsafe {
+ libc::umtx_sleep(futex as *const AtomicU32 as *const i32, expected as i32, timeout_ms)
+ };
+
+ r == 0 || super::os::errno() != libc::ETIMEDOUT
+}
+
+// DragonflyBSD doesn't tell us how many threads are woken up, so this always returns false.
+#[cfg(target_os = "dragonfly")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, 1) };
+ false
+}
+
+#[cfg(target_os = "dragonfly")]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ unsafe { libc::umtx_wakeup(futex as *const AtomicU32 as *const i32, i32::MAX) };
+}
+
+#[cfg(target_os = "emscripten")]
+extern "C" {
+ fn emscripten_futex_wake(addr: *const AtomicU32, count: libc::c_int) -> libc::c_int;
+ fn emscripten_futex_wait(
+ addr: *const AtomicU32,
+ val: libc::c_uint,
+ max_wait_ms: libc::c_double,
+ ) -> libc::c_int;
+}
+
+#[cfg(target_os = "emscripten")]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ unsafe {
+ emscripten_futex_wait(
+ futex,
+ expected,
+ timeout.map_or(f64::INFINITY, |d| d.as_secs_f64() * 1000.0),
+ ) != -libc::ETIMEDOUT
+ }
+}
+
+#[cfg(target_os = "emscripten")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ unsafe { emscripten_futex_wake(futex, 1) > 0 }
+}
+
+#[cfg(target_os = "emscripten")]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ unsafe { emscripten_futex_wake(futex, i32::MAX) };
+}
+
+#[cfg(target_os = "fuchsia")]
+pub mod zircon {
+ pub type zx_futex_t = crate::sync::atomic::AtomicU32;
+ pub type zx_handle_t = u32;
+ pub type zx_status_t = i32;
+ pub type zx_time_t = i64;
+
+ pub const ZX_HANDLE_INVALID: zx_handle_t = 0;
+
+ pub const ZX_TIME_INFINITE: zx_time_t = zx_time_t::MAX;
+
+ pub const ZX_OK: zx_status_t = 0;
+ pub const ZX_ERR_INVALID_ARGS: zx_status_t = -10;
+ pub const ZX_ERR_BAD_HANDLE: zx_status_t = -11;
+ pub const ZX_ERR_WRONG_TYPE: zx_status_t = -12;
+ pub const ZX_ERR_BAD_STATE: zx_status_t = -20;
+ pub const ZX_ERR_TIMED_OUT: zx_status_t = -21;
+
+ extern "C" {
+ pub fn zx_clock_get_monotonic() -> zx_time_t;
+ pub fn zx_futex_wait(
+ value_ptr: *const zx_futex_t,
+ current_value: zx_futex_t,
+ new_futex_owner: zx_handle_t,
+ deadline: zx_time_t,
+ ) -> zx_status_t;
+ pub fn zx_futex_wake(value_ptr: *const zx_futex_t, wake_count: u32) -> zx_status_t;
+ pub fn zx_futex_wake_single_owner(value_ptr: *const zx_futex_t) -> zx_status_t;
+ pub fn zx_thread_self() -> zx_handle_t;
+ }
+}
+
+#[cfg(target_os = "fuchsia")]
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ use crate::convert::TryFrom;
+
+ // Sleep forever if the timeout is longer than fits in an i64.
+ let deadline = timeout
+ .and_then(|d| {
+ i64::try_from(d.as_nanos())
+ .ok()?
+ .checked_add(unsafe { zircon::zx_clock_get_monotonic() })
+ })
+ .unwrap_or(zircon::ZX_TIME_INFINITE);
+
+ unsafe {
+ zircon::zx_futex_wait(futex, AtomicU32::new(expected), zircon::ZX_HANDLE_INVALID, deadline)
+ != zircon::ZX_ERR_TIMED_OUT
+ }
+}
+
+// Fuchsia doesn't tell us how many threads are woken up, so this always returns false.
+#[cfg(target_os = "fuchsia")]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ unsafe { zircon::zx_futex_wake(futex, 1) };
+ false
+}
+
+#[cfg(target_os = "fuchsia")]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ unsafe { zircon::zx_futex_wake(futex, u32::MAX) };
+}
diff --git a/library/std/src/sys/unix/io.rs b/library/std/src/sys/unix/io.rs
new file mode 100644
index 000000000..deb5ee76b
--- /dev/null
+++ b/library/std/src/sys/unix/io.rs
@@ -0,0 +1,76 @@
+use crate::marker::PhantomData;
+use crate::slice;
+
+use libc::{c_void, iovec};
+
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct IoSlice<'a> {
+ vec: iovec,
+ _p: PhantomData<&'a [u8]>,
+}
+
+impl<'a> IoSlice<'a> {
+ #[inline]
+ pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
+ IoSlice {
+ vec: iovec { iov_base: buf.as_ptr() as *mut u8 as *mut c_void, iov_len: buf.len() },
+ _p: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if self.vec.iov_len < n {
+ panic!("advancing IoSlice beyond its length");
+ }
+
+ unsafe {
+ self.vec.iov_len -= n;
+ self.vec.iov_base = self.vec.iov_base.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) }
+ }
+}
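+
+// Illustrative sketch (not part of the original change): `advance(n)` models a partial
+// vectored write by shrinking the slice from the front.
+//
+//     let data = [1u8, 2, 3, 4, 5];
+//     let mut slice = IoSlice::new(&data);
+//     slice.advance(2); // e.g. after writev() reported 2 bytes written
+//     assert_eq!(slice.as_slice(), &[3, 4, 5]);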
+
+#[repr(transparent)]
+pub struct IoSliceMut<'a> {
+ vec: iovec,
+ _p: PhantomData<&'a mut [u8]>,
+}
+
+impl<'a> IoSliceMut<'a> {
+ #[inline]
+ pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+ IoSliceMut {
+ vec: iovec { iov_base: buf.as_mut_ptr() as *mut c_void, iov_len: buf.len() },
+ _p: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if self.vec.iov_len < n {
+ panic!("advancing IoSliceMut beyond its length");
+ }
+
+ unsafe {
+ self.vec.iov_len -= n;
+ self.vec.iov_base = self.vec.iov_base.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) }
+ }
+
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut [u8] {
+ unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) }
+ }
+}
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
new file mode 100644
index 000000000..8f7abb55e
--- /dev/null
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -0,0 +1,686 @@
+//! This module contains specializations that can offload `io::copy()` operations on
+//! file-descriptor-containing types (`File`, `TcpStream`, etc.) to more efficient syscalls than
+//! `read(2)` and `write(2)`.
+//!
+//! Specialization is only applied to wholly std-owned types so that user code can't observe
+//! that the `Read` and `Write` traits are not used.
+//!
+//! Since a copy operation involves a reader and writer side where each can consist of different types
+//! and also involve generic wrappers (e.g. `Take`, `BufReader`) it is not practical to specialize
+//! a single method on all possible combinations.
+//!
+//! Instead readers and writers are handled separately by the `CopyRead` and `CopyWrite` specialization
+//! traits and then specialized on by the `Copier::copy` method.
+//!
+//! `Copier` uses the specialization traits to unpack the underlying file descriptors and
+//! additional prerequisites and constraints imposed by the wrapper types.
+//!
+//! Once it has obtained all necessary pieces and brought any wrapper types into a state where they
+//! can be safely bypassed it will attempt to use the `copy_file_range(2)`,
+//! `sendfile(2)` or `splice(2)` syscalls to move data directly between file descriptors.
+//! Since those syscalls have requirements that cannot be fully checked in advance and
+//! gathering additional information about file descriptors would require additional syscalls
+//! anyway, it simply attempts to use them one after another (guided by inaccurate hints) to
+//! figure out which one works and falls back to the generic read-write copy loop if none of them
+//! does.
+//! Once a working syscall is found for a pair of file descriptors it will be called in a loop
+//! until the copy operation is completed.
+//!
+//! Advantages of using these syscalls:
+//!
+//! * fewer context switches since reads and writes are coalesced into a single syscall
+//! and more bytes are transferred per syscall. This translates to higher throughput
+//! and fewer CPU cycles, at least for sufficiently large transfers to amortize the initial probing.
+//! * `copy_file_range` creates reflink copies on CoW filesystems, thus moving less data and
+//! consuming less disk space
+//! * `sendfile` and `splice` can perform zero-copy IO under some circumstances while
+//! a naive copy loop would move every byte through the CPU.
+//!
+//! Drawbacks:
+//!
+//! * copy operations smaller than the default buffer size can, under some circumstances (especially
+//! on older kernels), incur more syscalls than the naive approach would. As mentioned above,
+//! the syscall selection is guided by hints to minimize this possibility, but they are not perfect.
+//! * optimizations only apply to std types. If a user adds a custom wrapper type, e.g. to report
+//! progress, they can hit a performance cliff.
+//! * complexity
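+//!
+//! A rough usage sketch (illustrative addition, not part of the original text): callers never
+//! invoke this module directly; a plain `io::copy` between std-owned descriptor types is enough
+//! for the specializations described above to kick in on Linux.
+//!
+//! ```ignore (illustrative)
+//! use std::fs::File;
+//! use std::io;
+//! use std::net::TcpStream;
+//!
+//! fn send_file(path: &str, stream: &mut TcpStream) -> io::Result<u64> {
+//!     let mut file = File::open(path)?;
+//!     // Eligible for the sendfile(2) fast path described above.
+//!     io::copy(&mut file, stream)
+//! }
+//! ```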
+
+use crate::cmp::min;
+use crate::fs::{File, Metadata};
+use crate::io::copy::generic_copy;
+use crate::io::{
+ BufRead, BufReader, BufWriter, Error, Read, Result, StderrLock, StdinLock, StdoutLock, Take,
+ Write,
+};
+use crate::mem::ManuallyDrop;
+use crate::net::TcpStream;
+use crate::os::unix::fs::FileTypeExt;
+use crate::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use crate::os::unix::net::UnixStream;
+use crate::process::{ChildStderr, ChildStdin, ChildStdout};
+use crate::ptr;
+use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering};
+use crate::sys::cvt;
+use crate::sys::weak::syscall;
+use libc::{EBADF, EINVAL, ENOSYS, EOPNOTSUPP, EOVERFLOW, EPERM, EXDEV};
+
+#[cfg(test)]
+mod tests;
+
+pub(crate) fn copy_spec<R: Read + ?Sized, W: Write + ?Sized>(
+ read: &mut R,
+ write: &mut W,
+) -> Result<u64> {
+ let copier = Copier { read, write };
+ SpecCopy::copy(copier)
+}
+
+/// This type represents either the inferred `FileType` of a `RawFd` based on the source
+/// type from which it was extracted or the actual metadata
+///
+/// The methods on this type only provide hints, due to `AsRawFd` and `FromRawFd` the inferred
+/// type may be wrong.
+enum FdMeta {
+ /// We obtained the FD from a type that can contain any type of `FileType` and queried the metadata
+ /// because it is cheaper than probing all possible syscalls (reader side)
+ Metadata(Metadata),
+ Socket,
+ Pipe,
+ /// We don't have any metadata, e.g. because the original type was `File` which can represent
+ /// any `FileType` and we did not query the metadata either since it did not seem beneficial
+ /// (writer side)
+ NoneObtained,
+}
+
+impl FdMeta {
+ fn maybe_fifo(&self) -> bool {
+ match self {
+ FdMeta::Metadata(meta) => meta.file_type().is_fifo(),
+ FdMeta::Socket => false,
+ FdMeta::Pipe => true,
+ FdMeta::NoneObtained => true,
+ }
+ }
+
+ fn potential_sendfile_source(&self) -> bool {
+ match self {
+ // procfs erroneously shows 0 length on non-empty readable files.
+ // and if a file is truly empty then a `read` syscall will determine that and skip the write syscall
+ // thus there would be benefit from attempting sendfile
+ FdMeta::Metadata(meta)
+ if meta.file_type().is_file() && meta.len() > 0
+ || meta.file_type().is_block_device() =>
+ {
+ true
+ }
+ _ => false,
+ }
+ }
+
+ fn copy_file_range_candidate(&self) -> bool {
+ match self {
+ // copy_file_range will fail on empty procfs files. `read` can determine whether EOF has been reached
+ // without extra cost and skip the write, thus there is no benefit in attempting copy_file_range
+ FdMeta::Metadata(meta) if meta.is_file() && meta.len() > 0 => true,
+ FdMeta::NoneObtained => true,
+ _ => false,
+ }
+ }
+}
+
+struct CopyParams(FdMeta, Option<RawFd>);
+
+struct Copier<'a, 'b, R: Read + ?Sized, W: Write + ?Sized> {
+ read: &'a mut R,
+ write: &'b mut W,
+}
+
+trait SpecCopy {
+ fn copy(self) -> Result<u64>;
+}
+
+impl<R: Read + ?Sized, W: Write + ?Sized> SpecCopy for Copier<'_, '_, R, W> {
+ default fn copy(self) -> Result<u64> {
+ generic_copy(self.read, self.write)
+ }
+}
+
+impl<R: CopyRead, W: CopyWrite> SpecCopy for Copier<'_, '_, R, W> {
+ fn copy(self) -> Result<u64> {
+ let (reader, writer) = (self.read, self.write);
+ let r_cfg = reader.properties();
+ let w_cfg = writer.properties();
+
+ // before direct operations on file descriptors ensure that all source and sink buffers are empty
+ let mut flush = || -> crate::io::Result<u64> {
+ let bytes = reader.drain_to(writer, u64::MAX)?;
+ // BufWriter buffered bytes have already been accounted for in earlier write() calls
+ writer.flush()?;
+ Ok(bytes)
+ };
+
+ let mut written = 0u64;
+
+ if let (CopyParams(input_meta, Some(readfd)), CopyParams(output_meta, Some(writefd))) =
+ (r_cfg, w_cfg)
+ {
+ written += flush()?;
+ let max_write = reader.min_limit();
+
+ if input_meta.copy_file_range_candidate() && output_meta.copy_file_range_candidate() {
+ let result = copy_regular_files(readfd, writefd, max_write);
+ result.update_take(reader);
+
+ match result {
+ CopyResult::Ended(bytes_copied) => return Ok(bytes_copied + written),
+ CopyResult::Error(e, _) => return Err(e),
+ CopyResult::Fallback(bytes) => written += bytes,
+ }
+ }
+
+ // on modern kernels sendfile can copy from any mmapable type (some but not all regular files and block devices)
+ // to any writable file descriptor. On older kernels the writer side can only be a socket.
+ // So we just try and fall back if needed.
+ // If current file offsets + write sizes overflow it may also fail; we do not try to fix that and instead
+ // fall back to the generic copy loop.
+ if input_meta.potential_sendfile_source() {
+ let result = sendfile_splice(SpliceMode::Sendfile, readfd, writefd, max_write);
+ result.update_take(reader);
+
+ match result {
+ CopyResult::Ended(bytes_copied) => return Ok(bytes_copied + written),
+ CopyResult::Error(e, _) => return Err(e),
+ CopyResult::Fallback(bytes) => written += bytes,
+ }
+ }
+
+ if input_meta.maybe_fifo() || output_meta.maybe_fifo() {
+ let result = sendfile_splice(SpliceMode::Splice, readfd, writefd, max_write);
+ result.update_take(reader);
+
+ match result {
+ CopyResult::Ended(bytes_copied) => return Ok(bytes_copied + written),
+ CopyResult::Error(e, _) => return Err(e),
+ CopyResult::Fallback(0) => { /* use the fallback below */ }
+ CopyResult::Fallback(_) => {
+ unreachable!("splice should not return > 0 bytes on the fallback path")
+ }
+ }
+ }
+ }
+
+ // fallback if none of the more specialized syscalls wants to work with these file descriptors
+ match generic_copy(reader, writer) {
+ Ok(bytes) => Ok(bytes + written),
+ err => err,
+ }
+ }
+}
+
+#[rustc_specialization_trait]
+trait CopyRead: Read {
+ /// Implementations that contain buffers (i.e. `BufReader`) must transfer data from their internal
+ /// buffers into `writer` until either the buffers are emptied or `limit` bytes have been
+ /// transferred, whichever occurs sooner.
+ /// If nested buffers are present the outer buffers must be drained first.
+ ///
+ /// This is necessary to directly bypass the wrapper types while preserving the data order
+ /// when operating directly on the underlying file descriptors.
+ fn drain_to<W: Write>(&mut self, _writer: &mut W, _limit: u64) -> Result<u64> {
+ Ok(0)
+ }
+
+ /// Updates `Take` wrappers to remove the number of bytes copied.
+ fn taken(&mut self, _bytes: u64) {}
+
+ /// The minimum of the limit of all `Take<_>` wrappers, `u64::MAX` otherwise.
+ /// This method does not account for data buffered in `BufReader`s and would underreport
+ /// the limit of a `Take<BufReader<Take<_>>>` type. Thus its result is only valid
+ /// after draining the buffers via `drain_to`.
+ fn min_limit(&self) -> u64 {
+ u64::MAX
+ }
+
+ /// Extracts the file descriptor and hints/metadata, delegating through wrappers if necessary.
+ fn properties(&self) -> CopyParams;
+}
+
+#[rustc_specialization_trait]
+trait CopyWrite: Write {
+ /// Extracts the file descriptor and hints/metadata, delegating through wrappers if necessary.
+ fn properties(&self) -> CopyParams;
+}
+
+impl<T> CopyRead for &mut T
+where
+ T: CopyRead,
+{
+ fn drain_to<W: Write>(&mut self, writer: &mut W, limit: u64) -> Result<u64> {
+ (**self).drain_to(writer, limit)
+ }
+
+ fn taken(&mut self, bytes: u64) {
+ (**self).taken(bytes);
+ }
+
+ fn min_limit(&self) -> u64 {
+ (**self).min_limit()
+ }
+
+ fn properties(&self) -> CopyParams {
+ (**self).properties()
+ }
+}
+
+impl<T> CopyWrite for &mut T
+where
+ T: CopyWrite,
+{
+ fn properties(&self) -> CopyParams {
+ (**self).properties()
+ }
+}
+
+impl CopyRead for File {
+ fn properties(&self) -> CopyParams {
+ CopyParams(fd_to_meta(self), Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for &File {
+ fn properties(&self) -> CopyParams {
+ CopyParams(fd_to_meta(*self), Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for File {
+ fn properties(&self) -> CopyParams {
+ CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for &File {
+ fn properties(&self) -> CopyParams {
+ CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for TcpStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for &TcpStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for TcpStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for &TcpStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for UnixStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for &UnixStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for UnixStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for &UnixStream {
+ fn properties(&self) -> CopyParams {
+ // avoid the stat syscall since we can be fairly sure it's a socket
+ CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for ChildStdin {
+ fn properties(&self) -> CopyParams {
+ CopyParams(FdMeta::Pipe, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for ChildStdout {
+ fn properties(&self) -> CopyParams {
+ CopyParams(FdMeta::Pipe, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for ChildStderr {
+ fn properties(&self) -> CopyParams {
+ CopyParams(FdMeta::Pipe, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyRead for StdinLock<'_> {
+ fn drain_to<W: Write>(&mut self, writer: &mut W, outer_limit: u64) -> Result<u64> {
+ let buf_reader = self.as_mut_buf();
+ let buf = buf_reader.buffer();
+ let buf = &buf[0..min(buf.len(), outer_limit.try_into().unwrap_or(usize::MAX))];
+ let bytes_drained = buf.len();
+ writer.write_all(buf)?;
+ buf_reader.consume(bytes_drained);
+
+ Ok(bytes_drained as u64)
+ }
+
+ fn properties(&self) -> CopyParams {
+ CopyParams(fd_to_meta(self), Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for StdoutLock<'_> {
+ fn properties(&self) -> CopyParams {
+ CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ }
+}
+
+impl CopyWrite for StderrLock<'_> {
+ fn properties(&self) -> CopyParams {
+ CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ }
+}
+
+impl<T: CopyRead> CopyRead for Take<T> {
+ fn drain_to<W: Write>(&mut self, writer: &mut W, outer_limit: u64) -> Result<u64> {
+ let local_limit = self.limit();
+ let combined_limit = min(outer_limit, local_limit);
+ let bytes_drained = self.get_mut().drain_to(writer, combined_limit)?;
+ // update limit since read() was bypassed
+ self.set_limit(local_limit - bytes_drained);
+
+ Ok(bytes_drained)
+ }
+
+ fn taken(&mut self, bytes: u64) {
+ self.set_limit(self.limit() - bytes);
+ self.get_mut().taken(bytes);
+ }
+
+ fn min_limit(&self) -> u64 {
+ min(Take::limit(self), self.get_ref().min_limit())
+ }
+
+ fn properties(&self) -> CopyParams {
+ self.get_ref().properties()
+ }
+}
+
+impl<T: CopyRead> CopyRead for BufReader<T> {
+ fn drain_to<W: Write>(&mut self, writer: &mut W, outer_limit: u64) -> Result<u64> {
+ let buf = self.buffer();
+ let buf = &buf[0..min(buf.len(), outer_limit.try_into().unwrap_or(usize::MAX))];
+ let bytes = buf.len();
+ writer.write_all(buf)?;
+ self.consume(bytes);
+
+ let remaining = outer_limit - bytes as u64;
+
+ // in case of nested bufreaders we also need to drain the ones closer to the source
+ let inner_bytes = self.get_mut().drain_to(writer, remaining)?;
+
+ Ok(bytes as u64 + inner_bytes)
+ }
+
+ fn taken(&mut self, bytes: u64) {
+ self.get_mut().taken(bytes);
+ }
+
+ fn min_limit(&self) -> u64 {
+ self.get_ref().min_limit()
+ }
+
+ fn properties(&self) -> CopyParams {
+ self.get_ref().properties()
+ }
+}
+
+impl<T: CopyWrite> CopyWrite for BufWriter<T> {
+ fn properties(&self) -> CopyParams {
+ self.get_ref().properties()
+ }
+}
+
+fn fd_to_meta<T: AsRawFd>(fd: &T) -> FdMeta {
+ let fd = fd.as_raw_fd();
+ let file: ManuallyDrop<File> = ManuallyDrop::new(unsafe { File::from_raw_fd(fd) });
+ match file.metadata() {
+ Ok(meta) => FdMeta::Metadata(meta),
+ Err(_) => FdMeta::NoneObtained,
+ }
+}
+
+pub(super) enum CopyResult {
+ Ended(u64),
+ Error(Error, u64),
+ Fallback(u64),
+}
+
+impl CopyResult {
+ fn update_take(&self, reader: &mut impl CopyRead) {
+ match *self {
+ CopyResult::Fallback(bytes)
+ | CopyResult::Ended(bytes)
+ | CopyResult::Error(_, bytes) => reader.taken(bytes),
+ }
+ }
+}
+
+/// Invalid file descriptor.
+///
+/// Valid file descriptors are guaranteed to be non-negative numbers (see `open()` manpage)
+/// while negative values are used to indicate errors.
+/// Thus -1 will never overlap with a valid open file.
+const INVALID_FD: RawFd = -1;
+
+/// Linux-specific implementation that will attempt to use copy_file_range for copy offloading.
+/// As the name says, it only works on regular files.
+///
+/// Callers must handle fallback to a generic copy loop.
+/// `Fallback` may indicate a non-zero number of bytes already written
+/// if one of the files' cursor + `max_len` would exceed u64::MAX (`EOVERFLOW`).
+pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult {
+ use crate::cmp;
+
+ const NOT_PROBED: u8 = 0;
+ const UNAVAILABLE: u8 = 1;
+ const AVAILABLE: u8 = 2;
+
+ // Kernels prior to 4.5 don't have copy_file_range
+ // We store the availability in a global to avoid unnecessary syscalls
+ static HAS_COPY_FILE_RANGE: AtomicU8 = AtomicU8::new(NOT_PROBED);
+
+ syscall! {
+ fn copy_file_range(
+ fd_in: libc::c_int,
+ off_in: *mut libc::loff_t,
+ fd_out: libc::c_int,
+ off_out: *mut libc::loff_t,
+ len: libc::size_t,
+ flags: libc::c_uint
+ ) -> libc::ssize_t
+ }
+
+ match HAS_COPY_FILE_RANGE.load(Ordering::Relaxed) {
+ NOT_PROBED => {
+ // EPERM can indicate seccomp filters or an immutable file.
+ // To distinguish these cases we probe with invalid file descriptors which should result in EBADF if the syscall is supported
+ // and some other error (ENOSYS or EPERM) if it's not available
+ let result = unsafe {
+ cvt(copy_file_range(INVALID_FD, ptr::null_mut(), INVALID_FD, ptr::null_mut(), 1, 0))
+ };
+
+ if matches!(result.map_err(|e| e.raw_os_error()), Err(Some(EBADF))) {
+ HAS_COPY_FILE_RANGE.store(AVAILABLE, Ordering::Relaxed);
+ } else {
+ HAS_COPY_FILE_RANGE.store(UNAVAILABLE, Ordering::Relaxed);
+ return CopyResult::Fallback(0);
+ }
+ }
+ UNAVAILABLE => return CopyResult::Fallback(0),
+ _ => {}
+ };
+
+ let mut written = 0u64;
+ while written < max_len {
+ let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
+ // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position.
+ // This allows us to copy large chunks without hitting EOVERFLOW,
+ // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
+ let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
+ let copy_result = unsafe {
+ // We actually don't have to adjust the offsets,
+ // because copy_file_range adjusts the file offset automatically
+ cvt(copy_file_range(reader, ptr::null_mut(), writer, ptr::null_mut(), bytes_to_copy, 0))
+ };
+
+ match copy_result {
+ Ok(0) if written == 0 => {
+ // fallback to work around several kernel bugs where copy_file_range will fail to
+ // copy any bytes and return 0 instead of an error if
+ // - reading virtual files from the proc filesystem which appear to have 0 size
+ // but are not empty. noted in coreutils to affect kernels at least up to 5.6.19.
+ // - copying from an overlay filesystem in docker. reported to occur on fedora 32.
+ return CopyResult::Fallback(0);
+ }
+ Ok(0) => return CopyResult::Ended(written), // reached EOF
+ Ok(ret) => written += ret as u64,
+ Err(err) => {
+ return match err.raw_os_error() {
+ // when file offset + max_length > u64::MAX
+ Some(EOVERFLOW) => CopyResult::Fallback(written),
+ Some(ENOSYS | EXDEV | EINVAL | EPERM | EOPNOTSUPP | EBADF) if written == 0 => {
+ // Try fallback io::copy if either:
+ // - Kernel version is < 4.5 (ENOSYS¹)
+ // - Files are mounted on different fs (EXDEV)
+ // - copy_file_range is broken in various ways on RHEL/CentOS 7 (EOPNOTSUPP)
+ // - copy_file_range file is immutable or syscall is blocked by seccomp¹ (EPERM)
+ // - copy_file_range cannot be used with pipes or device nodes (EINVAL)
+ // - the writer fd was opened with O_APPEND (EBADF²)
+ // and no bytes were written successfully yet. (All these errnos should
+ // not be returned if something was already written, but they happen in
+ // the wild, see #91152.)
+ //
+ // ¹ these cases should be detected by the initial probe but we handle them here
+ // anyway in case syscall interception changes during runtime
+ // ² actually invalid file descriptors would cause this too, but in that case
+ // the fallback code path is expected to encounter the same error again
+ CopyResult::Fallback(0)
+ }
+ _ => CopyResult::Error(err, written),
+ };
+ }
+ }
+ }
+ CopyResult::Ended(written)
+}
+
+#[derive(PartialEq)]
+enum SpliceMode {
+ Sendfile,
+ Splice,
+}
+
+/// Performs splice or sendfile between file descriptors.
+/// Does _not_ fall back to a generic copy loop.
+fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) -> CopyResult {
+ static HAS_SENDFILE: AtomicBool = AtomicBool::new(true);
+ static HAS_SPLICE: AtomicBool = AtomicBool::new(true);
+
+ // Android builds use feature level 14, but the libc wrapper for splice is
+ // gated on feature level 21+, so we have to invoke the syscall directly.
+ #[cfg(target_os = "android")]
+ syscall! {
+ fn splice(
+ srcfd: libc::c_int,
+ src_offset: *const i64,
+ dstfd: libc::c_int,
+ dst_offset: *const i64,
+ len: libc::size_t,
+ flags: libc::c_int
+ ) -> libc::ssize_t
+ }
+
+ #[cfg(target_os = "linux")]
+ use libc::splice;
+
+ match mode {
+ SpliceMode::Sendfile if !HAS_SENDFILE.load(Ordering::Relaxed) => {
+ return CopyResult::Fallback(0);
+ }
+ SpliceMode::Splice if !HAS_SPLICE.load(Ordering::Relaxed) => {
+ return CopyResult::Fallback(0);
+ }
+ _ => (),
+ }
+
+ let mut written = 0u64;
+ while written < len {
+ // according to its manpage that's the maximum size sendfile() will copy per invocation
+ let chunk_size = crate::cmp::min(len - written, 0x7ffff000_u64) as usize;
+
+ let result = match mode {
+ SpliceMode::Sendfile => {
+ cvt(unsafe { libc::sendfile(writer, reader, ptr::null_mut(), chunk_size) })
+ }
+ SpliceMode::Splice => cvt(unsafe {
+ splice(reader, ptr::null_mut(), writer, ptr::null_mut(), chunk_size, 0)
+ }),
+ };
+
+ match result {
+ Ok(0) => break, // EOF
+ Ok(ret) => written += ret as u64,
+ Err(err) => {
+ return match err.raw_os_error() {
+ Some(ENOSYS | EPERM) => {
+ // syscall not supported (ENOSYS)
+ // syscall is disallowed, e.g. by seccomp (EPERM)
+ match mode {
+ SpliceMode::Sendfile => HAS_SENDFILE.store(false, Ordering::Relaxed),
+ SpliceMode::Splice => HAS_SPLICE.store(false, Ordering::Relaxed),
+ }
+ assert_eq!(written, 0);
+ CopyResult::Fallback(0)
+ }
+ Some(EINVAL) => {
+ // splice/sendfile do not support this particular file descriptor (EINVAL)
+ assert_eq!(written, 0);
+ CopyResult::Fallback(0)
+ }
+ Some(os_err) if mode == SpliceMode::Sendfile && os_err == EOVERFLOW => {
+ CopyResult::Fallback(written)
+ }
+ _ => CopyResult::Error(err, written),
+ };
+ }
+ }
+ }
+ CopyResult::Ended(written)
+}
diff --git a/library/std/src/sys/unix/kernel_copy/tests.rs b/library/std/src/sys/unix/kernel_copy/tests.rs
new file mode 100644
index 000000000..3fe849e23
--- /dev/null
+++ b/library/std/src/sys/unix/kernel_copy/tests.rs
@@ -0,0 +1,270 @@
+use crate::fs::OpenOptions;
+use crate::io;
+use crate::io::Result;
+use crate::io::SeekFrom;
+use crate::io::{BufRead, Read, Seek, Write};
+use crate::os::unix::io::AsRawFd;
+use crate::sys_common::io::test::tmpdir;
+
+#[test]
+fn copy_specialization() -> Result<()> {
+ use crate::io::{BufReader, BufWriter};
+
+ let tmp_path = tmpdir();
+ let source_path = tmp_path.join("copy-spec.source");
+ let sink_path = tmp_path.join("copy-spec.sink");
+
+ let result: Result<()> = try {
+ let mut source = crate::fs::OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .truncate(true)
+ .open(&source_path)?;
+ source.write_all(b"abcdefghiklmnopqr")?;
+ source.seek(SeekFrom::Start(8))?;
+ let mut source = BufReader::with_capacity(8, source.take(5));
+ source.fill_buf()?;
+ assert_eq!(source.buffer(), b"iklmn");
+ source.get_mut().set_limit(6);
+ source.get_mut().get_mut().seek(SeekFrom::Start(1))?; // "bcdefg"
+ let mut source = source.take(10); // "iklmnbcdef"
+
+ let mut sink = crate::fs::OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .truncate(true)
+ .open(&sink_path)?;
+ sink.write_all(b"000000")?;
+ let mut sink = BufWriter::with_capacity(5, sink);
+ sink.write_all(b"wxyz")?;
+ assert_eq!(sink.buffer(), b"wxyz");
+
+ let copied = crate::io::copy(&mut source, &mut sink)?;
+ assert_eq!(copied, 10, "copy obeyed limit imposed by Take");
+ assert_eq!(sink.buffer().len(), 0, "sink buffer was flushed");
+ assert_eq!(source.limit(), 0, "outer Take was exhausted");
+ assert_eq!(source.get_ref().buffer().len(), 0, "source buffer should be drained");
+ assert_eq!(
+ source.get_ref().get_ref().limit(),
+ 1,
+ "inner Take allowed reading beyond end of file, some bytes should be left"
+ );
+
+ let mut sink = sink.into_inner()?;
+ sink.seek(SeekFrom::Start(0))?;
+ let mut copied = Vec::new();
+ sink.read_to_end(&mut copied)?;
+ assert_eq!(&copied, b"000000wxyziklmnbcdef");
+ };
+
+ let rm1 = crate::fs::remove_file(source_path);
+ let rm2 = crate::fs::remove_file(sink_path);
+
+ result.and(rm1).and(rm2)
+}
+
+#[test]
+fn copies_append_mode_sink() -> Result<()> {
+ let tmp_path = tmpdir();
+ let source_path = tmp_path.join("copies_append_mode.source");
+ let sink_path = tmp_path.join("copies_append_mode.sink");
+ let mut source =
+ OpenOptions::new().create(true).truncate(true).write(true).read(true).open(&source_path)?;
+ write!(source, "not empty")?;
+ source.seek(SeekFrom::Start(0))?;
+ let mut sink = OpenOptions::new().create(true).append(true).open(&sink_path)?;
+
+ let copied = crate::io::copy(&mut source, &mut sink)?;
+
+ assert_eq!(copied, 9);
+
+ Ok(())
+}
+
+#[bench]
+fn bench_file_to_file_copy(b: &mut test::Bencher) {
+ const BYTES: usize = 128 * 1024;
+ let temp_path = tmpdir();
+ let src_path = temp_path.join("file-copy-bench-src");
+ let mut src = crate::fs::OpenOptions::new()
+ .create(true)
+ .truncate(true)
+ .read(true)
+ .write(true)
+ .open(src_path)
+ .unwrap();
+ src.write(&vec![0u8; BYTES]).unwrap();
+
+ let sink_path = temp_path.join("file-copy-bench-sink");
+ let mut sink = crate::fs::OpenOptions::new()
+ .create(true)
+ .truncate(true)
+ .write(true)
+ .open(sink_path)
+ .unwrap();
+
+ b.bytes = BYTES as u64;
+ b.iter(|| {
+ src.seek(SeekFrom::Start(0)).unwrap();
+ sink.seek(SeekFrom::Start(0)).unwrap();
+ assert_eq!(BYTES as u64, io::copy(&mut src, &mut sink).unwrap());
+ });
+}
+
+#[bench]
+fn bench_file_to_socket_copy(b: &mut test::Bencher) {
+ const BYTES: usize = 128 * 1024;
+ let temp_path = tmpdir();
+ let src_path = temp_path.join("pipe-copy-bench-src");
+ let mut src = OpenOptions::new()
+ .create(true)
+ .truncate(true)
+ .read(true)
+ .write(true)
+ .open(src_path)
+ .unwrap();
+ src.write(&vec![0u8; BYTES]).unwrap();
+
+ let sink_drainer = crate::net::TcpListener::bind("localhost:0").unwrap();
+ let mut sink = crate::net::TcpStream::connect(sink_drainer.local_addr().unwrap()).unwrap();
+ let mut sink_drainer = sink_drainer.accept().unwrap().0;
+
+ crate::thread::spawn(move || {
+ let mut sink_buf = vec![0u8; 1024 * 1024];
+ loop {
+ sink_drainer.read(&mut sink_buf[..]).unwrap();
+ }
+ });
+
+ b.bytes = BYTES as u64;
+ b.iter(|| {
+ src.seek(SeekFrom::Start(0)).unwrap();
+ assert_eq!(BYTES as u64, io::copy(&mut src, &mut sink).unwrap());
+ });
+}
+
+#[bench]
+fn bench_file_to_uds_copy(b: &mut test::Bencher) {
+ const BYTES: usize = 128 * 1024;
+ let temp_path = tmpdir();
+ let src_path = temp_path.join("uds-copy-bench-src");
+ let mut src = OpenOptions::new()
+ .create(true)
+ .truncate(true)
+ .read(true)
+ .write(true)
+ .open(src_path)
+ .unwrap();
+ src.write(&vec![0u8; BYTES]).unwrap();
+
+ let (mut sink, mut sink_drainer) = crate::os::unix::net::UnixStream::pair().unwrap();
+
+ crate::thread::spawn(move || {
+ let mut sink_buf = vec![0u8; 1024 * 1024];
+ loop {
+ sink_drainer.read(&mut sink_buf[..]).unwrap();
+ }
+ });
+
+ b.bytes = BYTES as u64;
+ b.iter(|| {
+ src.seek(SeekFrom::Start(0)).unwrap();
+ assert_eq!(BYTES as u64, io::copy(&mut src, &mut sink).unwrap());
+ });
+}
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+#[bench]
+fn bench_socket_pipe_socket_copy(b: &mut test::Bencher) {
+ use super::CopyResult;
+ use crate::io::ErrorKind;
+ use crate::process::{ChildStdin, ChildStdout};
+ use crate::sys_common::FromInner;
+
+ let (read_end, write_end) = crate::sys::pipe::anon_pipe().unwrap();
+
+ let mut read_end = ChildStdout::from_inner(read_end);
+ let write_end = ChildStdin::from_inner(write_end);
+
+ let acceptor = crate::net::TcpListener::bind("localhost:0").unwrap();
+ let mut remote_end = crate::net::TcpStream::connect(acceptor.local_addr().unwrap()).unwrap();
+
+ let local_end = crate::sync::Arc::new(acceptor.accept().unwrap().0);
+
+ // the data flow in this benchmark:
+ //
+ // socket(tx) local_source
+ // remote_end (write) +--------> (splice to)
+ // write_end
+ // +
+ // |
+ // | pipe
+ // v
+ // read_end
+ // remote_end (read) <---------+ (splice to) *
+ // socket(rx) local_end
+ //
+ // * benchmark loop using io::copy
+
+ crate::thread::spawn(move || {
+ let mut sink_buf = vec![0u8; 1024 * 1024];
+ remote_end.set_nonblocking(true).unwrap();
+ loop {
+ match remote_end.write(&mut sink_buf[..]) {
+ Err(err) if err.kind() == ErrorKind::WouldBlock => {}
+ Ok(_) => {}
+ err => {
+ err.expect("write failed");
+ }
+ };
+ match remote_end.read(&mut sink_buf[..]) {
+ Err(err) if err.kind() == ErrorKind::WouldBlock => {}
+ Ok(_) => {}
+ err => {
+ err.expect("read failed");
+ }
+ };
+ }
+ });
+
+ // check that splice works, otherwise the benchmark would hang
+ let probe = super::sendfile_splice(
+ super::SpliceMode::Splice,
+ local_end.as_raw_fd(),
+ write_end.as_raw_fd(),
+ 1,
+ );
+
+ match probe {
+ CopyResult::Ended(1) => {
+ // splice works
+ }
+ _ => {
+ eprintln!("splice failed, skipping benchmark");
+ return;
+ }
+ }
+
+ let local_source = local_end.clone();
+ crate::thread::spawn(move || {
+ loop {
+ super::sendfile_splice(
+ super::SpliceMode::Splice,
+ local_source.as_raw_fd(),
+ write_end.as_raw_fd(),
+ u64::MAX,
+ );
+ }
+ });
+
+ const BYTES: usize = 128 * 1024;
+ b.bytes = BYTES as u64;
+ b.iter(|| {
+ assert_eq!(
+ BYTES as u64,
+ io::copy(&mut (&mut read_end).take(BYTES as u64), &mut &*local_end).unwrap()
+ );
+ });
+}
diff --git a/library/std/src/sys/unix/l4re.rs b/library/std/src/sys/unix/l4re.rs
new file mode 100644
index 000000000..996758893
--- /dev/null
+++ b/library/std/src/sys/unix/l4re.rs
@@ -0,0 +1,551 @@
+macro_rules! unimpl {
+ () => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Unsupported,
+ "No networking available on L4Re.",
+ ));
+ };
+}
+
+pub mod net {
+ #![allow(warnings)]
+ use crate::fmt;
+ use crate::io::{self, IoSlice, IoSliceMut};
+ use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
+ use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
+ use crate::sys::fd::FileDesc;
+ use crate::sys_common::{AsInner, FromInner, IntoInner};
+ use crate::time::Duration;
+
+ #[allow(unused_extern_crates)]
+ pub extern crate libc as netc;
+
+ pub struct Socket(FileDesc);
+ impl Socket {
+ pub fn new(_: &SocketAddr, _: libc::c_int) -> io::Result<Socket> {
+ unimpl!();
+ }
+
+ pub fn new_raw(_: libc::c_int, _: libc::c_int) -> io::Result<Socket> {
+ unimpl!();
+ }
+
+ pub fn new_pair(_: libc::c_int, _: libc::c_int) -> io::Result<(Socket, Socket)> {
+ unimpl!();
+ }
+
+ pub fn connect_timeout(&self, _: &SocketAddr, _: Duration) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn accept(
+ &self,
+ _: *mut libc::sockaddr,
+ _: *mut libc::socklen_t,
+ ) -> io::Result<Socket> {
+ unimpl!();
+ }
+
+ pub fn duplicate(&self) -> io::Result<Socket> {
+ unimpl!();
+ }
+
+ pub fn read(&self, _: &mut [u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unimpl!();
+ }
+
+ pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unimpl!();
+ }
+
+ pub fn write(&self, _: &[u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn set_timeout(&self, _: Option<Duration>, _: libc::c_int) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn timeout(&self, _: libc::c_int) -> io::Result<Option<Duration>> {
+ unimpl!();
+ }
+
+ pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn set_linger(&self, _: Option<Duration>) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ unimpl!();
+ }
+
+ pub fn set_nodelay(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ unimpl!();
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unimpl!();
+ }
+
+ // This is used by sys_common code to abstract over Windows and Unix.
+ pub fn as_raw(&self) -> RawFd {
+ self.as_raw_fd()
+ }
+ }
+
+ impl AsInner<FileDesc> for Socket {
+ fn as_inner(&self) -> &FileDesc {
+ &self.0
+ }
+ }
+
+ impl FromInner<FileDesc> for Socket {
+ fn from_inner(file_desc: FileDesc) -> Socket {
+ Socket(file_desc)
+ }
+ }
+
+ impl IntoInner<FileDesc> for Socket {
+ fn into_inner(self) -> FileDesc {
+ self.0
+ }
+ }
+
+ impl AsFd for Socket {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+ }
+
+ impl AsRawFd for Socket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+ }
+
+ impl IntoRawFd for Socket {
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+ }
+
+ impl FromRawFd for Socket {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ Self(FromRawFd::from_raw_fd(raw_fd))
+ }
+ }
+
+ pub struct TcpStream {
+ inner: Socket,
+ }
+
+ impl TcpStream {
+ pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> {
+ unimpl!();
+ }
+
+ pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> {
+ unimpl!();
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ unimpl!();
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ unimpl!();
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn read(&self, _: &mut [u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn write(&self, _: &[u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ unimpl!();
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unimpl!();
+ }
+
+ pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpStream> {
+ unimpl!();
+ }
+
+ pub fn set_linger(&self, _: Option<Duration>) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ unimpl!();
+ }
+
+ pub fn set_nodelay(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ unimpl!();
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unimpl!();
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unimpl!();
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+ }
+
+ impl FromInner<Socket> for TcpStream {
+ fn from_inner(socket: Socket) -> TcpStream {
+ TcpStream { inner: socket }
+ }
+ }
+
+ impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "No networking support available on L4Re")
+ }
+ }
+
+ pub struct TcpListener {
+ inner: Socket,
+ }
+
+ impl TcpListener {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<TcpListener> {
+ unimpl!();
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unimpl!();
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ unimpl!();
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpListener> {
+ unimpl!();
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unimpl!();
+ }
+
+ pub fn set_only_v6(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ unimpl!();
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unimpl!();
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+ }
+
+ impl FromInner<Socket> for TcpListener {
+ fn from_inner(socket: Socket) -> TcpListener {
+ TcpListener { inner: socket }
+ }
+ }
+
+ impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "No networking support available on L4Re.")
+ }
+ }
+
+ pub struct UdpSocket {
+ inner: Socket,
+ }
+
+ impl UdpSocket {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> {
+ unimpl!();
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ unimpl!();
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unimpl!();
+ }
+
+ pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unimpl!();
+ }
+
+ pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unimpl!();
+ }
+
+ pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn duplicate(&self) -> io::Result<UdpSocket> {
+ unimpl!();
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ unimpl!();
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ unimpl!();
+ }
+
+ pub fn set_broadcast(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ unimpl!();
+ }
+
+ pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ unimpl!();
+ }
+
+ pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ unimpl!();
+ }
+
+ pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ unimpl!();
+ }
+
+ pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unimpl!();
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unimpl!();
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ unimpl!();
+ }
+
+ pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn send(&self, _: &[u8]) -> io::Result<usize> {
+ unimpl!();
+ }
+
+ pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> {
+ unimpl!();
+ }
+ }
+
+ impl FromInner<Socket> for UdpSocket {
+ fn from_inner(socket: Socket) -> UdpSocket {
+ UdpSocket { inner: socket }
+ }
+ }
+
+ impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "No networking support on L4Re available.")
+ }
+ }
+
+ pub struct LookupHost {
+ original: *mut libc::addrinfo,
+ cur: *mut libc::addrinfo,
+ }
+
+ impl Iterator for LookupHost {
+ type Item = SocketAddr;
+ fn next(&mut self) -> Option<SocketAddr> {
+ None
+ }
+ }
+
+ impl LookupHost {
+ pub fn port(&self) -> u16 {
+ 0 // unimplemented
+ }
+ }
+
+ unsafe impl Sync for LookupHost {}
+ unsafe impl Send for LookupHost {}
+
+ impl TryFrom<&str> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: &str) -> io::Result<LookupHost> {
+ unimpl!();
+ }
+ }
+
+ impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> {
+ unimpl!();
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
new file mode 100644
index 000000000..ce427599c
--- /dev/null
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -0,0 +1,165 @@
+//! A priority inheriting mutex for Fuchsia.
+//!
+//! This is a port of the [mutex in Fuchsia's libsync]. Contrary to the original,
+//! it does not abort the process when reentrant locking is detected, but deadlocks.
+//!
+//! Priority inheritance is achieved by storing the owning thread's handle in an
+//! atomic variable. Fuchsia's futex operations support setting an owner thread
+//! for a futex, which can boost that thread's priority while the futex is waited
+//! upon.
+//!
+//! libsync is licenced under the following BSD-style licence:
+//!
+//! Copyright 2016 The Fuchsia Authors.
+//!
+//! Redistribution and use in source and binary forms, with or without
+//! modification, are permitted provided that the following conditions are
+//! met:
+//!
+//! * Redistributions of source code must retain the above copyright
+//! notice, this list of conditions and the following disclaimer.
+//! * Redistributions in binary form must reproduce the above
+//! copyright notice, this list of conditions and the following
+//! disclaimer in the documentation and/or other materials provided
+//! with the distribution.
+//!
+//! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//! "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//! LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+//! A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+//! OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+//! SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+//! LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+//! DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+//! THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+//! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//!
+//! [mutex in Fuchsia's libsync]: https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/system/ulib/sync/mutex.c
+
+use crate::sync::atomic::{
+ AtomicU32,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::zircon::{
+ zx_futex_wait, zx_futex_wake_single_owner, zx_handle_t, zx_thread_self, ZX_ERR_BAD_HANDLE,
+ ZX_ERR_BAD_STATE, ZX_ERR_INVALID_ARGS, ZX_ERR_TIMED_OUT, ZX_ERR_WRONG_TYPE, ZX_OK,
+ ZX_TIME_INFINITE,
+};
+
+// The lowest two bits of a `zx_handle_t` are always set, so the lowest bit is used to mark the
+// mutex as contested by clearing it.
+const CONTESTED_BIT: u32 = 1;
+// This can never be a valid `zx_handle_t`.
+const UNLOCKED: u32 = 0;
+
+pub type MovableMutex = Mutex;
+
+pub struct Mutex {
+ futex: AtomicU32,
+}
+
+#[inline]
+fn to_state(owner: zx_handle_t) -> u32 {
+ owner
+}
+
+#[inline]
+fn to_owner(state: u32) -> zx_handle_t {
+ state | CONTESTED_BIT
+}
+
+#[inline]
+fn is_contested(state: u32) -> bool {
+ state & CONTESTED_BIT == 0
+}
+
+#[inline]
+fn mark_contested(state: u32) -> u32 {
+ state & !CONTESTED_BIT
+}
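+
+// Purely illustrative example (the handle value is made up): with an owner handle of
+// 0x00F3 (low two bits set, as guaranteed for `zx_handle_t`), the uncontested state is
+// stored as 0x00F3, marking it contested clears bit 0 to give 0x00F2, and `to_owner`
+// recovers the original handle 0x00F3 by setting that bit again.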
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Mutex {
+ Mutex { futex: AtomicU32::new(UNLOCKED) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ let thread_self = zx_thread_self();
+ self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ let thread_self = zx_thread_self();
+ if let Err(state) =
+ self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
+ {
+ self.lock_contested(state, thread_self);
+ }
+ }
+
+ #[cold]
+ fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
+ let owned_state = mark_contested(to_state(thread_self));
+ loop {
+ // Mark the mutex as contested if it is not already.
+ let contested = mark_contested(state);
+ if is_contested(state)
+ || self.futex.compare_exchange(state, contested, Relaxed, Relaxed).is_ok()
+ {
+ // The mutex has been marked as contested, wait for the state to change.
+ unsafe {
+ match zx_futex_wait(
+ &self.futex,
+ AtomicU32::new(contested),
+ to_owner(state),
+ ZX_TIME_INFINITE,
+ ) {
+ ZX_OK | ZX_ERR_BAD_STATE | ZX_ERR_TIMED_OUT => (),
+ // Note that if a thread handle is reused after its associated thread
+ // exits without unlocking the mutex, an arbitrary thread's priority
+ // could be boosted by the wait, but there is currently no way to
+ // prevent that.
+ ZX_ERR_INVALID_ARGS | ZX_ERR_BAD_HANDLE | ZX_ERR_WRONG_TYPE => {
+ panic!(
+ "either the current thread is trying to lock a mutex it has
+ already locked, or the previous owner did not unlock the mutex
+ before exiting"
+ )
+ }
+ error => panic!("unexpected error in zx_futex_wait: {error}"),
+ }
+ }
+ }
+
+ // The state has changed or a wakeup occurred, try to lock the mutex.
+ match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) {
+ Ok(_) => return,
+ Err(updated) => state = updated,
+ }
+ }
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ if is_contested(self.futex.swap(UNLOCKED, Release)) {
+ // The woken thread will mark the mutex as contested again,
+ // and return here, waking until there are no waiters left,
+ // in which case this is a noop.
+ self.wake();
+ }
+ }
+
+ #[cold]
+ fn wake(&self) {
+ unsafe {
+ zx_futex_wake_single_owner(&self.futex);
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/locks/futex_condvar.rs b/library/std/src/sys/unix/locks/futex_condvar.rs
new file mode 100644
index 000000000..c0576c178
--- /dev/null
+++ b/library/std/src/sys/unix/locks/futex_condvar.rs
@@ -0,0 +1,58 @@
+use super::Mutex;
+use crate::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
+use crate::time::Duration;
+
+pub type MovableCondvar = Condvar;
+
+pub struct Condvar {
+ // The value of this atomic is simply incremented on every notification.
+ // This is used by `.wait()` to not miss any notifications after
+ // unlocking the mutex and before waiting for notifications.
+ futex: AtomicU32,
+}
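+
+// Illustrative interleaving (not part of the original sources): a waiter loads the counter
+// (say 7) while still holding the mutex, unlocks, and calls futex_wait expecting 7. If a
+// notification arrives between that load and the futex_wait, the counter no longer matches
+// and the wait returns immediately instead of missing the wakeup.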
+
+impl Condvar {
+ #[inline]
+ pub const fn new() -> Self {
+ Self { futex: AtomicU32::new(0) }
+ }
+
+ // All the memory orderings here are `Relaxed`,
+ // because synchronization is done by unlocking and locking the mutex.
+
+ pub unsafe fn notify_one(&self) {
+ self.futex.fetch_add(1, Relaxed);
+ futex_wake(&self.futex);
+ }
+
+ pub unsafe fn notify_all(&self) {
+ self.futex.fetch_add(1, Relaxed);
+ futex_wake_all(&self.futex);
+ }
+
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ self.wait_optional_timeout(mutex, None);
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, timeout: Duration) -> bool {
+ self.wait_optional_timeout(mutex, Some(timeout))
+ }
+
+ unsafe fn wait_optional_timeout(&self, mutex: &Mutex, timeout: Option<Duration>) -> bool {
+ // Examine the notification counter _before_ we unlock the mutex.
+ let futex_value = self.futex.load(Relaxed);
+
+ // Unlock the mutex before going to sleep.
+ mutex.unlock();
+
+ // Wait, but only if there hasn't been any
+ // notification since we unlocked the mutex.
+ let r = futex_wait(&self.futex, futex_value, timeout);
+
+ // Lock the mutex again.
+ mutex.lock();
+
+ r
+ }
+}
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
new file mode 100644
index 000000000..99ba86e5f
--- /dev/null
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -0,0 +1,101 @@
+use crate::sync::atomic::{
+ AtomicU32,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::{futex_wait, futex_wake};
+
+pub type MovableMutex = Mutex;
+
+pub struct Mutex {
+ /// 0: unlocked
+ /// 1: locked, no other threads waiting
+ /// 2: locked, and other threads waiting (contended)
+ futex: AtomicU32,
+}
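+
+// Illustrative transition sequence (not part of the original sources): an uncontended lock is
+// a single 0 -> 1 compare_exchange; a second thread that finds the mutex taken swaps in 2 and
+// sleeps in futex_wait; unlock swaps in 0 and, having observed 2, wakes one sleeper, which
+// re-marks the mutex as 2 when it takes the lock, so any remaining waiters are woken eventually.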
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Self {
+ Self { futex: AtomicU32::new(0) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
+ self.lock_contended();
+ }
+ }
+
+ #[cold]
+ fn lock_contended(&self) {
+ // Spin first to speed things up if the lock is released quickly.
+ let mut state = self.spin();
+
+ // If it's unlocked now, attempt to take the lock
+ // without marking it as contended.
+ if state == 0 {
+ match self.futex.compare_exchange(0, 1, Acquire, Relaxed) {
+ Ok(_) => return, // Locked!
+ Err(s) => state = s,
+ }
+ }
+
+ loop {
+ // Put the lock in contended state.
+ // We avoid an unnecessary write if it is already set to 2,
+ // to be friendlier for the caches.
+ if state != 2 && self.futex.swap(2, Acquire) == 0 {
+ // We changed it from 0 to 2, so we just successfully locked it.
+ return;
+ }
+
+ // Wait for the futex to change state, assuming it is still 2.
+ futex_wait(&self.futex, 2, None);
+
+ // Spin again after waking up.
+ state = self.spin();
+ }
+ }
+
+ fn spin(&self) -> u32 {
+ let mut spin = 100;
+ loop {
+ // We only use `load` (and not `swap` or `compare_exchange`)
+ // while spinning, to be easier on the caches.
+ let state = self.futex.load(Relaxed);
+
+ // We stop spinning when the mutex is unlocked (0),
+ // but also when it's contended (2).
+ if state != 1 || spin == 0 {
+ return state;
+ }
+
+ crate::hint::spin_loop();
+ spin -= 1;
+ }
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ if self.futex.swap(0, Release) == 2 {
+ // We only wake up one thread. When that thread locks the mutex, it
+ // will mark the mutex as contended (2) (see lock_contended above),
+ // which makes sure that any other waiting threads will also be
+ // woken up eventually.
+ self.wake();
+ }
+ }
+
+ #[cold]
+ fn wake(&self) {
+ futex_wake(&self.futex);
+ }
+}
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
new file mode 100644
index 000000000..b3bbbf743
--- /dev/null
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -0,0 +1,322 @@
+use crate::sync::atomic::{
+ AtomicU32,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
+
+pub type MovableRwLock = RwLock;
+
+pub struct RwLock {
+ // The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
+ // Bits 0..30:
+ // 0: Unlocked
+ // 1..=0x3FFF_FFFE: Locked by N readers
+ // 0x3FFF_FFFF: Write locked
+ // Bit 30: Readers are waiting on this futex.
+ // Bit 31: Writers are waiting on the writer_notify futex.
+ state: AtomicU32,
+ // The 'condition variable' to notify writers through.
+ // Incremented on every signal.
+ writer_notify: AtomicU32,
+}
+
+const READ_LOCKED: u32 = 1;
+const MASK: u32 = (1 << 30) - 1;
+const WRITE_LOCKED: u32 = MASK;
+const MAX_READERS: u32 = MASK - 1;
+const READERS_WAITING: u32 = 1 << 30;
+const WRITERS_WAITING: u32 = 1 << 31;
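+
+// Illustrative encodings (not part of the original sources): a state of 3 means three readers
+// hold the lock with nobody waiting; 3 | WRITERS_WAITING means three readers hold it while at
+// least one writer sleeps on `writer_notify`; WRITE_LOCKED | READERS_WAITING | WRITERS_WAITING
+// means a writer holds it with both readers and writers parked.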
+
+#[inline]
+fn is_unlocked(state: u32) -> bool {
+ state & MASK == 0
+}
+
+#[inline]
+fn is_write_locked(state: u32) -> bool {
+ state & MASK == WRITE_LOCKED
+}
+
+#[inline]
+fn has_readers_waiting(state: u32) -> bool {
+ state & READERS_WAITING != 0
+}
+
+#[inline]
+fn has_writers_waiting(state: u32) -> bool {
+ state & WRITERS_WAITING != 0
+}
+
+#[inline]
+fn is_read_lockable(state: u32) -> bool {
+ // This also returns false if the counter could overflow if we tried to read lock it.
+ //
+ // We don't allow read-locking if there's readers waiting, even if the lock is unlocked
+ // and there's no writers waiting. The only situation when this happens is after unlocking,
+ // at which point the unlocking thread might be waking up writers, which have priority over readers.
+ // The unlocking thread will clear the readers waiting bit and wake up readers, if necessary.
+ state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
+}
+
+#[inline]
+fn has_reached_max_readers(state: u32) -> bool {
+ state & MASK == MAX_READERS
+}
+
+impl RwLock {
+ #[inline]
+ pub const fn new() -> Self {
+ Self { state: AtomicU32::new(0), writer_notify: AtomicU32::new(0) }
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ self.state
+ .fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
+ .is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ let state = self.state.load(Relaxed);
+ if !is_read_lockable(state)
+ || self
+ .state
+ .compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
+ .is_err()
+ {
+ self.read_contended();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ let state = self.state.fetch_sub(READ_LOCKED, Release) - READ_LOCKED;
+
+ // It's impossible for a reader to be waiting on a read-locked RwLock,
+ // except if there is also a writer waiting.
+ debug_assert!(!has_readers_waiting(state) || has_writers_waiting(state));
+
+ // Wake up a writer if we were the last reader and there's a writer waiting.
+ if is_unlocked(state) && has_writers_waiting(state) {
+ self.wake_writer_or_readers(state);
+ }
+ }
+
+ #[cold]
+ fn read_contended(&self) {
+ let mut state = self.spin_read();
+
+ loop {
+ // If we can lock it, lock it.
+ if is_read_lockable(state) {
+ match self.state.compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
+ {
+ Ok(_) => return, // Locked!
+ Err(s) => {
+ state = s;
+ continue;
+ }
+ }
+ }
+
+ // Check for overflow.
+ if has_reached_max_readers(state) {
+ panic!("too many active read locks on RwLock");
+ }
+
+ // Make sure the readers waiting bit is set before we go to sleep.
+ if !has_readers_waiting(state) {
+ if let Err(s) =
+ self.state.compare_exchange(state, state | READERS_WAITING, Relaxed, Relaxed)
+ {
+ state = s;
+ continue;
+ }
+ }
+
+ // Wait for the state to change.
+ futex_wait(&self.state, state | READERS_WAITING, None);
+
+ // Spin again after waking up.
+ state = self.spin_read();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ self.state
+ .fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
+ .is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
+ self.write_contended();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ let state = self.state.fetch_sub(WRITE_LOCKED, Release) - WRITE_LOCKED;
+
+ debug_assert!(is_unlocked(state));
+
+ if has_writers_waiting(state) || has_readers_waiting(state) {
+ self.wake_writer_or_readers(state);
+ }
+ }
+
+ #[cold]
+ fn write_contended(&self) {
+ let mut state = self.spin_write();
+
+ let mut other_writers_waiting = 0;
+
+ loop {
+ // If it's unlocked, we try to lock it.
+ if is_unlocked(state) {
+ match self.state.compare_exchange_weak(
+ state,
+ state | WRITE_LOCKED | other_writers_waiting,
+ Acquire,
+ Relaxed,
+ ) {
+ Ok(_) => return, // Locked!
+ Err(s) => {
+ state = s;
+ continue;
+ }
+ }
+ }
+
+ // Set the waiting bit indicating that we're waiting on it.
+ if !has_writers_waiting(state) {
+ if let Err(s) =
+ self.state.compare_exchange(state, state | WRITERS_WAITING, Relaxed, Relaxed)
+ {
+ state = s;
+ continue;
+ }
+ }
+
+ // Other writers might be waiting now too, so we should make sure
+ // we keep that bit on once we manage to lock it.
+ other_writers_waiting = WRITERS_WAITING;
+
+ // Examine the notification counter before we check if `state` has changed,
+ // to make sure we don't miss any notifications.
+ let seq = self.writer_notify.load(Acquire);
+
+ // Don't go to sleep if the lock has become available,
+ // or if the writers waiting bit is no longer set.
+ state = self.state.load(Relaxed);
+ if is_unlocked(state) || !has_writers_waiting(state) {
+ continue;
+ }
+
+ // Wait for the state to change.
+ futex_wait(&self.writer_notify, seq, None);
+
+ // Spin again after waking up.
+ state = self.spin_write();
+ }
+ }
+
+ /// Wake up waiting threads after unlocking.
+ ///
+ /// If both are waiting, this will wake up only one writer, but will fall
+ /// back to waking up readers if there was no writer to wake up.
+ #[cold]
+ fn wake_writer_or_readers(&self, mut state: u32) {
+ assert!(is_unlocked(state));
+
+ // The readers waiting bit might be turned on at any point now,
+ // since readers will block when there's anything waiting.
+ // Writers will just lock the lock though, regardless of the waiting bits,
+ // so we don't have to worry about the writer waiting bit.
+ //
+ // If the lock gets locked in the meantime, we don't have to do
+ // anything, because then the thread that locked the lock will take
+ // care of waking up waiters when it unlocks.
+
+ // If only writers are waiting, wake one of them up.
+ if state == WRITERS_WAITING {
+ match self.state.compare_exchange(state, 0, Relaxed, Relaxed) {
+ Ok(_) => {
+ self.wake_writer();
+ return;
+ }
+ Err(s) => {
+ // Maybe some readers are now waiting too. So, continue to the next `if`.
+ state = s;
+ }
+ }
+ }
+
+ // If both writers and readers are waiting, leave the readers waiting
+ // and only wake up one writer.
+ if state == READERS_WAITING + WRITERS_WAITING {
+ if self.state.compare_exchange(state, READERS_WAITING, Relaxed, Relaxed).is_err() {
+ // The lock got locked. Not our problem anymore.
+ return;
+ }
+ if self.wake_writer() {
+ return;
+ }
+ // No writers were actually blocked on futex_wait, so we continue
+ // to wake up readers instead, since we can't be sure if we notified a writer.
+ state = READERS_WAITING;
+ }
+
+ // If readers are waiting, wake them all up.
+ if state == READERS_WAITING {
+ if self.state.compare_exchange(state, 0, Relaxed, Relaxed).is_ok() {
+ futex_wake_all(&self.state);
+ }
+ }
+ }
+
+ /// This wakes one writer and returns true if we woke up a writer that was
+ /// blocked on futex_wait.
+ ///
+ /// If this returns false, it might still be the case that we notified a
+ /// writer that was about to go to sleep.
+ fn wake_writer(&self) -> bool {
+ self.writer_notify.fetch_add(1, Release);
+ futex_wake(&self.writer_notify)
+ // Note that FreeBSD and DragonFlyBSD don't tell us whether they woke
+ // up any threads or not, and always return `false` here. That still
+ // results in correct behaviour: it just means readers get woken up as
+ // well in case both readers and writers were waiting.
+ }
+
+ /// Spin for a while, but stop directly at the given condition.
+ #[inline]
+ fn spin_until(&self, f: impl Fn(u32) -> bool) -> u32 {
+ let mut spin = 100; // Chosen by fair dice roll.
+ loop {
+ let state = self.state.load(Relaxed);
+ if f(state) || spin == 0 {
+ return state;
+ }
+ crate::hint::spin_loop();
+ spin -= 1;
+ }
+ }
+
+ #[inline]
+ fn spin_write(&self) -> u32 {
+ // Stop spinning when it's unlocked or when there are waiting writers, to keep things somewhat fair.
+ self.spin_until(|state| is_unlocked(state) || has_writers_waiting(state))
+ }
+
+ #[inline]
+ fn spin_read(&self) -> u32 {
+ // Stop spinning when it's unlocked or read locked, or when there's waiting threads.
+ self.spin_until(|state| {
+ !is_write_locked(state) || has_readers_waiting(state) || has_writers_waiting(state)
+ })
+ }
+}
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
new file mode 100644
index 000000000..f5f92f693
--- /dev/null
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -0,0 +1,31 @@
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_os = "emscripten", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ ))] {
+ mod futex_mutex;
+ mod futex_rwlock;
+ mod futex_condvar;
+ pub(crate) use futex_mutex::{Mutex, MovableMutex};
+ pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_condvar::MovableCondvar;
+ } else if #[cfg(target_os = "fuchsia")] {
+ mod fuchsia_mutex;
+ mod futex_rwlock;
+ mod futex_condvar;
+ pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
+ pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_condvar::MovableCondvar;
+ } else {
+ mod pthread_mutex;
+ mod pthread_rwlock;
+ mod pthread_condvar;
+ pub(crate) use pthread_mutex::{Mutex, MovableMutex};
+ pub(crate) use pthread_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use pthread_condvar::MovableCondvar;
+ }
+}
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
new file mode 100644
index 000000000..abf27e7db
--- /dev/null
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -0,0 +1,222 @@
+use crate::cell::UnsafeCell;
+use crate::sys::locks::{pthread_mutex, Mutex};
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+use crate::time::Duration;
+
+pub struct Condvar {
+ inner: UnsafeCell<libc::pthread_cond_t>,
+}
+
+pub(crate) type MovableCondvar = LazyBox<Condvar>;
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+const TIMESPEC_MAX: libc::timespec =
+ libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
+fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
+ if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
+}
+
+impl LazyInit for Condvar {
+ fn init() -> Box<Self> {
+ let mut condvar = Box::new(Self::new());
+ unsafe { condvar.init() };
+ condvar
+ }
+}
+
+impl Condvar {
+ pub const fn new() -> Condvar {
+ // The condvar might still be moved (its address is not final yet), so it is better to avoid
+ // initializing potentially opaque OS data before it has landed at its final address.
+ Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
+ }
+
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))]
+ unsafe fn init(&mut self) {}
+
+ // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet,
+ // so on that platform init() should always be called.
+ // Moreover, that platform does not have pthread_condattr_setclock support,
+ // hence that initialization should be skipped as well.
+ //
+ // Similar story for the 3DS (horizon).
+ #[cfg(any(target_os = "espidf", target_os = "horizon"))]
+ unsafe fn init(&mut self) {
+ let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
+ assert_eq!(r, 0);
+ }
+
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox",
+ target_os = "espidf",
+ target_os = "horizon"
+ )))]
+ unsafe fn init(&mut self) {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+ assert_eq!(r, 0);
+ let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub unsafe fn notify_one(&self) {
+ let r = libc::pthread_cond_signal(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {
+ let r = libc::pthread_cond_broadcast(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
+ debug_assert_eq!(r, 0);
+ }
+
+ // This implementation is used on systems that support pthread_condattr_setclock
+ // where we configure the condition variable to use the monotonic clock (instead of
+ // the default system clock). This approach avoids all problems that result
+ // from changes made to the system time.
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "android",
+ target_os = "espidf",
+ target_os = "horizon"
+ )))]
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ use crate::mem;
+
+ let mut now: libc::timespec = mem::zeroed();
+ let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
+ assert_eq!(r, 0);
+
+ // Nanosecond calculations can't overflow because both values are below 1e9.
+ let nsec = dur.subsec_nanos() + now.tv_nsec as u32;
+
+ let sec = saturating_cast_to_time_t(dur.as_secs())
+ .checked_add((nsec / 1_000_000_000) as libc::time_t)
+ .and_then(|s| s.checked_add(now.tv_sec));
+ let nsec = nsec % 1_000_000_000;
+
+ let timeout =
+ sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
+
+ let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ assert!(r == libc::ETIMEDOUT || r == 0);
+ r == 0
+ }
+
+ // This implementation is modeled after libcxx's condition_variable
+ // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46
+ // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "android",
+ target_os = "espidf",
+ target_os = "horizon"
+ ))]
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
+ use crate::ptr;
+ use crate::time::Instant;
+
+ // 1000 years
+ let max_dur = Duration::from_secs(1000 * 365 * 86400);
+
+ if dur > max_dur {
+ // The macOS implementation of `pthread_cond_timedwait` is buggy
+ // with very long durations. When the duration is greater than
+ // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+ // on macOS Sierra returns error 316.
+ //
+ // This program demonstrates the issue:
+ // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+ //
+ // To work around this issue, and possible bugs of other OSes, timeout
+ // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
+ // because of spurious wakeups.
+
+ dur = max_dur;
+ }
+
+ // First, figure out what time it currently is, in both system and
+ // stable time. pthread_cond_timedwait uses system time, but we want to
+ // report timeout based on stable time.
+ let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
+ let stable_now = Instant::now();
+ let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
+ debug_assert_eq!(r, 0);
+
+ let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
+ let extra = (nsec / 1_000_000_000) as libc::time_t;
+ let nsec = nsec % 1_000_000_000;
+ let seconds = saturating_cast_to_time_t(dur.as_secs());
+
+ let timeout = sys_now
+ .tv_sec
+ .checked_add(extra)
+ .and_then(|s| s.checked_add(seconds))
+ .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec })
+ .unwrap_or(TIMESPEC_MAX);
+
+ // And wait!
+ let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ debug_assert!(r == libc::ETIMEDOUT || r == 0);
+
+ // ETIMEDOUT is not a totally reliable method of determining timeout due
+ // to clock shifts, so do the check ourselves
+ stable_now.elapsed() < dur
+ }
+
+ #[inline]
+ #[cfg(not(target_os = "dragonfly"))]
+ unsafe fn destroy(&mut self) {
+ let r = libc::pthread_cond_destroy(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ #[cfg(target_os = "dragonfly")]
+ unsafe fn destroy(&mut self) {
+ let r = libc::pthread_cond_destroy(self.inner.get());
+ // On DragonFly pthread_cond_destroy() returns EINVAL if called on
+ // a condvar that was just initialized with
+ // libc::PTHREAD_COND_INITIALIZER. Once it is used or
+ // pthread_cond_init() is called, this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ }
+}
+
+impl Drop for Condvar {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe { self.destroy() };
+ }
+}
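
Both timed-wait paths above reduce to the same saturating deadline arithmetic. A standalone worked sketch, using plain `i64`/`u32` in place of `libc::time_t` and the nanosecond field; the function name and the test values are illustrative only.

    use std::time::Duration;

    // Compute an absolute (sec, nsec) deadline from a current time and a Duration,
    // saturating to the far future instead of overflowing, as wait_timeout() does.
    fn deadline(now_sec: i64, now_nsec: u32, dur: Duration) -> (i64, u32) {
        const TIMESPEC_MAX: (i64, u32) = (i64::MAX, 999_999_999);

        // Both nanosecond parts are below 1e9, so their sum fits in a u32.
        let nsec = dur.subsec_nanos() + now_nsec;

        // Saturate the seconds: a timeout too large to represent simply
        // becomes "wait (nearly) forever".
        let secs = if dur.as_secs() > i64::MAX as u64 { i64::MAX } else { dur.as_secs() as i64 };
        let sec = secs
            .checked_add((nsec / 1_000_000_000) as i64)
            .and_then(|s| s.checked_add(now_sec));

        match sec {
            Some(s) => (s, nsec % 1_000_000_000),
            None => TIMESPEC_MAX,
        }
    }

    fn main() {
        // Ordinary case: nanoseconds carry into the seconds field.
        assert_eq!(
            deadline(100, 500_000_000, Duration::new(2, 700_000_000)),
            (103, 200_000_000)
        );
        // Saturating case: the addition would overflow, so we clamp.
        assert_eq!(
            deadline(i64::MAX - 1, 0, Duration::from_secs(u64::MAX)),
            (i64::MAX, 999_999_999)
        );
    }
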
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
new file mode 100644
index 000000000..98afee69b
--- /dev/null
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -0,0 +1,135 @@
+use crate::cell::UnsafeCell;
+use crate::mem::{forget, MaybeUninit};
+use crate::sys::cvt_nz;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+pub struct Mutex {
+ inner: UnsafeCell<libc::pthread_mutex_t>,
+}
+
+pub(crate) type MovableMutex = LazyBox<Mutex>;
+
+#[inline]
+pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
+ m.inner.get()
+}
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+impl LazyInit for Mutex {
+ fn init() -> Box<Self> {
+ let mut mutex = Box::new(Self::new());
+ unsafe { mutex.init() };
+ mutex
+ }
+
+ fn destroy(mutex: Box<Self>) {
+ // We're not allowed to pthread_mutex_destroy a locked mutex,
+ // so check first if it's unlocked.
+ if unsafe { mutex.try_lock() } {
+ unsafe { mutex.unlock() };
+ drop(mutex);
+ } else {
+ // The mutex is locked. This happens if a MutexGuard is leaked.
+ // In this case, we just leak the Mutex too.
+ forget(mutex);
+ }
+ }
+
+ fn cancel_init(_: Box<Self>) {
+ // In this case, we can just drop it without any checks,
+ // since it cannot have been locked yet.
+ }
+}
+
+impl Mutex {
+ pub const fn new() -> Mutex {
+ // Might be moved to a different address, so it is better to avoid
+ // initialization of potentially opaque OS data before it landed.
+ // Be very careful using this newly constructed `Mutex`: reentrant
+ // locking is undefined behavior until `init` is called!
+ Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
+ }
+ #[inline]
+ pub unsafe fn init(&mut self) {
+ // Issue #33770
+ //
+ // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
+ // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
+ // try to re-lock it from the same thread when you already hold a lock
+ // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html).
+ // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+ // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that
+ // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same
+ // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in
+ // a Mutex where re-locking is UB.
+ //
+ // In practice, glibc takes advantage of this undefined behavior to
+ // implement hardware lock elision, which uses hardware transactional
+ // memory to avoid acquiring the lock. While a transaction is in
+ // progress, the lock appears to be unlocked. This isn't a problem for
+ // other threads since the transactional memory will abort if a conflict
+ // is detected, however no abort is generated when re-locking from the
+ // same thread.
+ //
+ // Since locking the same mutex twice will result in two aliasing &mut
+ // references, we instead create the mutex with type
+ // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
+ // re-lock it from the same thread, thus avoiding undefined behavior.
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+ cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+ let attr = PthreadMutexAttr(&mut attr);
+ cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
+ .unwrap();
+ cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
+ }
+ #[inline]
+ pub unsafe fn lock(&self) {
+ let r = libc::pthread_mutex_lock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ let r = libc::pthread_mutex_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ libc::pthread_mutex_trylock(self.inner.get()) == 0
+ }
+ #[inline]
+ #[cfg(not(target_os = "dragonfly"))]
+ unsafe fn destroy(&mut self) {
+ let r = libc::pthread_mutex_destroy(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ #[cfg(target_os = "dragonfly")]
+ unsafe fn destroy(&mut self) {
+ let r = libc::pthread_mutex_destroy(self.inner.get());
+ // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+ // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+ // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+ // this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ }
+}
+
+impl Drop for Mutex {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe { self.destroy() };
+ }
+}
+
+pub(super) struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit<libc::pthread_mutexattr_t>);
+
+impl Drop for PthreadMutexAttr<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ let result = libc::pthread_mutexattr_destroy(self.0.as_mut_ptr());
+ debug_assert_eq!(result, 0);
+ }
+ }
+}
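
`PthreadMutexAttr` above is a small drop guard so the attribute is destroyed on every exit path out of `init`. A self-contained sketch of the same idea with a hypothetical handle type (no libc involved):

    struct AttrGuard(*mut u32); // hypothetical stand-in for *mut pthread_mutexattr_t

    impl Drop for AttrGuard {
        fn drop(&mut self) {
            // Stand-in for pthread_mutexattr_destroy(): runs on every exit path.
            unsafe { *self.0 = 0 };
        }
    }

    fn init_with_attr() -> Result<(), ()> {
        let mut raw_attr = 1u32; // pretend this was attr-initialized
        let _guard = AttrGuard(&mut raw_attr as *mut u32);
        // ... the settype / mutex_init calls would go here; an early `return Err(())`
        // or a panic would still run the guard's Drop and destroy the attribute.
        Ok(())
    }

    fn main() {
        init_with_attr().unwrap();
    }
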
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
new file mode 100644
index 000000000..adfe2a883
--- /dev/null
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -0,0 +1,173 @@
+use crate::cell::UnsafeCell;
+use crate::mem::forget;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+pub struct RwLock {
+ inner: UnsafeCell<libc::pthread_rwlock_t>,
+ write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
+ num_readers: AtomicUsize,
+}
+
+pub(crate) type MovableRwLock = LazyBox<RwLock>;
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+impl LazyInit for RwLock {
+ fn init() -> Box<Self> {
+ Box::new(Self::new())
+ }
+
+ fn destroy(mut rwlock: Box<Self>) {
+ // We're not allowed to pthread_rwlock_destroy a locked rwlock,
+ // so check first if it's unlocked.
+ if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
+ // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
+ // In this case, we just leak the RwLock too.
+ forget(rwlock);
+ }
+ }
+
+ fn cancel_init(_: Box<Self>) {
+ // In this case, we can just drop it without any checks,
+ // since it cannot have been locked yet.
+ }
+}
+
+impl RwLock {
+ pub const fn new() -> RwLock {
+ RwLock {
+ inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+ write_locked: UnsafeCell::new(false),
+ num_readers: AtomicUsize::new(0),
+ }
+ }
+ #[inline]
+ pub unsafe fn read(&self) {
+ let r = libc::pthread_rwlock_rdlock(self.inner.get());
+
+ // According to POSIX, when a thread tries to acquire this read lock
+ // while it already holds the write lock
+ // (or vice versa, or tries to acquire the write lock twice),
+ // "the call shall either deadlock or return [EDEADLK]"
+ // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
+ // So, in principle, all we have to do here is check `r == 0` to be sure we properly
+ // got the lock.
+ //
+ // However, (at least) glibc before version 2.25 does not conform to this spec,
+ // and can return `r == 0` even when this thread already holds the write lock.
+ // We thus check for this situation ourselves and panic when detecting that a thread
+ // got the write lock more than once, or got a read and a write lock.
+ if r == libc::EAGAIN {
+ panic!("rwlock maximum reader count exceeded");
+ } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+ // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+ // data races.
+ if r == 0 {
+ // `pthread_rwlock_rdlock` succeeded when it should not have.
+ self.raw_unlock();
+ }
+ panic!("rwlock read lock would result in deadlock");
+ } else {
+ // POSIX does not make guarantees about all the errors that may be returned.
+ // See issue #94705 for more details.
+ assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
+ }
+ }
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() {
+ // `pthread_rwlock_tryrdlock` succeeded when it should not have.
+ self.raw_unlock();
+ false
+ } else {
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
+ true
+ }
+ } else {
+ false
+ }
+ }
+ #[inline]
+ pub unsafe fn write(&self) {
+ let r = libc::pthread_rwlock_wrlock(self.inner.get());
+ // See comments above for why we check for EDEADLK and write_locked. For the same reason,
+ // we also need to check that there are no readers (tracked in `num_readers`).
+ if r == libc::EDEADLK
+ || (r == 0 && *self.write_locked.get())
+ || self.num_readers.load(Ordering::Relaxed) != 0
+ {
+ // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+ // data races.
+ if r == 0 {
+ // `pthread_rwlock_wrlock` succeeded when it should not have.
+ self.raw_unlock();
+ }
+ panic!("rwlock write lock would result in deadlock");
+ } else {
+ // According to POSIX, for a properly initialized rwlock this can only
+ // return EDEADLK or 0. We rely on that.
+ debug_assert_eq!(r, 0);
+ }
+ *self.write_locked.get() = true;
+ }
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+ // `pthread_rwlock_trywrlock` succeeded when it should not have.
+ self.raw_unlock();
+ false
+ } else {
+ *self.write_locked.get() = true;
+ true
+ }
+ } else {
+ false
+ }
+ }
+ #[inline]
+ unsafe fn raw_unlock(&self) {
+ let r = libc::pthread_rwlock_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ debug_assert!(!*self.write_locked.get());
+ self.num_readers.fetch_sub(1, Ordering::Relaxed);
+ self.raw_unlock();
+ }
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
+ debug_assert!(*self.write_locked.get());
+ *self.write_locked.get() = false;
+ self.raw_unlock();
+ }
+ #[inline]
+ unsafe fn destroy(&mut self) {
+ let r = libc::pthread_rwlock_destroy(self.inner.get());
+ // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+ // rwlock that was just initialized with
+ // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+ // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+ if cfg!(target_os = "dragonfly") {
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
+ }
+}
+
+impl Drop for RwLock {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe { self.destroy() };
+ }
+}
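
The extra `write_locked`/`num_readers` bookkeeping exists so that would-be deadlocks are detected even on libcs that hand out the lock anyway. A sketch of the decision the `read()` path above makes, pulled out into a pure function; the error-code values are illustrative, and the real code uses `libc` constants and additionally asserts `r == 0` in the success branch.

    #[derive(Debug, PartialEq)]
    enum ReadOutcome {
        Locked,        // take the read lock and bump num_readers
        PanicDeadlock, // EDEADLK, or an old glibc handing us the lock anyway
        PanicTooMany,  // EAGAIN: reader count exhausted
    }

    fn classify_read(r: i32, write_locked: bool) -> ReadOutcome {
        const EAGAIN: i32 = 11; // illustrative values; real code uses libc::EAGAIN
        const EDEADLK: i32 = 35; // and libc::EDEADLK
        if r == EAGAIN {
            ReadOutcome::PanicTooMany
        } else if r == EDEADLK || (r == 0 && write_locked) {
            ReadOutcome::PanicDeadlock
        } else {
            ReadOutcome::Locked
        }
    }

    fn main() {
        assert_eq!(classify_read(0, false), ReadOutcome::Locked);
        // The buggy-glibc case: rdlock "succeeded" although we hold the write lock.
        assert_eq!(classify_read(0, true), ReadOutcome::PanicDeadlock);
    }
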
diff --git a/library/std/src/sys/unix/memchr.rs b/library/std/src/sys/unix/memchr.rs
new file mode 100644
index 000000000..73ba604ec
--- /dev/null
+++ b/library/std/src/sys/unix/memchr.rs
@@ -0,0 +1,40 @@
+// Original implementation taken from rust-memchr.
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ let p = unsafe {
+ libc::memchr(
+ haystack.as_ptr() as *const libc::c_void,
+ needle as libc::c_int,
+ haystack.len(),
+ )
+ };
+ if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
+}
+
+pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ #[cfg(target_os = "linux")]
+ fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
+ // GNU's memrchr() will, unlike memchr(), error if the haystack is empty.
+ if haystack.is_empty() {
+ return None;
+ }
+ let p = unsafe {
+ libc::memrchr(
+ haystack.as_ptr() as *const libc::c_void,
+ needle as libc::c_int,
+ haystack.len(),
+ )
+ };
+ // FIXME: this should *likely* use `offset_from`, but more
+ // investigation is needed (including running tests in miri).
+ if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
+ }
+
+ #[cfg(not(target_os = "linux"))]
+ fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
+ core::slice::memchr::memrchr(needle, haystack)
+ }
+
+ memrchr_specific(needle, haystack)
+}
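
For a sense of how these helpers behave, a self-contained usage sketch with a plain-Rust fallback in place of the libc calls (the byte string is arbitrary):

    fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
        haystack.iter().position(|&b| b == needle)
    }

    fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
        haystack.iter().rposition(|&b| b == needle)
    }

    fn main() {
        let hay = b"/usr/local/bin";
        assert_eq!(memchr(b'/', hay), Some(0));   // first separator
        assert_eq!(memrchr(b'/', hay), Some(10)); // last separator
        assert_eq!(memrchr(b'x', hay), None);     // absent byte
    }
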
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
new file mode 100644
index 000000000..3d0d91460
--- /dev/null
+++ b/library/std/src/sys/unix/mod.rs
@@ -0,0 +1,361 @@
+#![allow(missing_docs, nonstandard_style)]
+
+use crate::ffi::CStr;
+use crate::io::ErrorKind;
+
+pub use self::rand::hashmap_random_keys;
+
+#[cfg(not(target_os = "espidf"))]
+#[macro_use]
+pub mod weak;
+
+pub mod alloc;
+pub mod android;
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+pub mod fd;
+pub mod fs;
+pub mod futex;
+pub mod io;
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub mod kernel_copy;
+#[cfg(target_os = "l4re")]
+mod l4re;
+pub mod locks;
+pub mod memchr;
+#[cfg(not(target_os = "l4re"))]
+pub mod net;
+#[cfg(target_os = "l4re")]
+pub use self::l4re::net;
+pub mod os;
+pub mod os_str;
+pub mod path;
+pub mod pipe;
+pub mod process;
+pub mod rand;
+pub mod stack_overflow;
+pub mod stdio;
+pub mod thread;
+pub mod thread_local_dtor;
+pub mod thread_local_key;
+pub mod thread_parker;
+pub mod time;
+
+#[cfg(target_os = "espidf")]
+pub fn init(argc: isize, argv: *const *const u8) {}
+
+#[cfg(not(target_os = "espidf"))]
+// SAFETY: must be called only once during runtime initialization.
+// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ // The standard streams might be closed on application startup. To prevent
+ // std::io::{stdin, stdout, stderr} objects from using other unrelated file
+ // resources opened later, we reopen the standard streams when they are closed.
+ sanitize_standard_fds();
+
+ // By default, some platforms will send a *signal* when an EPIPE error
+ // would otherwise be delivered. This runtime doesn't install a SIGPIPE
+ // handler, causing it to kill the program, which isn't exactly what we
+ // want!
+ //
+ // Hence, we set SIGPIPE to ignore when the program starts up in order
+ // to prevent this problem.
+ reset_sigpipe();
+
+ stack_overflow::init();
+ args::init(argc, argv);
+
+ // Normally, `thread::spawn` will call `Thread::set_name` but since this thread
+ // already exists, we have to call it ourselves. We only do this on macOS
+ // because some Unix-like operating systems, such as Linux, share the process ID
+ // and thread ID for the main thread, so renaming the main thread would rename the
+ // process; we also only want to enable this on platforms we've tested.
+ if cfg!(target_os = "macos") {
+ thread::Thread::set_name(&CStr::from_bytes_with_nul_unchecked(b"main\0"));
+ }
+
+ unsafe fn sanitize_standard_fds() {
+ // fast path with a single syscall for systems with poll()
+ #[cfg(not(any(
+ miri,
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "vxworks",
+ // The poll on Darwin doesn't set POLLNVAL for closed fds.
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "redox",
+ target_os = "l4re",
+ target_os = "horizon",
+ )))]
+ 'poll: {
+ use crate::sys::os::errno;
+ let pfds: &mut [_] = &mut [
+ libc::pollfd { fd: 0, events: 0, revents: 0 },
+ libc::pollfd { fd: 1, events: 0, revents: 0 },
+ libc::pollfd { fd: 2, events: 0, revents: 0 },
+ ];
+
+ while libc::poll(pfds.as_mut_ptr(), 3, 0) == -1 {
+ match errno() {
+ libc::EINTR => continue,
+ libc::EINVAL | libc::EAGAIN | libc::ENOMEM => {
+ // RLIMIT_NOFILE or temporary allocation failures
+ // may be preventing use of poll(); fall back to fcntl
+ break 'poll;
+ }
+ _ => libc::abort(),
+ }
+ }
+ for pfd in pfds {
+ if pfd.revents & libc::POLLNVAL == 0 {
+ continue;
+ }
+ if libc::open("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
+ // If the stream is closed but we failed to reopen it, abort the
+ // process. Otherwise we wouldn't preserve the safety of
+ // operations on the corresponding Rust object Stdin, Stdout, or
+ // Stderr.
+ libc::abort();
+ }
+ }
+ return;
+ }
+
+ // fallback in case poll isn't available or limited by RLIMIT_NOFILE
+ #[cfg(not(any(
+ // The standard fds are always available in Miri.
+ miri,
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "vxworks",
+ target_os = "l4re",
+ target_os = "horizon",
+ )))]
+ {
+ use crate::sys::os::errno;
+ for fd in 0..3 {
+ if libc::fcntl(fd, libc::F_GETFD) == -1 && errno() == libc::EBADF {
+ if libc::open("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
+ // If the stream is closed but we failed to reopen it, abort the
+ // process. Otherwise we wouldn't preserve the safety of
+ // operations on the corresponding Rust object Stdin, Stdout, or
+ // Stderr.
+ libc::abort();
+ }
+ }
+ }
+ }
+ }
+
+ unsafe fn reset_sigpipe() {
+ #[cfg(not(any(target_os = "emscripten", target_os = "fuchsia", target_os = "horizon")))]
+ rtassert!(signal(libc::SIGPIPE, libc::SIG_IGN) != libc::SIG_ERR);
+ }
+}
+
+// SAFETY: must be called only once during runtime cleanup.
+// NOTE: this is not guaranteed to run, for example when the program aborts.
+pub unsafe fn cleanup() {
+ stack_overflow::cleanup();
+}
+
+#[cfg(target_os = "android")]
+pub use crate::sys::android::signal;
+#[cfg(not(target_os = "android"))]
+pub use libc::signal;
+
+pub fn decode_error_kind(errno: i32) -> ErrorKind {
+ use ErrorKind::*;
+ match errno as libc::c_int {
+ libc::E2BIG => ArgumentListTooLong,
+ libc::EADDRINUSE => AddrInUse,
+ libc::EADDRNOTAVAIL => AddrNotAvailable,
+ libc::EBUSY => ResourceBusy,
+ libc::ECONNABORTED => ConnectionAborted,
+ libc::ECONNREFUSED => ConnectionRefused,
+ libc::ECONNRESET => ConnectionReset,
+ libc::EDEADLK => Deadlock,
+ libc::EDQUOT => FilesystemQuotaExceeded,
+ libc::EEXIST => AlreadyExists,
+ libc::EFBIG => FileTooLarge,
+ libc::EHOSTUNREACH => HostUnreachable,
+ libc::EINTR => Interrupted,
+ libc::EINVAL => InvalidInput,
+ libc::EISDIR => IsADirectory,
+ libc::ELOOP => FilesystemLoop,
+ libc::ENOENT => NotFound,
+ libc::ENOMEM => OutOfMemory,
+ libc::ENOSPC => StorageFull,
+ libc::ENOSYS => Unsupported,
+ libc::EMLINK => TooManyLinks,
+ libc::ENAMETOOLONG => InvalidFilename,
+ libc::ENETDOWN => NetworkDown,
+ libc::ENETUNREACH => NetworkUnreachable,
+ libc::ENOTCONN => NotConnected,
+ libc::ENOTDIR => NotADirectory,
+ libc::ENOTEMPTY => DirectoryNotEmpty,
+ libc::EPIPE => BrokenPipe,
+ libc::EROFS => ReadOnlyFilesystem,
+ libc::ESPIPE => NotSeekable,
+ libc::ESTALE => StaleNetworkFileHandle,
+ libc::ETIMEDOUT => TimedOut,
+ libc::ETXTBSY => ExecutableFileBusy,
+ libc::EXDEV => CrossesDevices,
+
+ libc::EACCES | libc::EPERM => PermissionDenied,
+
+ // These two constants can have the same value on some systems,
+ // but different values on others, so we can't use a match
+ // clause
+ x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => WouldBlock,
+
+ _ => Uncategorized,
+ }
+}
+
+#[doc(hidden)]
+pub trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+macro_rules! impl_is_minus_one {
+ ($($t:ident)*) => ($(impl IsMinusOne for $t {
+ fn is_minus_one(&self) -> bool {
+ *self == -1
+ }
+ })*)
+}
+
+impl_is_minus_one! { i8 i16 i32 i64 isize }
+
+pub fn cvt<T: IsMinusOne>(t: T) -> crate::io::Result<T> {
+ if t.is_minus_one() { Err(crate::io::Error::last_os_error()) } else { Ok(t) }
+}
+
+pub fn cvt_r<T, F>(mut f: F) -> crate::io::Result<T>
+where
+ T: IsMinusOne,
+ F: FnMut() -> T,
+{
+ loop {
+ match cvt(f()) {
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ other => return other,
+ }
+ }
+}
+
+#[allow(dead_code)] // Not used on all platforms.
+pub fn cvt_nz(error: libc::c_int) -> crate::io::Result<()> {
+ if error == 0 { Ok(()) } else { Err(crate::io::Error::from_raw_os_error(error)) }
+}
+
+// libc::abort() will run the SIGABRT handler. That's fine because anyone who
+// installs a SIGABRT handler already has to expect it to run in Very Bad
+// situations (eg, malloc crashing).
+//
+// Current glibc's abort() function unblocks SIGABRT, raises SIGABRT, clears the
+// SIGABRT handler and raises it again, and then starts to get creative.
+//
+// See the public documentation for `intrinsics::abort()` and `process::abort()`
+// for further discussion.
+//
+// There is confusion about whether libc::abort() flushes stdio streams.
+// libc::abort() is required by ISO C 99 (7.14.1.1p5) to be async-signal-safe,
+// so flushing streams is at least extremely hard, if not entirely impossible.
+//
+// However, some versions of POSIX (eg IEEE Std 1003.1-2001) required abort to
+// do so. In 1003.1-2004 this was fixed.
+//
+// glibc's implementation did the flush, unsafely, before glibc commit
+// 91e7cf982d01 `abort: Do not flush stdio streams [BZ #15436]' by Florian
+// Weimer. According to glibc's NEWS:
+//
+// The abort function terminates the process immediately, without flushing
+// stdio streams. Previous glibc versions used to flush streams, resulting
+// in deadlocks and further data corruption. This change also affects
+// process aborts as the result of assertion failures.
+//
+// This is an accurate description of the problem. The only solution for
+ // a program with nontrivial use of C stdio is a fixed libc - one which does not
+// try to flush in abort - since even libc-internal errors, and assertion
+// failures generated from C, will go via abort().
+//
+// On systems with old, buggy, libcs, the impact can be severe for a
+// multithreaded C program. It is much less severe for Rust, because Rust
+// stdlib doesn't use libc stdio buffering. In a typical Rust program, which
+// does not use C stdio, even a buggy libc::abort() is, in fact, safe.
+pub fn abort_internal() -> ! {
+ unsafe { libc::abort() }
+}
+
+cfg_if::cfg_if! {
+ if #[cfg(target_os = "android")] {
+ #[link(name = "dl")]
+ #[link(name = "log")]
+ extern "C" {}
+ } else if #[cfg(target_os = "freebsd")] {
+ #[link(name = "execinfo")]
+ #[link(name = "pthread")]
+ extern "C" {}
+ } else if #[cfg(target_os = "netbsd")] {
+ #[link(name = "pthread")]
+ #[link(name = "rt")]
+ extern "C" {}
+ } else if #[cfg(any(target_os = "dragonfly", target_os = "openbsd"))] {
+ #[link(name = "pthread")]
+ extern "C" {}
+ } else if #[cfg(target_os = "solaris")] {
+ #[link(name = "socket")]
+ #[link(name = "posix4")]
+ #[link(name = "pthread")]
+ #[link(name = "resolv")]
+ extern "C" {}
+ } else if #[cfg(target_os = "illumos")] {
+ #[link(name = "socket")]
+ #[link(name = "posix4")]
+ #[link(name = "pthread")]
+ #[link(name = "resolv")]
+ #[link(name = "nsl")]
+ // Use libumem for the (malloc-compatible) allocator
+ #[link(name = "umem")]
+ extern "C" {}
+ } else if #[cfg(target_os = "macos")] {
+ #[link(name = "System")]
+ // res_init and friends require -lresolv on macOS/iOS.
+ // See #41582 and https://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html
+ #[link(name = "resolv")]
+ extern "C" {}
+ } else if #[cfg(any(target_os = "ios", target_os = "watchos"))] {
+ #[link(name = "System")]
+ #[link(name = "objc")]
+ #[link(name = "Security", kind = "framework")]
+ #[link(name = "Foundation", kind = "framework")]
+ #[link(name = "resolv")]
+ extern "C" {}
+ } else if #[cfg(target_os = "fuchsia")] {
+ #[link(name = "zircon")]
+ #[link(name = "fdio")]
+ extern "C" {}
+ } else if #[cfg(all(target_os = "linux", target_env = "uclibc"))] {
+ #[link(name = "dl")]
+ extern "C" {}
+ }
+}
+
+#[cfg(any(target_os = "espidf", target_os = "horizon"))]
+mod unsupported {
+ use crate::io;
+
+ pub fn unsupported<T>() -> io::Result<T> {
+ Err(unsupported_err())
+ }
+
+ pub fn unsupported_err() -> io::Error {
+ io::const_io_error!(io::ErrorKind::Unsupported, "operation not supported on this platform",)
+ }
+}
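
The `cvt`/`cvt_r` helpers are the convention the rest of this module is written against: a C-style return of -1 plus errno becomes an `io::Result`, and `cvt_r` retries on `EINTR`. A minimal sketch with a fake syscall standing in for a real libc call:

    use std::io;

    fn cvt(ret: i64) -> io::Result<i64> {
        if ret == -1 { Err(io::Error::last_os_error()) } else { Ok(ret) }
    }

    fn cvt_r(mut f: impl FnMut() -> i64) -> io::Result<i64> {
        loop {
            match cvt(f()) {
                // EINTR surfaces as ErrorKind::Interrupted; just call again.
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
                other => return other,
            }
        }
    }

    fn main() -> io::Result<()> {
        // A "syscall" that succeeds on the first try.
        let fake_read = || 42i64;
        let n = cvt_r(fake_read)?;
        println!("read {n} bytes");
        Ok(())
    }
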
diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs
new file mode 100644
index 000000000..462a45b01
--- /dev/null
+++ b/library/std/src/sys/unix/net.rs
@@ -0,0 +1,512 @@
+use crate::cmp;
+use crate::ffi::CStr;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::mem;
+use crate::net::{Shutdown, SocketAddr};
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
+use crate::str;
+use crate::sys::fd::FileDesc;
+use crate::sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr};
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::time::{Duration, Instant};
+
+use libc::{c_int, c_void, size_t, sockaddr, socklen_t, MSG_PEEK};
+
+cfg_if::cfg_if! {
+ if #[cfg(target_vendor = "apple")] {
+ use libc::SO_LINGER_SEC as SO_LINGER;
+ } else {
+ use libc::SO_LINGER;
+ }
+}
+
+pub use crate::sys::{cvt, cvt_r};
+
+#[allow(unused_extern_crates)]
+pub extern crate libc as netc;
+
+pub type wrlen_t = size_t;
+
+pub struct Socket(FileDesc);
+
+pub fn init() {}
+
+pub fn cvt_gai(err: c_int) -> io::Result<()> {
+ if err == 0 {
+ return Ok(());
+ }
+
+ // We may need to trigger a glibc workaround. See on_resolver_failure() for details.
+ on_resolver_failure();
+
+ #[cfg(not(target_os = "espidf"))]
+ if err == libc::EAI_SYSTEM {
+ return Err(io::Error::last_os_error());
+ }
+
+ #[cfg(not(target_os = "espidf"))]
+ let detail = unsafe {
+ str::from_utf8(CStr::from_ptr(libc::gai_strerror(err)).to_bytes()).unwrap().to_owned()
+ };
+
+ #[cfg(target_os = "espidf")]
+ let detail = "";
+
+ Err(io::Error::new(
+ io::ErrorKind::Uncategorized,
+ &format!("failed to lookup address information: {detail}")[..],
+ ))
+}
+
+impl Socket {
+ pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
+ let fam = match *addr {
+ SocketAddr::V4(..) => libc::AF_INET,
+ SocketAddr::V6(..) => libc::AF_INET6,
+ };
+ Socket::new_raw(fam, ty)
+ }
+
+ pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
+ unsafe {
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ ))] {
+ // On platforms that support it we pass the SOCK_CLOEXEC
+ // flag to atomically create the socket and set it as
+ // CLOEXEC. On Linux this was added in 2.6.27.
+ let fd = cvt(libc::socket(fam, ty | libc::SOCK_CLOEXEC, 0))?;
+ Ok(Socket(FileDesc::from_raw_fd(fd)))
+ } else {
+ let fd = cvt(libc::socket(fam, ty, 0))?;
+ let fd = FileDesc::from_raw_fd(fd);
+ fd.set_cloexec()?;
+ let socket = Socket(fd);
+
+ // macOS and iOS use `SO_NOSIGPIPE` as a `setsockopt`
+ // flag to disable `SIGPIPE` emission on socket.
+ #[cfg(target_vendor = "apple")]
+ setsockopt(&socket, libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1)?;
+
+ Ok(socket)
+ }
+ }
+ }
+ }
+
+ #[cfg(not(target_os = "vxworks"))]
+ pub fn new_pair(fam: c_int, ty: c_int) -> io::Result<(Socket, Socket)> {
+ unsafe {
+ let mut fds = [0, 0];
+
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ ))] {
+ // Like above, set cloexec atomically
+ cvt(libc::socketpair(fam, ty | libc::SOCK_CLOEXEC, 0, fds.as_mut_ptr()))?;
+ Ok((Socket(FileDesc::from_raw_fd(fds[0])), Socket(FileDesc::from_raw_fd(fds[1]))))
+ } else {
+ cvt(libc::socketpair(fam, ty, 0, fds.as_mut_ptr()))?;
+ let a = FileDesc::from_raw_fd(fds[0]);
+ let b = FileDesc::from_raw_fd(fds[1]);
+ a.set_cloexec()?;
+ b.set_cloexec()?;
+ Ok((Socket(a), Socket(b)))
+ }
+ }
+ }
+ }
+
+ #[cfg(target_os = "vxworks")]
+ pub fn new_pair(_fam: c_int, _ty: c_int) -> io::Result<(Socket, Socket)> {
+ unimplemented!()
+ }
+
+ pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
+ self.set_nonblocking(true)?;
+ let r = unsafe {
+ let (addr, len) = addr.into_inner();
+ cvt(libc::connect(self.as_raw_fd(), addr.as_ptr(), len))
+ };
+ self.set_nonblocking(false)?;
+
+ match r {
+ Ok(_) => return Ok(()),
+ // there's no ErrorKind for EINPROGRESS :(
+ Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
+ Err(e) => return Err(e),
+ }
+
+ let mut pollfd = libc::pollfd { fd: self.as_raw_fd(), events: libc::POLLOUT, revents: 0 };
+
+ if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+
+ let start = Instant::now();
+
+ loop {
+ let elapsed = start.elapsed();
+ if elapsed >= timeout {
+ return Err(io::const_io_error!(io::ErrorKind::TimedOut, "connection timed out"));
+ }
+
+ let timeout = timeout - elapsed;
+ let mut timeout = timeout
+ .as_secs()
+ .saturating_mul(1_000)
+ .saturating_add(timeout.subsec_nanos() as u64 / 1_000_000);
+ if timeout == 0 {
+ timeout = 1;
+ }
+
+ let timeout = cmp::min(timeout, c_int::MAX as u64) as c_int;
+
+ match unsafe { libc::poll(&mut pollfd, 1, timeout) } {
+ -1 => {
+ let err = io::Error::last_os_error();
+ if err.kind() != io::ErrorKind::Interrupted {
+ return Err(err);
+ }
+ }
+ 0 => {}
+ _ => {
+ // Linux returns POLLOUT|POLLERR|POLLHUP for refused connections (!), so look
+ // for POLLHUP rather than read readiness
+ if pollfd.revents & libc::POLLHUP != 0 {
+ let e = self.take_error()?.unwrap_or_else(|| {
+ io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "no error set after POLLHUP",
+ )
+ });
+ return Err(e);
+ }
+
+ return Ok(());
+ }
+ }
+ }
+ }
+
+ pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> {
+ // Unfortunately the only known way right now to accept a socket and
+ // atomically set the CLOEXEC flag is to use the `accept4` syscall on
+ // platforms that support it. On Linux, this was added in 2.6.28,
+ // glibc 2.10 and musl 0.9.5.
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ ))] {
+ unsafe {
+ let fd = cvt_r(|| libc::accept4(self.as_raw_fd(), storage, len, libc::SOCK_CLOEXEC))?;
+ Ok(Socket(FileDesc::from_raw_fd(fd)))
+ }
+ } else {
+ unsafe {
+ let fd = cvt_r(|| libc::accept(self.as_raw_fd(), storage, len))?;
+ let fd = FileDesc::from_raw_fd(fd);
+ fd.set_cloexec()?;
+ Ok(Socket(fd))
+ }
+ }
+ }
+ }
+
+ pub fn duplicate(&self) -> io::Result<Socket> {
+ self.0.duplicate().map(Socket)
+ }
+
+ fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ libc::recv(self.as_raw_fd(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags)
+ })?;
+ Ok(ret as usize)
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, 0)
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, MSG_PEEK)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+
+ fn recv_from_with_flags(
+ &self,
+ buf: &mut [u8],
+ flags: c_int,
+ ) -> io::Result<(usize, SocketAddr)> {
+ let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() };
+ let mut addrlen = mem::size_of_val(&storage) as libc::socklen_t;
+
+ let n = cvt(unsafe {
+ libc::recvfrom(
+ self.as_raw_fd(),
+ buf.as_mut_ptr() as *mut c_void,
+ buf.len(),
+ flags,
+ &mut storage as *mut _ as *mut _,
+ &mut addrlen,
+ )
+ })?;
+ Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, 0)
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ pub fn recv_msg(&self, msg: &mut libc::msghdr) -> io::Result<usize> {
+ let n = cvt(unsafe { libc::recvmsg(self.as_raw_fd(), msg, libc::MSG_CMSG_CLOEXEC) })?;
+ Ok(n as usize)
+ }
+
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, MSG_PEEK)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ pub fn send_msg(&self, msg: &mut libc::msghdr) -> io::Result<usize> {
+ let n = cvt(unsafe { libc::sendmsg(self.as_raw_fd(), msg, 0) })?;
+ Ok(n as usize)
+ }
+
+ pub fn set_timeout(&self, dur: Option<Duration>, kind: libc::c_int) -> io::Result<()> {
+ let timeout = match dur {
+ Some(dur) => {
+ if dur.as_secs() == 0 && dur.subsec_nanos() == 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+
+ let secs = if dur.as_secs() > libc::time_t::MAX as u64 {
+ libc::time_t::MAX
+ } else {
+ dur.as_secs() as libc::time_t
+ };
+ let mut timeout = libc::timeval {
+ tv_sec: secs,
+ tv_usec: dur.subsec_micros() as libc::suseconds_t,
+ };
+ if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
+ timeout.tv_usec = 1;
+ }
+ timeout
+ }
+ None => libc::timeval { tv_sec: 0, tv_usec: 0 },
+ };
+ setsockopt(self, libc::SOL_SOCKET, kind, timeout)
+ }
+
+ pub fn timeout(&self, kind: libc::c_int) -> io::Result<Option<Duration>> {
+ let raw: libc::timeval = getsockopt(self, libc::SOL_SOCKET, kind)?;
+ if raw.tv_sec == 0 && raw.tv_usec == 0 {
+ Ok(None)
+ } else {
+ let sec = raw.tv_sec as u64;
+ let nsec = (raw.tv_usec as u32) * 1000;
+ Ok(Some(Duration::new(sec, nsec)))
+ }
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let how = match how {
+ Shutdown::Write => libc::SHUT_WR,
+ Shutdown::Read => libc::SHUT_RD,
+ Shutdown::Both => libc::SHUT_RDWR,
+ };
+ cvt(unsafe { libc::shutdown(self.as_raw_fd(), how) })?;
+ Ok(())
+ }
+
+ pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
+ let linger = libc::linger {
+ l_onoff: linger.is_some() as libc::c_int,
+ l_linger: linger.unwrap_or_default().as_secs() as libc::c_int,
+ };
+
+ setsockopt(self, libc::SOL_SOCKET, SO_LINGER, linger)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ let val: libc::linger = getsockopt(self, libc::SOL_SOCKET, SO_LINGER)?;
+
+ Ok((val.l_onoff != 0).then(|| Duration::from_secs(val.l_linger as u64)))
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ setsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY, nodelay as c_int)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ let raw: c_int = getsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY)?;
+ Ok(raw != 0)
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux",))]
+ pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
+ setsockopt(self, libc::SOL_SOCKET, libc::SO_PASSCRED, passcred as libc::c_int)
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux",))]
+ pub fn passcred(&self) -> io::Result<bool> {
+ let passcred: libc::c_int = getsockopt(self, libc::SOL_SOCKET, libc::SO_PASSCRED)?;
+ Ok(passcred != 0)
+ }
+
+ #[cfg(target_os = "netbsd")]
+ pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
+ setsockopt(self, 0 as libc::c_int, libc::LOCAL_CREDS, passcred as libc::c_int)
+ }
+
+ #[cfg(target_os = "netbsd")]
+ pub fn passcred(&self) -> io::Result<bool> {
+ let passcred: libc::c_int = getsockopt(self, 0 as libc::c_int, libc::LOCAL_CREDS)?;
+ Ok(passcred != 0)
+ }
+
+ #[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ let mut nonblocking = nonblocking as libc::c_int;
+ cvt(unsafe { libc::ioctl(self.as_raw_fd(), libc::FIONBIO, &mut nonblocking) }).map(drop)
+ }
+
+ #[cfg(any(target_os = "solaris", target_os = "illumos"))]
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ // FIONBIO is inadequate for sockets on illumos/Solaris, so use the
+ // fcntl(F_[GS]ETFL)-based method provided by FileDesc instead.
+ self.0.set_nonblocking(nonblocking)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ let raw: c_int = getsockopt(self, libc::SOL_SOCKET, libc::SO_ERROR)?;
+ if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
+ }
+
+ // This is used by sys_common code to abstract over Windows and Unix.
+ pub fn as_raw(&self) -> RawFd {
+ self.as_raw_fd()
+ }
+}
+
+impl AsInner<FileDesc> for Socket {
+ fn as_inner(&self) -> &FileDesc {
+ &self.0
+ }
+}
+
+impl IntoInner<FileDesc> for Socket {
+ fn into_inner(self) -> FileDesc {
+ self.0
+ }
+}
+
+impl FromInner<FileDesc> for Socket {
+ fn from_inner(file_desc: FileDesc) -> Self {
+ Self(file_desc)
+ }
+}
+
+impl AsFd for Socket {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+}
+
+impl AsRawFd for Socket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for Socket {
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+}
+
+impl FromRawFd for Socket {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ Self(FromRawFd::from_raw_fd(raw_fd))
+ }
+}
+
+// In versions of glibc prior to 2.26, there's a bug where the DNS resolver
+// will cache the contents of /etc/resolv.conf, so changes to that file on disk
+// can be ignored by a long-running program. That can break DNS lookups on e.g.
+// laptops where the network comes and goes. See
+// https://sourceware.org/bugzilla/show_bug.cgi?id=984. Note however that some
+// distros including Debian have patched glibc to fix this for a long time.
+//
+// A workaround for this bug is to call the res_init libc function, to clear
+// the cached configs. Unfortunately, while we believe glibc's implementation
+// of res_init is thread-safe, we know that other implementations are not
+// (https://github.com/rust-lang/rust/issues/43592). Code here in libstd could
+// try to synchronize its res_init calls with a Mutex, but that wouldn't
+// protect programs that call into libc in other ways. So instead of calling
+// res_init unconditionally, we call it only when we detect we're linking
+ // against glibc version < 2.26. (That is, when we both know it's needed and
+// believe it's thread-safe).
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+fn on_resolver_failure() {
+ use crate::sys;
+
+ // If the version fails to parse, we treat it the same as "not glibc".
+ if let Some(version) = sys::os::glibc_version() {
+ if version < (2, 26) {
+ unsafe { libc::res_init() };
+ }
+ }
+}
+
+#[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+fn on_resolver_failure() {}
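
`connect_timeout` above converts the remaining `Duration` into poll()'s millisecond argument with saturation, rounding a nonzero remainder up so it never passes 0 (which poll treats as "return immediately") and clamping to the `c_int` range. That conversion as a standalone sketch; the function name is hypothetical.

    use std::time::Duration;

    fn poll_timeout_ms(remaining: Duration) -> i32 {
        let mut ms = remaining
            .as_secs()
            .saturating_mul(1_000)
            .saturating_add(remaining.subsec_nanos() as u64 / 1_000_000);
        if ms == 0 {
            ms = 1; // a tiny remaining timeout still has to block briefly
        }
        ms.min(i32::MAX as u64) as i32
    }

    fn main() {
        assert_eq!(poll_timeout_ms(Duration::from_micros(10)), 1); // rounded up, not 0
        assert_eq!(poll_timeout_ms(Duration::from_secs(2)), 2_000);
        assert_eq!(poll_timeout_ms(Duration::from_secs(u64::MAX)), i32::MAX); // clamped
    }
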
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
new file mode 100644
index 000000000..46545a083
--- /dev/null
+++ b/library/std/src/sys/unix/os.rs
@@ -0,0 +1,680 @@
+//! Implementation of `std::os` functionality for unix systems
+
+#![allow(unused_imports)] // lots of cfg code here
+
+#[cfg(test)]
+mod tests;
+
+use crate::os::unix::prelude::*;
+
+use crate::error::Error as StdError;
+use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::iter;
+use crate::mem;
+use crate::path::{self, PathBuf};
+use crate::ptr;
+use crate::slice;
+use crate::str;
+use crate::sys::cvt;
+use crate::sys::fd;
+use crate::sys::memchr;
+use crate::sys_common::rwlock::{StaticRwLock, StaticRwLockReadGuard};
+use crate::vec;
+
+#[cfg(all(target_env = "gnu", not(target_os = "vxworks")))]
+use crate::sys::weak::weak;
+
+use libc::{c_char, c_int, c_void};
+
+const TMPBUF_SZ: usize = 128;
+
+cfg_if::cfg_if! {
+ if #[cfg(target_os = "redox")] {
+ const PATH_SEPARATOR: u8 = b';';
+ } else {
+ const PATH_SEPARATOR: u8 = b':';
+ }
+}
+
+extern "C" {
+ #[cfg(not(any(target_os = "dragonfly", target_os = "vxworks")))]
+ #[cfg_attr(
+ any(
+ target_os = "linux",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "l4re"
+ ),
+ link_name = "__errno_location"
+ )]
+ #[cfg_attr(
+ any(
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "android",
+ target_os = "redox",
+ target_env = "newlib"
+ ),
+ link_name = "__errno"
+ )]
+ #[cfg_attr(any(target_os = "solaris", target_os = "illumos"), link_name = "___errno")]
+ #[cfg_attr(
+ any(target_os = "macos", target_os = "ios", target_os = "freebsd", target_os = "watchos"),
+ link_name = "__error"
+ )]
+ #[cfg_attr(target_os = "haiku", link_name = "_errnop")]
+ fn errno_location() -> *mut c_int;
+}
+
+/// Returns the platform-specific value of errno
+#[cfg(not(any(target_os = "dragonfly", target_os = "vxworks")))]
+pub fn errno() -> i32 {
+ unsafe { (*errno_location()) as i32 }
+}
+
+/// Sets the platform-specific value of errno
+#[cfg(all(not(target_os = "dragonfly"), not(target_os = "vxworks")))] // needed for readdir and syscall!
+#[allow(dead_code)] // but not all target cfgs actually end up using it
+pub fn set_errno(e: i32) {
+ unsafe { *errno_location() = e as c_int }
+}
+
+#[cfg(target_os = "vxworks")]
+pub fn errno() -> i32 {
+ unsafe { libc::errnoGet() }
+}
+
+#[cfg(target_os = "dragonfly")]
+pub fn errno() -> i32 {
+ extern "C" {
+ #[thread_local]
+ static errno: c_int;
+ }
+
+ unsafe { errno as i32 }
+}
+
+#[cfg(target_os = "dragonfly")]
+#[allow(dead_code)]
+pub fn set_errno(e: i32) {
+ extern "C" {
+ #[thread_local]
+ static mut errno: c_int;
+ }
+
+ unsafe {
+ errno = e;
+ }
+}
+
+/// Gets a detailed string description for the given error number.
+pub fn error_string(errno: i32) -> String {
+ extern "C" {
+ #[cfg_attr(any(target_os = "linux", target_env = "newlib"), link_name = "__xpg_strerror_r")]
+ fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int;
+ }
+
+ let mut buf = [0 as c_char; TMPBUF_SZ];
+
+ let p = buf.as_mut_ptr();
+ unsafe {
+ if strerror_r(errno as c_int, p, buf.len()) < 0 {
+ panic!("strerror_r failure");
+ }
+
+ let p = p as *const _;
+ str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned()
+ }
+}
+
+#[cfg(target_os = "espidf")]
+pub fn getcwd() -> io::Result<PathBuf> {
+ Ok(PathBuf::from("/"))
+}
+
+#[cfg(not(target_os = "espidf"))]
+pub fn getcwd() -> io::Result<PathBuf> {
+ let mut buf = Vec::with_capacity(512);
+ loop {
+ unsafe {
+ let ptr = buf.as_mut_ptr() as *mut libc::c_char;
+ if !libc::getcwd(ptr, buf.capacity()).is_null() {
+ let len = CStr::from_ptr(buf.as_ptr() as *const libc::c_char).to_bytes().len();
+ buf.set_len(len);
+ buf.shrink_to_fit();
+ return Ok(PathBuf::from(OsString::from_vec(buf)));
+ } else {
+ let error = io::Error::last_os_error();
+ if error.raw_os_error() != Some(libc::ERANGE) {
+ return Err(error);
+ }
+ }
+
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity.
+ let cap = buf.capacity();
+ buf.set_len(cap);
+ buf.reserve(1);
+ }
+ }
+}
+
+#[cfg(target_os = "espidf")]
+pub fn chdir(p: &path::Path) -> io::Result<()> {
+ super::unsupported::unsupported()
+}
+
+#[cfg(not(target_os = "espidf"))]
+pub fn chdir(p: &path::Path) -> io::Result<()> {
+ let p: &OsStr = p.as_ref();
+ let p = CString::new(p.as_bytes())?;
+ if unsafe { libc::chdir(p.as_ptr()) } != 0 {
+ return Err(io::Error::last_os_error());
+ }
+ Ok(())
+}
+
+pub struct SplitPaths<'a> {
+ iter: iter::Map<slice::Split<'a, u8, fn(&u8) -> bool>, fn(&'a [u8]) -> PathBuf>,
+}
+
+pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> {
+ fn bytes_to_path(b: &[u8]) -> PathBuf {
+ PathBuf::from(<OsStr as OsStrExt>::from_bytes(b))
+ }
+ fn is_separator(b: &u8) -> bool {
+ *b == PATH_SEPARATOR
+ }
+ let unparsed = unparsed.as_bytes();
+ SplitPaths {
+ iter: unparsed
+ .split(is_separator as fn(&u8) -> bool)
+ .map(bytes_to_path as fn(&[u8]) -> PathBuf),
+ }
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ let mut joined = Vec::new();
+
+ for (i, path) in paths.enumerate() {
+ let path = path.as_ref().as_bytes();
+ if i > 0 {
+ joined.push(PATH_SEPARATOR)
+ }
+ if path.contains(&PATH_SEPARATOR) {
+ return Err(JoinPathsError);
+ }
+ joined.extend_from_slice(path);
+ }
+ Ok(OsStringExt::from_vec(joined))
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "path segment contains separator `{}`", char::from(PATH_SEPARATOR))
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "failed to join paths"
+ }
+}
+
+#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsafe {
+ let mut mib = [
+ libc::CTL_KERN as c_int,
+ libc::KERN_PROC as c_int,
+ libc::KERN_PROC_PATHNAME as c_int,
+ -1 as c_int,
+ ];
+ let mut sz = 0;
+ cvt(libc::sysctl(
+ mib.as_mut_ptr(),
+ mib.len() as libc::c_uint,
+ ptr::null_mut(),
+ &mut sz,
+ ptr::null_mut(),
+ 0,
+ ))?;
+ if sz == 0 {
+ return Err(io::Error::last_os_error());
+ }
+ let mut v: Vec<u8> = Vec::with_capacity(sz);
+ cvt(libc::sysctl(
+ mib.as_mut_ptr(),
+ mib.len() as libc::c_uint,
+ v.as_mut_ptr() as *mut libc::c_void,
+ &mut sz,
+ ptr::null_mut(),
+ 0,
+ ))?;
+ if sz == 0 {
+ return Err(io::Error::last_os_error());
+ }
+ v.set_len(sz - 1); // chop off trailing NUL
+ Ok(PathBuf::from(OsString::from_vec(v)))
+ }
+}
+
+#[cfg(target_os = "netbsd")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ fn sysctl() -> io::Result<PathBuf> {
+ unsafe {
+ let mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, -1, libc::KERN_PROC_PATHNAME];
+ let mut path_len: usize = 0;
+ cvt(libc::sysctl(
+ mib.as_ptr(),
+ mib.len() as libc::c_uint,
+ ptr::null_mut(),
+ &mut path_len,
+ ptr::null(),
+ 0,
+ ))?;
+ if path_len <= 1 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "KERN_PROC_PATHNAME sysctl returned zero-length string",
+ ));
+ }
+ let mut path: Vec<u8> = Vec::with_capacity(path_len);
+ cvt(libc::sysctl(
+ mib.as_ptr(),
+ mib.len() as libc::c_uint,
+ path.as_ptr() as *mut libc::c_void,
+ &mut path_len,
+ ptr::null(),
+ 0,
+ ))?;
+ path.set_len(path_len - 1); // chop off NUL
+ Ok(PathBuf::from(OsString::from_vec(path)))
+ }
+ }
+ fn procfs() -> io::Result<PathBuf> {
+ let curproc_exe = path::Path::new("/proc/curproc/exe");
+ if curproc_exe.is_file() {
+ return crate::fs::read_link(curproc_exe);
+ }
+ Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "/proc/curproc/exe doesn't point to regular file.",
+ ))
+ }
+ sysctl().or_else(|_| procfs())
+}
+
+#[cfg(target_os = "openbsd")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsafe {
+ let mut mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, libc::getpid(), libc::KERN_PROC_ARGV];
+ let mib = mib.as_mut_ptr();
+ let mut argv_len = 0;
+ cvt(libc::sysctl(mib, 4, ptr::null_mut(), &mut argv_len, ptr::null_mut(), 0))?;
+ let mut argv = Vec::<*const libc::c_char>::with_capacity(argv_len as usize);
+ cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _, &mut argv_len, ptr::null_mut(), 0))?;
+ argv.set_len(argv_len as usize);
+ if argv[0].is_null() {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "no current exe available",
+ ));
+ }
+ let argv0 = CStr::from_ptr(argv[0]).to_bytes();
+ if argv0[0] == b'.' || argv0.iter().any(|b| *b == b'/') {
+ crate::fs::canonicalize(OsStr::from_bytes(argv0))
+ } else {
+ Ok(PathBuf::from(OsStr::from_bytes(argv0)))
+ }
+ }
+}
+
+#[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))]
+pub fn current_exe() -> io::Result<PathBuf> {
+ match crate::fs::read_link("/proc/self/exe") {
+ Err(ref e) if e.kind() == io::ErrorKind::NotFound => Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "no /proc/self/exe available. Is /proc mounted?",
+ )),
+ other => other,
+ }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsafe {
+ let mut sz: u32 = 0;
+ libc::_NSGetExecutablePath(ptr::null_mut(), &mut sz);
+ if sz == 0 {
+ return Err(io::Error::last_os_error());
+ }
+ let mut v: Vec<u8> = Vec::with_capacity(sz as usize);
+ let err = libc::_NSGetExecutablePath(v.as_mut_ptr() as *mut i8, &mut sz);
+ if err != 0 {
+ return Err(io::Error::last_os_error());
+ }
+ v.set_len(sz as usize - 1); // chop off trailing NUL
+ Ok(PathBuf::from(OsString::from_vec(v)))
+ }
+}
+
+#[cfg(any(target_os = "solaris", target_os = "illumos"))]
+pub fn current_exe() -> io::Result<PathBuf> {
+ if let Ok(path) = crate::fs::read_link("/proc/self/path/a.out") {
+ Ok(path)
+ } else {
+ unsafe {
+ let path = libc::getexecname();
+ if path.is_null() {
+ Err(io::Error::last_os_error())
+ } else {
+ let filename = CStr::from_ptr(path).to_bytes();
+ let path = PathBuf::from(<OsStr as OsStrExt>::from_bytes(filename));
+
+ // Prepend the current working directory to the path if
+ // it isn't an absolute pathname.
+ if filename[0] == b'/' { Ok(path) } else { getcwd().map(|cwd| cwd.join(path)) }
+ }
+ }
+ }
+}
+
+#[cfg(target_os = "haiku")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsafe {
+ let mut info: mem::MaybeUninit<libc::image_info> = mem::MaybeUninit::uninit();
+ let mut cookie: i32 = 0;
+ // the executable can be found at team id 0
+ let result = libc::_get_next_image_info(
+ 0,
+ &mut cookie,
+ info.as_mut_ptr(),
+ mem::size_of::<libc::image_info>(),
+ );
+ if result != 0 {
+ use crate::io::ErrorKind;
+ Err(io::const_io_error!(ErrorKind::Uncategorized, "Error getting executable path"))
+ } else {
+ let name = CStr::from_ptr((*info.as_ptr()).name.as_ptr()).to_bytes();
+ Ok(PathBuf::from(OsStr::from_bytes(name)))
+ }
+ }
+}
+
+#[cfg(target_os = "redox")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ crate::fs::read_to_string("sys:exe").map(PathBuf::from)
+}
+
+#[cfg(target_os = "l4re")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ use crate::io::ErrorKind;
+ Err(io::const_io_error!(ErrorKind::Unsupported, "Not yet implemented!"))
+}
+
+#[cfg(target_os = "vxworks")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ #[cfg(test)]
+ use realstd::env;
+
+ #[cfg(not(test))]
+ use crate::env;
+
+ let exe_path = env::args().next().unwrap();
+ let path = path::Path::new(&exe_path);
+ path.canonicalize()
+}
+
+#[cfg(any(target_os = "espidf", target_os = "horizon"))]
+pub fn current_exe() -> io::Result<PathBuf> {
+ super::unsupported::unsupported()
+}
+
+#[cfg(target_os = "fuchsia")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ use crate::io::ErrorKind;
+
+ #[cfg(test)]
+ use realstd::env;
+
+ #[cfg(not(test))]
+ use crate::env;
+
+ let exe_path = env::args().next().ok_or(io::const_io_error!(
+ ErrorKind::Uncategorized,
+ "an executable path was not found because no arguments were provided through argv"
+ ))?;
+ let path = PathBuf::from(exe_path);
+
+ // Prepend the current working directory to the path if it's not absolute.
+ if !path.is_absolute() { getcwd().map(|cwd| cwd.join(path)) } else { Ok(path) }
+}
+
+pub struct Env {
+ iter: vec::IntoIter<(OsString, OsString)>,
+}
+
+impl !Send for Env {}
+impl !Sync for Env {}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[cfg(target_os = "macos")]
+pub unsafe fn environ() -> *mut *const *const c_char {
+ libc::_NSGetEnviron() as *mut *const *const c_char
+}
+
+#[cfg(not(target_os = "macos"))]
+pub unsafe fn environ() -> *mut *const *const c_char {
+ extern "C" {
+ static mut environ: *const *const c_char;
+ }
+ ptr::addr_of_mut!(environ)
+}
+
+static ENV_LOCK: StaticRwLock = StaticRwLock::new();
+
+pub fn env_read_lock() -> StaticRwLockReadGuard {
+ ENV_LOCK.read()
+}
+
+/// Returns a vector of (variable, value) byte-vector pairs for all the
+/// environment variables of the current process.
+pub fn env() -> Env {
+ unsafe {
+ let _guard = env_read_lock();
+ let mut environ = *environ();
+ let mut result = Vec::new();
+ if !environ.is_null() {
+ while !(*environ).is_null() {
+ if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) {
+ result.push(key_value);
+ }
+ environ = environ.add(1);
+ }
+ }
+ return Env { iter: result.into_iter() };
+ }
+
+ fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
+ // Strategy (copied from glibc): Variable name and value are separated
+ // by an ASCII equals sign '='. Since a variable name must not be
+ // empty, allow variable names starting with an equals sign. Skip all
+ // malformed lines.
+ if input.is_empty() {
+ return None;
+ }
+ let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1);
+ pos.map(|p| {
+ (
+ OsStringExt::from_vec(input[..p].to_vec()),
+ OsStringExt::from_vec(input[p + 1..].to_vec()),
+ )
+ })
+ }
+}
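The `parse` helper above follows glibc's splitting rule for `environ` entries: search for '=' starting at index 1 so a leading '=' stays part of the (non-empty) name, and skip malformed entries. A minimal standalone sketch of the same rule; `split_env_entry` is a hypothetical name used only for illustration:

    // Split a raw `environ` entry into (name, value), mirroring the rule above.
    fn split_env_entry(input: &[u8]) -> Option<(&[u8], &[u8])> {
        if input.is_empty() {
            return None;
        }
        // Look for '=' only after the first byte, so the name is never empty.
        let pos = input[1..].iter().position(|&b| b == b'=').map(|p| p + 1)?;
        Some((&input[..pos], &input[pos + 1..]))
    }

    fn main() {
        assert_eq!(split_env_entry(b"HOME=/root"), Some((&b"HOME"[..], &b"/root"[..])));
        assert_eq!(split_env_entry(b"=odd=1"), Some((&b"=odd"[..], &b"1"[..])));
        assert_eq!(split_env_entry(b"malformed"), None);
    }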
+
+pub fn getenv(k: &OsStr) -> Option<OsString> {
+ // environment variables with a nul byte can't be set, so their value is
+ // always None as well
+ let k = CString::new(k.as_bytes()).ok()?;
+ unsafe {
+ let _guard = env_read_lock();
+ let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
+ }
+ }
+}
+
+pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
+ let k = CString::new(k.as_bytes())?;
+ let v = CString::new(v.as_bytes())?;
+
+ unsafe {
+ let _guard = ENV_LOCK.write();
+ cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
+ }
+}
+
+pub fn unsetenv(n: &OsStr) -> io::Result<()> {
+ let nbuf = CString::new(n.as_bytes())?;
+
+ unsafe {
+ let _guard = ENV_LOCK.write();
+ cvt(libc::unsetenv(nbuf.as_ptr())).map(drop)
+ }
+}
+
+#[cfg(not(target_os = "espidf"))]
+pub fn page_size() -> usize {
+ unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
+}
+
+pub fn temp_dir() -> PathBuf {
+ crate::env::var_os("TMPDIR").map(PathBuf::from).unwrap_or_else(|| {
+ if cfg!(target_os = "android") {
+ PathBuf::from("/data/local/tmp")
+ } else {
+ PathBuf::from("/tmp")
+ }
+ })
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ return crate::env::var_os("HOME").or_else(|| unsafe { fallback() }).map(PathBuf::from);
+
+ #[cfg(any(
+ target_os = "android",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "emscripten",
+ target_os = "redox",
+ target_os = "vxworks",
+ target_os = "espidf",
+ target_os = "horizon"
+ ))]
+ unsafe fn fallback() -> Option<OsString> {
+ None
+ }
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "emscripten",
+ target_os = "redox",
+ target_os = "vxworks",
+ target_os = "espidf",
+ target_os = "horizon"
+ )))]
+ unsafe fn fallback() -> Option<OsString> {
+ let amt = match libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) {
+ n if n < 0 => 512 as usize,
+ n => n as usize,
+ };
+ let mut buf = Vec::with_capacity(amt);
+ let mut passwd: libc::passwd = mem::zeroed();
+ let mut result = ptr::null_mut();
+ match libc::getpwuid_r(
+ libc::getuid(),
+ &mut passwd,
+ buf.as_mut_ptr(),
+ buf.capacity(),
+ &mut result,
+ ) {
+ 0 if !result.is_null() => {
+ let ptr = passwd.pw_dir as *const _;
+ let bytes = CStr::from_ptr(ptr).to_bytes().to_vec();
+ Some(OsStringExt::from_vec(bytes))
+ }
+ _ => None,
+ }
+ }
+}
+
+pub fn exit(code: i32) -> ! {
+ unsafe { libc::exit(code as c_int) }
+}
+
+pub fn getpid() -> u32 {
+ unsafe { libc::getpid() as u32 }
+}
+
+pub fn getppid() -> u32 {
+ unsafe { libc::getppid() as u32 }
+}
+
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+pub fn glibc_version() -> Option<(usize, usize)> {
+ extern "C" {
+ fn gnu_get_libc_version() -> *const libc::c_char;
+ }
+ let version_cstr = unsafe { CStr::from_ptr(gnu_get_libc_version()) };
+ if let Ok(version_str) = version_cstr.to_str() {
+ parse_glibc_version(version_str)
+ } else {
+ None
+ }
+}
+
+// Returns Some((major, minor)) if the string is a valid "x.y" version,
+// ignoring any extra dot-separated parts. Otherwise returns None.
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+fn parse_glibc_version(version: &str) -> Option<(usize, usize)> {
+ let mut parsed_ints = version.split('.').map(str::parse::<usize>).fuse();
+ match (parsed_ints.next(), parsed_ints.next()) {
+ (Some(Ok(major)), Some(Ok(minor))) => Some((major, minor)),
+ _ => None,
+ }
+}
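How these shims surface through the public `std::env` API (a usage sketch, not part of the patch; the printed values naturally vary per system):

    use std::env;
    use std::ffi::OsString;

    fn main() -> std::io::Result<()> {
        // setenv/unsetenv/getenv back set_var, remove_var and var_os.
        env::set_var("EXAMPLE_KEY", "1");
        assert_eq!(env::var_os("EXAMPLE_KEY"), Some(OsString::from("1")));
        env::remove_var("EXAMPLE_KEY");

        // temp_dir() consults TMPDIR before falling back to /tmp
        // (or /data/local/tmp on Android).
        println!("temp dir: {}", env::temp_dir().display());

        // current_exe() reads /proc/self/exe on Linux.
        println!("current exe: {}", env::current_exe()?.display());
        Ok(())
    }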
diff --git a/library/std/src/sys/unix/os/tests.rs b/library/std/src/sys/unix/os/tests.rs
new file mode 100644
index 000000000..efc29955b
--- /dev/null
+++ b/library/std/src/sys/unix/os/tests.rs
@@ -0,0 +1,23 @@
+#[test]
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+fn test_glibc_version() {
+ // This mostly just tests that the weak linkage doesn't panic wildly...
+ super::glibc_version();
+}
+
+#[test]
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+fn test_parse_glibc_version() {
+ let cases = [
+ ("0.0", Some((0, 0))),
+ ("01.+2", Some((1, 2))),
+ ("3.4.5.six", Some((3, 4))),
+ ("1", None),
+ ("1.-2", None),
+ ("1.foo", None),
+ ("foo.1", None),
+ ];
+ for &(version_str, parsed) in cases.iter() {
+ assert_eq!(parsed, super::parse_glibc_version(version_str));
+ }
+}
diff --git a/library/std/src/sys/unix/os_str.rs b/library/std/src/sys/unix/os_str.rs
new file mode 100644
index 000000000..ccbc18224
--- /dev/null
+++ b/library/std/src/sys/unix/os_str.rs
@@ -0,0 +1,266 @@
+//! The underlying OsString/OsStr implementation on Unix and many other
+//! systems: just a `Vec<u8>`/`[u8]`.
+
+use crate::borrow::Cow;
+use crate::collections::TryReserveError;
+use crate::fmt;
+use crate::fmt::Write;
+use crate::mem;
+use crate::rc::Rc;
+use crate::str;
+use crate::sync::Arc;
+use crate::sys_common::{AsInner, IntoInner};
+
+use core::str::lossy::{Utf8Lossy, Utf8LossyChunk};
+
+#[cfg(test)]
+#[path = "../unix/os_str/tests.rs"]
+mod tests;
+
+#[derive(Hash)]
+#[repr(transparent)]
+pub struct Buf {
+ pub inner: Vec<u8>,
+}
+
+#[repr(transparent)]
+pub struct Slice {
+ pub inner: [u8],
+}
+
+impl fmt::Debug for Slice {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Writes out a valid unicode string with the correct escape sequences
+
+ formatter.write_str("\"")?;
+ for Utf8LossyChunk { valid, broken } in Utf8Lossy::from_bytes(&self.inner).chunks() {
+ for c in valid.chars().flat_map(|c| c.escape_debug()) {
+ formatter.write_char(c)?
+ }
+
+ for b in broken {
+ write!(formatter, "\\x{:02X}", b)?;
+ }
+ }
+ formatter.write_str("\"")
+ }
+}
+
+impl fmt::Display for Slice {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&Utf8Lossy::from_bytes(&self.inner), formatter)
+ }
+}
+
+impl fmt::Debug for Buf {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.as_slice(), formatter)
+ }
+}
+
+impl fmt::Display for Buf {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.as_slice(), formatter)
+ }
+}
+
+impl Clone for Buf {
+ #[inline]
+ fn clone(&self) -> Self {
+ Buf { inner: self.inner.clone() }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ self.inner.clone_from(&source.inner)
+ }
+}
+
+impl IntoInner<Vec<u8>> for Buf {
+ fn into_inner(self) -> Vec<u8> {
+ self.inner
+ }
+}
+
+impl AsInner<[u8]> for Buf {
+ fn as_inner(&self) -> &[u8] {
+ &self.inner
+ }
+}
+
+impl Buf {
+ pub fn from_string(s: String) -> Buf {
+ Buf { inner: s.into_bytes() }
+ }
+
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Buf {
+ Buf { inner: Vec::with_capacity(capacity) }
+ }
+
+ #[inline]
+ pub fn clear(&mut self) {
+ self.inner.clear()
+ }
+
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ self.inner.reserve(additional)
+ }
+
+ #[inline]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve(additional)
+ }
+
+ #[inline]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.inner.reserve_exact(additional)
+ }
+
+ #[inline]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve_exact(additional)
+ }
+
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.inner.shrink_to_fit()
+ }
+
+ #[inline]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.inner.shrink_to(min_capacity)
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &Slice {
+ // SAFETY: Slice just wraps [u8],
+ // and &*self.inner is &[u8], therefore
+ // transmuting &[u8] to &Slice is safe.
+ unsafe { mem::transmute(&*self.inner) }
+ }
+
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut Slice {
+ // SAFETY: Slice just wraps [u8],
+ // and &mut *self.inner is &mut [u8], therefore
+ // transmuting &mut [u8] to &mut Slice is safe.
+ unsafe { mem::transmute(&mut *self.inner) }
+ }
+
+ pub fn into_string(self) -> Result<String, Buf> {
+ String::from_utf8(self.inner).map_err(|p| Buf { inner: p.into_bytes() })
+ }
+
+ pub fn push_slice(&mut self, s: &Slice) {
+ self.inner.extend_from_slice(&s.inner)
+ }
+
+ #[inline]
+ pub fn into_box(self) -> Box<Slice> {
+ unsafe { mem::transmute(self.inner.into_boxed_slice()) }
+ }
+
+ #[inline]
+ pub fn from_box(boxed: Box<Slice>) -> Buf {
+ let inner: Box<[u8]> = unsafe { mem::transmute(boxed) };
+ Buf { inner: inner.into_vec() }
+ }
+
+ #[inline]
+ pub fn into_arc(&self) -> Arc<Slice> {
+ self.as_slice().into_arc()
+ }
+
+ #[inline]
+ pub fn into_rc(&self) -> Rc<Slice> {
+ self.as_slice().into_rc()
+ }
+}
+
+impl Slice {
+ #[inline]
+ fn from_u8_slice(s: &[u8]) -> &Slice {
+ unsafe { mem::transmute(s) }
+ }
+
+ #[inline]
+ pub fn from_str(s: &str) -> &Slice {
+ Slice::from_u8_slice(s.as_bytes())
+ }
+
+ pub fn to_str(&self) -> Option<&str> {
+ str::from_utf8(&self.inner).ok()
+ }
+
+ pub fn to_string_lossy(&self) -> Cow<'_, str> {
+ String::from_utf8_lossy(&self.inner)
+ }
+
+ pub fn to_owned(&self) -> Buf {
+ Buf { inner: self.inner.to_vec() }
+ }
+
+ pub fn clone_into(&self, buf: &mut Buf) {
+ self.inner.clone_into(&mut buf.inner)
+ }
+
+ #[inline]
+ pub fn into_box(&self) -> Box<Slice> {
+ let boxed: Box<[u8]> = self.inner.into();
+ unsafe { mem::transmute(boxed) }
+ }
+
+ pub fn empty_box() -> Box<Slice> {
+ let boxed: Box<[u8]> = Default::default();
+ unsafe { mem::transmute(boxed) }
+ }
+
+ #[inline]
+ pub fn into_arc(&self) -> Arc<Slice> {
+ let arc: Arc<[u8]> = Arc::from(&self.inner);
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const Slice) }
+ }
+
+ #[inline]
+ pub fn into_rc(&self) -> Rc<Slice> {
+ let rc: Rc<[u8]> = Rc::from(&self.inner);
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const Slice) }
+ }
+
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ self.inner.make_ascii_lowercase()
+ }
+
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ self.inner.make_ascii_uppercase()
+ }
+
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> Buf {
+ Buf { inner: self.inner.to_ascii_lowercase() }
+ }
+
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> Buf {
+ Buf { inner: self.inner.to_ascii_uppercase() }
+ }
+
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+ self.inner.is_ascii()
+ }
+
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
+ self.inner.eq_ignore_ascii_case(&other.inner)
+ }
+}
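A public-API view of the byte-based representation above, through `std::os::unix::ffi::OsStrExt` (a usage sketch; the byte values are arbitrary examples):

    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;

    fn main() {
        let raw = b"caf\xC3\xA9-\xF0"; // valid UTF-8 followed by one stray byte
        let os: &OsStr = OsStr::from_bytes(raw);

        assert_eq!(os.as_bytes(), raw);                     // round-trips losslessly
        assert_eq!(os.to_str(), None);                      // not valid UTF-8 as a whole
        assert_eq!(os.to_string_lossy(), "café-\u{FFFD}");  // stray byte replaced
        println!("{:?}", os);                               // Debug escapes the stray byte
    }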
diff --git a/library/std/src/sys/unix/os_str/tests.rs b/library/std/src/sys/unix/os_str/tests.rs
new file mode 100644
index 000000000..213277f01
--- /dev/null
+++ b/library/std/src/sys/unix/os_str/tests.rs
@@ -0,0 +1,10 @@
+use super::*;
+
+#[test]
+fn slice_debug_output() {
+ let input = Slice::from_u8_slice(b"\xF0hello,\tworld");
+ let expected = r#""\xF0hello,\tworld""#;
+ let output = format!("{input:?}");
+
+ assert_eq!(output, expected);
+}
diff --git a/library/std/src/sys/unix/path.rs b/library/std/src/sys/unix/path.rs
new file mode 100644
index 000000000..a98a69e2d
--- /dev/null
+++ b/library/std/src/sys/unix/path.rs
@@ -0,0 +1,63 @@
+use crate::env;
+use crate::ffi::OsStr;
+use crate::io;
+use crate::path::{Path, PathBuf, Prefix};
+
+#[inline]
+pub fn is_sep_byte(b: u8) -> bool {
+ b == b'/'
+}
+
+#[inline]
+pub fn is_verbatim_sep(b: u8) -> bool {
+ b == b'/'
+}
+
+#[inline]
+pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> {
+ None
+}
+
+pub const MAIN_SEP_STR: &str = "/";
+pub const MAIN_SEP: char = '/';
+
+/// Make a POSIX path absolute without changing its semantics.
+pub(crate) fn absolute(path: &Path) -> io::Result<PathBuf> {
+ // This is mostly a wrapper around collecting `Path::components`, with
+ // exceptions made where this conflicts with the POSIX specification.
+ // See 4.13 Pathname Resolution, IEEE Std 1003.1-2017
+ // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13
+
+ // Get the components, skipping the redundant leading "." component if it exists.
+ let mut components = path.strip_prefix(".").unwrap_or(path).components();
+ let path_os = path.as_os_str().bytes();
+
+ let mut normalized = if path.is_absolute() {
+ // "If a pathname begins with two successive <slash> characters, the
+ // first component following the leading <slash> characters may be
+ // interpreted in an implementation-defined manner, although more than
+ // two leading <slash> characters shall be treated as a single <slash>
+ // character."
+ if path_os.starts_with(b"//") && !path_os.starts_with(b"///") {
+ components.next();
+ PathBuf::from("//")
+ } else {
+ PathBuf::new()
+ }
+ } else {
+ env::current_dir()?
+ };
+ normalized.extend(components);
+
+ // "Interfaces using pathname resolution may specify additional constraints
+ // when a pathname that does not name an existing directory contains at
+ // least one non- <slash> character and contains one or more trailing
+ // <slash> characters".
+ // A trailing <slash> is also meaningful if "a symbolic link is
+ // encountered during pathname resolution".
+ if path_os.ends_with(b"/") {
+ normalized.push("");
+ }
+
+ Ok(normalized)
+}
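A condensed standalone sketch of the same normalization rules, taking the current directory as a parameter instead of reading `env::current_dir()`; `absolute_from` is a hypothetical helper name:

    use std::os::unix::ffi::OsStrExt;
    use std::path::{Path, PathBuf};

    fn absolute_from(path: &Path, cwd: &Path) -> PathBuf {
        let bytes = path.as_os_str().as_bytes();
        let mut components = path.strip_prefix(".").unwrap_or(path).components();

        let mut out = if path.is_absolute() {
            // An exactly-double leading slash is implementation-defined and kept.
            if bytes.starts_with(b"//") && !bytes.starts_with(b"///") {
                components.next();
                PathBuf::from("//")
            } else {
                PathBuf::new()
            }
        } else {
            cwd.to_path_buf()
        };
        out.extend(components);

        // A trailing slash is meaningful for symlink resolution, so keep it.
        if bytes.ends_with(b"/") {
            out.push("");
        }
        out
    }

    fn main() {
        let cwd = Path::new("/home/user");
        assert_eq!(absolute_from(Path::new("a/b/"), cwd), Path::new("/home/user/a/b/"));
        assert_eq!(absolute_from(Path::new("//net/share"), cwd), Path::new("//net/share"));
        assert_eq!(absolute_from(Path::new("/x/./y"), cwd), Path::new("/x/y"));
    }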
diff --git a/library/std/src/sys/unix/pipe.rs b/library/std/src/sys/unix/pipe.rs
new file mode 100644
index 000000000..a56c275c9
--- /dev/null
+++ b/library/std/src/sys/unix/pipe.rs
@@ -0,0 +1,151 @@
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::mem;
+use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
+use crate::sys::fd::FileDesc;
+use crate::sys::{cvt, cvt_r};
+use crate::sys_common::IntoInner;
+
+////////////////////////////////////////////////////////////////////////////////
+// Anonymous pipes
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct AnonPipe(FileDesc);
+
+pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
+ let mut fds = [0; 2];
+
+ // The only known way right now to atomically create a pipe with the CLOEXEC
+ // flag set is to use the `pipe2` syscall. This was added to Linux in 2.6.27,
+ // glibc 2.9 and musl 0.9.3, and some other targets also have it.
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "redox"
+ ))] {
+ unsafe {
+ cvt(libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC))?;
+ Ok((AnonPipe(FileDesc::from_raw_fd(fds[0])), AnonPipe(FileDesc::from_raw_fd(fds[1]))))
+ }
+ } else {
+ unsafe {
+ cvt(libc::pipe(fds.as_mut_ptr()))?;
+
+ let fd0 = FileDesc::from_raw_fd(fds[0]);
+ let fd1 = FileDesc::from_raw_fd(fds[1]);
+ fd0.set_cloexec()?;
+ fd1.set_cloexec()?;
+ Ok((AnonPipe(fd0), AnonPipe(fd1)))
+ }
+ }
+ }
+}
+
+impl AnonPipe {
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.0.is_read_vectored()
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.0.is_write_vectored()
+ }
+}
+
+impl IntoInner<FileDesc> for AnonPipe {
+ fn into_inner(self) -> FileDesc {
+ self.0
+ }
+}
+
+pub fn read2(p1: AnonPipe, v1: &mut Vec<u8>, p2: AnonPipe, v2: &mut Vec<u8>) -> io::Result<()> {
+ // Set both pipes into nonblocking mode as we're going to be reading from both
+ // in the `poll` loop below, and we wouldn't want one to block the other!
+ let p1 = p1.into_inner();
+ let p2 = p2.into_inner();
+ p1.set_nonblocking(true)?;
+ p2.set_nonblocking(true)?;
+
+ let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() };
+ fds[0].fd = p1.as_raw_fd();
+ fds[0].events = libc::POLLIN;
+ fds[1].fd = p2.as_raw_fd();
+ fds[1].events = libc::POLLIN;
+ loop {
+ // wait for either pipe to become readable using `poll`
+ cvt_r(|| unsafe { libc::poll(fds.as_mut_ptr(), 2, -1) })?;
+
+ if fds[0].revents != 0 && read(&p1, v1)? {
+ p2.set_nonblocking(false)?;
+ return p2.read_to_end(v2).map(drop);
+ }
+ if fds[1].revents != 0 && read(&p2, v2)? {
+ p1.set_nonblocking(false)?;
+ return p1.read_to_end(v1).map(drop);
+ }
+ }
+
+ // Read as much as we can from each pipe, ignoring EWOULDBLOCK or
+ // EAGAIN. If we hit EOF, then this will happen because the underlying
+ // reader will return Ok(0), in which case we'll see `Ok` ourselves. In
+ // this case we flip the other fd back into blocking mode and read
+ // whatever's leftover on that file descriptor.
+ fn read(fd: &FileDesc, dst: &mut Vec<u8>) -> Result<bool, io::Error> {
+ match fd.read_to_end(dst) {
+ Ok(_) => Ok(true),
+ Err(e) => {
+ if e.raw_os_error() == Some(libc::EWOULDBLOCK)
+ || e.raw_os_error() == Some(libc::EAGAIN)
+ {
+ Ok(false)
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+}
+
+impl AsRawFd for AnonPipe {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+}
+
+impl AsFd for AnonPipe {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+}
+
+impl IntoRawFd for AnonPipe {
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+}
+
+impl FromRawFd for AnonPipe {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ Self(FromRawFd::from_raw_fd(raw_fd))
+ }
+}
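The nonblocking `poll` dance in `read2` above is what lets the public `Command::output()` drain a child's stdout and stderr concurrently without deadlocking when either pipe fills. A usage sketch (assumes a POSIX `sh` on PATH):

    use std::process::Command;

    fn main() -> std::io::Result<()> {
        let out = Command::new("sh")
            .arg("-c")
            .arg("echo to-stdout; echo to-stderr 1>&2")
            .output()?; // both pipes are drained via the poll loop above

        assert_eq!(String::from_utf8_lossy(&out.stdout), "to-stdout\n");
        assert_eq!(String::from_utf8_lossy(&out.stderr), "to-stderr\n");
        Ok(())
    }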
diff --git a/library/std/src/sys/unix/process/mod.rs b/library/std/src/sys/unix/process/mod.rs
new file mode 100644
index 000000000..3701510f3
--- /dev/null
+++ b/library/std/src/sys/unix/process/mod.rs
@@ -0,0 +1,24 @@
+pub use self::process_common::{Command, CommandArgs, ExitCode, Stdio, StdioPipes};
+pub use self::process_inner::{ExitStatus, ExitStatusError, Process};
+pub use crate::ffi::OsString as EnvKey;
+pub use crate::sys_common::process::CommandEnvs;
+
+#[cfg_attr(any(target_os = "espidf", target_os = "horizon"), allow(unused))]
+mod process_common;
+
+cfg_if::cfg_if! {
+ if #[cfg(target_os = "fuchsia")] {
+ #[path = "process_fuchsia.rs"]
+ mod process_inner;
+ mod zircon;
+ } else if #[cfg(target_os = "vxworks")] {
+ #[path = "process_vxworks.rs"]
+ mod process_inner;
+ } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+ #[path = "process_unsupported.rs"]
+ mod process_inner;
+ } else {
+ #[path = "process_unix.rs"]
+ mod process_inner;
+ }
+}
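The same per-platform selection in miniature, using plain `#[cfg]` attributes and inline modules so the sketch is self-contained (the real code uses the `cfg_if` crate plus `#[path]` to pick a source file, but the effect is identical: exactly one `process_inner` is compiled):

    #[cfg(target_os = "linux")]
    mod imp {
        pub fn backend() -> &'static str { "linux" }
    }

    #[cfg(not(target_os = "linux"))]
    mod imp {
        pub fn backend() -> &'static str { "generic unix" }
    }

    pub use imp::backend;

    fn main() {
        println!("selected backend: {}", backend());
    }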
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
new file mode 100644
index 000000000..bca1b65a7
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -0,0 +1,523 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::os::unix::prelude::*;
+
+use crate::collections::BTreeMap;
+use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::path::Path;
+use crate::ptr;
+use crate::sys::fd::FileDesc;
+use crate::sys::fs::File;
+use crate::sys::pipe::{self, AnonPipe};
+use crate::sys_common::process::{CommandEnv, CommandEnvs};
+use crate::sys_common::IntoInner;
+
+#[cfg(not(target_os = "fuchsia"))]
+use crate::sys::fs::OpenOptions;
+
+use libc::{c_char, c_int, gid_t, pid_t, uid_t, EXIT_FAILURE, EXIT_SUCCESS};
+
+cfg_if::cfg_if! {
+ if #[cfg(target_os = "fuchsia")] {
+ // fuchsia doesn't have /dev/null
+ } else if #[cfg(target_os = "redox")] {
+ const DEV_NULL: &str = "null:\0";
+ } else if #[cfg(target_os = "vxworks")] {
+ const DEV_NULL: &str = "/null\0";
+ } else {
+ const DEV_NULL: &str = "/dev/null\0";
+ }
+}
+
+// Android with API level less than 21 defines the sig* functions inline, so they
+// are not available for dynamic linking. Implementing sigemptyset and sigaddset
+// allows us to support older Android versions (independent of the libc version).
+// The following implementations are based on
+// https://github.com/aosp-mirror/platform_bionic/blob/ad8dcd6023294b646e5a8288c0ed431b0845da49/libc/include/android/legacy_signal_inlines.h
+cfg_if::cfg_if! {
+ if #[cfg(target_os = "android")] {
+ pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int {
+ set.write_bytes(0u8, 1);
+ return 0;
+ }
+ #[allow(dead_code)]
+ pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int {
+ use crate::{slice, mem};
+
+ let raw = slice::from_raw_parts_mut(set as *mut u8, mem::size_of::<libc::sigset_t>());
+ let bit = (signum - 1) as usize;
+ raw[bit / 8] |= 1 << (bit % 8);
+ return 0;
+ }
+ } else {
+ pub use libc::{sigemptyset, sigaddset};
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Command
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct Command {
+ program: CString,
+ args: Vec<CString>,
+ /// Exactly what will be passed to `execvp`.
+ ///
+ /// First element is a pointer to `program`, followed by pointers to
+ /// `args`, followed by a `null`. Be careful when modifying `program` or
+ /// `args` to properly update this as well.
+ argv: Argv,
+ env: CommandEnv,
+
+ cwd: Option<CString>,
+ uid: Option<uid_t>,
+ gid: Option<gid_t>,
+ saw_nul: bool,
+ closures: Vec<Box<dyn FnMut() -> io::Result<()> + Send + Sync>>,
+ groups: Option<Box<[gid_t]>>,
+ stdin: Option<Stdio>,
+ stdout: Option<Stdio>,
+ stderr: Option<Stdio>,
+ #[cfg(target_os = "linux")]
+ create_pidfd: bool,
+ pgroup: Option<pid_t>,
+}
+
+// Create a new type for argv, so that we can make it `Send` and `Sync`
+struct Argv(Vec<*const c_char>);
+
+// It is safe to make `Argv` `Send` and `Sync`, because it contains
+// pointers to memory owned by `Command.args`
+unsafe impl Send for Argv {}
+unsafe impl Sync for Argv {}
+
+// passed back to std::process with the pipes connected to the child, if any
+// were requested
+pub struct StdioPipes {
+ pub stdin: Option<AnonPipe>,
+ pub stdout: Option<AnonPipe>,
+ pub stderr: Option<AnonPipe>,
+}
+
+// passed to do_exec() with configuration of what the child stdio should look
+// like
+pub struct ChildPipes {
+ pub stdin: ChildStdio,
+ pub stdout: ChildStdio,
+ pub stderr: ChildStdio,
+}
+
+pub enum ChildStdio {
+ Inherit,
+ Explicit(c_int),
+ Owned(FileDesc),
+
+ // On Fuchsia, null stdio is the default, so we simply don't specify
+ // any actions at the time of spawning.
+ #[cfg(target_os = "fuchsia")]
+ Null,
+}
+
+pub enum Stdio {
+ Inherit,
+ Null,
+ MakePipe,
+ Fd(FileDesc),
+}
+
+impl Command {
+ #[cfg(not(target_os = "linux"))]
+ pub fn new(program: &OsStr) -> Command {
+ let mut saw_nul = false;
+ let program = os2c(program, &mut saw_nul);
+ Command {
+ argv: Argv(vec![program.as_ptr(), ptr::null()]),
+ args: vec![program.clone()],
+ program,
+ env: Default::default(),
+ cwd: None,
+ uid: None,
+ gid: None,
+ saw_nul,
+ closures: Vec::new(),
+ groups: None,
+ stdin: None,
+ stdout: None,
+ stderr: None,
+ pgroup: None,
+ }
+ }
+
+ #[cfg(target_os = "linux")]
+ pub fn new(program: &OsStr) -> Command {
+ let mut saw_nul = false;
+ let program = os2c(program, &mut saw_nul);
+ Command {
+ argv: Argv(vec![program.as_ptr(), ptr::null()]),
+ args: vec![program.clone()],
+ program,
+ env: Default::default(),
+ cwd: None,
+ uid: None,
+ gid: None,
+ saw_nul,
+ closures: Vec::new(),
+ groups: None,
+ stdin: None,
+ stdout: None,
+ stderr: None,
+ create_pidfd: false,
+ pgroup: None,
+ }
+ }
+
+ pub fn set_arg_0(&mut self, arg: &OsStr) {
+ // Set a new arg0
+ let arg = os2c(arg, &mut self.saw_nul);
+ debug_assert!(self.argv.0.len() > 1);
+ self.argv.0[0] = arg.as_ptr();
+ self.args[0] = arg;
+ }
+
+ pub fn arg(&mut self, arg: &OsStr) {
+ // Overwrite the trailing null pointer in `argv` and then add a new null
+ // pointer.
+ let arg = os2c(arg, &mut self.saw_nul);
+ self.argv.0[self.args.len()] = arg.as_ptr();
+ self.argv.0.push(ptr::null());
+
+ // Also make sure we keep track of the owned value to schedule a
+ // destructor for this memory.
+ self.args.push(arg);
+ }
+
+ pub fn cwd(&mut self, dir: &OsStr) {
+ self.cwd = Some(os2c(dir, &mut self.saw_nul));
+ }
+ pub fn uid(&mut self, id: uid_t) {
+ self.uid = Some(id);
+ }
+ pub fn gid(&mut self, id: gid_t) {
+ self.gid = Some(id);
+ }
+ pub fn groups(&mut self, groups: &[gid_t]) {
+ self.groups = Some(Box::from(groups));
+ }
+ pub fn pgroup(&mut self, pgroup: pid_t) {
+ self.pgroup = Some(pgroup);
+ }
+
+ #[cfg(target_os = "linux")]
+ pub fn create_pidfd(&mut self, val: bool) {
+ self.create_pidfd = val;
+ }
+
+ #[cfg(not(target_os = "linux"))]
+ #[allow(dead_code)]
+ pub fn get_create_pidfd(&self) -> bool {
+ false
+ }
+
+ #[cfg(target_os = "linux")]
+ pub fn get_create_pidfd(&self) -> bool {
+ self.create_pidfd
+ }
+
+ pub fn saw_nul(&self) -> bool {
+ self.saw_nul
+ }
+
+ pub fn get_program(&self) -> &OsStr {
+ OsStr::from_bytes(self.program.as_bytes())
+ }
+
+ pub fn get_args(&self) -> CommandArgs<'_> {
+ let mut iter = self.args.iter();
+ iter.next();
+ CommandArgs { iter }
+ }
+
+ pub fn get_envs(&self) -> CommandEnvs<'_> {
+ self.env.iter()
+ }
+
+ pub fn get_current_dir(&self) -> Option<&Path> {
+ self.cwd.as_ref().map(|cs| Path::new(OsStr::from_bytes(cs.as_bytes())))
+ }
+
+ pub fn get_argv(&self) -> &Vec<*const c_char> {
+ &self.argv.0
+ }
+
+ pub fn get_program_cstr(&self) -> &CStr {
+ &*self.program
+ }
+
+ #[allow(dead_code)]
+ pub fn get_cwd(&self) -> &Option<CString> {
+ &self.cwd
+ }
+ #[allow(dead_code)]
+ pub fn get_uid(&self) -> Option<uid_t> {
+ self.uid
+ }
+ #[allow(dead_code)]
+ pub fn get_gid(&self) -> Option<gid_t> {
+ self.gid
+ }
+ #[allow(dead_code)]
+ pub fn get_groups(&self) -> Option<&[gid_t]> {
+ self.groups.as_deref()
+ }
+ #[allow(dead_code)]
+ pub fn get_pgroup(&self) -> Option<pid_t> {
+ self.pgroup
+ }
+
+ pub fn get_closures(&mut self) -> &mut Vec<Box<dyn FnMut() -> io::Result<()> + Send + Sync>> {
+ &mut self.closures
+ }
+
+ pub unsafe fn pre_exec(&mut self, f: Box<dyn FnMut() -> io::Result<()> + Send + Sync>) {
+ self.closures.push(f);
+ }
+
+ pub fn stdin(&mut self, stdin: Stdio) {
+ self.stdin = Some(stdin);
+ }
+
+ pub fn stdout(&mut self, stdout: Stdio) {
+ self.stdout = Some(stdout);
+ }
+
+ pub fn stderr(&mut self, stderr: Stdio) {
+ self.stderr = Some(stderr);
+ }
+
+ pub fn env_mut(&mut self) -> &mut CommandEnv {
+ &mut self.env
+ }
+
+ pub fn capture_env(&mut self) -> Option<CStringArray> {
+ let maybe_env = self.env.capture_if_changed();
+ maybe_env.map(|env| construct_envp(env, &mut self.saw_nul))
+ }
+
+ #[allow(dead_code)]
+ pub fn env_saw_path(&self) -> bool {
+ self.env.have_changed_path()
+ }
+
+ #[allow(dead_code)]
+ pub fn program_is_path(&self) -> bool {
+ self.program.to_bytes().contains(&b'/')
+ }
+
+ pub fn setup_io(
+ &self,
+ default: Stdio,
+ needs_stdin: bool,
+ ) -> io::Result<(StdioPipes, ChildPipes)> {
+ let null = Stdio::Null;
+ let default_stdin = if needs_stdin { &default } else { &null };
+ let stdin = self.stdin.as_ref().unwrap_or(default_stdin);
+ let stdout = self.stdout.as_ref().unwrap_or(&default);
+ let stderr = self.stderr.as_ref().unwrap_or(&default);
+ let (their_stdin, our_stdin) = stdin.to_child_stdio(true)?;
+ let (their_stdout, our_stdout) = stdout.to_child_stdio(false)?;
+ let (their_stderr, our_stderr) = stderr.to_child_stdio(false)?;
+ let ours = StdioPipes { stdin: our_stdin, stdout: our_stdout, stderr: our_stderr };
+ let theirs = ChildPipes { stdin: their_stdin, stdout: their_stdout, stderr: their_stderr };
+ Ok((ours, theirs))
+ }
+}
+
+fn os2c(s: &OsStr, saw_nul: &mut bool) -> CString {
+ CString::new(s.as_bytes()).unwrap_or_else(|_e| {
+ *saw_nul = true;
+ CString::new("<string-with-nul>").unwrap()
+ })
+}
+
+// Helper type to manage ownership of the strings within a C-style array.
+pub struct CStringArray {
+ items: Vec<CString>,
+ ptrs: Vec<*const c_char>,
+}
+
+impl CStringArray {
+ pub fn with_capacity(capacity: usize) -> Self {
+ let mut result = CStringArray {
+ items: Vec::with_capacity(capacity),
+ ptrs: Vec::with_capacity(capacity + 1),
+ };
+ result.ptrs.push(ptr::null());
+ result
+ }
+ pub fn push(&mut self, item: CString) {
+ let l = self.ptrs.len();
+ self.ptrs[l - 1] = item.as_ptr();
+ self.ptrs.push(ptr::null());
+ self.items.push(item);
+ }
+ pub fn as_ptr(&self) -> *const *const c_char {
+ self.ptrs.as_ptr()
+ }
+}
+
+fn construct_envp(env: BTreeMap<OsString, OsString>, saw_nul: &mut bool) -> CStringArray {
+ let mut result = CStringArray::with_capacity(env.len());
+ for (mut k, v) in env {
+ // Reserve additional space for '=' and null terminator
+ k.reserve_exact(v.len() + 2);
+ k.push("=");
+ k.push(&v);
+
+ // Add the new entry into the array
+ if let Ok(item) = CString::new(k.into_vec()) {
+ result.push(item);
+ } else {
+ *saw_nul = true;
+ }
+ }
+
+ result
+}
+
+impl Stdio {
+ pub fn to_child_stdio(&self, readable: bool) -> io::Result<(ChildStdio, Option<AnonPipe>)> {
+ match *self {
+ Stdio::Inherit => Ok((ChildStdio::Inherit, None)),
+
+ // Make sure that the source descriptors are not a stdio
+ // descriptor, otherwise the order in which we set the child's
+ // descriptors may blow away a descriptor which we are hoping to
+ // save. For example, suppose we want the child's stderr to be the
+ // parent's stdout, and the child's stdout to be the parent's
+ // stderr. No matter which we dup first, the second will get
+ // overwritten prematurely.
+ Stdio::Fd(ref fd) => {
+ if fd.as_raw_fd() >= 0 && fd.as_raw_fd() <= libc::STDERR_FILENO {
+ Ok((ChildStdio::Owned(fd.duplicate()?), None))
+ } else {
+ Ok((ChildStdio::Explicit(fd.as_raw_fd()), None))
+ }
+ }
+
+ Stdio::MakePipe => {
+ let (reader, writer) = pipe::anon_pipe()?;
+ let (ours, theirs) = if readable { (writer, reader) } else { (reader, writer) };
+ Ok((ChildStdio::Owned(theirs.into_inner()), Some(ours)))
+ }
+
+ #[cfg(not(target_os = "fuchsia"))]
+ Stdio::Null => {
+ let mut opts = OpenOptions::new();
+ opts.read(readable);
+ opts.write(!readable);
+ let path = unsafe { CStr::from_ptr(DEV_NULL.as_ptr() as *const _) };
+ let fd = File::open_c(&path, &opts)?;
+ Ok((ChildStdio::Owned(fd.into_inner()), None))
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ Stdio::Null => Ok((ChildStdio::Null, None)),
+ }
+ }
+}
+
+impl From<AnonPipe> for Stdio {
+ fn from(pipe: AnonPipe) -> Stdio {
+ Stdio::Fd(pipe.into_inner())
+ }
+}
+
+impl From<File> for Stdio {
+ fn from(file: File) -> Stdio {
+ Stdio::Fd(file.into_inner())
+ }
+}
+
+impl ChildStdio {
+ pub fn fd(&self) -> Option<c_int> {
+ match *self {
+ ChildStdio::Inherit => None,
+ ChildStdio::Explicit(fd) => Some(fd),
+ ChildStdio::Owned(ref fd) => Some(fd.as_raw_fd()),
+
+ #[cfg(target_os = "fuchsia")]
+ ChildStdio::Null => None,
+ }
+ }
+}
+
+impl fmt::Debug for Command {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.program != self.args[0] {
+ write!(f, "[{:?}] ", self.program)?;
+ }
+ write!(f, "{:?}", self.args[0])?;
+
+ for arg in &self.args[1..] {
+ write!(f, " {:?}", arg)?;
+ }
+ Ok(())
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub struct ExitCode(u8);
+
+impl fmt::Debug for ExitCode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("unix_exit_status").field(&self.0).finish()
+ }
+}
+
+impl ExitCode {
+ pub const SUCCESS: ExitCode = ExitCode(EXIT_SUCCESS as _);
+ pub const FAILURE: ExitCode = ExitCode(EXIT_FAILURE as _);
+
+ #[inline]
+ pub fn as_i32(&self) -> i32 {
+ self.0 as i32
+ }
+}
+
+impl From<u8> for ExitCode {
+ fn from(code: u8) -> Self {
+ Self(code)
+ }
+}
+
+pub struct CommandArgs<'a> {
+ iter: crate::slice::Iter<'a, CString>,
+}
+
+impl<'a> Iterator for CommandArgs<'a> {
+ type Item = &'a OsStr;
+ fn next(&mut self) -> Option<&'a OsStr> {
+ self.iter.next().map(|cs| OsStr::from_bytes(cs.as_bytes()))
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl<'a> ExactSizeIterator for CommandArgs<'a> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+impl<'a> fmt::Debug for CommandArgs<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter.clone()).finish()
+ }
+}
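A minimal sketch of the invariant that `Argv` and `CStringArray` maintain: a pointer array that always ends in a null, stored next to the owned `CString`s so the pointers stay valid. `NullTerminated` is a hypothetical name used only for illustration:

    use std::ffi::CString;
    use std::os::raw::c_char;
    use std::ptr;

    struct NullTerminated {
        owned: Vec<CString>,      // keeps the byte buffers alive
        ptrs: Vec<*const c_char>, // what execvp-style APIs consume
    }

    impl NullTerminated {
        fn new() -> Self {
            NullTerminated { owned: Vec::new(), ptrs: vec![ptr::null()] }
        }
        fn push(&mut self, s: &str) {
            let c = CString::new(s).expect("no interior nul");
            // Overwrite the trailing null with the new pointer, then re-append it.
            let last = self.ptrs.len() - 1;
            self.ptrs[last] = c.as_ptr();
            self.ptrs.push(ptr::null());
            self.owned.push(c);
        }
        fn as_ptr(&self) -> *const *const c_char {
            self.ptrs.as_ptr()
        }
    }

    fn main() {
        let mut argv = NullTerminated::new();
        argv.push("/bin/echo");
        argv.push("hello");
        assert_eq!(argv.ptrs.len(), 3);   // two entries plus the terminating null
        assert!(argv.ptrs[2].is_null());
        let _raw = argv.as_ptr();         // what would be handed to execvp
    }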
diff --git a/library/std/src/sys/unix/process/process_common/tests.rs b/library/std/src/sys/unix/process/process_common/tests.rs
new file mode 100644
index 000000000..1956b3692
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_common/tests.rs
@@ -0,0 +1,124 @@
+use super::*;
+
+use crate::ffi::OsStr;
+use crate::mem;
+use crate::ptr;
+use crate::sys::{cvt, cvt_nz};
+
+macro_rules! t {
+ ($e:expr) => {
+ match $e {
+ Ok(t) => t,
+ Err(e) => panic!("received error for `{}`: {}", stringify!($e), e),
+ }
+ };
+}
+
+#[test]
+#[cfg_attr(
+ any(
+ // See #14232 for more information, but it appears that signal delivery to a
+ // newly spawned process may simply be racy on macOS, so to prevent this
+ // test from being flaky we ignore it on macOS.
+ target_os = "macos",
+ // When run under our current QEMU emulation test suite this test fails,
+ // although it isn't clear why. For now this test is ignored there.
+ target_arch = "arm",
+ target_arch = "aarch64",
+ target_arch = "riscv64",
+ ),
+ ignore
+)]
+fn test_process_mask() {
+ unsafe {
+ // Test to make sure that a signal mask does not get inherited.
+ let mut cmd = Command::new(OsStr::new("cat"));
+
+ let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit();
+ let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit();
+ t!(cvt(sigemptyset(set.as_mut_ptr())));
+ t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT)));
+ t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old_set.as_mut_ptr())));
+
+ cmd.stdin(Stdio::MakePipe);
+ cmd.stdout(Stdio::MakePipe);
+
+ let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true));
+ let stdin_write = pipes.stdin.take().unwrap();
+ let stdout_read = pipes.stdout.take().unwrap();
+
+ t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut())));
+
+ t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT)));
+ // We need to wait until SIGINT is definitely delivered. The
+ // easiest way is to write something to cat, and try to read it
+ // back: if SIGINT is unmasked, it'll get delivered when cat is
+ // next scheduled.
+ let _ = stdin_write.write(b"Hello");
+ drop(stdin_write);
+
+ // Either EOF or failure (EPIPE) is okay.
+ let mut buf = [0; 5];
+ if let Ok(ret) = stdout_read.read(&mut buf) {
+ assert_eq!(ret, 0);
+ }
+
+ t!(cat.wait());
+ }
+}
+
+#[test]
+#[cfg_attr(
+ any(
+ // See test_process_mask
+ target_os = "macos",
+ target_arch = "arm",
+ target_arch = "aarch64",
+ target_arch = "riscv64",
+ ),
+ ignore
+)]
+fn test_process_group_posix_spawn() {
+ unsafe {
+ // Spawn a cat subprocess that's just going to hang since there is no I/O.
+ let mut cmd = Command::new(OsStr::new("cat"));
+ cmd.pgroup(0);
+ cmd.stdin(Stdio::MakePipe);
+ cmd.stdout(Stdio::MakePipe);
+ let (mut cat, _pipes) = t!(cmd.spawn(Stdio::Null, true));
+
+ // Check that we can kill its process group, which means there *is* one.
+ t!(cvt(libc::kill(-(cat.id() as libc::pid_t), libc::SIGINT)));
+
+ t!(cat.wait());
+ }
+}
+
+#[test]
+#[cfg_attr(
+ any(
+ // See test_process_mask
+ target_os = "macos",
+ target_arch = "arm",
+ target_arch = "aarch64",
+ target_arch = "riscv64",
+ ),
+ ignore
+)]
+fn test_process_group_no_posix_spawn() {
+ unsafe {
+ // Same as above, create hang-y cat. This time, force using the non-posix_spawnp path.
+ let mut cmd = Command::new(OsStr::new("cat"));
+ cmd.pgroup(0);
+ cmd.pre_exec(Box::new(|| Ok(()))); // pre_exec forces fork + exec
+ cmd.stdin(Stdio::MakePipe);
+ cmd.stdout(Stdio::MakePipe);
+ let (mut cat, _pipes) = t!(cmd.spawn(Stdio::Null, true));
+
+ // Check that we can kill its process group, which means there *is* one.
+ t!(cvt(libc::kill(-(cat.id() as libc::pid_t), libc::SIGINT)));
+
+ t!(cat.wait());
+ }
+}
diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs
new file mode 100644
index 000000000..73f5d3a61
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_fuchsia.rs
@@ -0,0 +1,327 @@
+use crate::fmt;
+use crate::io;
+use crate::mem;
+use crate::num::{NonZeroI32, NonZeroI64};
+use crate::ptr;
+
+use crate::sys::process::process_common::*;
+use crate::sys::process::zircon::{zx_handle_t, Handle};
+
+use libc::{c_int, size_t};
+
+////////////////////////////////////////////////////////////////////////////////
+// Command
+////////////////////////////////////////////////////////////////////////////////
+
+impl Command {
+ pub fn spawn(
+ &mut self,
+ default: Stdio,
+ needs_stdin: bool,
+ ) -> io::Result<(Process, StdioPipes)> {
+ let envp = self.capture_env();
+
+ if self.saw_nul() {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "nul byte found in provided data",
+ ));
+ }
+
+ let (ours, theirs) = self.setup_io(default, needs_stdin)?;
+
+ let process_handle = unsafe { self.do_exec(theirs, envp.as_ref())? };
+
+ Ok((Process { handle: Handle::new(process_handle) }, ours))
+ }
+
+ pub fn exec(&mut self, default: Stdio) -> io::Error {
+ if self.saw_nul() {
+ return io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "nul byte found in provided data",
+ );
+ }
+
+ match self.setup_io(default, true) {
+ Ok((_, _)) => {
+ // FIXME: This is tough because we don't support the exec syscalls
+ unimplemented!();
+ }
+ Err(e) => e,
+ }
+ }
+
+ unsafe fn do_exec(
+ &mut self,
+ stdio: ChildPipes,
+ maybe_envp: Option<&CStringArray>,
+ ) -> io::Result<zx_handle_t> {
+ use crate::sys::process::zircon::*;
+
+ let envp = match maybe_envp {
+ // None means to clone the current environment, which is done in the
+ // flags below.
+ None => ptr::null(),
+ Some(envp) => envp.as_ptr(),
+ };
+
+ let make_action = |local_io: &ChildStdio, target_fd| -> io::Result<fdio_spawn_action_t> {
+ if let Some(local_fd) = local_io.fd() {
+ Ok(fdio_spawn_action_t {
+ action: FDIO_SPAWN_ACTION_TRANSFER_FD,
+ local_fd,
+ target_fd,
+ ..Default::default()
+ })
+ } else {
+ if let ChildStdio::Null = local_io {
+ // acts as no-op
+ return Ok(Default::default());
+ }
+
+ let mut handle = ZX_HANDLE_INVALID;
+ let status = fdio_fd_clone(target_fd, &mut handle);
+ if status == ERR_INVALID_ARGS || status == ERR_NOT_SUPPORTED {
+ // This descriptor is closed; skip it rather than generating an
+ // error.
+ return Ok(Default::default());
+ }
+ zx_cvt(status)?;
+
+ let mut cloned_fd = 0;
+ zx_cvt(fdio_fd_create(handle, &mut cloned_fd))?;
+
+ Ok(fdio_spawn_action_t {
+ action: FDIO_SPAWN_ACTION_TRANSFER_FD,
+ local_fd: cloned_fd as i32,
+ target_fd,
+ ..Default::default()
+ })
+ }
+ };
+
+ // Clone stdin, stdout, and stderr
+ let action1 = make_action(&stdio.stdin, 0)?;
+ let action2 = make_action(&stdio.stdout, 1)?;
+ let action3 = make_action(&stdio.stderr, 2)?;
+ let actions = [action1, action2, action3];
+
+ // We don't want FileDesc::drop to be called on any stdio. fdio_spawn_etc
+ // always consumes transferred file descriptors.
+ mem::forget(stdio);
+
+ for callback in self.get_closures().iter_mut() {
+ callback()?;
+ }
+
+ let mut process_handle: zx_handle_t = 0;
+ zx_cvt(fdio_spawn_etc(
+ ZX_HANDLE_INVALID,
+ FDIO_SPAWN_CLONE_JOB
+ | FDIO_SPAWN_CLONE_LDSVC
+ | FDIO_SPAWN_CLONE_NAMESPACE
+ | FDIO_SPAWN_CLONE_ENVIRON // this is ignored when envp is non-null
+ | FDIO_SPAWN_CLONE_UTC_CLOCK,
+ self.get_program_cstr().as_ptr(),
+ self.get_argv().as_ptr(),
+ envp,
+ actions.len() as size_t,
+ actions.as_ptr(),
+ &mut process_handle,
+ ptr::null_mut(),
+ ))?;
+ // FIXME: See if we want to do something with that err_msg
+
+ Ok(process_handle)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Processes
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct Process {
+ handle: Handle,
+}
+
+impl Process {
+ pub fn id(&self) -> u32 {
+ self.handle.raw() as u32
+ }
+
+ pub fn kill(&mut self) -> io::Result<()> {
+ use crate::sys::process::zircon::*;
+
+ unsafe {
+ zx_cvt(zx_task_kill(self.handle.raw()))?;
+ }
+
+ Ok(())
+ }
+
+ pub fn wait(&mut self) -> io::Result<ExitStatus> {
+ use crate::default::Default;
+ use crate::sys::process::zircon::*;
+
+ let mut proc_info: zx_info_process_t = Default::default();
+ let mut actual: size_t = 0;
+ let mut avail: size_t = 0;
+
+ unsafe {
+ zx_cvt(zx_object_wait_one(
+ self.handle.raw(),
+ ZX_TASK_TERMINATED,
+ ZX_TIME_INFINITE,
+ ptr::null_mut(),
+ ))?;
+ zx_cvt(zx_object_get_info(
+ self.handle.raw(),
+ ZX_INFO_PROCESS,
+ &mut proc_info as *mut _ as *mut libc::c_void,
+ mem::size_of::<zx_info_process_t>(),
+ &mut actual,
+ &mut avail,
+ ))?;
+ }
+ if actual != 1 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidData,
+ "Failed to get exit status of process",
+ ));
+ }
+ Ok(ExitStatus(proc_info.return_code))
+ }
+
+ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ use crate::default::Default;
+ use crate::sys::process::zircon::*;
+
+ let mut proc_info: zx_info_process_t = Default::default();
+ let mut actual: size_t = 0;
+ let mut avail: size_t = 0;
+
+ unsafe {
+ let status =
+ zx_object_wait_one(self.handle.raw(), ZX_TASK_TERMINATED, 0, ptr::null_mut());
+ match status {
+ 0 => {} // Success
+ x if x == ERR_TIMED_OUT => {
+ return Ok(None);
+ }
+ _ => {
+ panic!("Failed to wait on process handle: {status}");
+ }
+ }
+ zx_cvt(zx_object_get_info(
+ self.handle.raw(),
+ ZX_INFO_PROCESS,
+ &mut proc_info as *mut _ as *mut libc::c_void,
+ mem::size_of::<zx_info_process_t>(),
+ &mut actual,
+ &mut avail,
+ ))?;
+ }
+ if actual != 1 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidData,
+ "Failed to get exit status of process",
+ ));
+ }
+ Ok(Some(ExitStatus(proc_info.return_code)))
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatus(i64);
+
+impl ExitStatus {
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ match NonZeroI64::try_from(self.0) {
+ /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)),
+ /* was zero, couldn't convert */ Err(_) => Ok(()),
+ }
+ }
+
+ pub fn code(&self) -> Option<i32> {
+ // FIXME: support extracting return code as an i64
+ self.0.try_into().ok()
+ }
+
+ pub fn signal(&self) -> Option<i32> {
+ None
+ }
+
+ // FIXME: The actually-Unix implementation in process_unix.rs uses WSTOPSIG, WCOREDUMP et al.
+ // I infer from the implementation of `success`, `code` and `signal` above that these are not
+ // available on Fuchsia.
+ //
+ // It does not appear that Fuchsia is Unix-like enough to implement ExitStatus (or indeed many
+ // other things from std::os::unix) properly. This veneer is always going to be a bodge. So
+ // while I don't know if these implementations are actually correct, I think they will do for
+ // now at least.
+ pub fn core_dumped(&self) -> bool {
+ false
+ }
+ pub fn stopped_signal(&self) -> Option<i32> {
+ None
+ }
+ pub fn continued(&self) -> bool {
+ false
+ }
+
+ pub fn into_raw(&self) -> c_int {
+ // We don't know what someone who calls into_raw() will do with this value, but it should
+ // have the conventional Unix representation. Despite the fact that this is not
+ // standardised in SuS or POSIX, all Unix systems encode the signal and exit status the
+ // same way. (I.e., the WIFEXITED, WEXITSTATUS etc. macros have identical behaviour on every
+ // Unix.)
+ //
+ // The caller of `std::os::unix::into_raw` is probably wanting a Unix exit status, and may
+ // do their own shifting and masking, or even pass the status to another computer running a
+ // different Unix variant.
+ //
+ // The other view would be to say that the caller on Fuchsia ought to know that `into_raw`
+ // will give a raw Fuchsia status (whatever that is - I don't know, personally). That is
+ // not possible here because we must return a c_int because that's what Unix (including
+ // SuS and POSIX) say a wait status is, but Fuchsia apparently uses a u64, so it won't
+ // necessarily fit.
+ //
+ // It seems to me that the right answer would be to provide std::os::fuchsia with its
+ // own ExitStatusExt, rather than trying to provide a not very convincing imitation of
+ // Unix. I.e., std::os::unix::process::ExitStatusExt ought not to exist on Fuchsia. But
+ // fixing this up is beyond the scope of my efforts now.
+ let exit_status_as_if_unix: u8 = self.0.try_into().expect("Fuchsia process return code bigger than 8 bits, but std::os::unix::ExitStatusExt::into_raw() was called to try to convert the value into a traditional Unix-style wait status, which cannot represent values greater than 255.");
+ let wait_status_as_if_unix = (exit_status_as_if_unix as c_int) << 8;
+ wait_status_as_if_unix
+ }
+}
+
+/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying.
+impl From<c_int> for ExitStatus {
+ fn from(a: c_int) -> ExitStatus {
+ ExitStatus(a as i64)
+ }
+}
+
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "exit code: {}", self.0)
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatusError(NonZeroI64);
+
+impl Into<ExitStatus> for ExitStatusError {
+ fn into(self) -> ExitStatus {
+ ExitStatus(self.0.into())
+ }
+}
+
+impl ExitStatusError {
+ pub fn code(self) -> Option<NonZeroI32> {
+ // fixme: affected by the same bug as ExitStatus::code()
+ ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap())
+ }
+}
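Standalone arithmetic for the conventional wait-status layout that `into_raw` mimics above: a normal exit puts the code in bits 8..16 and leaves the low seven signal bits zero, which is what glibc's WIFEXITED/WEXITSTATUS compute. Helper names here are hypothetical:

    fn encode_exit(code: u8) -> i32 {
        (code as i32) << 8
    }

    fn decode_exit(status: i32) -> Option<i32> {
        // WIFEXITED: no terminating signal in the low 7 bits.
        if status & 0x7f == 0 {
            Some((status >> 8) & 0xff) // WEXITSTATUS
        } else {
            None
        }
    }

    fn main() {
        let raw = encode_exit(3);
        assert_eq!(raw, 0x300);
        assert_eq!(decode_exit(raw), Some(3));
    }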
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
new file mode 100644
index 000000000..75bb92437
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -0,0 +1,836 @@
+use crate::fmt;
+use crate::io::{self, Error, ErrorKind};
+use crate::mem;
+use crate::num::NonZeroI32;
+use crate::ptr;
+use crate::sys;
+use crate::sys::cvt;
+use crate::sys::process::process_common::*;
+use core::ffi::NonZero_c_int;
+
+#[cfg(target_os = "linux")]
+use crate::os::linux::process::PidFd;
+
+#[cfg(target_os = "linux")]
+use crate::sys::weak::raw_syscall;
+
+#[cfg(any(
+ target_os = "macos",
+ target_os = "freebsd",
+ all(target_os = "linux", target_env = "gnu"),
+ all(target_os = "linux", target_env = "musl"),
+))]
+use crate::sys::weak::weak;
+
+#[cfg(target_os = "vxworks")]
+use libc::RTP_ID as pid_t;
+
+#[cfg(not(target_os = "vxworks"))]
+use libc::{c_int, pid_t};
+
+#[cfg(not(any(target_os = "vxworks", target_os = "l4re")))]
+use libc::{gid_t, uid_t};
+
+////////////////////////////////////////////////////////////////////////////////
+// Command
+////////////////////////////////////////////////////////////////////////////////
+
+impl Command {
+ pub fn spawn(
+ &mut self,
+ default: Stdio,
+ needs_stdin: bool,
+ ) -> io::Result<(Process, StdioPipes)> {
+ const CLOEXEC_MSG_FOOTER: [u8; 4] = *b"NOEX";
+
+ let envp = self.capture_env();
+
+ if self.saw_nul() {
+ return Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "nul byte found in provided data",
+ ));
+ }
+
+ let (ours, theirs) = self.setup_io(default, needs_stdin)?;
+
+ if let Some(ret) = self.posix_spawn(&theirs, envp.as_ref())? {
+ return Ok((ret, ours));
+ }
+
+ let (input, output) = sys::pipe::anon_pipe()?;
+
+ // Whatever happens after the fork is almost for sure going to touch or
+ // look at the environment in one way or another (PATH in `execvp` or
+ // accessing the `environ` pointer ourselves). Make sure no other thread
+ // is accessing the environment when we do the fork itself.
+ //
+ // Note that as soon as we're done with the fork there's no need to hold
+ // a lock any more because the parent won't do anything and the child is
+ // in its own process. Thus the parent drops the lock guard while the child
+ // forgets it to avoid unlocking it on a new thread, which would be invalid.
+ let env_lock = sys::os::env_read_lock();
+ let (pid, pidfd) = unsafe { self.do_fork()? };
+
+ if pid == 0 {
+ crate::panic::always_abort();
+ mem::forget(env_lock);
+ drop(input);
+ let Err(err) = unsafe { self.do_exec(theirs, envp.as_ref()) };
+ let errno = err.raw_os_error().unwrap_or(libc::EINVAL) as u32;
+ let errno = errno.to_be_bytes();
+ let bytes = [
+ errno[0],
+ errno[1],
+ errno[2],
+ errno[3],
+ CLOEXEC_MSG_FOOTER[0],
+ CLOEXEC_MSG_FOOTER[1],
+ CLOEXEC_MSG_FOOTER[2],
+ CLOEXEC_MSG_FOOTER[3],
+ ];
+ // pipe I/O up to PIPE_BUF bytes should be atomic, and then
+ // we want to be sure we *don't* run at_exit destructors as
+ // we're being torn down regardless
+ rtassert!(output.write(&bytes).is_ok());
+ unsafe { libc::_exit(1) }
+ }
+
+ drop(env_lock);
+ drop(output);
+
+ // Safety: We obtained the pidfd from calling `clone3` with
+ // `CLONE_PIDFD`, so it's valid and otherwise unowned.
+ let mut p = unsafe { Process::new(pid, pidfd) };
+ let mut bytes = [0; 8];
+
+ // loop to handle EINTR
+ loop {
+ match input.read(&mut bytes) {
+ Ok(0) => return Ok((p, ours)),
+ Ok(8) => {
+ let (errno, footer) = bytes.split_at(4);
+ assert_eq!(
+ CLOEXEC_MSG_FOOTER, footer,
+ "Validation on the CLOEXEC pipe failed: {:?}",
+ bytes
+ );
+ let errno = i32::from_be_bytes(errno.try_into().unwrap());
+ assert!(p.wait().is_ok(), "wait() should either return Ok or panic");
+ return Err(Error::from_raw_os_error(errno));
+ }
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
+ Err(e) => {
+ assert!(p.wait().is_ok(), "wait() should either return Ok or panic");
+ panic!("the CLOEXEC pipe failed: {e:?}")
+ }
+ Ok(..) => {
+ // pipe I/O up to PIPE_BUF bytes should be atomic
+ assert!(p.wait().is_ok(), "wait() should either return Ok or panic");
+ panic!("short read on the CLOEXEC pipe")
+ }
+ }
+ }
+ }
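A standalone sketch of the report format used on the CLOEXEC pipe above: four big-endian errno bytes followed by the fixed "NOEX" footer. A zero-length read means the exec succeeded, because the write end was closed on exec. Helper names are hypothetical:

    const FOOTER: [u8; 4] = *b"NOEX";

    fn encode(errno: i32) -> [u8; 8] {
        let e = (errno as u32).to_be_bytes();
        [e[0], e[1], e[2], e[3], FOOTER[0], FOOTER[1], FOOTER[2], FOOTER[3]]
    }

    fn decode(msg: &[u8]) -> Option<i32> {
        match msg.len() {
            0 => None, // exec succeeded; nothing was ever written
            8 if msg[4..] == FOOTER => {
                Some(i32::from_be_bytes([msg[0], msg[1], msg[2], msg[3]]))
            }
            _ => panic!("short or corrupt report on the CLOEXEC pipe"),
        }
    }

    fn main() {
        assert_eq!(decode(&encode(2)), Some(2)); // e.g. ENOENT
        assert_eq!(decode(&[]), None);
    }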
+
+ // Attempts to fork the process. If successful, returns Ok((0, -1))
+ // in the child, and Ok((child_pid, -1)) in the parent.
+ #[cfg(not(target_os = "linux"))]
+ unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> {
+ cvt(libc::fork()).map(|res| (res, -1))
+ }
+
+ // Attempts to fork the process. If successful, returns Ok((0, -1))
+ // in the child, and Ok((child_pid, child_pidfd)) in the parent.
+ #[cfg(target_os = "linux")]
+ unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> {
+ use crate::sync::atomic::{AtomicBool, Ordering};
+
+ static HAS_CLONE3: AtomicBool = AtomicBool::new(true);
+ const CLONE_PIDFD: u64 = 0x00001000;
+
+ #[repr(C)]
+ struct clone_args {
+ flags: u64,
+ pidfd: u64,
+ child_tid: u64,
+ parent_tid: u64,
+ exit_signal: u64,
+ stack: u64,
+ stack_size: u64,
+ tls: u64,
+ set_tid: u64,
+ set_tid_size: u64,
+ cgroup: u64,
+ }
+
+ raw_syscall! {
+ fn clone3(cl_args: *mut clone_args, len: libc::size_t) -> libc::c_long
+ }
+
+ // Bypassing libc for `clone3` can make further libc calls unsafe,
+ // so we use it sparingly for now. See #89522 for details.
+ // Some tools (e.g. sandboxing tools) may also expect `fork`
+ // rather than `clone3`.
+ let want_clone3_pidfd = self.get_create_pidfd();
+
+ // If we fail to create a pidfd for any reason, this will
+ // stay as -1, which indicates an error.
+ let mut pidfd: pid_t = -1;
+
+ // Attempt to use the `clone3` syscall, which supports more arguments
+ // (in particular, the ability to create a pidfd). If this fails,
+ // we will fall through this block to a call to `fork()`
+ if want_clone3_pidfd && HAS_CLONE3.load(Ordering::Relaxed) {
+ let mut args = clone_args {
+ flags: CLONE_PIDFD,
+ pidfd: &mut pidfd as *mut pid_t as u64,
+ child_tid: 0,
+ parent_tid: 0,
+ exit_signal: libc::SIGCHLD as u64,
+ stack: 0,
+ stack_size: 0,
+ tls: 0,
+ set_tid: 0,
+ set_tid_size: 0,
+ cgroup: 0,
+ };
+
+ let args_ptr = &mut args as *mut clone_args;
+ let args_size = crate::mem::size_of::<clone_args>();
+
+ let res = cvt(clone3(args_ptr, args_size));
+ match res {
+ Ok(n) => return Ok((n as pid_t, pidfd)),
+ Err(e) => match e.raw_os_error() {
+ // Multiple threads can race to execute this store,
+ // but that's fine - that just means that multiple threads
+ // will have tried and failed to execute the same syscall,
+ // with no other side effects.
+ Some(libc::ENOSYS) => HAS_CLONE3.store(false, Ordering::Relaxed),
+ // Fallback to fork if `EPERM` is returned. (e.g. blocked by seccomp)
+ Some(libc::EPERM) => {}
+ _ => return Err(e),
+ },
+ }
+ }
+
+ // Generally, we just call `fork`. If we get here after wanting `clone3`,
+ // then the syscall does not exist or we do not have permission to call it.
+ cvt(libc::fork()).map(|res| (res, pidfd))
+ }
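The HAS_CLONE3 caching pattern in isolation: optimistically try the newer mechanism and, on ENOSYS, flip a relaxed atomic so later spawns skip the attempt entirely. A sketch assuming the `libc` crate; `try_fast`, `fallback` and `spawn_like` are hypothetical stand-ins:

    use std::sync::atomic::{AtomicBool, Ordering};

    static HAS_FAST_PATH: AtomicBool = AtomicBool::new(true);

    fn try_fast() -> Result<u32, i32> {
        Err(libc::ENOSYS) // pretend the newer syscall is missing on this kernel
    }

    fn fallback() -> u32 {
        42
    }

    fn spawn_like() -> u32 {
        if HAS_FAST_PATH.load(Ordering::Relaxed) {
            match try_fast() {
                Ok(v) => return v,
                // Racing stores are fine: every loser records the same fact.
                Err(e) if e == libc::ENOSYS => HAS_FAST_PATH.store(false, Ordering::Relaxed),
                Err(_) => { /* other errors would be surfaced to the caller */ }
            }
        }
        fallback()
    }

    fn main() {
        assert_eq!(spawn_like(), 42);
        assert!(!HAS_FAST_PATH.load(Ordering::Relaxed)); // cached for next time
    }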
+
+ pub fn exec(&mut self, default: Stdio) -> io::Error {
+ let envp = self.capture_env();
+
+ if self.saw_nul() {
+ return io::const_io_error!(ErrorKind::InvalidInput, "nul byte found in provided data",);
+ }
+
+ match self.setup_io(default, true) {
+ Ok((_, theirs)) => {
+ unsafe {
+ // Similar to when forking, we want to ensure that access to
+ // the environment is synchronized, so make sure to grab the
+ // environment lock before we try to exec.
+ let _lock = sys::os::env_read_lock();
+
+ let Err(e) = self.do_exec(theirs, envp.as_ref());
+ e
+ }
+ }
+ Err(e) => e,
+ }
+ }
+
+ // And at this point we've reached a special time in the life of the
+ // child. The child must now be considered hamstrung and unable to
+ // do anything other than syscalls really. Consider the following
+ // scenario:
+ //
+ // 1. Thread A of process 1 grabs the malloc() mutex
+ // 2. Thread B of process 1 forks(), creating thread C
+ // 3. Thread C of process 2 then attempts to malloc()
+ // 4. The memory of process 2 is the same as the memory of
+ // process 1, so the mutex is locked.
+ //
+ // This situation looks a lot like deadlock, right? It turns out
+ // that this is what pthread_atfork() takes care of, which is
+ // presumably implemented across platforms. The first thing that
+ // threads do *before* forking is to do things like grab the malloc
+ // mutex, and then after the fork they unlock it.
+ //
+ // Despite this information, libnative's spawn has been witnessed to
+ // deadlock on both macOS and FreeBSD. I'm not entirely sure why, but
+ // all collected backtraces point at malloc/free traffic in the
+ // child spawned process.
+ //
+ // For this reason, the block of code below should contain 0
+ // invocations of either malloc or free (or their related friends).
+ //
+ // As an example of not having malloc/free traffic, we don't close
+ // this file descriptor by dropping the FileDesc (which contains an
+ // allocation). Instead we just close it manually. This will never
+ // have the drop glue anyway because this code never returns (the
+ // child will either exec() or invoke libc::exit)
+ unsafe fn do_exec(
+ &mut self,
+ stdio: ChildPipes,
+ maybe_envp: Option<&CStringArray>,
+ ) -> Result<!, io::Error> {
+ use crate::sys::{self, cvt_r};
+
+ if let Some(fd) = stdio.stdin.fd() {
+ cvt_r(|| libc::dup2(fd, libc::STDIN_FILENO))?;
+ }
+ if let Some(fd) = stdio.stdout.fd() {
+ cvt_r(|| libc::dup2(fd, libc::STDOUT_FILENO))?;
+ }
+ if let Some(fd) = stdio.stderr.fd() {
+ cvt_r(|| libc::dup2(fd, libc::STDERR_FILENO))?;
+ }
+
+ #[cfg(not(target_os = "l4re"))]
+ {
+ if let Some(_g) = self.get_groups() {
+ //FIXME: Redox kernel does not support setgroups yet
+ #[cfg(not(target_os = "redox"))]
+ cvt(libc::setgroups(_g.len().try_into().unwrap(), _g.as_ptr()))?;
+ }
+ if let Some(u) = self.get_gid() {
+ cvt(libc::setgid(u as gid_t))?;
+ }
+ if let Some(u) = self.get_uid() {
+ // When dropping privileges from root, the `setgroups` call
+ // will remove any extraneous groups. We only drop groups
+ // if the current uid is 0 and we weren't given an explicit
+ // set of groups. If we don't call this, then even though our
+ // uid has dropped, we may still have groups that enable us to
+ // do super-user things.
+ //FIXME: Redox kernel does not support setgroups yet
+ #[cfg(not(target_os = "redox"))]
+ if libc::getuid() == 0 && self.get_groups().is_none() {
+ cvt(libc::setgroups(0, ptr::null()))?;
+ }
+ cvt(libc::setuid(u as uid_t))?;
+ }
+ }
+ if let Some(ref cwd) = *self.get_cwd() {
+ cvt(libc::chdir(cwd.as_ptr()))?;
+ }
+
+ if let Some(pgroup) = self.get_pgroup() {
+ cvt(libc::setpgid(0, pgroup))?;
+ }
+
+ // emscripten has no signal support.
+ #[cfg(not(target_os = "emscripten"))]
+ {
+ use crate::mem::MaybeUninit;
+ use crate::sys::cvt_nz;
+ // Reset signal handling so the child process starts in a
+ // standardized state. libstd ignores SIGPIPE, and signal-handling
+ // libraries often set a mask. Child processes inherit ignored
+ // signals and the signal mask from their parent, but most
+ // UNIX programs do not reset these things on their own, so we
+ // need to clean things up now to avoid confusing the program
+ // we're about to run.
+ let mut set = MaybeUninit::<libc::sigset_t>::uninit();
+ cvt(sigemptyset(set.as_mut_ptr()))?;
+ cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), ptr::null_mut()))?;
+
+ #[cfg(target_os = "android")] // see issue #88585
+ {
+ let mut action: libc::sigaction = mem::zeroed();
+ action.sa_sigaction = libc::SIG_DFL;
+ cvt(libc::sigaction(libc::SIGPIPE, &action, ptr::null_mut()))?;
+ }
+ #[cfg(not(target_os = "android"))]
+ {
+ let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL);
+ if ret == libc::SIG_ERR {
+ return Err(io::Error::last_os_error());
+ }
+ }
+ }
+
+ for callback in self.get_closures().iter_mut() {
+ callback()?;
+ }
+
+ // Although we're performing an exec here we may also return with an
+ // error from this function (without actually exec'ing) in which case we
+ // want to be sure to restore the global environment back to what it
+ // once was, ensuring that our temporary override, when free'd, doesn't
+ // corrupt our process's environment.
+ let mut _reset = None;
+ if let Some(envp) = maybe_envp {
+ struct Reset(*const *const libc::c_char);
+
+ impl Drop for Reset {
+ fn drop(&mut self) {
+ unsafe {
+ *sys::os::environ() = self.0;
+ }
+ }
+ }
+
+ _reset = Some(Reset(*sys::os::environ()));
+ *sys::os::environ() = envp.as_ptr();
+ }
+
+ libc::execvp(self.get_program_cstr().as_ptr(), self.get_argv().as_ptr());
+ Err(io::Error::last_os_error())
+ }
+
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "freebsd",
+ all(target_os = "linux", target_env = "gnu"),
+ all(target_os = "linux", target_env = "musl"),
+ )))]
+ fn posix_spawn(
+ &mut self,
+ _: &ChildPipes,
+ _: Option<&CStringArray>,
+ ) -> io::Result<Option<Process>> {
+ Ok(None)
+ }
+
+ // Only support platforms for which posix_spawn() can return ENOENT
+ // directly.
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "freebsd",
+ all(target_os = "linux", target_env = "gnu"),
+ all(target_os = "linux", target_env = "musl"),
+ ))]
+ fn posix_spawn(
+ &mut self,
+ stdio: &ChildPipes,
+ envp: Option<&CStringArray>,
+ ) -> io::Result<Option<Process>> {
+ use crate::mem::MaybeUninit;
+ use crate::sys::{self, cvt_nz};
+
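+ // These settings cannot be expressed through posix_spawn: changing
+ // uid/gid or supplementary groups, resolving the program against a
+ // PATH that was modified for the child, running pre_exec closures,
+ // and pidfd creation. Fall back to fork/exec for them.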
+ if self.get_gid().is_some()
+ || self.get_uid().is_some()
+ || (self.env_saw_path() && !self.program_is_path())
+ || !self.get_closures().is_empty()
+ || self.get_groups().is_some()
+ || self.get_create_pidfd()
+ {
+ return Ok(None);
+ }
+
+ // Only glibc 2.24+ posix_spawn() supports returning ENOENT directly.
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ {
+ if let Some(version) = sys::os::glibc_version() {
+ if version < (2, 24) {
+ return Ok(None);
+ }
+ } else {
+ return Ok(None);
+ }
+ }
+
+ // Solaris, glibc 2.29+, and musl 1.24+ can set a new working directory,
+ // and maybe others will gain this non-POSIX function too. We'll check
+ // for this weak symbol as soon as it's needed; if it isn't available we
+ // return early and fall back to a manual chdir before exec.
+ weak! {
+ fn posix_spawn_file_actions_addchdir_np(
+ *mut libc::posix_spawn_file_actions_t,
+ *const libc::c_char
+ ) -> libc::c_int
+ }
+ let addchdir = match self.get_cwd() {
+ Some(cwd) => {
+ if cfg!(target_os = "macos") {
+ // There is a bug in macOS where a relative executable
+ // path like "../myprogram" will cause `posix_spawn` to
+ // successfully launch the program, but erroneously return
+ // ENOENT when used with posix_spawn_file_actions_addchdir_np
+ // which was introduced in macOS 10.15.
+ return Ok(None);
+ }
+ match posix_spawn_file_actions_addchdir_np.get() {
+ Some(f) => Some((f, cwd)),
+ None => return Ok(None),
+ }
+ }
+ None => None,
+ };
+
+ let pgroup = self.get_pgroup();
+
+ // Safety: -1 indicates we don't have a pidfd.
+ let mut p = unsafe { Process::new(0, -1) };
+
+ struct PosixSpawnFileActions<'a>(&'a mut MaybeUninit<libc::posix_spawn_file_actions_t>);
+
+ impl Drop for PosixSpawnFileActions<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ libc::posix_spawn_file_actions_destroy(self.0.as_mut_ptr());
+ }
+ }
+ }
+
+ struct PosixSpawnattr<'a>(&'a mut MaybeUninit<libc::posix_spawnattr_t>);
+
+ impl Drop for PosixSpawnattr<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ libc::posix_spawnattr_destroy(self.0.as_mut_ptr());
+ }
+ }
+ }
+
+ unsafe {
+ let mut attrs = MaybeUninit::uninit();
+ cvt_nz(libc::posix_spawnattr_init(attrs.as_mut_ptr()))?;
+ let attrs = PosixSpawnattr(&mut attrs);
+
+ let mut flags = 0;
+
+ let mut file_actions = MaybeUninit::uninit();
+ cvt_nz(libc::posix_spawn_file_actions_init(file_actions.as_mut_ptr()))?;
+ let file_actions = PosixSpawnFileActions(&mut file_actions);
+
+ if let Some(fd) = stdio.stdin.fd() {
+ cvt_nz(libc::posix_spawn_file_actions_adddup2(
+ file_actions.0.as_mut_ptr(),
+ fd,
+ libc::STDIN_FILENO,
+ ))?;
+ }
+ if let Some(fd) = stdio.stdout.fd() {
+ cvt_nz(libc::posix_spawn_file_actions_adddup2(
+ file_actions.0.as_mut_ptr(),
+ fd,
+ libc::STDOUT_FILENO,
+ ))?;
+ }
+ if let Some(fd) = stdio.stderr.fd() {
+ cvt_nz(libc::posix_spawn_file_actions_adddup2(
+ file_actions.0.as_mut_ptr(),
+ fd,
+ libc::STDERR_FILENO,
+ ))?;
+ }
+ if let Some((f, cwd)) = addchdir {
+ cvt_nz(f(file_actions.0.as_mut_ptr(), cwd.as_ptr()))?;
+ }
+
+ if let Some(pgroup) = pgroup {
+ flags |= libc::POSIX_SPAWN_SETPGROUP;
+ cvt_nz(libc::posix_spawnattr_setpgroup(attrs.0.as_mut_ptr(), pgroup))?;
+ }
+
+ let mut set = MaybeUninit::<libc::sigset_t>::uninit();
+ cvt(sigemptyset(set.as_mut_ptr()))?;
+ cvt_nz(libc::posix_spawnattr_setsigmask(attrs.0.as_mut_ptr(), set.as_ptr()))?;
+ cvt(sigaddset(set.as_mut_ptr(), libc::SIGPIPE))?;
+ cvt_nz(libc::posix_spawnattr_setsigdefault(attrs.0.as_mut_ptr(), set.as_ptr()))?;
+
+ flags |= libc::POSIX_SPAWN_SETSIGDEF | libc::POSIX_SPAWN_SETSIGMASK;
+ cvt_nz(libc::posix_spawnattr_setflags(attrs.0.as_mut_ptr(), flags as _))?;
+
+ // Make sure we synchronize access to the global `environ` resource
+ let _env_lock = sys::os::env_read_lock();
+ let envp = envp.map(|c| c.as_ptr()).unwrap_or_else(|| *sys::os::environ() as *const _);
+ cvt_nz(libc::posix_spawnp(
+ &mut p.pid,
+ self.get_program_cstr().as_ptr(),
+ file_actions.0.as_ptr(),
+ attrs.0.as_ptr(),
+ self.get_argv().as_ptr() as *const _,
+ envp as *const _,
+ ))?;
+ Ok(Some(p))
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Processes
+////////////////////////////////////////////////////////////////////////////////
+
+/// The unique ID of the process (this should never be negative).
+pub struct Process {
+ pid: pid_t,
+ status: Option<ExitStatus>,
+ // On Linux, stores the pidfd created for this child.
+ // This is None if the user did not request pidfd creation,
+ // or if the pidfd could not be created for some reason
+ // (e.g. the `clone3` syscall was not available).
+ #[cfg(target_os = "linux")]
+ pidfd: Option<PidFd>,
+}
+
+impl Process {
+ #[cfg(target_os = "linux")]
+ unsafe fn new(pid: pid_t, pidfd: pid_t) -> Self {
+ use crate::os::unix::io::FromRawFd;
+ use crate::sys_common::FromInner;
+ // Safety: If `pidfd` is nonnegative, we assume it's valid and otherwise unowned.
+ let pidfd = (pidfd >= 0).then(|| PidFd::from_inner(sys::fd::FileDesc::from_raw_fd(pidfd)));
+ Process { pid, status: None, pidfd }
+ }
+
+ #[cfg(not(target_os = "linux"))]
+ unsafe fn new(pid: pid_t, _pidfd: pid_t) -> Self {
+ Process { pid, status: None }
+ }
+
+ pub fn id(&self) -> u32 {
+ self.pid as u32
+ }
+
+ pub fn kill(&mut self) -> io::Result<()> {
+ // If we've already waited on this process then the pid can be recycled
+ // and used for another process, and we probably shouldn't be killing
+ // random processes, so just return an error.
+ if self.status.is_some() {
+ Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "invalid argument: can't kill an exited process",
+ ))
+ } else {
+ cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop)
+ }
+ }
+
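+ // The exit status is cached after the first successful wait: once the
+ // child has been reaped the kernel may recycle its pid, so later calls
+ // to `wait`/`try_wait` (and the check in `kill` above) must use the
+ // recorded status instead of touching the pid again.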
+ pub fn wait(&mut self) -> io::Result<ExitStatus> {
+ use crate::sys::cvt_r;
+ if let Some(status) = self.status {
+ return Ok(status);
+ }
+ let mut status = 0 as c_int;
+ cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?;
+ self.status = Some(ExitStatus::new(status));
+ Ok(ExitStatus::new(status))
+ }
+
+ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ if let Some(status) = self.status {
+ return Ok(Some(status));
+ }
+ let mut status = 0 as c_int;
+ let pid = cvt(unsafe { libc::waitpid(self.pid, &mut status, libc::WNOHANG) })?;
+ if pid == 0 {
+ Ok(None)
+ } else {
+ self.status = Some(ExitStatus::new(status));
+ Ok(Some(ExitStatus::new(status)))
+ }
+ }
+}
+
+/// Unix exit statuses
+//
+// This is not actually an "exit status" in Unix terminology. Rather, it is a "wait status".
+// See the discussion in comments and doc comments for `std::process::ExitStatus`.
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub struct ExitStatus(c_int);
+
+impl fmt::Debug for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("unix_wait_status").field(&self.0).finish()
+ }
+}
+
+impl ExitStatus {
+ pub fn new(status: c_int) -> ExitStatus {
+ ExitStatus(status)
+ }
+
+ fn exited(&self) -> bool {
+ libc::WIFEXITED(self.0)
+ }
+
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
+ // true on all actual versions of Unix, is widely assumed, and is specified in SuS
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not
+ // true for a platform pretending to be Unix, the tests (our doctests, and also
+ // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
+ match NonZero_c_int::try_from(self.0) {
+ /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)),
+ /* was zero, couldn't convert */ Err(_) => Ok(()),
+ }
+ }
+
+ pub fn code(&self) -> Option<i32> {
+ self.exited().then(|| libc::WEXITSTATUS(self.0))
+ }
+
+ pub fn signal(&self) -> Option<i32> {
+ libc::WIFSIGNALED(self.0).then(|| libc::WTERMSIG(self.0))
+ }
+
+ pub fn core_dumped(&self) -> bool {
+ libc::WIFSIGNALED(self.0) && libc::WCOREDUMP(self.0)
+ }
+
+ pub fn stopped_signal(&self) -> Option<i32> {
+ libc::WIFSTOPPED(self.0).then(|| libc::WSTOPSIG(self.0))
+ }
+
+ pub fn continued(&self) -> bool {
+ libc::WIFCONTINUED(self.0)
+ }
+
+ pub fn into_raw(&self) -> c_int {
+ self.0
+ }
+}
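+
+// Illustrative note, matching the cases exercised in process_unix/tests.rs:
+// a raw wait status of 0x0008b decodes through these accessors to
+// signal() == Some(11) (SIGSEGV) with core_dumped() == true, while 0x0ff00
+// decodes to code() == Some(255).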
+
+/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying.
+impl From<c_int> for ExitStatus {
+ fn from(a: c_int) -> ExitStatus {
+ ExitStatus(a)
+ }
+}
+
+/// Converts a signal number to a readable, searchable name.
+///
+/// The returned string should be displayed right after the signal number.
+/// If the signal is unrecognized, the empty string is returned, so the caller
+/// just sees the bare number (e.g. "0"). If it is recognized, the result looks
+/// like "9 (SIGKILL)".
+fn signal_string(signal: i32) -> &'static str {
+ match signal {
+ libc::SIGHUP => " (SIGHUP)",
+ libc::SIGINT => " (SIGINT)",
+ libc::SIGQUIT => " (SIGQUIT)",
+ libc::SIGILL => " (SIGILL)",
+ libc::SIGTRAP => " (SIGTRAP)",
+ libc::SIGABRT => " (SIGABRT)",
+ libc::SIGBUS => " (SIGBUS)",
+ libc::SIGFPE => " (SIGFPE)",
+ libc::SIGKILL => " (SIGKILL)",
+ libc::SIGUSR1 => " (SIGUSR1)",
+ libc::SIGSEGV => " (SIGSEGV)",
+ libc::SIGUSR2 => " (SIGUSR2)",
+ libc::SIGPIPE => " (SIGPIPE)",
+ libc::SIGALRM => " (SIGALRM)",
+ libc::SIGTERM => " (SIGTERM)",
+ libc::SIGCHLD => " (SIGCHLD)",
+ libc::SIGCONT => " (SIGCONT)",
+ libc::SIGSTOP => " (SIGSTOP)",
+ libc::SIGTSTP => " (SIGTSTP)",
+ libc::SIGTTIN => " (SIGTTIN)",
+ libc::SIGTTOU => " (SIGTTOU)",
+ libc::SIGURG => " (SIGURG)",
+ libc::SIGXCPU => " (SIGXCPU)",
+ libc::SIGXFSZ => " (SIGXFSZ)",
+ libc::SIGVTALRM => " (SIGVTALRM)",
+ libc::SIGPROF => " (SIGPROF)",
+ libc::SIGWINCH => " (SIGWINCH)",
+ #[cfg(not(target_os = "haiku"))]
+ libc::SIGIO => " (SIGIO)",
+ libc::SIGSYS => " (SIGSYS)",
+ // For information on Linux signals, run `man 7 signal`
+ #[cfg(all(
+ target_os = "linux",
+ any(
+ target_arch = "x86_64",
+ target_arch = "x86",
+ target_arch = "arm",
+ target_arch = "aarch64"
+ )
+ ))]
+ libc::SIGSTKFLT => " (SIGSTKFLT)",
+ #[cfg(target_os = "linux")]
+ libc::SIGPWR => " (SIGPWR)",
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "tvos",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "dragonfly"
+ ))]
+ libc::SIGEMT => " (SIGEMT)",
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "tvos",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "dragonfly"
+ ))]
+ libc::SIGINFO => " (SIGINFO)",
+ _ => "",
+ }
+}
+
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if let Some(code) = self.code() {
+ write!(f, "exit status: {code}")
+ } else if let Some(signal) = self.signal() {
+ let signal_string = signal_string(signal);
+ if self.core_dumped() {
+ write!(f, "signal: {signal}{signal_string} (core dumped)")
+ } else {
+ write!(f, "signal: {signal}{signal_string}")
+ }
+ } else if let Some(signal) = self.stopped_signal() {
+ let signal_string = signal_string(signal);
+ write!(f, "stopped (not terminated) by signal: {signal}{signal_string}")
+ } else if self.continued() {
+ write!(f, "continued (WIFCONTINUED)")
+ } else {
+ write!(f, "unrecognised wait status: {} {:#x}", self.0, self.0)
+ }
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub struct ExitStatusError(NonZero_c_int);
+
+impl Into<ExitStatus> for ExitStatusError {
+ fn into(self) -> ExitStatus {
+ ExitStatus(self.0.into())
+ }
+}
+
+impl fmt::Debug for ExitStatusError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("unix_wait_status").field(&self.0).finish()
+ }
+}
+
+impl ExitStatusError {
+ pub fn code(self) -> Option<NonZeroI32> {
+ ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap())
+ }
+}
+
+#[cfg(target_os = "linux")]
+#[unstable(feature = "linux_pidfd", issue = "82971")]
+impl crate::os::linux::process::ChildExt for crate::process::Child {
+ fn pidfd(&self) -> io::Result<&PidFd> {
+ self.handle
+ .pidfd
+ .as_ref()
+ .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created."))
+ }
+
+ fn take_pidfd(&mut self) -> io::Result<PidFd> {
+ self.handle
+ .pidfd
+ .take()
+ .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created."))
+ }
+}
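+
+// An illustrative usage sketch (assumes the unstable `linux_pidfd` feature,
+// which also provides `CommandExt::create_pidfd` on `std::process::Command`):
+//
+//     use std::os::linux::process::{ChildExt, CommandExt};
+//     let child = std::process::Command::new("sleep").arg("1")
+//         .create_pidfd(true)
+//         .spawn()?;
+//     let pidfd = child.pidfd()?;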
+
+#[cfg(test)]
+#[path = "process_unix/tests.rs"]
+mod tests;
diff --git a/library/std/src/sys/unix/process/process_unix/tests.rs b/library/std/src/sys/unix/process/process_unix/tests.rs
new file mode 100644
index 000000000..e0e2d478f
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_unix/tests.rs
@@ -0,0 +1,62 @@
+use crate::os::unix::process::{CommandExt, ExitStatusExt};
+use crate::panic::catch_unwind;
+use crate::process::Command;
+
+// Many of the other aspects of this situation, including heap alloc concurrency
+// safety etc., are tested in src/test/ui/process/process-panic-after-fork.rs
+
+#[test]
+fn exitstatus_display_tests() {
+ // In practice this is the same on every Unix.
+ // If some weird platform turns out to be different, and this test fails, use #[cfg].
+ use crate::os::unix::process::ExitStatusExt;
+ use crate::process::ExitStatus;
+
+ let t = |v, s| assert_eq!(s, format!("{}", <ExitStatus as ExitStatusExt>::from_raw(v)));
+
+ t(0x0000f, "signal: 15 (SIGTERM)");
+ t(0x0008b, "signal: 11 (SIGSEGV) (core dumped)");
+ t(0x00000, "exit status: 0");
+ t(0x0ff00, "exit status: 255");
+
+ // On macOS, 0x0137f is WIFCONTINUED, not WIFSTOPPED. Probably *BSD is similar.
+ // https://github.com/rust-lang/rust/pull/82749#issuecomment-790525956
+ // The purpose of this test is to test our string formatting, not our understanding of the wait
+ // status magic numbers. So restrict these to Linux.
+ if cfg!(target_os = "linux") {
+ t(0x0137f, "stopped (not terminated) by signal: 19 (SIGSTOP)");
+ t(0x0ffff, "continued (WIFCONTINUED)");
+ }
+
+ // Testing "unrecognised wait status" is hard because the wait.h macros typically
+ // assume that the value came from wait and isn't arbitrary. With the glibc
+ // used here, the following works:
+ if cfg!(all(target_os = "linux", target_env = "gnu")) {
+ t(0x000ff, "unrecognised wait status: 255 0xff");
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_command_fork_no_unwind() {
+ let got = catch_unwind(|| {
+ let mut c = Command::new("echo");
+ c.arg("hi");
+ unsafe {
+ c.pre_exec(|| panic!("{}", "crash now!"));
+ }
+ let st = c.status().expect("failed to get command status");
+ dbg!(st);
+ st
+ });
+ dbg!(&got);
+ let status = got.expect("panic unexpectedly propagated");
+ dbg!(status);
+ let signal = status.signal().expect("expected child process to die of signal");
+ assert!(
+ signal == libc::SIGABRT
+ || signal == libc::SIGILL
+ || signal == libc::SIGTRAP
+ || signal == libc::SIGSEGV
+ );
+}
diff --git a/library/std/src/sys/unix/process/process_unsupported.rs b/library/std/src/sys/unix/process/process_unsupported.rs
new file mode 100644
index 000000000..72f9f3f9c
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_unsupported.rs
@@ -0,0 +1,118 @@
+use crate::fmt;
+use crate::io;
+use crate::num::NonZeroI32;
+use crate::sys::process::process_common::*;
+use crate::sys::unix::unsupported::*;
+use core::ffi::NonZero_c_int;
+
+use libc::{c_int, pid_t};
+
+////////////////////////////////////////////////////////////////////////////////
+// Command
+////////////////////////////////////////////////////////////////////////////////
+
+impl Command {
+ pub fn spawn(
+ &mut self,
+ _default: Stdio,
+ _needs_stdin: bool,
+ ) -> io::Result<(Process, StdioPipes)> {
+ unsupported()
+ }
+
+ pub fn exec(&mut self, _default: Stdio) -> io::Error {
+ unsupported_err()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Processes
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct Process {
+ _handle: pid_t,
+}
+
+impl Process {
+ pub fn id(&self) -> u32 {
+ 0
+ }
+
+ pub fn kill(&mut self) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn wait(&mut self) -> io::Result<ExitStatus> {
+ unsupported()
+ }
+
+ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ unsupported()
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatus(c_int);
+
+impl ExitStatus {
+ #[cfg_attr(target_os = "horizon", allow(unused))]
+ pub fn success(&self) -> bool {
+ self.code() == Some(0)
+ }
+
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ Err(ExitStatusError(1.try_into().unwrap()))
+ }
+
+ pub fn code(&self) -> Option<i32> {
+ None
+ }
+
+ pub fn signal(&self) -> Option<i32> {
+ None
+ }
+
+ pub fn core_dumped(&self) -> bool {
+ false
+ }
+
+ pub fn stopped_signal(&self) -> Option<i32> {
+ None
+ }
+
+ pub fn continued(&self) -> bool {
+ false
+ }
+
+ pub fn into_raw(&self) -> c_int {
+ 0
+ }
+}
+
+/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying.
+impl From<c_int> for ExitStatus {
+ fn from(a: c_int) -> ExitStatus {
+ ExitStatus(a as i32)
+ }
+}
+
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "exit code: {}", self.0)
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatusError(NonZero_c_int);
+
+impl Into<ExitStatus> for ExitStatusError {
+ fn into(self) -> ExitStatus {
+ ExitStatus(self.0.into())
+ }
+}
+
+impl ExitStatusError {
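+ // `ExitStatus::code` above is always `None` on this unsupported backend,
+ // so this always returns `None` as well.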
+ pub fn code(self) -> Option<NonZeroI32> {
+ ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap())
+ }
+}
diff --git a/library/std/src/sys/unix/process/process_vxworks.rs b/library/std/src/sys/unix/process/process_vxworks.rs
new file mode 100644
index 000000000..200ef6719
--- /dev/null
+++ b/library/std/src/sys/unix/process/process_vxworks.rs
@@ -0,0 +1,262 @@
+use crate::fmt;
+use crate::io::{self, Error, ErrorKind};
+use crate::num::NonZeroI32;
+use crate::sys;
+use crate::sys::cvt;
+use crate::sys::process::process_common::*;
+use crate::sys_common::thread;
+use core::ffi::NonZero_c_int;
+use libc::RTP_ID;
+use libc::{self, c_char, c_int};
+
+////////////////////////////////////////////////////////////////////////////////
+// Command
+////////////////////////////////////////////////////////////////////////////////
+
+impl Command {
+ pub fn spawn(
+ &mut self,
+ default: Stdio,
+ needs_stdin: bool,
+ ) -> io::Result<(Process, StdioPipes)> {
+ use crate::sys::cvt_r;
+ let envp = self.capture_env();
+
+ if self.saw_nul() {
+ return Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "nul byte found in provided data",
+ ));
+ }
+ let (ours, theirs) = self.setup_io(default, needs_stdin)?;
+ let mut p = Process { pid: 0, status: None };
+
+ unsafe {
+ macro_rules! t {
+ ($e:expr) => {
+ match $e {
+ Ok(e) => e,
+ Err(e) => return Err(e.into()),
+ }
+ };
+ }
+
+ let mut orig_stdin = libc::STDIN_FILENO;
+ let mut orig_stdout = libc::STDOUT_FILENO;
+ let mut orig_stderr = libc::STDERR_FILENO;
+
+ if let Some(fd) = theirs.stdin.fd() {
+ orig_stdin = t!(cvt_r(|| libc::dup(libc::STDIN_FILENO)));
+ t!(cvt_r(|| libc::dup2(fd, libc::STDIN_FILENO)));
+ }
+ if let Some(fd) = theirs.stdout.fd() {
+ orig_stdout = t!(cvt_r(|| libc::dup(libc::STDOUT_FILENO)));
+ t!(cvt_r(|| libc::dup2(fd, libc::STDOUT_FILENO)));
+ }
+ if let Some(fd) = theirs.stderr.fd() {
+ orig_stderr = t!(cvt_r(|| libc::dup(libc::STDERR_FILENO)));
+ t!(cvt_r(|| libc::dup2(fd, libc::STDERR_FILENO)));
+ }
+
+ if let Some(ref cwd) = *self.get_cwd() {
+ t!(cvt(libc::chdir(cwd.as_ptr())));
+ }
+
+ // pre_exec closures are ignored on VxWorks
+ let _ = self.get_closures();
+
+ let c_envp = envp
+ .as_ref()
+ .map(|c| c.as_ptr())
+ .unwrap_or_else(|| *sys::os::environ() as *const _);
+ let stack_size = thread::min_stack();
+
+ // ensure that access to the environment is synchronized
+ let _lock = sys::os::env_read_lock();
+
+ let ret = libc::rtpSpawn(
+ self.get_program_cstr().as_ptr(),
+ self.get_argv().as_ptr() as *mut *const c_char, // argv
+ c_envp as *mut *const c_char,
+ 100 as c_int, // initial priority
+ stack_size, // initial stack size.
+ 0, // options
+ 0, // task options
+ );
+
+ // Because FileDesc was not used, each duplicated file descriptor
+ // needs to be closed manually
+ if orig_stdin != libc::STDIN_FILENO {
+ t!(cvt_r(|| libc::dup2(orig_stdin, libc::STDIN_FILENO)));
+ libc::close(orig_stdin);
+ }
+ if orig_stdout != libc::STDOUT_FILENO {
+ t!(cvt_r(|| libc::dup2(orig_stdout, libc::STDOUT_FILENO)));
+ libc::close(orig_stdout);
+ }
+ if orig_stderr != libc::STDERR_FILENO {
+ t!(cvt_r(|| libc::dup2(orig_stderr, libc::STDERR_FILENO)));
+ libc::close(orig_stderr);
+ }
+
+ if ret != libc::RTP_ID_ERROR {
+ p.pid = ret;
+ Ok((p, ours))
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+
+ pub fn exec(&mut self, default: Stdio) -> io::Error {
+ let ret = Command::spawn(self, default, false);
+ match ret {
+ Ok(t) => unsafe {
+ let mut status = 0 as c_int;
+ libc::waitpid(t.0.pid, &mut status, 0);
+ libc::exit(0);
+ },
+ Err(e) => e,
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Processes
+////////////////////////////////////////////////////////////////////////////////
+
+/// The unique id of the process (this should never be negative).
+pub struct Process {
+ pid: RTP_ID,
+ status: Option<ExitStatus>,
+}
+
+impl Process {
+ pub fn id(&self) -> u32 {
+ self.pid as u32
+ }
+
+ pub fn kill(&mut self) -> io::Result<()> {
+ // If we've already waited on this process then the pid can be recycled
+ // and used for another process, and we probably shouldn't be killing
+ // random processes, so just return an error.
+ if self.status.is_some() {
+ Err(io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "invalid argument: can't kill an exited process",
+ ))
+ } else {
+ cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop)
+ }
+ }
+
+ pub fn wait(&mut self) -> io::Result<ExitStatus> {
+ use crate::sys::cvt_r;
+ if let Some(status) = self.status {
+ return Ok(status);
+ }
+ let mut status = 0 as c_int;
+ cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?;
+ self.status = Some(ExitStatus::new(status));
+ Ok(ExitStatus::new(status))
+ }
+
+ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ if let Some(status) = self.status {
+ return Ok(Some(status));
+ }
+ let mut status = 0 as c_int;
+ let pid = cvt(unsafe { libc::waitpid(self.pid, &mut status, libc::WNOHANG) })?;
+ if pid == 0 {
+ Ok(None)
+ } else {
+ self.status = Some(ExitStatus::new(status));
+ Ok(Some(ExitStatus::new(status)))
+ }
+ }
+}
+
+/// Unix exit statuses
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatus(c_int);
+
+impl ExitStatus {
+ pub fn new(status: c_int) -> ExitStatus {
+ ExitStatus(status)
+ }
+
+ fn exited(&self) -> bool {
+ libc::WIFEXITED(self.0)
+ }
+
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
+ // true on all actual versions of Unix, is widely assumed, and is specified in SuS
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not
+ // true for a platform pretending to be Unix, the tests (our doctests, and also
+ // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
+ match NonZero_c_int::try_from(self.0) {
+ Ok(failure) => Err(ExitStatusError(failure)),
+ Err(_) => Ok(()),
+ }
+ }
+
+ pub fn code(&self) -> Option<i32> {
+ if self.exited() { Some(libc::WEXITSTATUS(self.0)) } else { None }
+ }
+
+ pub fn signal(&self) -> Option<i32> {
+ if !self.exited() { Some(libc::WTERMSIG(self.0)) } else { None }
+ }
+
+ pub fn core_dumped(&self) -> bool {
+ // This method is not yet properly implemented on VxWorks
+ false
+ }
+
+ pub fn stopped_signal(&self) -> Option<i32> {
+ if libc::WIFSTOPPED(self.0) { Some(libc::WSTOPSIG(self.0)) } else { None }
+ }
+
+ pub fn continued(&self) -> bool {
+ // This method is not yet properly implemented on VxWorks
+ false
+ }
+
+ pub fn into_raw(&self) -> c_int {
+ self.0
+ }
+}
+
+/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying.
+impl From<c_int> for ExitStatus {
+ fn from(a: c_int) -> ExitStatus {
+ ExitStatus(a)
+ }
+}
+
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if let Some(code) = self.code() {
+ write!(f, "exit code: {code}")
+ } else {
+ let signal = self.signal().unwrap();
+ write!(f, "signal: {signal}")
+ }
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatusError(NonZero_c_int);
+
+impl Into<ExitStatus> for ExitStatusError {
+ fn into(self) -> ExitStatus {
+ ExitStatus(self.0.into())
+ }
+}
+
+impl ExitStatusError {
+ pub fn code(self) -> Option<NonZeroI32> {
+ ExitStatus(self.0.into()).code().map(|st| st.try_into().unwrap())
+ }
+}
diff --git a/library/std/src/sys/unix/process/zircon.rs b/library/std/src/sys/unix/process/zircon.rs
new file mode 100644
index 000000000..2e596486f
--- /dev/null
+++ b/library/std/src/sys/unix/process/zircon.rs
@@ -0,0 +1,309 @@
+#![allow(non_camel_case_types, unused)]
+
+use crate::io;
+use crate::mem::MaybeUninit;
+use crate::os::raw::c_char;
+
+use libc::{c_int, c_void, size_t};
+
+pub type zx_handle_t = u32;
+pub type zx_vaddr_t = usize;
+pub type zx_rights_t = u32;
+pub type zx_status_t = i32;
+
+pub const ZX_HANDLE_INVALID: zx_handle_t = 0;
+
+pub type zx_time_t = i64;
+pub const ZX_TIME_INFINITE: zx_time_t = i64::MAX;
+
+pub type zx_signals_t = u32;
+
+pub const ZX_OBJECT_SIGNAL_3: zx_signals_t = 1 << 3;
+
+pub const ZX_TASK_TERMINATED: zx_signals_t = ZX_OBJECT_SIGNAL_3;
+
+pub const ZX_RIGHT_SAME_RIGHTS: zx_rights_t = 1 << 31;
+
+// The upper four bits gives the minor version.
+pub type zx_object_info_topic_t = u32;
+
+pub const ZX_INFO_PROCESS: zx_object_info_topic_t = 3 | (1 << 28);
+
+pub type zx_info_process_flags_t = u32;
+
+pub fn zx_cvt<T>(t: T) -> io::Result<T>
+where
+ T: TryInto<zx_status_t> + Copy,
+{
+ if let Ok(status) = TryInto::try_into(t) {
+ if status < 0 { Err(io::Error::from_raw_os_error(status)) } else { Ok(t) }
+ } else {
+ Err(io::Error::last_os_error())
+ }
+}
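+
+// Illustrative usage: `zx_cvt(unsafe { zx_task_kill(handle) })?` turns a
+// negative zx_status_t into an io::Error and passes other values through.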
+
+// Safe wrapper around zx_handle_t
+pub struct Handle {
+ raw: zx_handle_t,
+}
+
+impl Handle {
+ pub fn new(raw: zx_handle_t) -> Handle {
+ Handle { raw }
+ }
+
+ pub fn raw(&self) -> zx_handle_t {
+ self.raw
+ }
+}
+
+impl Drop for Handle {
+ fn drop(&mut self) {
+ unsafe {
+ zx_cvt(zx_handle_close(self.raw)).expect("Failed to close zx_handle_t");
+ }
+ }
+}
+
+// Returned for topic ZX_INFO_PROCESS
+#[derive(Default)]
+#[repr(C)]
+pub struct zx_info_process_t {
+ pub return_code: i64,
+ pub start_time: zx_time_t,
+ pub flags: zx_info_process_flags_t,
+ pub reserved1: u32,
+}
+
+extern "C" {
+ pub fn zx_job_default() -> zx_handle_t;
+
+ pub fn zx_task_kill(handle: zx_handle_t) -> zx_status_t;
+
+ pub fn zx_handle_close(handle: zx_handle_t) -> zx_status_t;
+
+ pub fn zx_handle_duplicate(
+ handle: zx_handle_t,
+ rights: zx_rights_t,
+ out: *const zx_handle_t,
+ ) -> zx_handle_t;
+
+ pub fn zx_object_wait_one(
+ handle: zx_handle_t,
+ signals: zx_signals_t,
+ timeout: zx_time_t,
+ pending: *mut zx_signals_t,
+ ) -> zx_status_t;
+
+ pub fn zx_object_get_info(
+ handle: zx_handle_t,
+ topic: u32,
+ buffer: *mut c_void,
+ buffer_size: size_t,
+ actual_size: *mut size_t,
+ avail: *mut size_t,
+ ) -> zx_status_t;
+}
+
+#[derive(Default)]
+#[repr(C)]
+pub struct fdio_spawn_action_t {
+ pub action: u32,
+ pub reserved0: u32,
+ pub local_fd: i32,
+ pub target_fd: i32,
+ pub reserved1: u64,
+}
+
+extern "C" {
+ pub fn fdio_spawn_etc(
+ job: zx_handle_t,
+ flags: u32,
+ path: *const c_char,
+ argv: *const *const c_char,
+ envp: *const *const c_char,
+ action_count: size_t,
+ actions: *const fdio_spawn_action_t,
+ process: *mut zx_handle_t,
+ err_msg: *mut c_char,
+ ) -> zx_status_t;
+
+ pub fn fdio_fd_clone(fd: c_int, out_handle: *mut zx_handle_t) -> zx_status_t;
+ pub fn fdio_fd_create(handle: zx_handle_t, fd: *mut c_int) -> zx_status_t;
+}
+
+// fdio_spawn_etc flags
+
+pub const FDIO_SPAWN_CLONE_JOB: u32 = 0x0001;
+pub const FDIO_SPAWN_CLONE_LDSVC: u32 = 0x0002;
+pub const FDIO_SPAWN_CLONE_NAMESPACE: u32 = 0x0004;
+pub const FDIO_SPAWN_CLONE_STDIO: u32 = 0x0008;
+pub const FDIO_SPAWN_CLONE_ENVIRON: u32 = 0x0010;
+pub const FDIO_SPAWN_CLONE_UTC_CLOCK: u32 = 0x0020;
+pub const FDIO_SPAWN_CLONE_ALL: u32 = 0xFFFF;
+
+// fdio_spawn_etc actions
+
+pub const FDIO_SPAWN_ACTION_CLONE_FD: u32 = 0x0001;
+pub const FDIO_SPAWN_ACTION_TRANSFER_FD: u32 = 0x0002;
+
+// Errors
+
+#[allow(unused)]
+pub const ERR_INTERNAL: zx_status_t = -1;
+
+// ERR_NOT_SUPPORTED: The operation is not implemented, supported,
+// or enabled.
+#[allow(unused)]
+pub const ERR_NOT_SUPPORTED: zx_status_t = -2;
+
+// ERR_NO_RESOURCES: The system was not able to allocate some resource
+// needed for the operation.
+#[allow(unused)]
+pub const ERR_NO_RESOURCES: zx_status_t = -3;
+
+// ERR_NO_MEMORY: The system was not able to allocate memory needed
+// for the operation.
+#[allow(unused)]
+pub const ERR_NO_MEMORY: zx_status_t = -4;
+
+// ERR_CALL_FAILED: The second phase of zx_channel_call() did not complete
+// successfully.
+#[allow(unused)]
+pub const ERR_CALL_FAILED: zx_status_t = -5;
+
+// ERR_INTERRUPTED_RETRY: The system call was interrupted, but should be
+// retried. This should not be seen outside of the VDSO.
+#[allow(unused)]
+pub const ERR_INTERRUPTED_RETRY: zx_status_t = -6;
+
+// ======= Parameter errors =======
+// ERR_INVALID_ARGS: an argument is invalid, ex. null pointer
+#[allow(unused)]
+pub const ERR_INVALID_ARGS: zx_status_t = -10;
+
+// ERR_BAD_HANDLE: A specified handle value does not refer to a handle.
+#[allow(unused)]
+pub const ERR_BAD_HANDLE: zx_status_t = -11;
+
+// ERR_WRONG_TYPE: The subject of the operation is the wrong type to
+// perform the operation.
+// Example: Attempting a message_read on a thread handle.
+#[allow(unused)]
+pub const ERR_WRONG_TYPE: zx_status_t = -12;
+
+// ERR_BAD_SYSCALL: The specified syscall number is invalid.
+#[allow(unused)]
+pub const ERR_BAD_SYSCALL: zx_status_t = -13;
+
+// ERR_OUT_OF_RANGE: An argument is outside the valid range for this
+// operation.
+#[allow(unused)]
+pub const ERR_OUT_OF_RANGE: zx_status_t = -14;
+
+// ERR_BUFFER_TOO_SMALL: A caller provided buffer is too small for
+// this operation.
+#[allow(unused)]
+pub const ERR_BUFFER_TOO_SMALL: zx_status_t = -15;
+
+// ======= Precondition or state errors =======
+// ERR_BAD_STATE: operation failed because the current state of the
+// object does not allow it, or a precondition of the operation is
+// not satisfied
+#[allow(unused)]
+pub const ERR_BAD_STATE: zx_status_t = -20;
+
+// ERR_TIMED_OUT: The time limit for the operation elapsed before
+// the operation completed.
+#[allow(unused)]
+pub const ERR_TIMED_OUT: zx_status_t = -21;
+
+// ERR_SHOULD_WAIT: The operation cannot be performed currently but
+// potentially could succeed if the caller waits for a prerequisite
+// to be satisfied, for example waiting for a handle to be readable
+// or writable.
+// Example: Attempting to read from a message pipe that has no
+// messages waiting but has an open remote will return ERR_SHOULD_WAIT.
+// Attempting to read from a message pipe that has no messages waiting
+// and has a closed remote end will return ERR_REMOTE_CLOSED.
+#[allow(unused)]
+pub const ERR_SHOULD_WAIT: zx_status_t = -22;
+
+// ERR_CANCELED: The in-progress operation (e.g., a wait) has been
+// canceled.
+#[allow(unused)]
+pub const ERR_CANCELED: zx_status_t = -23;
+
+// ERR_PEER_CLOSED: The operation failed because the remote end
+// of the subject of the operation was closed.
+#[allow(unused)]
+pub const ERR_PEER_CLOSED: zx_status_t = -24;
+
+// ERR_NOT_FOUND: The requested entity is not found.
+#[allow(unused)]
+pub const ERR_NOT_FOUND: zx_status_t = -25;
+
+// ERR_ALREADY_EXISTS: An object with the specified identifier
+// already exists.
+// Example: Attempting to create a file when a file already exists
+// with that name.
+#[allow(unused)]
+pub const ERR_ALREADY_EXISTS: zx_status_t = -26;
+
+// ERR_ALREADY_BOUND: The operation failed because the named entity
+// is already owned or controlled by another entity. The operation
+// could succeed later if the current owner releases the entity.
+#[allow(unused)]
+pub const ERR_ALREADY_BOUND: zx_status_t = -27;
+
+// ERR_UNAVAILABLE: The subject of the operation is currently unable
+// to perform the operation.
+// Note: This is used when there's no direct way for the caller to
+// observe when the subject will be able to perform the operation
+// and should thus retry.
+#[allow(unused)]
+pub const ERR_UNAVAILABLE: zx_status_t = -28;
+
+// ======= Permission check errors =======
+// ERR_ACCESS_DENIED: The caller did not have permission to perform
+// the specified operation.
+#[allow(unused)]
+pub const ERR_ACCESS_DENIED: zx_status_t = -30;
+
+// ======= Input-output errors =======
+// ERR_IO: Otherwise unspecified error occurred during I/O.
+#[allow(unused)]
+pub const ERR_IO: zx_status_t = -40;
+
+// ERR_IO_REFUSED: The entity the I/O operation is being performed on
+// rejected the operation.
+// Example: an I2C device NAK'ing a transaction or a disk controller
+// rejecting an invalid command.
+#[allow(unused)]
+pub const ERR_IO_REFUSED: zx_status_t = -41;
+
+// ERR_IO_DATA_INTEGRITY: The data in the operation failed an integrity
+// check and is possibly corrupted.
+// Example: CRC or Parity error.
+#[allow(unused)]
+pub const ERR_IO_DATA_INTEGRITY: zx_status_t = -42;
+
+// ERR_IO_DATA_LOSS: The data in the operation is currently unavailable
+// and may be permanently lost.
+// Example: A disk block is irrecoverably damaged.
+#[allow(unused)]
+pub const ERR_IO_DATA_LOSS: zx_status_t = -43;
+
+// Filesystem specific errors
+#[allow(unused)]
+pub const ERR_BAD_PATH: zx_status_t = -50;
+#[allow(unused)]
+pub const ERR_NOT_DIR: zx_status_t = -51;
+#[allow(unused)]
+pub const ERR_NOT_FILE: zx_status_t = -52;
+// ERR_FILE_BIG: A file exceeds a filesystem-specific size limit.
+#[allow(unused)]
+pub const ERR_FILE_BIG: zx_status_t = -53;
+// ERR_NO_SPACE: Filesystem or device space is exhausted.
+#[allow(unused)]
+pub const ERR_NO_SPACE: zx_status_t = -54;
diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs
new file mode 100644
index 000000000..bf4920488
--- /dev/null
+++ b/library/std/src/sys/unix/rand.rs
@@ -0,0 +1,301 @@
+use crate::mem;
+use crate::slice;
+
+pub fn hashmap_random_keys() -> (u64, u64) {
+ let mut v = (0, 0);
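+ // Treat the pair of u64s as a single 16-byte buffer so one call into the
+ // platform RNG fills both hash keys.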
+ unsafe {
+ let view = slice::from_raw_parts_mut(&mut v as *mut _ as *mut u8, mem::size_of_val(&v));
+ imp::fill_bytes(view);
+ }
+ v
+}
+
+#[cfg(all(
+ unix,
+ not(target_os = "macos"),
+ not(target_os = "ios"),
+ not(target_os = "watchos"),
+ not(target_os = "openbsd"),
+ not(target_os = "freebsd"),
+ not(target_os = "netbsd"),
+ not(target_os = "fuchsia"),
+ not(target_os = "redox"),
+ not(target_os = "vxworks")
+))]
+mod imp {
+ use crate::fs::File;
+ use crate::io::Read;
+
+ #[cfg(any(target_os = "linux", target_os = "android"))]
+ use crate::sys::weak::syscall;
+
+ #[cfg(any(target_os = "linux", target_os = "android"))]
+ fn getrandom(buf: &mut [u8]) -> libc::ssize_t {
+ use crate::sync::atomic::{AtomicBool, Ordering};
+ use crate::sys::os::errno;
+
+ // A weak symbol allows interposition, e.g. for perf measurements that want to
+ // disable randomness for consistency. Otherwise, we'll try a raw syscall.
+ // (`getrandom` was added in glibc 2.25, musl 1.1.20, android API level 28)
+ syscall! {
+ fn getrandom(
+ buffer: *mut libc::c_void,
+ length: libc::size_t,
+ flags: libc::c_uint
+ ) -> libc::ssize_t
+ }
+
+ // This provides the best quality random numbers available at the given moment
+ // without ever blocking, and is preferable to falling back to /dev/urandom.
+ static GRND_INSECURE_AVAILABLE: AtomicBool = AtomicBool::new(true);
+ if GRND_INSECURE_AVAILABLE.load(Ordering::Relaxed) {
+ let ret = unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), libc::GRND_INSECURE) };
+ if ret == -1 && errno() as libc::c_int == libc::EINVAL {
+ GRND_INSECURE_AVAILABLE.store(false, Ordering::Relaxed);
+ } else {
+ return ret;
+ }
+ }
+
+ unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), libc::GRND_NONBLOCK) }
+ }
+
+ #[cfg(any(target_os = "espidf", target_os = "horizon"))]
+ fn getrandom(buf: &mut [u8]) -> libc::ssize_t {
+ unsafe { libc::getrandom(buf.as_mut_ptr().cast(), buf.len(), 0) }
+ }
+
+ #[cfg(not(any(
+ target_os = "linux",
+ target_os = "android",
+ target_os = "espidf",
+ target_os = "horizon"
+ )))]
+ fn getrandom_fill_bytes(_buf: &mut [u8]) -> bool {
+ false
+ }
+
+ #[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ target_os = "espidf",
+ target_os = "horizon"
+ ))]
+ fn getrandom_fill_bytes(v: &mut [u8]) -> bool {
+ use crate::sync::atomic::{AtomicBool, Ordering};
+ use crate::sys::os::errno;
+
+ static GETRANDOM_UNAVAILABLE: AtomicBool = AtomicBool::new(false);
+ if GETRANDOM_UNAVAILABLE.load(Ordering::Relaxed) {
+ return false;
+ }
+
+ let mut read = 0;
+ while read < v.len() {
+ let result = getrandom(&mut v[read..]);
+ if result == -1 {
+ let err = errno() as libc::c_int;
+ if err == libc::EINTR {
+ continue;
+ } else if err == libc::ENOSYS || err == libc::EPERM {
+ // Fall back to reading /dev/urandom if `getrandom` is not
+ // supported on the current kernel.
+ //
+ // Also fall back in case it is disabled by something like
+ // seccomp or inside of virtual machines.
+ GETRANDOM_UNAVAILABLE.store(true, Ordering::Relaxed);
+ return false;
+ } else if err == libc::EAGAIN {
+ return false;
+ } else {
+ panic!("unexpected getrandom error: {err}");
+ }
+ } else {
+ read += result as usize;
+ }
+ }
+ true
+ }
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ // getrandom_fill_bytes here can fail if getrandom() returns EAGAIN,
+ // meaning it would have blocked because the non-blocking pool (urandom)
+ // has not been initialized in the kernel yet due to a lack of entropy.
+ // The fallback here avoids blocking applications that depend on this
+ // call without ever knowing they do, and that have no workaround. The
+ // PRNG of /dev/urandom will still be used, but over a possibly
+ // predictable entropy pool.
+ if getrandom_fill_bytes(v) {
+ return;
+ }
+
+ // getrandom failed because it is permanently or temporarily (because
+ // of missing entropy) unavailable. Open /dev/urandom, read from it,
+ // and close it again.
+ let mut file = File::open("/dev/urandom").expect("failed to open /dev/urandom");
+ file.read_exact(v).expect("failed to read /dev/urandom")
+ }
+}
+
+#[cfg(target_os = "macos")]
+mod imp {
+ use crate::fs::File;
+ use crate::io::Read;
+ use crate::sys::os::errno;
+ use crate::sys::weak::weak;
+ use libc::{c_int, c_void, size_t};
+
+ fn getentropy_fill_bytes(v: &mut [u8]) -> bool {
+ weak!(fn getentropy(*mut c_void, size_t) -> c_int);
+
+ getentropy
+ .get()
+ .map(|f| {
+ // getentropy(2) permits a maximum buffer size of 256 bytes
+ for s in v.chunks_mut(256) {
+ let ret = unsafe { f(s.as_mut_ptr() as *mut c_void, s.len()) };
+ if ret == -1 {
+ panic!("unexpected getentropy error: {}", errno());
+ }
+ }
+ true
+ })
+ .unwrap_or(false)
+ }
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ if getentropy_fill_bytes(v) {
+ return;
+ }
+
+ // For older macOS versions that don't support getentropy.
+ let mut file = File::open("/dev/urandom").expect("failed to open /dev/urandom");
+ file.read_exact(v).expect("failed to read /dev/urandom")
+ }
+}
+
+#[cfg(target_os = "openbsd")]
+mod imp {
+ use crate::sys::os::errno;
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ // getentropy(2) permits a maximum buffer size of 256 bytes
+ for s in v.chunks_mut(256) {
+ let ret = unsafe { libc::getentropy(s.as_mut_ptr() as *mut libc::c_void, s.len()) };
+ if ret == -1 {
+ panic!("unexpected getentropy error: {}", errno());
+ }
+ }
+ }
+}
+
+// On iOS and macOS `SecRandomCopyBytes` calls `CCRandomCopyBytes` with
+// `kCCRandomDefault`. `CCRandomCopyBytes` manages a CSPRNG which is seeded
+// from `/dev/random` and which runs on its own thread accessed via GCD.
+// This seems needlessly heavyweight for the purposes of generating two u64s
+// once per thread in `hashmap_random_keys`. Therefore `SecRandomCopyBytes` is
+// only used on iOS and watchOS, where direct access to `/dev/urandom` is
+// blocked by the sandbox.
+#[cfg(any(target_os = "ios", target_os = "watchos"))]
+mod imp {
+ use crate::io;
+ use crate::ptr;
+ use libc::{c_int, size_t};
+
+ enum SecRandom {}
+
+ #[allow(non_upper_case_globals)]
+ const kSecRandomDefault: *const SecRandom = ptr::null();
+
+ extern "C" {
+ fn SecRandomCopyBytes(rnd: *const SecRandom, count: size_t, bytes: *mut u8) -> c_int;
+ }
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ let ret = unsafe { SecRandomCopyBytes(kSecRandomDefault, v.len(), v.as_mut_ptr()) };
+ if ret == -1 {
+ panic!("couldn't generate random bytes: {}", io::Error::last_os_error());
+ }
+ }
+}
+
+#[cfg(any(target_os = "freebsd", target_os = "netbsd"))]
+mod imp {
+ use crate::ptr;
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ let mib = [libc::CTL_KERN, libc::KERN_ARND];
+ // kern.arandom permits a maximum buffer size of 256 bytes
+ for s in v.chunks_mut(256) {
+ let mut s_len = s.len();
+ let ret = unsafe {
+ libc::sysctl(
+ mib.as_ptr(),
+ mib.len() as libc::c_uint,
+ s.as_mut_ptr() as *mut _,
+ &mut s_len,
+ ptr::null(),
+ 0,
+ )
+ };
+ if ret == -1 || s_len != s.len() {
+ panic!(
+ "kern.arandom sysctl failed! (returned {}, s.len() {}, oldlenp {})",
+ ret,
+ s.len(),
+ s_len
+ );
+ }
+ }
+ }
+}
+
+#[cfg(target_os = "fuchsia")]
+mod imp {
+ #[link(name = "zircon")]
+ extern "C" {
+ fn zx_cprng_draw(buffer: *mut u8, len: usize);
+ }
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ unsafe { zx_cprng_draw(v.as_mut_ptr(), v.len()) }
+ }
+}
+
+#[cfg(target_os = "redox")]
+mod imp {
+ use crate::fs::File;
+ use crate::io::Read;
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ // Open rand:, read from it, and close it again.
+ let mut file = File::open("rand:").expect("failed to open rand:");
+ file.read_exact(v).expect("failed to read rand:")
+ }
+}
+
+#[cfg(target_os = "vxworks")]
+mod imp {
+ use crate::io;
+ use core::sync::atomic::{AtomicBool, Ordering::Relaxed};
+
+ pub fn fill_bytes(v: &mut [u8]) {
+ static RNG_INIT: AtomicBool = AtomicBool::new(false);
+ while !RNG_INIT.load(Relaxed) {
+ let ret = unsafe { libc::randSecure() };
+ if ret < 0 {
+ panic!("couldn't generate random bytes: {}", io::Error::last_os_error());
+ } else if ret > 0 {
+ RNG_INIT.store(true, Relaxed);
+ break;
+ }
+ unsafe { libc::usleep(10) };
+ }
+ let ret = unsafe {
+ libc::randABytes(v.as_mut_ptr() as *mut libc::c_uchar, v.len() as libc::c_int)
+ };
+ if ret < 0 {
+ panic!("couldn't generate random bytes: {}", io::Error::last_os_error());
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/stack_overflow.rs b/library/std/src/sys/unix/stack_overflow.rs
new file mode 100644
index 000000000..75a5c0f92
--- /dev/null
+++ b/library/std/src/sys/unix/stack_overflow.rs
@@ -0,0 +1,208 @@
+#![cfg_attr(test, allow(dead_code))]
+
+use self::imp::{drop_handler, make_handler};
+
+pub use self::imp::cleanup;
+pub use self::imp::init;
+
+pub struct Handler {
+ data: *mut libc::c_void,
+}
+
+impl Handler {
+ pub unsafe fn new() -> Handler {
+ make_handler()
+ }
+
+ fn null() -> Handler {
+ Handler { data: crate::ptr::null_mut() }
+ }
+}
+
+impl Drop for Handler {
+ fn drop(&mut self) {
+ unsafe {
+ drop_handler(self.data);
+ }
+ }
+}
+
+#[cfg(any(
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+mod imp {
+ use super::Handler;
+ use crate::io;
+ use crate::mem;
+ use crate::ptr;
+ use crate::thread;
+
+ use libc::MAP_FAILED;
+ use libc::{mmap, munmap};
+ use libc::{sigaction, sighandler_t, SA_ONSTACK, SA_SIGINFO, SIGBUS, SIG_DFL};
+ use libc::{sigaltstack, SIGSTKSZ, SS_DISABLE};
+ use libc::{MAP_ANON, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SIGSEGV};
+
+ use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+ use crate::sys::unix::os::page_size;
+ use crate::sys_common::thread_info;
+
+ // Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
+ // (unmapped pages) at the end of every thread's stack, so if a thread ends
+ // up running into the guard page it'll trigger this handler. We want to
+ // detect these cases and print out a helpful error saying that the stack
+ // has overflowed. All other signals, however, should go back to what they
+ // were originally supposed to do.
+ //
+ // This handler currently exists purely to print an informative message
+ // whenever a thread overflows its stack. We then abort to exit and
+ // indicate a crash, rather than delivering a misleading SIGSEGV that might
+ // lead users to believe that unsafe code has accessed an invalid pointer;
+ // the SIGSEGV encountered when overflowing the stack is expected and
+ // well-defined.
+ //
+ // If this is not a stack overflow, the handler un-registers itself and
+ // then returns (to allow the original signal to be delivered again).
+ // Returning from this kind of signal handler is technically not defined
+ // to work when reading the POSIX spec strictly, but in practice it turns
+ // out that many large systems (and, in practice, all known implementations)
+ // allow returning from a signal handler. For a more detailed explanation see the
+ // comments on #26458.
+ unsafe extern "C" fn signal_handler(
+ signum: libc::c_int,
+ info: *mut libc::siginfo_t,
+ _data: *mut libc::c_void,
+ ) {
+ let guard = thread_info::stack_guard().unwrap_or(0..0);
+ let addr = (*info).si_addr() as usize;
+
+ // If the faulting address is within the guard page, then we print a
+ // message saying so and abort.
+ if guard.start <= addr && addr < guard.end {
+ rtprintpanic!(
+ "\nthread '{}' has overflowed its stack\n",
+ thread::current().name().unwrap_or("<unknown>")
+ );
+ rtabort!("stack overflow");
+ } else {
+ // Unregister ourselves by reverting back to the default behavior.
+ let mut action: sigaction = mem::zeroed();
+ action.sa_sigaction = SIG_DFL;
+ sigaction(signum, &action, ptr::null_mut());
+
+ // See comment above for why this function returns.
+ }
+ }
+
+ static MAIN_ALTSTACK: AtomicPtr<libc::c_void> = AtomicPtr::new(ptr::null_mut());
+ static NEED_ALTSTACK: AtomicBool = AtomicBool::new(false);
+
+ pub unsafe fn init() {
+ let mut action: sigaction = mem::zeroed();
+ for &signal in &[SIGSEGV, SIGBUS] {
+ sigaction(signal, ptr::null_mut(), &mut action);
+ // Configure our signal handler if one is not already set.
+ if action.sa_sigaction == SIG_DFL {
+ action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ action.sa_sigaction = signal_handler as sighandler_t;
+ sigaction(signal, &action, ptr::null_mut());
+ NEED_ALTSTACK.store(true, Ordering::Relaxed);
+ }
+ }
+
+ let handler = make_handler();
+ MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
+ mem::forget(handler);
+ }
+
+ pub unsafe fn cleanup() {
+ drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed));
+ }
+
+ unsafe fn get_stackp() -> *mut libc::c_void {
+ // OpenBSD requires the MAP_STACK flag for mappings that are used as a stack;
+ // without it the mapping will fail. On most other systems the flag is a
+ // no-op, and on FreeBSD it has a different meaning.
+ #[cfg(any(target_os = "openbsd", target_os = "netbsd", target_os = "linux",))]
+ let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
+ #[cfg(not(any(target_os = "openbsd", target_os = "netbsd", target_os = "linux",)))]
+ let flags = MAP_PRIVATE | MAP_ANON;
+ let stackp =
+ mmap(ptr::null_mut(), SIGSTKSZ + page_size(), PROT_READ | PROT_WRITE, flags, -1, 0);
+ if stackp == MAP_FAILED {
+ panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
+ }
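+ // The first page of the mapping becomes a guard page; the usable
+ // alternate stack starts one page into the mapping. `drop_handler`
+ // relies on this layout when it unmaps the region.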
+ let guard_result = libc::mprotect(stackp, page_size(), PROT_NONE);
+ if guard_result != 0 {
+ panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
+ }
+ stackp.add(page_size())
+ }
+
+ unsafe fn get_stack() -> libc::stack_t {
+ libc::stack_t { ss_sp: get_stackp(), ss_flags: 0, ss_size: SIGSTKSZ }
+ }
+
+ pub unsafe fn make_handler() -> Handler {
+ if !NEED_ALTSTACK.load(Ordering::Relaxed) {
+ return Handler::null();
+ }
+ let mut stack = mem::zeroed();
+ sigaltstack(ptr::null(), &mut stack);
+ // Configure alternate signal stack, if one is not already set.
+ if stack.ss_flags & SS_DISABLE != 0 {
+ stack = get_stack();
+ sigaltstack(&stack, ptr::null_mut());
+ Handler { data: stack.ss_sp as *mut libc::c_void }
+ } else {
+ Handler::null()
+ }
+ }
+
+ pub unsafe fn drop_handler(data: *mut libc::c_void) {
+ if !data.is_null() {
+ let stack = libc::stack_t {
+ ss_sp: ptr::null_mut(),
+ ss_flags: SS_DISABLE,
+ // Workaround for a bug in the macOS (UNIX2003) implementation of
+ // sigaltstack, which returns ENOMEM when disabling a stack while
+ // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
+ // both ss_sp and ss_size should be ignored in this case.
+ ss_size: SIGSTKSZ,
+ };
+ sigaltstack(&stack, ptr::null_mut());
+ // We know from `get_stackp` that the alternate stack we installed is part of a mapping
+ // that started one page earlier, so walk back a page and unmap from there.
+ munmap(data.sub(page_size()), SIGSTKSZ + page_size());
+ }
+ }
+}
+
+#[cfg(not(any(
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+)))]
+mod imp {
+ pub unsafe fn init() {}
+
+ pub unsafe fn cleanup() {}
+
+ pub unsafe fn make_handler() -> super::Handler {
+ super::Handler::null()
+ }
+
+ pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
+}
diff --git a/library/std/src/sys/unix/stdio.rs b/library/std/src/sys/unix/stdio.rs
new file mode 100644
index 000000000..329f9433d
--- /dev/null
+++ b/library/std/src/sys/unix/stdio.rs
@@ -0,0 +1,141 @@
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::mem::ManuallyDrop;
+use crate::os::unix::io::{AsFd, BorrowedFd, FromRawFd};
+use crate::sys::fd::FileDesc;
+
+pub struct Stdin(());
+pub struct Stdout(());
+pub struct Stderr(());
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin(())
+ }
+}
+
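+// `ManuallyDrop` is used in the impls below so that the temporary `FileDesc`
+// does not close the standard stream's file descriptor when it is dropped.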
+impl io::Read for Stdin {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ unsafe { ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDIN_FILENO)).read(buf) }
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ unsafe { ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDIN_FILENO)).read_vectored(bufs) }
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ true
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout(())
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ unsafe { ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDOUT_FILENO)).write(buf) }
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ unsafe {
+ ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDOUT_FILENO)).write_vectored(bufs)
+ }
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr(())
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ unsafe { ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDERR_FILENO)).write(buf) }
+ }
+
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ unsafe {
+ ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDERR_FILENO)).write_vectored(bufs)
+ }
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub fn is_ebadf(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(libc::EBADF as i32)
+}
+
+pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
+
+pub fn panic_output() -> Option<impl io::Write> {
+ Some(Stderr::new())
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for io::Stdin {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsFd for io::StdinLock<'a> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for io::Stdout {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsFd for io::StdoutLock<'a> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl AsFd for io::Stderr {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) }
+ }
+}
+
+#[stable(feature = "io_safety", since = "1.63.0")]
+impl<'a> AsFd for io::StderrLock<'a> {
+ #[inline]
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) }
+ }
+}
diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs
new file mode 100644
index 000000000..36a3fa602
--- /dev/null
+++ b/library/std/src/sys/unix/thread.rs
@@ -0,0 +1,889 @@
+use crate::cmp;
+use crate::ffi::CStr;
+use crate::io;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr;
+use crate::sys::{os, stack_overflow};
+use crate::time::Duration;
+
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+use crate::sys::weak::dlsym;
+#[cfg(any(target_os = "solaris", target_os = "illumos"))]
+use crate::sys::weak::weak;
+#[cfg(not(any(target_os = "l4re", target_os = "vxworks", target_os = "espidf")))]
+pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
+#[cfg(target_os = "l4re")]
+pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024;
+#[cfg(target_os = "vxworks")]
+pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024;
+#[cfg(target_os = "espidf")]
+pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF menuconfig system should be used
+
+#[cfg(target_os = "fuchsia")]
+mod zircon {
+ type zx_handle_t = u32;
+ type zx_status_t = i32;
+ pub const ZX_PROP_NAME: u32 = 3;
+
+ extern "C" {
+ pub fn zx_object_set_property(
+ handle: zx_handle_t,
+ property: u32,
+ value: *const libc::c_void,
+ value_size: libc::size_t,
+ ) -> zx_status_t;
+ pub fn zx_thread_self() -> zx_handle_t;
+ }
+}
+
+pub struct Thread {
+ id: libc::pthread_t,
+}
+
+// Some platforms may define pthread_t as a pointer, in which case we still
+// want a Thread to be Send/Sync.
+unsafe impl Send for Thread {}
+unsafe impl Sync for Thread {}
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ let p = Box::into_raw(box p);
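+        // `p` is boxed twice: the outer Box is what crosses the FFI boundary as a
+        // thin pointer, and `thread_start` below reconstructs it before calling
+        // the inner `Box<dyn FnOnce()>`.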
+ let mut native: libc::pthread_t = mem::zeroed();
+ let mut attr: libc::pthread_attr_t = mem::zeroed();
+ assert_eq!(libc::pthread_attr_init(&mut attr), 0);
+
+ #[cfg(target_os = "espidf")]
+ if stack > 0 {
+ // Only set the stack if a non-zero value is passed
+ // 0 is used as an indication that the default stack size configured in the ESP-IDF menuconfig system should be used
+ assert_eq!(
+ libc::pthread_attr_setstacksize(&mut attr, cmp::max(stack, min_stack_size(&attr))),
+ 0
+ );
+ }
+
+ #[cfg(not(target_os = "espidf"))]
+ {
+ let stack_size = cmp::max(stack, min_stack_size(&attr));
+
+ match libc::pthread_attr_setstacksize(&mut attr, stack_size) {
+ 0 => {}
+ n => {
+ assert_eq!(n, libc::EINVAL);
+ // EINVAL means |stack_size| is either too small or not a
+ // multiple of the system page size. Because it's definitely
+ // >= PTHREAD_STACK_MIN, it must be an alignment issue.
+ // Round up to the nearest page and try again.
+ let page_size = os::page_size();
+ let stack_size =
+ (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);
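+                    // For example, with a 4 KiB page size the mask
+                    // `-(page_size as isize - 1) as usize - 1` equals
+                    // `!(page_size - 1)`, so a request of 10_000 bytes is
+                    // rounded up to (10_000 + 4_095) & !4_095 = 12_288 bytes.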
+ assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0);
+ }
+ };
+ }
+
+ let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _);
+ // Note: if the thread creation fails and this assert fails, then p will
+ // be leaked. However, an alternative design could cause double-free
+ // which is clearly worse.
+ assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
+
+ return if ret != 0 {
+ // The thread failed to start and as a result p was not consumed. Therefore, it is
+ // safe to reconstruct the box so that it gets deallocated.
+ drop(Box::from_raw(p));
+ Err(io::Error::from_raw_os_error(ret))
+ } else {
+ Ok(Thread { id: native })
+ };
+
+ extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
+ unsafe {
+ // Next, set up our stack overflow handler which may get triggered if we run
+ // out of stack.
+ let _handler = stack_overflow::Handler::new();
+ // Finally, let's run some code.
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+ }
+ ptr::null_mut()
+ }
+ }
+
+ pub fn yield_now() {
+ let ret = unsafe { libc::sched_yield() };
+ debug_assert_eq!(ret, 0);
+ }
+
+ #[cfg(any(target_os = "linux", target_os = "android"))]
+ pub fn set_name(name: &CStr) {
+ const PR_SET_NAME: libc::c_int = 15;
+ // pthread wrapper only appeared in glibc 2.12, so we use syscall
+ // directly.
+ unsafe {
+ libc::prctl(
+ PR_SET_NAME,
+ name.as_ptr(),
+ 0 as libc::c_ulong,
+ 0 as libc::c_ulong,
+ 0 as libc::c_ulong,
+ );
+ }
+ }
+
+ #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))]
+ pub fn set_name(name: &CStr) {
+ unsafe {
+ libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
+ }
+ }
+
+ #[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+ pub fn set_name(name: &CStr) {
+ unsafe {
+ libc::pthread_setname_np(name.as_ptr());
+ }
+ }
+
+ #[cfg(target_os = "netbsd")]
+ pub fn set_name(name: &CStr) {
+ use crate::ffi::CString;
+ let cname = CString::new(&b"%s"[..]).unwrap();
+ unsafe {
+ libc::pthread_setname_np(
+ libc::pthread_self(),
+ cname.as_ptr(),
+ name.as_ptr() as *mut libc::c_void,
+ );
+ }
+ }
+
+ #[cfg(any(target_os = "solaris", target_os = "illumos"))]
+ pub fn set_name(name: &CStr) {
+ weak! {
+ fn pthread_setname_np(
+ libc::pthread_t, *const libc::c_char
+ ) -> libc::c_int
+ }
+
+ if let Some(f) = pthread_setname_np.get() {
+ unsafe {
+ f(libc::pthread_self(), name.as_ptr());
+ }
+ }
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ pub fn set_name(name: &CStr) {
+ use self::zircon::*;
+ unsafe {
+ zx_object_set_property(
+ zx_thread_self(),
+ ZX_PROP_NAME,
+ name.as_ptr() as *const libc::c_void,
+ name.to_bytes().len(),
+ );
+ }
+ }
+
+ #[cfg(target_os = "haiku")]
+ pub fn set_name(name: &CStr) {
+ unsafe {
+ let thread_self = libc::find_thread(ptr::null_mut());
+ libc::rename_thread(thread_self, name.as_ptr());
+ }
+ }
+
+ #[cfg(any(
+ target_env = "newlib",
+ target_os = "l4re",
+ target_os = "emscripten",
+ target_os = "redox",
+ target_os = "vxworks"
+ ))]
+ pub fn set_name(_name: &CStr) {
+        // These targets have no supported way to set a thread name.
+ }
+
+ #[cfg(not(target_os = "espidf"))]
+ pub fn sleep(dur: Duration) {
+ let mut secs = dur.as_secs();
+ let mut nsecs = dur.subsec_nanos() as _;
+
+ // If we're awoken with a signal then the return value will be -1 and
+ // nanosleep will fill in `ts` with the remaining time.
+ unsafe {
+ while secs > 0 || nsecs > 0 {
+ let mut ts = libc::timespec {
+ tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t,
+ tv_nsec: nsecs,
+ };
+ secs -= ts.tv_sec as u64;
+ let ts_ptr = &mut ts as *mut _;
+ if libc::nanosleep(ts_ptr, ts_ptr) == -1 {
+ assert_eq!(os::errno(), libc::EINTR);
+ secs += ts.tv_sec as u64;
+ nsecs = ts.tv_nsec;
+ } else {
+ nsecs = 0;
+ }
+ }
+ }
+ }
+
+ #[cfg(target_os = "espidf")]
+ pub fn sleep(dur: Duration) {
+ let mut micros = dur.as_micros();
+ unsafe {
+ while micros > 0 {
+ let st = if micros > u32::MAX as u128 { u32::MAX } else { micros as u32 };
+ libc::usleep(st);
+
+ micros -= st as u128;
+ }
+ }
+ }
+
+ pub fn join(self) {
+ unsafe {
+ let ret = libc::pthread_join(self.id, ptr::null_mut());
+ mem::forget(self);
+ assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
+ }
+ }
+
+ pub fn id(&self) -> libc::pthread_t {
+ self.id
+ }
+
+ pub fn into_id(self) -> libc::pthread_t {
+ let id = self.id;
+ mem::forget(self);
+ id
+ }
+}
+
+impl Drop for Thread {
+ fn drop(&mut self) {
+ let ret = unsafe { libc::pthread_detach(self.id) };
+ debug_assert_eq!(ret, 0);
+ }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "android",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "ios",
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "solaris",
+ target_os = "illumos",
+ ))] {
+ #[cfg(any(target_os = "android", target_os = "linux"))]
+ {
+ let quota = cgroups::quota().max(1);
+ let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
+ unsafe {
+ if libc::sched_getaffinity(0, mem::size_of::<libc::cpu_set_t>(), &mut set) == 0 {
+ let count = libc::CPU_COUNT(&set) as usize;
+ let count = count.min(quota);
+ // SAFETY: affinity mask can't be empty and the quota gets clamped to a minimum of 1
+ return Ok(NonZeroUsize::new_unchecked(count));
+ }
+ }
+ }
+ match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
+ -1 => Err(io::Error::last_os_error()),
+ 0 => Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform")),
+ cpus => Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) }),
+ }
+ } else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "netbsd"))] {
+ use crate::ptr;
+
+ let mut cpus: libc::c_uint = 0;
+ let mut cpus_size = crate::mem::size_of_val(&cpus);
+
+ unsafe {
+ cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
+ }
+
+ // Fallback approach in case of errors or no hardware threads.
+ if cpus < 1 {
+ let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
+ let res = unsafe {
+ libc::sysctl(
+ mib.as_mut_ptr(),
+ 2,
+ &mut cpus as *mut _ as *mut _,
+ &mut cpus_size as *mut _ as *mut _,
+ ptr::null_mut(),
+ 0,
+ )
+ };
+
+ // Handle errors if any.
+ if res == -1 {
+ return Err(io::Error::last_os_error());
+ } else if cpus == 0 {
+ return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
+ }
+ }
+ Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) })
+ } else if #[cfg(target_os = "openbsd")] {
+ use crate::ptr;
+
+ let mut cpus: libc::c_uint = 0;
+ let mut cpus_size = crate::mem::size_of_val(&cpus);
+ let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
+
+ let res = unsafe {
+ libc::sysctl(
+ mib.as_mut_ptr(),
+ 2,
+ &mut cpus as *mut _ as *mut _,
+ &mut cpus_size as *mut _ as *mut _,
+ ptr::null_mut(),
+ 0,
+ )
+ };
+
+ // Handle errors if any.
+ if res == -1 {
+ return Err(io::Error::last_os_error());
+ } else if cpus == 0 {
+ return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
+ }
+
+ Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) })
+ } else if #[cfg(target_os = "haiku")] {
+            // The system_info cpu_count field holds the static value set at boot time
+            // by `smp_set_num_cpus`; `get_system_info` in turn calls `smp_get_num_cpus`.
+ unsafe {
+ let mut sinfo: libc::system_info = crate::mem::zeroed();
+ let res = libc::get_system_info(&mut sinfo);
+
+ if res != libc::B_OK {
+ return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
+ }
+
+ Ok(NonZeroUsize::new_unchecked(sinfo.cpu_count as usize))
+ }
+ } else {
+ // FIXME: implement on vxWorks, Redox, l4re
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "Getting the number of hardware threads is not supported on the target platform"))
+ }
+ }
+}
+
+#[cfg(any(target_os = "android", target_os = "linux"))]
+mod cgroups {
+ //! Currently not covered
+ //! * cgroup v2 in non-standard mountpoints
+ //! * paths containing control characters or spaces, since those would be escaped in procfs
+ //! output and we don't unescape
+ use crate::borrow::Cow;
+ use crate::ffi::OsString;
+ use crate::fs::{try_exists, File};
+ use crate::io::Read;
+ use crate::io::{BufRead, BufReader};
+ use crate::os::unix::ffi::OsStringExt;
+ use crate::path::Path;
+ use crate::path::PathBuf;
+ use crate::str::from_utf8;
+
+ #[derive(PartialEq)]
+ enum Cgroup {
+ V1,
+ V2,
+ }
+
+    /// Returns the cgroup CPU quota in core-equivalents, rounded down, or `usize::MAX`
+    /// if the quota cannot be determined or is not set.
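+    ///
+    /// As a rough illustration of the input parsed here, a `/proc/self/cgroup`
+    /// line looks like `0::/user.slice/session.scope` for cgroup v2, or
+    /// `4:cpu,cpuacct:/` for a v1 cpu controller; the path after the last `:` is
+    /// the cgroup path handed to `quota_v1`/`quota_v2` (paths shown are examples).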
+ pub(super) fn quota() -> usize {
+ let mut quota = usize::MAX;
+ if cfg!(miri) {
+ // Attempting to open a file fails under default flags due to isolation.
+ // And Miri does not have parallelism anyway.
+ return quota;
+ }
+
+ let _: Option<()> = try {
+ let mut buf = Vec::with_capacity(128);
+ // find our place in the cgroup hierarchy
+ File::open("/proc/self/cgroup").ok()?.read_to_end(&mut buf).ok()?;
+ let (cgroup_path, version) =
+ buf.split(|&c| c == b'\n').fold(None, |previous, line| {
+ let mut fields = line.splitn(3, |&c| c == b':');
+ // 2nd field is a list of controllers for v1 or empty for v2
+ let version = match fields.nth(1) {
+ Some(b"") => Cgroup::V2,
+ Some(controllers)
+ if from_utf8(controllers)
+ .is_ok_and(|c| c.split(",").any(|c| c == "cpu")) =>
+ {
+ Cgroup::V1
+ }
+ _ => return previous,
+ };
+
+ // already-found v1 trumps v2 since it explicitly specifies its controllers
+ if previous.is_some() && version == Cgroup::V2 {
+ return previous;
+ }
+
+ let path = fields.last()?;
+ // skip leading slash
+ Some((path[1..].to_owned(), version))
+ })?;
+ let cgroup_path = PathBuf::from(OsString::from_vec(cgroup_path));
+
+ quota = match version {
+ Cgroup::V1 => quota_v1(cgroup_path),
+ Cgroup::V2 => quota_v2(cgroup_path),
+ };
+ };
+
+ quota
+ }
+
+ fn quota_v2(group_path: PathBuf) -> usize {
+ let mut quota = usize::MAX;
+
+ let mut path = PathBuf::with_capacity(128);
+ let mut read_buf = String::with_capacity(20);
+
+ // standard mount location defined in file-hierarchy(7) manpage
+ let cgroup_mount = "/sys/fs/cgroup";
+
+ path.push(cgroup_mount);
+ path.push(&group_path);
+
+ path.push("cgroup.controllers");
+
+ // skip if we're not looking at cgroup2
+ if matches!(try_exists(&path), Err(_) | Ok(false)) {
+ return usize::MAX;
+ };
+
+ path.pop();
+
+ let _: Option<()> = try {
+ while path.starts_with(cgroup_mount) {
+ path.push("cpu.max");
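+                // `cpu.max` holds "<limit> <period>", e.g. "200000 100000" for a
+                // two-CPU quota; an unlimited cgroup reports "max 100000", which
+                // fails the integer parse below and leaves `quota` untouched.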
+
+ read_buf.clear();
+
+ if File::open(&path).and_then(|mut f| f.read_to_string(&mut read_buf)).is_ok() {
+ let raw_quota = read_buf.lines().next()?;
+ let mut raw_quota = raw_quota.split(' ');
+ let limit = raw_quota.next()?;
+ let period = raw_quota.next()?;
+ match (limit.parse::<usize>(), period.parse::<usize>()) {
+ (Ok(limit), Ok(period)) => {
+ quota = quota.min(limit / period);
+ }
+ _ => {}
+ }
+ }
+
+ path.pop(); // pop filename
+ path.pop(); // pop dir
+ }
+ };
+
+ quota
+ }
+
+ fn quota_v1(group_path: PathBuf) -> usize {
+ let mut quota = usize::MAX;
+ let mut path = PathBuf::with_capacity(128);
+ let mut read_buf = String::with_capacity(20);
+
+ // Hardcode commonly used locations mentioned in the cgroups(7) manpage
+ // if that doesn't work scan mountinfo and adjust `group_path` for bind-mounts
+ let mounts: &[fn(&Path) -> Option<(_, &Path)>] = &[
+ |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu"), p)),
+ |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu,cpuacct"), p)),
+ // this can be expensive on systems with tons of mountpoints
+            // but we only get to this point when /proc/self/cgroup explicitly indicated
+ // this process belongs to a cpu-controller cgroup v1 and the defaults didn't work
+ find_mountpoint,
+ ];
+
+ for mount in mounts {
+ let Some((mount, group_path)) = mount(&group_path) else { continue };
+
+ path.clear();
+ path.push(mount.as_ref());
+ path.push(&group_path);
+
+ // skip if we guessed the mount incorrectly
+ if matches!(try_exists(&path), Err(_) | Ok(false)) {
+ continue;
+ }
+
+ while path.starts_with(mount.as_ref()) {
+ let mut parse_file = |name| {
+ path.push(name);
+ read_buf.clear();
+
+ let f = File::open(&path);
+ path.pop(); // restore buffer before any early returns
+ f.ok()?.read_to_string(&mut read_buf).ok()?;
+ let parsed = read_buf.trim().parse::<usize>().ok()?;
+
+ Some(parsed)
+ };
+
+ let limit = parse_file("cpu.cfs_quota_us");
+ let period = parse_file("cpu.cfs_period_us");
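+                // e.g. cpu.cfs_quota_us = 200000 with cpu.cfs_period_us = 100000
+                // is a two-CPU quota; an unlimited quota is reported as -1, which
+                // fails the unsigned parse and is skipped.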
+
+ match (limit, period) {
+ (Some(limit), Some(period)) => quota = quota.min(limit / period),
+ _ => {}
+ }
+
+ path.pop();
+ }
+
+ // we passed the try_exists above so we should have traversed the correct hierarchy
+ // when reaching this line
+ break;
+ }
+
+ quota
+ }
+
+ /// Scan mountinfo for cgroup v1 mountpoint with a cpu controller
+ ///
+ /// If the cgroupfs is a bind mount then `group_path` is adjusted to skip
+ /// over the already-included prefix
+ fn find_mountpoint(group_path: &Path) -> Option<(Cow<'static, str>, &Path)> {
+ let mut reader = BufReader::new(File::open("/proc/self/mountinfo").ok()?);
+ let mut line = String::with_capacity(256);
+ loop {
+ line.clear();
+ if reader.read_line(&mut line).ok()? == 0 {
+ break;
+ }
+
+ let line = line.trim();
+ let mut items = line.split(' ');
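+            // A mountinfo line looks roughly like:
+            //   36 35 98:0 /sub /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu,cpuacct
+            // field 4 is the root of the mount inside the filesystem (`sub_path`),
+            // field 5 is the mount point, and the last field carries the mount
+            // options, including the cgroup controllers.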
+
+ let sub_path = items.nth(3)?;
+ let mount_point = items.next()?;
+ let mount_opts = items.next_back()?;
+ let filesystem_type = items.nth_back(1)?;
+
+ if filesystem_type != "cgroup" || !mount_opts.split(',').any(|opt| opt == "cpu") {
+ // not a cgroup / not a cpu-controller
+ continue;
+ }
+
+ let sub_path = Path::new(sub_path).strip_prefix("/").ok()?;
+
+ if !group_path.starts_with(sub_path) {
+ // this is a bind-mount and the bound subdirectory
+ // does not contain the cgroup this process belongs to
+ continue;
+ }
+
+ let trimmed_group_path = group_path.strip_prefix(sub_path).ok()?;
+
+ return Some((Cow::Owned(mount_point.to_owned()), trimmed_group_path));
+ }
+
+ None
+ }
+}
+
+#[cfg(all(
+ not(target_os = "linux"),
+ not(target_os = "freebsd"),
+ not(target_os = "macos"),
+ not(target_os = "netbsd"),
+ not(target_os = "openbsd"),
+ not(target_os = "solaris")
+))]
+#[cfg_attr(test, allow(dead_code))]
+pub mod guard {
+ use crate::ops::Range;
+ pub type Guard = Range<usize>;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
+
+#[cfg(any(
+ target_os = "linux",
+ target_os = "freebsd",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "solaris"
+))]
+#[cfg_attr(test, allow(dead_code))]
+pub mod guard {
+ use libc::{mmap, mprotect};
+ use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
+
+ use crate::io;
+ use crate::ops::Range;
+ use crate::sync::atomic::{AtomicUsize, Ordering};
+ use crate::sys::os;
+
+    // This is initialized in init() and only read from afterwards.
+ static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
+
+ pub type Guard = Range<usize>;
+
+ #[cfg(target_os = "solaris")]
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ let mut current_stack: libc::stack_t = crate::mem::zeroed();
+ assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
+ Some(current_stack.ss_sp)
+ }
+
+ #[cfg(target_os = "macos")]
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ let th = libc::pthread_self();
+ let stackptr = libc::pthread_get_stackaddr_np(th);
+ Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
+ }
+
+ #[cfg(target_os = "openbsd")]
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ let mut current_stack: libc::stack_t = crate::mem::zeroed();
+ assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
+
+ let stack_ptr = current_stack.ss_sp;
+ let stackaddr = if libc::pthread_main_np() == 1 {
+ // main thread
+ stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
+ } else {
+ // new thread
+ stack_ptr.addr() - current_stack.ss_size
+ };
+ Some(stack_ptr.with_addr(stackaddr))
+ }
+
+ #[cfg(any(
+ target_os = "android",
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "l4re"
+ ))]
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ let mut ret = None;
+ let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
+ #[cfg(target_os = "freebsd")]
+ assert_eq!(libc::pthread_attr_init(&mut attr), 0);
+ #[cfg(target_os = "freebsd")]
+ let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
+ #[cfg(not(target_os = "freebsd"))]
+ let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
+ if e == 0 {
+ let mut stackaddr = crate::ptr::null_mut();
+ let mut stacksize = 0;
+ assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0);
+ ret = Some(stackaddr);
+ }
+ if e == 0 || cfg!(target_os = "freebsd") {
+ assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
+ }
+ ret
+ }
+
+ // Precondition: PAGE_SIZE is initialized.
+ unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
+ let page_size = PAGE_SIZE.load(Ordering::Relaxed);
+ assert!(page_size != 0);
+ let stackptr = get_stack_start()?;
+ let stackaddr = stackptr.addr();
+
+        // Ensure stackaddr is page-aligned! A parent process might
+        // have reset RLIMIT_STACK to a non-page-aligned value.
+        // pthread_attr_getstack() reports the usable stack area as
+        // stackaddr < stackaddr + stacksize, so if stackaddr is not
+        // page-aligned, compute an adjusted address such that
+        // stackaddr < new_page_aligned_stackaddr < stackaddr + stacksize.
+ let remainder = stackaddr % page_size;
+ Some(if remainder == 0 {
+ stackptr
+ } else {
+ stackptr.with_addr(stackaddr + page_size - remainder)
+ })
+ }
+
+ pub unsafe fn init() -> Option<Guard> {
+ let page_size = os::page_size();
+ PAGE_SIZE.store(page_size, Ordering::Relaxed);
+
+ if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
+ // Linux doesn't allocate the whole stack right away, and
+ // the kernel has its own stack-guard mechanism to fault
+ // when growing too close to an existing mapping. If we map
+ // our own guard, then the kernel starts enforcing a rather
+ // large gap above that, rendering much of the possible
+ // stack space useless. See #43052.
+ //
+ // Instead, we'll just note where we expect rlimit to start
+ // faulting, so our handler can report "stack overflow", and
+ // trust that the kernel's own stack guard will work.
+ let stackptr = get_stack_start_aligned()?;
+ let stackaddr = stackptr.addr();
+ Some(stackaddr - page_size..stackaddr)
+ } else if cfg!(all(target_os = "linux", target_env = "musl")) {
+ // For the main thread, the musl's pthread_attr_getstack
+ // returns the current stack size, rather than maximum size
+ // it can eventually grow to. It cannot be used to determine
+ // the position of kernel's stack guard.
+ None
+ } else if cfg!(target_os = "freebsd") {
+ // FreeBSD's stack autogrows, and optionally includes a guard page
+ // at the bottom. If we try to remap the bottom of the stack
+ // ourselves, FreeBSD's guard page moves upwards. So we'll just use
+ // the builtin guard page.
+ let stackptr = get_stack_start_aligned()?;
+ let guardaddr = stackptr.addr();
+ // Technically the number of guard pages is tunable and controlled
+ // by the security.bsd.stack_guard_page sysctl, but there are
+ // few reasons to change it from the default. The default value has
+ // been 1 ever since FreeBSD 11.1 and 10.4.
+ const GUARD_PAGES: usize = 1;
+ let guard = guardaddr..guardaddr + GUARD_PAGES * page_size;
+ Some(guard)
+ } else {
+ // Reallocate the last page of the stack.
+ // This ensures SIGBUS will be raised on
+ // stack overflow.
+ // Systems which enforce strict PAX MPROTECT do not allow
+ // to mprotect() a mapping with less restrictive permissions
+ // than the initial mmap() used, so we mmap() here with
+ // read/write permissions and only then mprotect() it to
+ // no permissions at all. See issue #50313.
+ let stackptr = get_stack_start_aligned()?;
+ let result = mmap(
+ stackptr,
+ page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ -1,
+ 0,
+ );
+ if result != stackptr || result == MAP_FAILED {
+ panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
+ }
+
+ let result = mprotect(stackptr, page_size, PROT_NONE);
+ if result != 0 {
+ panic!("failed to protect the guard page: {}", io::Error::last_os_error());
+ }
+
+ let guardaddr = stackptr.addr();
+
+ Some(guardaddr..guardaddr + page_size)
+ }
+ }
+
+ #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
+ pub unsafe fn current() -> Option<Guard> {
+ let stackptr = get_stack_start()?;
+ let stackaddr = stackptr.addr();
+ Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
+ }
+
+ #[cfg(any(
+ target_os = "android",
+ target_os = "freebsd",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "l4re"
+ ))]
+ pub unsafe fn current() -> Option<Guard> {
+ let mut ret = None;
+ let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
+ #[cfg(target_os = "freebsd")]
+ assert_eq!(libc::pthread_attr_init(&mut attr), 0);
+ #[cfg(target_os = "freebsd")]
+ let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
+ #[cfg(not(target_os = "freebsd"))]
+ let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
+ if e == 0 {
+ let mut guardsize = 0;
+ assert_eq!(libc::pthread_attr_getguardsize(&attr, &mut guardsize), 0);
+ if guardsize == 0 {
+ if cfg!(all(target_os = "linux", target_env = "musl")) {
+ // musl versions before 1.1.19 always reported guard
+ // size obtained from pthread_attr_get_np as zero.
+ // Use page size as a fallback.
+ guardsize = PAGE_SIZE.load(Ordering::Relaxed);
+ } else {
+ panic!("there is no guard page");
+ }
+ }
+ let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
+ let mut size = 0;
+ assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
+
+ let stackaddr = stackptr.addr();
+ ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) {
+ Some(stackaddr - guardsize..stackaddr)
+ } else if cfg!(all(target_os = "linux", target_env = "musl")) {
+ Some(stackaddr - guardsize..stackaddr)
+ } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
+ {
+ // glibc used to include the guard area within the stack, as noted in the BUGS
+ // section of `man pthread_attr_getguardsize`. This has been corrected starting
+ // with glibc 2.27, and in some distro backports, so the guard is now placed at the
+ // end (below) the stack. There's no easy way for us to know which we have at
+ // runtime, so we'll just match any fault in the range right above or below the
+ // stack base to call that fault a stack overflow.
+ Some(stackaddr - guardsize..stackaddr + guardsize)
+ } else {
+ Some(stackaddr..stackaddr + guardsize)
+ };
+ }
+ if e == 0 || cfg!(target_os = "freebsd") {
+ assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
+ }
+ ret
+ }
+}
+
+// glibc >= 2.15 has a __pthread_get_minstack() function that returns
+// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
+// We need that information to avoid blowing up when a small stack
+// is created in an application with big thread-local storage requirements.
+// See #6233 for rationale and details.
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
+ // We use dlsym to avoid an ELF version dependency on GLIBC_PRIVATE. (#23628)
+ // We shouldn't really be using such an internal symbol, but there's currently
+ // no other way to account for the TLS size.
+ dlsym!(fn __pthread_get_minstack(*const libc::pthread_attr_t) -> libc::size_t);
+
+ match __pthread_get_minstack.get() {
+ None => libc::PTHREAD_STACK_MIN,
+ Some(f) => unsafe { f(attr) },
+ }
+}
+
+// No point in looking up __pthread_get_minstack() on non-glibc platforms.
+#[cfg(all(not(all(target_os = "linux", target_env = "gnu")), not(target_os = "netbsd")))]
+fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
+ libc::PTHREAD_STACK_MIN
+}
+
+#[cfg(target_os = "netbsd")]
+fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
+ 2048 // just a guess
+}
diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs
new file mode 100644
index 000000000..6e8be2a91
--- /dev/null
+++ b/library/std/src/sys/unix/thread_local_dtor.rs
@@ -0,0 +1,100 @@
+#![cfg(target_thread_local)]
+#![unstable(feature = "thread_local_internals", issue = "none")]
+
+//! Provides thread-local destructors without an associated "key", which
+//! can be more efficient.
+
+// Since what appears to be glibc 2.18, this symbol has been shipped, and both
+// GCC and clang use it to invoke destructors in thread_local globals, so
+// let's do the same!
+//
+// Note, however, that we run on lots of older Linuxes, as well as
+// cross-compiling from a newer Linux to an older one, so we also keep a
+// fallback implementation around.
+#[cfg(any(
+ target_os = "linux",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "emscripten"
+))]
+pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ use crate::mem;
+ use crate::sys_common::thread_local_dtor::register_dtor_fallback;
+
+ extern "C" {
+ #[linkage = "extern_weak"]
+ static __dso_handle: *mut u8;
+ #[linkage = "extern_weak"]
+ static __cxa_thread_atexit_impl: *const libc::c_void;
+ }
+ if !__cxa_thread_atexit_impl.is_null() {
+ type F = unsafe extern "C" fn(
+ dtor: unsafe extern "C" fn(*mut u8),
+ arg: *mut u8,
+ dso_handle: *mut u8,
+ ) -> libc::c_int;
+ mem::transmute::<*const libc::c_void, F>(__cxa_thread_atexit_impl)(
+ dtor,
+ t,
+ &__dso_handle as *const _ as *mut _,
+ );
+ return;
+ }
+ register_dtor_fallback(t, dtor);
+}
+
+// This implementation is very similar to register_dtor_fallback in
+// sys_common/thread_local_dtor.rs. The main difference is that we want to hook
+// into macOS's analog of the Linux function above, _tlv_atexit. macOS will run
+// the registered dtors before any TLS slots get freed, and when the main thread
+// exits.
+//
+// Unfortunately, calling _tlv_atexit while TLS dtors are running is UB. The
+// workaround below is to register, via _tlv_atexit, a custom DTOR list once per
+// thread. thread_local dtors are then pushed onto that DTOR list without calling
+// _tlv_atexit again.
+#[cfg(target_os = "macos")]
+pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ use crate::cell::Cell;
+ use crate::ptr;
+
+ #[thread_local]
+ static REGISTERED: Cell<bool> = Cell::new(false);
+ if !REGISTERED.get() {
+ _tlv_atexit(run_dtors, ptr::null_mut());
+ REGISTERED.set(true);
+ }
+
+ type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>;
+
+ #[thread_local]
+ static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut());
+ if DTORS.get().is_null() {
+ let v: Box<List> = box Vec::new();
+ DTORS.set(Box::into_raw(v));
+ }
+
+ extern "C" {
+ fn _tlv_atexit(dtor: unsafe extern "C" fn(*mut u8), arg: *mut u8);
+ }
+
+ let list: &mut List = &mut *DTORS.get();
+ list.push((t, dtor));
+
+ unsafe extern "C" fn run_dtors(_: *mut u8) {
+ let mut ptr = DTORS.replace(ptr::null_mut());
+ while !ptr.is_null() {
+ let list = Box::from_raw(ptr);
+ for (ptr, dtor) in list.into_iter() {
+ dtor(ptr);
+ }
+ ptr = DTORS.replace(ptr::null_mut());
+ }
+ }
+}
+
+#[cfg(any(target_os = "vxworks", target_os = "horizon"))]
+pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ use crate::sys_common::thread_local_dtor::register_dtor_fallback;
+ register_dtor_fallback(t, dtor);
+}
diff --git a/library/std/src/sys/unix/thread_local_key.rs b/library/std/src/sys/unix/thread_local_key.rs
new file mode 100644
index 000000000..2c5b94b1e
--- /dev/null
+++ b/library/std/src/sys/unix/thread_local_key.rs
@@ -0,0 +1,34 @@
+#![allow(dead_code)] // not used on all platforms
+
+use crate::mem;
+
+pub type Key = libc::pthread_key_t;
+
+#[inline]
+pub unsafe fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
+ let mut key = 0;
+ assert_eq!(libc::pthread_key_create(&mut key, mem::transmute(dtor)), 0);
+ key
+}
+
+#[inline]
+pub unsafe fn set(key: Key, value: *mut u8) {
+ let r = libc::pthread_setspecific(key, value as *mut _);
+ debug_assert_eq!(r, 0);
+}
+
+#[inline]
+pub unsafe fn get(key: Key) -> *mut u8 {
+ libc::pthread_getspecific(key) as *mut u8
+}
+
+#[inline]
+pub unsafe fn destroy(key: Key) {
+ let r = libc::pthread_key_delete(key);
+ debug_assert_eq!(r, 0);
+}
+
+#[inline]
+pub fn requires_synchronized_create() -> bool {
+ false
+}
diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs
new file mode 100644
index 000000000..ca1a7138f
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker.rs
@@ -0,0 +1,281 @@
+//! Thread parking without `futex` using the `pthread` synchronization primitives.
+
+#![cfg(not(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_os = "emscripten", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+)))]
+
+use crate::cell::UnsafeCell;
+use crate::marker::PhantomPinned;
+use crate::pin::Pin;
+use crate::ptr::addr_of_mut;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::time::Duration;
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
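+
+// The state machine, in brief: `park` moves EMPTY -> PARKED and sleeps on the
+// condvar until it observes NOTIFIED, which it resets to EMPTY; `unpark` swaps
+// the state to NOTIFIED and, if the previous state was PARKED, wakes the sleeper.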
+
+unsafe fn lock(lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_mutex_lock(lock);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn unlock(lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_mutex_unlock(lock);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn notify_one(cond: *mut libc::pthread_cond_t) {
+ let r = libc::pthread_cond_signal(cond);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_cond_wait(cond, lock);
+ debug_assert_eq!(r, 0);
+}
+
+const TIMESPEC_MAX: libc::timespec =
+ libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
+unsafe fn wait_timeout(
+ cond: *mut libc::pthread_cond_t,
+ lock: *mut libc::pthread_mutex_t,
+ dur: Duration,
+) {
+ // Use the system clock on systems that do not support pthread_condattr_setclock.
+ // This unfortunately results in problems when the system time changes.
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "espidf"
+ ))]
+ let (now, dur) = {
+ use super::time::SystemTime;
+ use crate::cmp::min;
+
+        // The macOS implementation of `pthread_cond_timedwait` is buggy
+        // with extremely long durations. When the duration is greater than
+        // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+        // on macOS Sierra returns error 316.
+ //
+ // This program demonstrates the issue:
+ // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+ //
+ // To work around this issue, and possible bugs of other OSes, timeout
+ // is clamped to 1000 years, which is allowable per the API of `park_timeout`
+ // because of spurious wakeups.
+ let dur = min(dur, Duration::from_secs(1000 * 365 * 86400));
+ let now = SystemTime::now().t;
+ (now, dur)
+ };
+ // Use the monotonic clock on other systems.
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "espidf"
+ )))]
+ let (now, dur) = {
+ use super::time::Timespec;
+
+ (Timespec::now(libc::CLOCK_MONOTONIC), dur)
+ };
+
+ let timeout =
+ now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX);
+ let r = libc::pthread_cond_timedwait(cond, lock, &timeout);
+ debug_assert!(r == libc::ETIMEDOUT || r == 0);
+}
+
+pub struct Parker {
+ state: AtomicUsize,
+ lock: UnsafeCell<libc::pthread_mutex_t>,
+ cvar: UnsafeCell<libc::pthread_cond_t>,
+ // The `pthread` primitives require a stable address, so make this struct `!Unpin`.
+ _pinned: PhantomPinned,
+}
+
+impl Parker {
+ /// Construct the UNIX parker in-place.
+ ///
+ /// # Safety
+ /// The constructed parker must never be moved.
+ pub unsafe fn new(parker: *mut Parker) {
+ // Use the default mutex implementation to allow for simpler initialization.
+ // This could lead to undefined behaviour when deadlocking. This is avoided
+ // by not deadlocking. Note in particular the unlocking operation before any
+ // panic, as code after the panic could try to park again.
+ addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY));
+ addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
+
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))] {
+ addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
+ } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+ let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null());
+ assert_eq!(r, 0);
+ } else {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+ assert_eq!(r, 0);
+ let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ }
+ }
+ }
+
+ // This implementation doesn't require `unsafe`, but other implementations
+ // may assume this is only called by the thread that owns the Parker.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // If we were previously notified then we consume this notification and
+ // return quickly.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ // Otherwise we need to coordinate going to sleep
+ lock(self.lock.get());
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+
+ unlock(self.lock.get());
+
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+            } // The notification was consumed here, so the next park won't wake up spuriously.
+ Err(_) => {
+ unlock(self.lock.get());
+
+ panic!("inconsistent park state")
+ }
+ }
+
+ loop {
+ wait(self.cvar.get(), self.lock.get());
+
+ match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+ Ok(_) => break, // got a notification
+ Err(_) => {} // spurious wakeup, go back to sleep
+ }
+ }
+
+ unlock(self.lock.get());
+ }
+
+ // This implementation doesn't require `unsafe`, but other implementations
+ // may assume this is only called by the thread that owns the Parker. Use
+ // `Pin` to guarantee a stable address for the mutex and condition variable.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+        // Like `park` above, we have a fast path for an already-notified thread that
+        // consumes the notification and returns quickly; otherwise we start
+        // coordinating for a sleep.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ lock(self.lock.get());
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read again here, see `park`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ unlock(self.lock.get());
+
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+            } // The notification was consumed here, so the next park won't wake up spuriously.
+ Err(_) => {
+ unlock(self.lock.get());
+ panic!("inconsistent park_timeout state")
+ }
+ }
+
+ // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+ // from a notification we just want to unconditionally set the state back to
+ // empty, either consuming a notification or un-flagging ourselves as
+ // parked.
+ wait_timeout(self.cvar.get(), self.lock.get(), dur);
+
+ match self.state.swap(EMPTY, SeqCst) {
+ NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
+ PARKED => unlock(self.lock.get()), // no notification, alas
+ n => {
+ unlock(self.lock.get());
+ panic!("inconsistent park_timeout state: {n}")
+ }
+ }
+ }
+
+ pub fn unpark(self: Pin<&Self>) {
+ // To ensure the unparked thread will observe any writes we made
+ // before this call, we must perform a release operation that `park`
+ // can synchronize with. To do that we must write `NOTIFIED` even if
+ // `state` is already `NOTIFIED`. That is why this must be a swap
+ // rather than a compare-and-swap that returns if it reads `NOTIFIED`
+ // on failure.
+ match self.state.swap(NOTIFIED, SeqCst) {
+ EMPTY => return, // no one was waiting
+ NOTIFIED => return, // already unparked
+ PARKED => {} // gotta go wake someone up
+ _ => panic!("inconsistent state in unpark"),
+ }
+
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ unsafe {
+ lock(self.lock.get());
+ unlock(self.lock.get());
+ notify_one(self.cvar.get());
+ }
+ }
+}
+
+impl Drop for Parker {
+ fn drop(&mut self) {
+ unsafe {
+ libc::pthread_cond_destroy(self.cvar.get_mut());
+ libc::pthread_mutex_destroy(self.lock.get_mut());
+ }
+ }
+}
+
+unsafe impl Sync for Parker {}
+unsafe impl Send for Parker {}
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
new file mode 100644
index 000000000..dff973f59
--- /dev/null
+++ b/library/std/src/sys/unix/time.rs
@@ -0,0 +1,346 @@
+use crate::fmt;
+use crate::time::Duration;
+
+pub use self::inner::Instant;
+
+const NSEC_PER_SEC: u64 = 1_000_000_000;
+pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct SystemTime {
+ pub(in crate::sys::unix) t: Timespec,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub(in crate::sys::unix) struct Timespec {
+ tv_sec: i64,
+ tv_nsec: i64,
+}
+
+impl SystemTime {
+ #[cfg_attr(target_os = "horizon", allow(unused))]
+ pub fn new(tv_sec: i64, tv_nsec: i64) -> SystemTime {
+ SystemTime { t: Timespec::new(tv_sec, tv_nsec) }
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ self.t.sub_timespec(&other.t)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime { t: self.t.checked_add_duration(other)? })
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime { t: self.t.checked_sub_duration(other)? })
+ }
+}
+
+impl From<libc::timespec> for SystemTime {
+ fn from(t: libc::timespec) -> SystemTime {
+ SystemTime { t: Timespec::from(t) }
+ }
+}
+
+impl fmt::Debug for SystemTime {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SystemTime")
+ .field("tv_sec", &self.t.tv_sec)
+ .field("tv_nsec", &self.t.tv_nsec)
+ .finish()
+ }
+}
+
+impl Timespec {
+ pub const fn zero() -> Timespec {
+ Timespec { tv_sec: 0, tv_nsec: 0 }
+ }
+
+ fn new(tv_sec: i64, tv_nsec: i64) -> Timespec {
+ Timespec { tv_sec, tv_nsec }
+ }
+
+ pub fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
+ if self >= other {
+ // NOTE(eddyb) two aspects of this `if`-`else` are required for LLVM
+ // to optimize it into a branchless form (see also #75545):
+ //
+ // 1. `self.tv_sec - other.tv_sec` shows up as a common expression
+ // in both branches, i.e. the `else` must have its `- 1`
+ // subtraction after the common one, not interleaved with it
+ // (it used to be `self.tv_sec - 1 - other.tv_sec`)
+ //
+ // 2. the `Duration::new` call (or any other additional complexity)
+ // is outside of the `if`-`else`, not duplicated in both branches
+ //
+ // Ideally this code could be rearranged such that it more
+ // directly expresses the lower-cost behavior we want from it.
+ let (secs, nsec) = if self.tv_nsec >= other.tv_nsec {
+ ((self.tv_sec - other.tv_sec) as u64, (self.tv_nsec - other.tv_nsec) as u32)
+ } else {
+ (
+ (self.tv_sec - other.tv_sec - 1) as u64,
+ self.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.tv_nsec as u32,
+ )
+ };
+
+ Ok(Duration::new(secs, nsec))
+ } else {
+ match other.sub_timespec(self) {
+ Ok(d) => Err(d),
+ Err(d) => Ok(d),
+ }
+ }
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> {
+ let mut secs = other
+ .as_secs()
+ .try_into() // <- target type would be `i64`
+ .ok()
+ .and_then(|secs| self.tv_sec.checked_add(secs))?;
+
+        // The nanosecond calculation can't overflow because both values are
+        // below one billion, so their sum fits in a u32.
+ let mut nsec = other.subsec_nanos() + self.tv_nsec as u32;
+ if nsec >= NSEC_PER_SEC as u32 {
+ nsec -= NSEC_PER_SEC as u32;
+ secs = secs.checked_add(1)?;
+ }
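+        // For example, adding 1.5s to { tv_sec: 5, tv_nsec: 800_000_000 } gives
+        // nsec = 1_300_000_000, which wraps to { tv_sec: 7, tv_nsec: 300_000_000 }.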
+ Some(Timespec::new(secs, nsec as i64))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> {
+ let mut secs = other
+ .as_secs()
+ .try_into() // <- target type would be `i64`
+ .ok()
+ .and_then(|secs| self.tv_sec.checked_sub(secs))?;
+
+ // Similar to above, nanos can't overflow.
+ let mut nsec = self.tv_nsec as i32 - other.subsec_nanos() as i32;
+ if nsec < 0 {
+ nsec += NSEC_PER_SEC as i32;
+ secs = secs.checked_sub(1)?;
+ }
+ Some(Timespec::new(secs, nsec as i64))
+ }
+
+ #[allow(dead_code)]
+ pub fn to_timespec(&self) -> Option<libc::timespec> {
+ Some(libc::timespec {
+ tv_sec: self.tv_sec.try_into().ok()?,
+ tv_nsec: self.tv_nsec.try_into().ok()?,
+ })
+ }
+}
+
+impl From<libc::timespec> for Timespec {
+ fn from(t: libc::timespec) -> Timespec {
+ Timespec::new(t.tv_sec as i64, t.tv_nsec as i64)
+ }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+mod inner {
+ use crate::sync::atomic::{AtomicU64, Ordering};
+ use crate::sys::cvt;
+ use crate::sys_common::mul_div_u64;
+ use crate::time::Duration;
+
+ use super::{SystemTime, Timespec, NSEC_PER_SEC};
+
+ #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+ pub struct Instant {
+ t: u64,
+ }
+
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ struct mach_timebase_info {
+ numer: u32,
+ denom: u32,
+ }
+ type mach_timebase_info_t = *mut mach_timebase_info;
+ type kern_return_t = libc::c_int;
+
+ impl Instant {
+ pub fn now() -> Instant {
+ extern "C" {
+ fn mach_absolute_time() -> u64;
+ }
+ Instant { t: unsafe { mach_absolute_time() } }
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ let diff = self.t.checked_sub(other.t)?;
+ let info = info();
+ let nanos = mul_div_u64(diff, info.numer as u64, info.denom as u64);
+ Some(Duration::new(nanos / NSEC_PER_SEC, (nanos % NSEC_PER_SEC) as u32))
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_add(checked_dur2intervals(other)?)? })
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_sub(checked_dur2intervals(other)?)? })
+ }
+ }
+
+ impl SystemTime {
+ pub fn now() -> SystemTime {
+ use crate::ptr;
+
+ let mut s = libc::timeval { tv_sec: 0, tv_usec: 0 };
+ cvt(unsafe { libc::gettimeofday(&mut s, ptr::null_mut()) }).unwrap();
+ return SystemTime::from(s);
+ }
+ }
+
+ impl From<libc::timeval> for Timespec {
+ fn from(t: libc::timeval) -> Timespec {
+ Timespec::new(t.tv_sec as i64, 1000 * t.tv_usec as i64)
+ }
+ }
+
+ impl From<libc::timeval> for SystemTime {
+ fn from(t: libc::timeval) -> SystemTime {
+ SystemTime { t: Timespec::from(t) }
+ }
+ }
+
+ fn checked_dur2intervals(dur: &Duration) -> Option<u64> {
+ let nanos =
+ dur.as_secs().checked_mul(NSEC_PER_SEC)?.checked_add(dur.subsec_nanos() as u64)?;
+ let info = info();
+ Some(mul_div_u64(nanos, info.denom as u64, info.numer as u64))
+ }
+
+ fn info() -> mach_timebase_info {
+ // INFO_BITS conceptually is an `Option<mach_timebase_info>`. We can do
+ // this in 64 bits because we know 0 is never a valid value for the
+ // `denom` field.
+ //
+ // Encoding this as a single `AtomicU64` allows us to use `Relaxed`
+ // operations, as we are only interested in the effects on a single
+ // memory location.
+ static INFO_BITS: AtomicU64 = AtomicU64::new(0);
+
+ // If a previous thread has initialized `INFO_BITS`, use it.
+ let info_bits = INFO_BITS.load(Ordering::Relaxed);
+ if info_bits != 0 {
+ return info_from_bits(info_bits);
+ }
+
+ // ... otherwise learn for ourselves ...
+ extern "C" {
+ fn mach_timebase_info(info: mach_timebase_info_t) -> kern_return_t;
+ }
+
+ let mut info = info_from_bits(0);
+ unsafe {
+ mach_timebase_info(&mut info);
+ }
+ INFO_BITS.store(info_to_bits(info), Ordering::Relaxed);
+ info
+ }
+
+ #[inline]
+ fn info_to_bits(info: mach_timebase_info) -> u64 {
+ ((info.denom as u64) << 32) | (info.numer as u64)
+ }
+
+ #[inline]
+ fn info_from_bits(bits: u64) -> mach_timebase_info {
+ mach_timebase_info { numer: bits as u32, denom: (bits >> 32) as u32 }
+ }
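+
+    // Round-trip example: { numer: 125, denom: 3 } encodes to (3 << 32) | 125 and
+    // decodes back unchanged. A valid timebase never has denom == 0, which is
+    // what lets an all-zero value stand in for "not yet initialized".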
+}
+
+#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "watchos")))]
+mod inner {
+ use crate::fmt;
+ use crate::mem::MaybeUninit;
+ use crate::sys::cvt;
+ use crate::time::Duration;
+
+ use super::{SystemTime, Timespec};
+
+ #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+ pub struct Instant {
+ t: Timespec,
+ }
+
+ impl Instant {
+ pub fn now() -> Instant {
+ Instant { t: Timespec::now(libc::CLOCK_MONOTONIC) }
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.t.sub_timespec(&other.t).ok()
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_add_duration(other)? })
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_sub_duration(other)? })
+ }
+ }
+
+ impl fmt::Debug for Instant {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Instant")
+ .field("tv_sec", &self.t.tv_sec)
+ .field("tv_nsec", &self.t.tv_nsec)
+ .finish()
+ }
+ }
+
+ impl SystemTime {
+ pub fn now() -> SystemTime {
+ SystemTime { t: Timespec::now(libc::CLOCK_REALTIME) }
+ }
+ }
+
+ #[cfg(not(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon")))]
+ pub type clock_t = libc::c_int;
+ #[cfg(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon"))]
+ pub type clock_t = libc::c_ulong;
+
+ impl Timespec {
+ pub fn now(clock: clock_t) -> Timespec {
+ // Try to use 64-bit time in preparation for Y2038.
+ #[cfg(all(target_os = "linux", target_env = "gnu", target_pointer_width = "32"))]
+ {
+ use crate::sys::weak::weak;
+
+ // __clock_gettime64 was added to 32-bit arches in glibc 2.34,
+ // and it handles both vDSO calls and ENOSYS fallbacks itself.
+ weak!(fn __clock_gettime64(libc::clockid_t, *mut __timespec64) -> libc::c_int);
+
+ #[repr(C)]
+ struct __timespec64 {
+ tv_sec: i64,
+ #[cfg(target_endian = "big")]
+ _padding: i32,
+ tv_nsec: i32,
+ #[cfg(target_endian = "little")]
+ _padding: i32,
+ }
+
+ if let Some(clock_gettime64) = __clock_gettime64.get() {
+ let mut t = MaybeUninit::uninit();
+ cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap();
+ let t = unsafe { t.assume_init() };
+ return Timespec { tv_sec: t.tv_sec, tv_nsec: t.tv_nsec as i64 };
+ }
+ }
+
+ let mut t = MaybeUninit::uninit();
+ cvt(unsafe { libc::clock_gettime(clock, t.as_mut_ptr()) }).unwrap();
+ Timespec::from(unsafe { t.assume_init() })
+ }
+ }
+}
diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs
new file mode 100644
index 000000000..e4ff21b25
--- /dev/null
+++ b/library/std/src/sys/unix/weak.rs
@@ -0,0 +1,205 @@
+//! Support for "weak linkage" to symbols on Unix
+//!
+//! Some I/O operations we do in libstd require newer versions of OSes but we
+//! need to maintain binary compatibility with older releases for now. In order
+//! to use the new functionality when available we use this module for
+//! detection.
+//!
+//! One option to use here is weak linkage, but that is unfortunately only
+//! really workable with ELF. Otherwise, use dlsym to get the symbol value at
+//! runtime. This is also done for compatibility with older versions of glibc,
+//! and to avoid creating dependencies on GLIBC_PRIVATE symbols. It assumes that
+//! we've been dynamically linked to the library the symbol comes from, but that
+//! is currently always the case for things like libpthread/libc.
+//!
+//! A long time ago this used weak linkage for the __pthread_get_minstack
+//! symbol, but that caused Debian to detect an unnecessarily strict versioned
+//! dependency on libc6 (#23628) because it is GLIBC_PRIVATE. We now use `dlsym`
+//! for a runtime lookup of that symbol to avoid the ELF versioned dependency.
+
+// There are a variety of `#[cfg]`s controlling which targets are involved in
+// each instance of `weak!` and `syscall!`. Rather than trying to unify all of
+// that, we'll just allow that some unix targets don't use this module at all.
+#![allow(dead_code, unused_macros)]
+
+use crate::ffi::CStr;
+use crate::marker::PhantomData;
+use crate::mem;
+use crate::ptr;
+use crate::sync::atomic::{self, AtomicPtr, Ordering};
+
+// We can use true weak linkage on ELF targets.
+#[cfg(not(any(target_os = "macos", target_os = "ios")))]
+pub(crate) macro weak {
+ (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+ let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
+ extern "C" {
+ #[linkage = "extern_weak"]
+ static $name: *const libc::c_void;
+ }
+ #[allow(unused_unsafe)]
+ ExternWeak::new(unsafe { $name })
+ };
+ )
+}
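+
+// Example use, mirroring the call sites in `thread.rs` (illustrative only):
+//
+//     weak! {
+//         fn pthread_setname_np(libc::pthread_t, *const libc::c_char) -> libc::c_int
+//     }
+//     if let Some(f) = pthread_setname_np.get() {
+//         unsafe { f(libc::pthread_self(), name.as_ptr()); }
+//     }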
+
+// On non-ELF targets, use the dlsym approximation of weak linkage.
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+pub(crate) use self::dlsym as weak;
+
+pub(crate) struct ExternWeak<F> {
+ weak_ptr: *const libc::c_void,
+ _marker: PhantomData<F>,
+}
+
+impl<F> ExternWeak<F> {
+ #[inline]
+ pub(crate) fn new(weak_ptr: *const libc::c_void) -> Self {
+ ExternWeak { weak_ptr, _marker: PhantomData }
+ }
+}
+
+impl<F> ExternWeak<F> {
+ #[inline]
+ pub(crate) fn get(&self) -> Option<F> {
+ unsafe {
+ if self.weak_ptr.is_null() {
+ None
+ } else {
+ Some(mem::transmute_copy::<*const libc::c_void, F>(&self.weak_ptr))
+ }
+ }
+ }
+}
+
+pub(crate) macro dlsym {
+ (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+ dlsym!(fn $name($($t),*) -> $ret, stringify!($name));
+ ),
+ (fn $name:ident($($t:ty),*) -> $ret:ty, $sym:expr) => (
+ static DLSYM: DlsymWeak<unsafe extern "C" fn($($t),*) -> $ret> =
+ DlsymWeak::new(concat!($sym, '\0'));
+ let $name = &DLSYM;
+ )
+}
+pub(crate) struct DlsymWeak<F> {
+ name: &'static str,
+ func: AtomicPtr<libc::c_void>,
+ _marker: PhantomData<F>,
+}
+
+impl<F> DlsymWeak<F> {
+ pub(crate) const fn new(name: &'static str) -> Self {
+ DlsymWeak { name, func: AtomicPtr::new(ptr::invalid_mut(1)), _marker: PhantomData }
+ }
+
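+    // The sentinel address 1 set in `new` means "not yet looked up"; after
+    // `initialize`, a null pointer means the symbol is absent, and any other
+    // value is the resolved symbol, as matched below.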
+ #[inline]
+ pub(crate) fn get(&self) -> Option<F> {
+ unsafe {
+ // Relaxed is fine here because we fence before reading through the
+ // pointer (see the comment below).
+ match self.func.load(Ordering::Relaxed) {
+ func if func.addr() == 1 => self.initialize(),
+ func if func.is_null() => None,
+ func => {
+ let func = mem::transmute_copy::<*mut libc::c_void, F>(&func);
+ // The caller is presumably going to read through this value
+ // (by calling the function we've dlsymed). This means we'd
+ // need to have loaded it with at least C11's consume
+ // ordering in order to be guaranteed that the data we read
+ // from the pointer isn't from before the pointer was
+ // stored. Rust has no equivalent to memory_order_consume,
+ // so we use an acquire fence (sorry, ARM).
+ //
+ // Now, in practice this likely isn't needed even on CPUs
+ // where relaxed and consume mean different things. The
+ // symbols we're loading are probably present (or not) at
+ // init, and even if they aren't the runtime dynamic loader
+                    // is extremely likely to have sufficient barriers internally
+ // (possibly implicitly, for example the ones provided by
+ // invoking `mprotect`).
+ //
+ // That said, none of that's *guaranteed*, and so we fence.
+ atomic::fence(Ordering::Acquire);
+ Some(func)
+ }
+ }
+ }
+ }
+
+ // Cold because it should only happen during first-time initialization.
+ #[cold]
+ unsafe fn initialize(&self) -> Option<F> {
+ assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut libc::c_void>());
+
+ let val = fetch(self.name);
+ // This synchronizes with the acquire fence in `get`.
+ self.func.store(val, Ordering::Release);
+
+ if val.is_null() { None } else { Some(mem::transmute_copy::<*mut libc::c_void, F>(&val)) }
+ }
+}
+
+unsafe fn fetch(name: &str) -> *mut libc::c_void {
+ let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
+ Ok(cstr) => cstr,
+ Err(..) => return ptr::null_mut(),
+ };
+ libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr())
+}
+
+#[cfg(not(any(target_os = "linux", target_os = "android")))]
+pub(crate) macro syscall {
+ (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => (
+ unsafe fn $name($($arg_name: $t),*) -> $ret {
+ weak! { fn $name($($t),*) -> $ret }
+
+ if let Some(fun) = $name.get() {
+ fun($($arg_name),*)
+ } else {
+ super::os::set_errno(libc::ENOSYS);
+ -1
+ }
+ }
+ )
+}
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub(crate) macro syscall {
+ (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => (
+ unsafe fn $name($($arg_name:$t),*) -> $ret {
+ weak! { fn $name($($t),*) -> $ret }
+
+ // Use a weak symbol from libc when possible, allowing `LD_PRELOAD`
+ // interposition, but if it's not found just use a raw syscall.
+ if let Some(fun) = $name.get() {
+ fun($($arg_name),*)
+ } else {
+ // This looks like a hack, but concat_idents only accepts idents
+ // (not paths).
+ use libc::*;
+
+ syscall(
+ concat_idents!(SYS_, $name),
+ $($arg_name),*
+ ) as $ret
+ }
+ }
+ )
+}
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub(crate) macro raw_syscall {
+ (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => (
+ unsafe fn $name($($arg_name:$t),*) -> $ret {
+ // This looks like a hack, but concat_idents only accepts idents
+ // (not paths).
+ use libc::*;
+
+ syscall(
+ concat_idents!(SYS_, $name),
+ $($arg_name),*
+ ) as $ret
+ }
+ )
+}
diff --git a/library/std/src/sys/unsupported/alloc.rs b/library/std/src/sys/unsupported/alloc.rs
new file mode 100644
index 000000000..8d5d0a2f5
--- /dev/null
+++ b/library/std/src/sys/unsupported/alloc.rs
@@ -0,0 +1,22 @@
+use crate::alloc::{GlobalAlloc, Layout, System};
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
+ 0 as *mut u8
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, _layout: Layout) -> *mut u8 {
+ 0 as *mut u8
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
+
+ #[inline]
+ unsafe fn realloc(&self, _ptr: *mut u8, _layout: Layout, _new_size: usize) -> *mut u8 {
+ 0 as *mut u8
+ }
+}
diff --git a/library/std/src/sys/unsupported/args.rs b/library/std/src/sys/unsupported/args.rs
new file mode 100644
index 000000000..a2d75a619
--- /dev/null
+++ b/library/std/src/sys/unsupported/args.rs
@@ -0,0 +1,36 @@
+use crate::ffi::OsString;
+use crate::fmt;
+
+pub struct Args {}
+
+pub fn args() -> Args {
+ Args {}
+}
+
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().finish()
+ }
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ None
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ 0
+ }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ None
+ }
+}
diff --git a/library/std/src/sys/unsupported/common.rs b/library/std/src/sys/unsupported/common.rs
new file mode 100644
index 000000000..4c9ade4a8
--- /dev/null
+++ b/library/std/src/sys/unsupported/common.rs
@@ -0,0 +1,36 @@
+use crate::io as std_io;
+
+pub mod memchr {
+ pub use core::slice::memchr::{memchr, memrchr};
+}
+
+// SAFETY: must be called only once during runtime initialization.
+// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+
+// SAFETY: must be called only once during runtime cleanup.
+// NOTE: this is not guaranteed to run, for example when the program aborts.
+pub unsafe fn cleanup() {}
+
+pub fn unsupported<T>() -> std_io::Result<T> {
+ Err(unsupported_err())
+}
+
+pub fn unsupported_err() -> std_io::Error {
+ std_io::const_io_error!(
+ std_io::ErrorKind::Unsupported,
+ "operation not supported on this platform",
+ )
+}
+
+pub fn decode_error_kind(_code: i32) -> crate::io::ErrorKind {
+ crate::io::ErrorKind::Uncategorized
+}
+
+pub fn abort_internal() -> ! {
+ core::intrinsics::abort();
+}
+
+pub fn hashmap_random_keys() -> (u64, u64) {
+ (1, 2)
+}
diff --git a/library/std/src/sys/unsupported/env.rs b/library/std/src/sys/unsupported/env.rs
new file mode 100644
index 000000000..d2efec506
--- /dev/null
+++ b/library/std/src/sys/unsupported/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "";
+ pub const OS: &str = "";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = "";
+ pub const DLL_EXTENSION: &str = "";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
diff --git a/library/std/src/sys/unsupported/fs.rs b/library/std/src/sys/unsupported/fs.rs
new file mode 100644
index 000000000..0e1a6257e
--- /dev/null
+++ b/library/std/src/sys/unsupported/fs.rs
@@ -0,0 +1,324 @@
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+use crate::io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::path::{Path, PathBuf};
+use crate::sys::time::SystemTime;
+use crate::sys::unsupported;
+
+pub struct File(!);
+
+pub struct FileAttr(!);
+
+pub struct ReadDir(!);
+
+pub struct DirEntry(!);
+
+#[derive(Clone, Debug)]
+pub struct OpenOptions {}
+
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {}
+
+pub struct FilePermissions(!);
+
+pub struct FileType(!);
+
+#[derive(Debug)]
+pub struct DirBuilder {}
+
+impl FileAttr {
+ pub fn size(&self) -> u64 {
+ self.0
+ }
+
+ pub fn perm(&self) -> FilePermissions {
+ self.0
+ }
+
+ pub fn file_type(&self) -> FileType {
+ self.0
+ }
+
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ self.0
+ }
+
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ self.0
+ }
+
+ pub fn created(&self) -> io::Result<SystemTime> {
+ self.0
+ }
+}
+
+impl Clone for FileAttr {
+ fn clone(&self) -> FileAttr {
+ self.0
+ }
+}
+
+impl FilePermissions {
+ pub fn readonly(&self) -> bool {
+ self.0
+ }
+
+ pub fn set_readonly(&mut self, _readonly: bool) {
+ self.0
+ }
+}
+
+impl Clone for FilePermissions {
+ fn clone(&self) -> FilePermissions {
+ self.0
+ }
+}
+
+impl PartialEq for FilePermissions {
+ fn eq(&self, _other: &FilePermissions) -> bool {
+ self.0
+ }
+}
+
+impl Eq for FilePermissions {}
+
+impl fmt::Debug for FilePermissions {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+impl FileTimes {
+ pub fn set_accessed(&mut self, _t: SystemTime) {}
+ pub fn set_modified(&mut self, _t: SystemTime) {}
+}
+
+impl FileType {
+ pub fn is_dir(&self) -> bool {
+ self.0
+ }
+
+ pub fn is_file(&self) -> bool {
+ self.0
+ }
+
+ pub fn is_symlink(&self) -> bool {
+ self.0
+ }
+}
+
+impl Clone for FileType {
+ fn clone(&self) -> FileType {
+ self.0
+ }
+}
+
+impl Copy for FileType {}
+
+impl PartialEq for FileType {
+ fn eq(&self, _other: &FileType) -> bool {
+ self.0
+ }
+}
+
+impl Eq for FileType {}
+
+impl Hash for FileType {
+ fn hash<H: Hasher>(&self, _h: &mut H) {
+ self.0
+ }
+}
+
+impl fmt::Debug for FileType {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+impl fmt::Debug for ReadDir {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+impl Iterator for ReadDir {
+ type Item = io::Result<DirEntry>;
+
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ self.0
+ }
+}
+
+impl DirEntry {
+ pub fn path(&self) -> PathBuf {
+ self.0
+ }
+
+ pub fn file_name(&self) -> OsString {
+ self.0
+ }
+
+ pub fn metadata(&self) -> io::Result<FileAttr> {
+ self.0
+ }
+
+ pub fn file_type(&self) -> io::Result<FileType> {
+ self.0
+ }
+}
+
+impl OpenOptions {
+ pub fn new() -> OpenOptions {
+ OpenOptions {}
+ }
+
+ pub fn read(&mut self, _read: bool) {}
+ pub fn write(&mut self, _write: bool) {}
+ pub fn append(&mut self, _append: bool) {}
+ pub fn truncate(&mut self, _truncate: bool) {}
+ pub fn create(&mut self, _create: bool) {}
+ pub fn create_new(&mut self, _create_new: bool) {}
+}
+
+impl File {
+ pub fn open(_path: &Path, _opts: &OpenOptions) -> io::Result<File> {
+ unsupported()
+ }
+
+ pub fn file_attr(&self) -> io::Result<FileAttr> {
+ self.0
+ }
+
+ pub fn fsync(&self) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn datasync(&self) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn truncate(&self, _size: u64) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn read_buf(&self, _buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn write(&self, _buf: &[u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn seek(&self, _pos: SeekFrom) -> io::Result<u64> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<File> {
+ self.0
+ }
+
+ pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_times(&self, _times: FileTimes) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl DirBuilder {
+ pub fn new() -> DirBuilder {
+ DirBuilder {}
+ }
+
+ pub fn mkdir(&self, _p: &Path) -> io::Result<()> {
+ unsupported()
+ }
+}
+
+impl fmt::Debug for File {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+pub fn readdir(_p: &Path) -> io::Result<ReadDir> {
+ unsupported()
+}
+
+pub fn unlink(_p: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn set_perm(_p: &Path, perm: FilePermissions) -> io::Result<()> {
+ match perm.0 {}
+}
+
+pub fn rmdir(_p: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn remove_dir_all(_path: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn try_exists(_path: &Path) -> io::Result<bool> {
+ unsupported()
+}
+
+pub fn readlink(_p: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn symlink(_original: &Path, _link: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub fn stat(_p: &Path) -> io::Result<FileAttr> {
+ unsupported()
+}
+
+pub fn lstat(_p: &Path) -> io::Result<FileAttr> {
+ unsupported()
+}
+
+pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn copy(_from: &Path, _to: &Path) -> io::Result<u64> {
+ unsupported()
+}
diff --git a/library/std/src/sys/unsupported/io.rs b/library/std/src/sys/unsupported/io.rs
new file mode 100644
index 000000000..d5f475b43
--- /dev/null
+++ b/library/std/src/sys/unsupported/io.rs
@@ -0,0 +1,47 @@
+use crate::mem;
+
+#[derive(Copy, Clone)]
+pub struct IoSlice<'a>(&'a [u8]);
+
+impl<'a> IoSlice<'a> {
+ #[inline]
+ pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
+ IoSlice(buf)
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ self.0 = &self.0[n..]
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ self.0
+ }
+}
+
+pub struct IoSliceMut<'a>(&'a mut [u8]);
+
+impl<'a> IoSliceMut<'a> {
+ #[inline]
+ pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+ IoSliceMut(buf)
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ let slice = mem::replace(&mut self.0, &mut []);
+ let (_, remaining) = slice.split_at_mut(n);
+ self.0 = remaining;
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ self.0
+ }
+
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut [u8] {
+ self.0
+ }
+}
diff --git a/library/std/src/sys/unsupported/locks/condvar.rs b/library/std/src/sys/unsupported/locks/condvar.rs
new file mode 100644
index 000000000..e703fd0d2
--- /dev/null
+++ b/library/std/src/sys/unsupported/locks/condvar.rs
@@ -0,0 +1,27 @@
+use crate::sys::locks::Mutex;
+use crate::time::Duration;
+
+pub struct Condvar {}
+
+pub type MovableCondvar = Condvar;
+
+impl Condvar {
+ #[inline]
+ pub const fn new() -> Condvar {
+ Condvar {}
+ }
+
+ #[inline]
+ pub unsafe fn notify_one(&self) {}
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {}
+
+ pub unsafe fn wait(&self, _mutex: &Mutex) {
+ panic!("condvar wait not supported")
+ }
+
+ pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool {
+ panic!("condvar wait not supported");
+ }
+}
diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs
new file mode 100644
index 000000000..d412ff152
--- /dev/null
+++ b/library/std/src/sys/unsupported/locks/mod.rs
@@ -0,0 +1,6 @@
+mod condvar;
+mod mutex;
+mod rwlock;
+pub use condvar::{Condvar, MovableCondvar};
+pub use mutex::{MovableMutex, Mutex};
+pub use rwlock::{MovableRwLock, RwLock};
diff --git a/library/std/src/sys/unsupported/locks/mutex.rs b/library/std/src/sys/unsupported/locks/mutex.rs
new file mode 100644
index 000000000..d7cb12e0c
--- /dev/null
+++ b/library/std/src/sys/unsupported/locks/mutex.rs
@@ -0,0 +1,36 @@
+use crate::cell::Cell;
+
+pub struct Mutex {
+ // This platform has no threads, so we can use a Cell here.
+ locked: Cell<bool>,
+}
+
+pub type MovableMutex = Mutex;
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {} // no threads on this platform
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Mutex {
+ Mutex { locked: Cell::new(false) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ self.locked.set(false);
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ self.locked.replace(true) == false
+ }
+}
diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs
new file mode 100644
index 000000000..aca5fb715
--- /dev/null
+++ b/library/std/src/sys/unsupported/locks/rwlock.rs
@@ -0,0 +1,66 @@
+use crate::cell::Cell;
+
+pub struct RwLock {
+ // This platform has no threads, so we can use a Cell here.
+ mode: Cell<isize>,
+}
+
+pub type MovableRwLock = RwLock;
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {} // no threads on this platform
+
+impl RwLock {
+ #[inline]
+ pub const fn new() -> RwLock {
+ RwLock { mode: Cell::new(0) }
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ let m = self.mode.get();
+ if m >= 0 {
+ self.mode.set(m + 1);
+ } else {
+ rtabort!("rwlock locked for writing");
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ let m = self.mode.get();
+ if m >= 0 {
+ self.mode.set(m + 1);
+ true
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ if self.mode.replace(-1) != 0 {
+ rtabort!("rwlock locked for reading")
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ if self.mode.get() == 0 {
+ self.mode.set(-1);
+ true
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ self.mode.set(self.mode.get() - 1);
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ assert_eq!(self.mode.replace(0), -1);
+ }
+}
diff --git a/library/std/src/sys/unsupported/mod.rs b/library/std/src/sys/unsupported/mod.rs
new file mode 100644
index 000000000..7bf6d40b7
--- /dev/null
+++ b/library/std/src/sys/unsupported/mod.rs
@@ -0,0 +1,27 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+pub mod alloc;
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+pub mod fs;
+pub mod io;
+pub mod locks;
+pub mod net;
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+#[path = "../unix/path.rs"]
+pub mod path;
+pub mod pipe;
+pub mod process;
+pub mod stdio;
+pub mod thread;
+#[cfg(target_thread_local)]
+pub mod thread_local_dtor;
+pub mod thread_local_key;
+pub mod time;
+
+mod common;
+pub use common::*;
diff --git a/library/std/src/sys/unsupported/net.rs b/library/std/src/sys/unsupported/net.rs
new file mode 100644
index 000000000..a5204a084
--- /dev/null
+++ b/library/std/src/sys/unsupported/net.rs
@@ -0,0 +1,366 @@
+use crate::fmt;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct TcpStream(!);
+
+impl TcpStream {
+ pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> {
+ unsupported()
+ }
+
+ pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> {
+ unsupported()
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn read(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn write(&self, _: &[u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpStream> {
+ self.0
+ }
+
+ pub fn set_linger(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn set_nodelay(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+pub struct TcpListener(!);
+
+impl TcpListener {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<TcpListener> {
+ unsupported()
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpListener> {
+ self.0
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn set_only_v6(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+pub struct UdpSocket(!);
+
+impl UdpSocket {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> {
+ unsupported()
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ self.0
+ }
+
+ pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0
+ }
+
+ pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0
+ }
+
+ pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn duplicate(&self) -> io::Result<UdpSocket> {
+ self.0
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.0
+ }
+
+ pub fn set_broadcast(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.0
+ }
+
+ pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.0
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.0
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn send(&self, _: &[u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> {
+ self.0
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+pub struct LookupHost(!);
+
+impl LookupHost {
+ pub fn port(&self) -> u16 {
+ self.0
+ }
+}
+
+impl Iterator for LookupHost {
+ type Item = SocketAddr;
+ fn next(&mut self) -> Option<SocketAddr> {
+ self.0
+ }
+}
+
+impl TryFrom<&str> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: &str) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+#[allow(nonstandard_style)]
+pub mod netc {
+ pub const AF_INET: u8 = 0;
+ pub const AF_INET6: u8 = 1;
+ pub type sa_family_t = u8;
+
+ #[derive(Copy, Clone)]
+ pub struct in_addr {
+ pub s_addr: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in {
+ pub sin_family: sa_family_t,
+ pub sin_port: u16,
+ pub sin_addr: in_addr,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct in6_addr {
+ pub s6_addr: [u8; 16],
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in6 {
+ pub sin6_family: sa_family_t,
+ pub sin6_port: u16,
+ pub sin6_addr: in6_addr,
+ pub sin6_flowinfo: u32,
+ pub sin6_scope_id: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr {}
+}
diff --git a/library/std/src/sys/unsupported/os.rs b/library/std/src/sys/unsupported/os.rs
new file mode 100644
index 000000000..e150ae143
--- /dev/null
+++ b/library/std/src/sys/unsupported/os.rs
@@ -0,0 +1,105 @@
+use super::unsupported;
+use crate::error::Error as StdError;
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::path::{self, PathBuf};
+
+pub fn errno() -> i32 {
+ 0
+}
+
+pub fn error_string(_errno: i32) -> String {
+ "operation successful".to_string()
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub fn chdir(_: &path::Path) -> io::Result<()> {
+ unsupported()
+}
+
+pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported on this platform yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "not supported on this platform yet"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+
+pub struct Env(!);
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.0
+ }
+}
+
+pub fn env() -> Env {
+ panic!("not supported on this platform")
+}
+
+pub fn getenv(_: &OsStr) -> Option<OsString> {
+ None
+}
+
+pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
+}
+
+pub fn unsetenv(_: &OsStr) -> io::Result<()> {
+ Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no filesystem on this platform")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(_code: i32) -> ! {
+ crate::intrinsics::abort()
+}
+
+pub fn getpid() -> u32 {
+ panic!("no pids on this platform")
+}
diff --git a/library/std/src/sys/unsupported/pipe.rs b/library/std/src/sys/unsupported/pipe.rs
new file mode 100644
index 000000000..25514c232
--- /dev/null
+++ b/library/std/src/sys/unsupported/pipe.rs
@@ -0,0 +1,37 @@
+use crate::io::{self, IoSlice, IoSliceMut};
+
+pub struct AnonPipe(!);
+
+impl AnonPipe {
+ pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn write(&self, _buf: &[u8]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ self.0
+ }
+
+ pub fn diverge(&self) -> ! {
+ self.0
+ }
+}
+
+pub fn read2(p1: AnonPipe, _v1: &mut Vec<u8>, _p2: AnonPipe, _v2: &mut Vec<u8>) -> io::Result<()> {
+ match p1.0 {}
+}
diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs
new file mode 100644
index 000000000..42a1ff730
--- /dev/null
+++ b/library/std/src/sys/unsupported/process.rs
@@ -0,0 +1,211 @@
+use crate::ffi::OsStr;
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::num::NonZeroI32;
+use crate::path::Path;
+use crate::sys::fs::File;
+use crate::sys::pipe::AnonPipe;
+use crate::sys::unsupported;
+use crate::sys_common::process::{CommandEnv, CommandEnvs};
+
+pub use crate::ffi::OsString as EnvKey;
+
+////////////////////////////////////////////////////////////////////////////////
+// Command
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct Command {
+ env: CommandEnv,
+}
+
+// passed back to std::process with the pipes connected to the child, if any
+// were requested
+pub struct StdioPipes {
+ pub stdin: Option<AnonPipe>,
+ pub stdout: Option<AnonPipe>,
+ pub stderr: Option<AnonPipe>,
+}
+
+pub enum Stdio {
+ Inherit,
+ Null,
+ MakePipe,
+}
+
+impl Command {
+ pub fn new(_program: &OsStr) -> Command {
+ Command { env: Default::default() }
+ }
+
+ pub fn arg(&mut self, _arg: &OsStr) {}
+
+ pub fn env_mut(&mut self) -> &mut CommandEnv {
+ &mut self.env
+ }
+
+ pub fn cwd(&mut self, _dir: &OsStr) {}
+
+ pub fn stdin(&mut self, _stdin: Stdio) {}
+
+ pub fn stdout(&mut self, _stdout: Stdio) {}
+
+ pub fn stderr(&mut self, _stderr: Stdio) {}
+
+ pub fn get_program(&self) -> &OsStr {
+ panic!("unsupported")
+ }
+
+ pub fn get_args(&self) -> CommandArgs<'_> {
+ CommandArgs { _p: PhantomData }
+ }
+
+ pub fn get_envs(&self) -> CommandEnvs<'_> {
+ self.env.iter()
+ }
+
+ pub fn get_current_dir(&self) -> Option<&Path> {
+ None
+ }
+
+ pub fn spawn(
+ &mut self,
+ _default: Stdio,
+ _needs_stdin: bool,
+ ) -> io::Result<(Process, StdioPipes)> {
+ unsupported()
+ }
+}
+
+impl From<AnonPipe> for Stdio {
+ fn from(pipe: AnonPipe) -> Stdio {
+ pipe.diverge()
+ }
+}
+
+impl From<File> for Stdio {
+ fn from(_file: File) -> Stdio {
+ panic!("unsupported")
+ }
+}
+
+impl fmt::Debug for Command {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Ok(())
+ }
+}
+
+pub struct ExitStatus(!);
+
+impl ExitStatus {
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ self.0
+ }
+
+ pub fn code(&self) -> Option<i32> {
+ self.0
+ }
+}
+
+impl Clone for ExitStatus {
+ fn clone(&self) -> ExitStatus {
+ self.0
+ }
+}
+
+impl Copy for ExitStatus {}
+
+impl PartialEq for ExitStatus {
+ fn eq(&self, _other: &ExitStatus) -> bool {
+ self.0
+ }
+}
+
+impl Eq for ExitStatus {}
+
+impl fmt::Debug for ExitStatus {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatusError(ExitStatus);
+
+impl Into<ExitStatus> for ExitStatusError {
+ fn into(self) -> ExitStatus {
+ self.0.0
+ }
+}
+
+impl ExitStatusError {
+ pub fn code(self) -> Option<NonZeroI32> {
+ self.0.0
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitCode(bool);
+
+impl ExitCode {
+ pub const SUCCESS: ExitCode = ExitCode(false);
+ pub const FAILURE: ExitCode = ExitCode(true);
+
+ pub fn as_i32(&self) -> i32 {
+ self.0 as i32
+ }
+}
+
+impl From<u8> for ExitCode {
+ fn from(code: u8) -> Self {
+ match code {
+ 0 => Self::SUCCESS,
+ 1..=255 => Self::FAILURE,
+ }
+ }
+}
+
+pub struct Process(!);
+
+impl Process {
+ pub fn id(&self) -> u32 {
+ self.0
+ }
+
+ pub fn kill(&mut self) -> io::Result<()> {
+ self.0
+ }
+
+ pub fn wait(&mut self) -> io::Result<ExitStatus> {
+ self.0
+ }
+
+ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ self.0
+ }
+}
+
+pub struct CommandArgs<'a> {
+ _p: PhantomData<&'a ()>,
+}
+
+impl<'a> Iterator for CommandArgs<'a> {
+ type Item = &'a OsStr;
+ fn next(&mut self) -> Option<&'a OsStr> {
+ None
+ }
+}
+
+impl<'a> ExactSizeIterator for CommandArgs<'a> {}
+
+impl<'a> fmt::Debug for CommandArgs<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().finish()
+ }
+}
diff --git a/library/std/src/sys/unsupported/stdio.rs b/library/std/src/sys/unsupported/stdio.rs
new file mode 100644
index 000000000..b5e3f5be9
--- /dev/null
+++ b/library/std/src/sys/unsupported/stdio.rs
@@ -0,0 +1,59 @@
+use crate::io;
+
+pub struct Stdin;
+pub struct Stdout;
+pub struct Stderr;
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
+ Ok(0)
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ Ok(buf.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub const STDIN_BUF_SIZE: usize = 0;
+
+pub fn is_ebadf(_err: &io::Error) -> bool {
+ true
+}
+
+pub fn panic_output() -> Option<Vec<u8>> {
+ None
+}
diff --git a/library/std/src/sys/unsupported/thread.rs b/library/std/src/sys/unsupported/thread.rs
new file mode 100644
index 000000000..a8db251de
--- /dev/null
+++ b/library/std/src/sys/unsupported/thread.rs
@@ -0,0 +1,46 @@
+use super::unsupported;
+use crate::ffi::CStr;
+use crate::io;
+use crate::num::NonZeroUsize;
+use crate::time::Duration;
+
+pub struct Thread(!);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ unsupported()
+ }
+
+ pub fn yield_now() {
+ // do nothing
+ }
+
+ pub fn set_name(_name: &CStr) {
+ // thread names can't be set on this platform; silently ignore
+ }
+
+ pub fn sleep(_dur: Duration) {
+ panic!("can't sleep");
+ }
+
+ pub fn join(self) {
+ self.0
+ }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ unsupported()
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
diff --git a/library/std/src/sys/unsupported/thread_local_dtor.rs b/library/std/src/sys/unsupported/thread_local_dtor.rs
new file mode 100644
index 000000000..85d660983
--- /dev/null
+++ b/library/std/src/sys/unsupported/thread_local_dtor.rs
@@ -0,0 +1,9 @@
+#![unstable(feature = "thread_local_internals", issue = "none")]
+
+pub unsafe fn register_dtor(_t: *mut u8, _dtor: unsafe extern "C" fn(*mut u8)) {
+ // FIXME: right now there is no concept of "thread exit", but this is likely
+ // going to show up at some point in the form of an exported symbol that the
+ // wasm runtime is going to be expected to call. For now we basically just
+ // ignore the arguments, but if such a function starts to exist it will
+ // likely look like the OSX implementation in `unix/fast_thread_local.rs`
+}
diff --git a/library/std/src/sys/unsupported/thread_local_key.rs b/library/std/src/sys/unsupported/thread_local_key.rs
new file mode 100644
index 000000000..c31b61cbf
--- /dev/null
+++ b/library/std/src/sys/unsupported/thread_local_key.rs
@@ -0,0 +1,26 @@
+pub type Key = usize;
+
+#[inline]
+pub unsafe fn create(_dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
+ panic!("should not be used on this target");
+}
+
+#[inline]
+pub unsafe fn set(_key: Key, _value: *mut u8) {
+ panic!("should not be used on this target");
+}
+
+#[inline]
+pub unsafe fn get(_key: Key) -> *mut u8 {
+ panic!("should not be used on this target");
+}
+
+#[inline]
+pub unsafe fn destroy(_key: Key) {
+ panic!("should not be used on this target");
+}
+
+#[inline]
+pub fn requires_synchronized_create() -> bool {
+ panic!("should not be used on this target");
+}
diff --git a/library/std/src/sys/unsupported/time.rs b/library/std/src/sys/unsupported/time.rs
new file mode 100644
index 000000000..6d67b538a
--- /dev/null
+++ b/library/std/src/sys/unsupported/time.rs
@@ -0,0 +1,45 @@
+use crate::time::Duration;
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct Instant(Duration);
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct SystemTime(Duration);
+
+pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
+
+impl Instant {
+ pub fn now() -> Instant {
+ panic!("time not implemented on this platform")
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.0.checked_sub(other.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant(self.0.checked_add(*other)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant(self.0.checked_sub(*other)?))
+ }
+}
+
+impl SystemTime {
+ pub fn now() -> SystemTime {
+ panic!("time not implemented on this platform")
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_add(*other)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_sub(*other)?))
+ }
+}
diff --git a/library/std/src/sys/wasi/args.rs b/library/std/src/sys/wasi/args.rs
new file mode 100644
index 000000000..c42c310e3
--- /dev/null
+++ b/library/std/src/sys/wasi/args.rs
@@ -0,0 +1,62 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::ffi::{CStr, OsStr, OsString};
+use crate::fmt;
+use crate::os::wasi::ffi::OsStrExt;
+use crate::vec;
+
+pub struct Args {
+ iter: vec::IntoIter<OsString>,
+}
+
+impl !Send for Args {}
+impl !Sync for Args {}
+
+/// Returns the command line arguments
+pub fn args() -> Args {
+ Args { iter: maybe_args().unwrap_or(Vec::new()).into_iter() }
+}
+
+fn maybe_args() -> Option<Vec<OsString>> {
+ unsafe {
+ let (argc, buf_size) = wasi::args_sizes_get().ok()?;
+ let mut argv = Vec::with_capacity(argc);
+ let mut buf = Vec::with_capacity(buf_size);
+ wasi::args_get(argv.as_mut_ptr(), buf.as_mut_ptr()).ok()?;
+ argv.set_len(argc);
+ let mut ret = Vec::with_capacity(argc);
+ for ptr in argv {
+ let s = CStr::from_ptr(ptr.cast());
+ ret.push(OsStr::from_bytes(s.to_bytes()).to_owned());
+ }
+ Some(ret)
+ }
+}
+
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.iter.as_slice().fmt(f)
+ }
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.iter.next_back()
+ }
+}
diff --git a/library/std/src/sys/wasi/env.rs b/library/std/src/sys/wasi/env.rs
new file mode 100644
index 000000000..730e356d7
--- /dev/null
+++ b/library/std/src/sys/wasi/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "";
+ pub const OS: &str = "";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = ".wasm";
+ pub const DLL_EXTENSION: &str = "wasm";
+ pub const EXE_SUFFIX: &str = ".wasm";
+ pub const EXE_EXTENSION: &str = "wasm";
+}
diff --git a/library/std/src/sys/wasi/fd.rs b/library/std/src/sys/wasi/fd.rs
new file mode 100644
index 000000000..0b9c8e61d
--- /dev/null
+++ b/library/std/src/sys/wasi/fd.rs
@@ -0,0 +1,307 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+#![allow(dead_code)]
+
+use super::err2io;
+use crate::io::{self, IoSlice, IoSliceMut, SeekFrom};
+use crate::mem;
+use crate::net::Shutdown;
+use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
+use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
+
+#[derive(Debug)]
+pub struct WasiFd {
+ fd: OwnedFd,
+}
+
+fn iovec<'a>(a: &'a mut [IoSliceMut<'_>]) -> &'a [wasi::Iovec] {
+ assert_eq!(mem::size_of::<IoSliceMut<'_>>(), mem::size_of::<wasi::Iovec>());
+ assert_eq!(mem::align_of::<IoSliceMut<'_>>(), mem::align_of::<wasi::Iovec>());
+ // SAFETY: `IoSliceMut` and `IoVec` have exactly the same memory layout
+ unsafe { mem::transmute(a) }
+}
+
+fn ciovec<'a>(a: &'a [IoSlice<'_>]) -> &'a [wasi::Ciovec] {
+ assert_eq!(mem::size_of::<IoSlice<'_>>(), mem::size_of::<wasi::Ciovec>());
+ assert_eq!(mem::align_of::<IoSlice<'_>>(), mem::align_of::<wasi::Ciovec>());
+ // SAFETY: `IoSlice` and `CIoVec` have exactly the same memory layout
+ unsafe { mem::transmute(a) }
+}
+
+impl WasiFd {
+ pub fn datasync(&self) -> io::Result<()> {
+ unsafe { wasi::fd_datasync(self.as_raw_fd() as wasi::Fd).map_err(err2io) }
+ }
+
+ pub fn pread(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> {
+ unsafe { wasi::fd_pread(self.as_raw_fd() as wasi::Fd, iovec(bufs), offset).map_err(err2io) }
+ }
+
+ pub fn pwrite(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> {
+ unsafe {
+ wasi::fd_pwrite(self.as_raw_fd() as wasi::Fd, ciovec(bufs), offset).map_err(err2io)
+ }
+ }
+
+ pub fn read(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ unsafe { wasi::fd_read(self.as_raw_fd() as wasi::Fd, iovec(bufs)).map_err(err2io) }
+ }
+
+ pub fn write(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ unsafe { wasi::fd_write(self.as_raw_fd() as wasi::Fd, ciovec(bufs)).map_err(err2io) }
+ }
+
+ pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
+ let (whence, offset) = match pos {
+ SeekFrom::Start(pos) => (wasi::WHENCE_SET, pos as i64),
+ SeekFrom::End(pos) => (wasi::WHENCE_END, pos),
+ SeekFrom::Current(pos) => (wasi::WHENCE_CUR, pos),
+ };
+ unsafe { wasi::fd_seek(self.as_raw_fd() as wasi::Fd, offset, whence).map_err(err2io) }
+ }
+
+ pub fn tell(&self) -> io::Result<u64> {
+ unsafe { wasi::fd_tell(self.as_raw_fd() as wasi::Fd).map_err(err2io) }
+ }
+
+ // FIXME: __wasi_fd_fdstat_get
+
+ pub fn set_flags(&self, flags: wasi::Fdflags) -> io::Result<()> {
+ unsafe { wasi::fd_fdstat_set_flags(self.as_raw_fd() as wasi::Fd, flags).map_err(err2io) }
+ }
+
+ pub fn set_rights(&self, base: wasi::Rights, inheriting: wasi::Rights) -> io::Result<()> {
+ unsafe {
+ wasi::fd_fdstat_set_rights(self.as_raw_fd() as wasi::Fd, base, inheriting)
+ .map_err(err2io)
+ }
+ }
+
+ pub fn sync(&self) -> io::Result<()> {
+ unsafe { wasi::fd_sync(self.as_raw_fd() as wasi::Fd).map_err(err2io) }
+ }
+
+ pub fn advise(&self, offset: u64, len: u64, advice: wasi::Advice) -> io::Result<()> {
+ unsafe {
+ wasi::fd_advise(self.as_raw_fd() as wasi::Fd, offset, len, advice).map_err(err2io)
+ }
+ }
+
+ pub fn allocate(&self, offset: u64, len: u64) -> io::Result<()> {
+ unsafe { wasi::fd_allocate(self.as_raw_fd() as wasi::Fd, offset, len).map_err(err2io) }
+ }
+
+ pub fn create_directory(&self, path: &str) -> io::Result<()> {
+ unsafe { wasi::path_create_directory(self.as_raw_fd() as wasi::Fd, path).map_err(err2io) }
+ }
+
+ pub fn link(
+ &self,
+ old_flags: wasi::Lookupflags,
+ old_path: &str,
+ new_fd: &WasiFd,
+ new_path: &str,
+ ) -> io::Result<()> {
+ unsafe {
+ wasi::path_link(
+ self.as_raw_fd() as wasi::Fd,
+ old_flags,
+ old_path,
+ new_fd.as_raw_fd() as wasi::Fd,
+ new_path,
+ )
+ .map_err(err2io)
+ }
+ }
+
+ pub fn open(
+ &self,
+ dirflags: wasi::Lookupflags,
+ path: &str,
+ oflags: wasi::Oflags,
+ fs_rights_base: wasi::Rights,
+ fs_rights_inheriting: wasi::Rights,
+ fs_flags: wasi::Fdflags,
+ ) -> io::Result<WasiFd> {
+ unsafe {
+ wasi::path_open(
+ self.as_raw_fd() as wasi::Fd,
+ dirflags,
+ path,
+ oflags,
+ fs_rights_base,
+ fs_rights_inheriting,
+ fs_flags,
+ )
+ .map(|fd| WasiFd::from_raw_fd(fd as RawFd))
+ .map_err(err2io)
+ }
+ }
+
+ pub fn readdir(&self, buf: &mut [u8], cookie: wasi::Dircookie) -> io::Result<usize> {
+ unsafe {
+ wasi::fd_readdir(self.as_raw_fd() as wasi::Fd, buf.as_mut_ptr(), buf.len(), cookie)
+ .map_err(err2io)
+ }
+ }
+
+ pub fn readlink(&self, path: &str, buf: &mut [u8]) -> io::Result<usize> {
+ unsafe {
+ wasi::path_readlink(self.as_raw_fd() as wasi::Fd, path, buf.as_mut_ptr(), buf.len())
+ .map_err(err2io)
+ }
+ }
+
+ pub fn rename(&self, old_path: &str, new_fd: &WasiFd, new_path: &str) -> io::Result<()> {
+ unsafe {
+ wasi::path_rename(
+ self.as_raw_fd() as wasi::Fd,
+ old_path,
+ new_fd.as_raw_fd() as wasi::Fd,
+ new_path,
+ )
+ .map_err(err2io)
+ }
+ }
+
+ pub fn filestat_get(&self) -> io::Result<wasi::Filestat> {
+ unsafe { wasi::fd_filestat_get(self.as_raw_fd() as wasi::Fd).map_err(err2io) }
+ }
+
+ pub fn filestat_set_times(
+ &self,
+ atim: wasi::Timestamp,
+ mtim: wasi::Timestamp,
+ fstflags: wasi::Fstflags,
+ ) -> io::Result<()> {
+ unsafe {
+ wasi::fd_filestat_set_times(self.as_raw_fd() as wasi::Fd, atim, mtim, fstflags)
+ .map_err(err2io)
+ }
+ }
+
+ pub fn filestat_set_size(&self, size: u64) -> io::Result<()> {
+ unsafe { wasi::fd_filestat_set_size(self.as_raw_fd() as wasi::Fd, size).map_err(err2io) }
+ }
+
+ pub fn path_filestat_get(
+ &self,
+ flags: wasi::Lookupflags,
+ path: &str,
+ ) -> io::Result<wasi::Filestat> {
+ unsafe {
+ wasi::path_filestat_get(self.as_raw_fd() as wasi::Fd, flags, path).map_err(err2io)
+ }
+ }
+
+ pub fn path_filestat_set_times(
+ &self,
+ flags: wasi::Lookupflags,
+ path: &str,
+ atim: wasi::Timestamp,
+ mtim: wasi::Timestamp,
+ fstflags: wasi::Fstflags,
+ ) -> io::Result<()> {
+ unsafe {
+ wasi::path_filestat_set_times(
+ self.as_raw_fd() as wasi::Fd,
+ flags,
+ path,
+ atim,
+ mtim,
+ fstflags,
+ )
+ .map_err(err2io)
+ }
+ }
+
+ pub fn symlink(&self, old_path: &str, new_path: &str) -> io::Result<()> {
+ unsafe {
+ wasi::path_symlink(old_path, self.as_raw_fd() as wasi::Fd, new_path).map_err(err2io)
+ }
+ }
+
+ pub fn unlink_file(&self, path: &str) -> io::Result<()> {
+ unsafe { wasi::path_unlink_file(self.as_raw_fd() as wasi::Fd, path).map_err(err2io) }
+ }
+
+ pub fn remove_directory(&self, path: &str) -> io::Result<()> {
+ unsafe { wasi::path_remove_directory(self.as_raw_fd() as wasi::Fd, path).map_err(err2io) }
+ }
+
+ pub fn sock_accept(&self, flags: wasi::Fdflags) -> io::Result<wasi::Fd> {
+ unsafe { wasi::sock_accept(self.as_raw_fd() as wasi::Fd, flags).map_err(err2io) }
+ }
+
+ pub fn sock_recv(
+ &self,
+ ri_data: &mut [IoSliceMut<'_>],
+ ri_flags: wasi::Riflags,
+ ) -> io::Result<(usize, wasi::Roflags)> {
+ unsafe {
+ wasi::sock_recv(self.as_raw_fd() as wasi::Fd, iovec(ri_data), ri_flags).map_err(err2io)
+ }
+ }
+
+ pub fn sock_send(&self, si_data: &[IoSlice<'_>], si_flags: wasi::Siflags) -> io::Result<usize> {
+ unsafe {
+ wasi::sock_send(self.as_raw_fd() as wasi::Fd, ciovec(si_data), si_flags).map_err(err2io)
+ }
+ }
+
+ pub fn sock_shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let how = match how {
+ Shutdown::Read => wasi::SDFLAGS_RD,
+ Shutdown::Write => wasi::SDFLAGS_WR,
+ Shutdown::Both => wasi::SDFLAGS_WR | wasi::SDFLAGS_RD,
+ };
+ unsafe { wasi::sock_shutdown(self.as_raw_fd() as wasi::Fd, how).map_err(err2io) }
+ }
+}
+
+impl AsInner<OwnedFd> for WasiFd {
+ fn as_inner(&self) -> &OwnedFd {
+ &self.fd
+ }
+}
+
+impl AsInnerMut<OwnedFd> for WasiFd {
+ fn as_inner_mut(&mut self) -> &mut OwnedFd {
+ &mut self.fd
+ }
+}
+
+impl IntoInner<OwnedFd> for WasiFd {
+ fn into_inner(self) -> OwnedFd {
+ self.fd
+ }
+}
+
+impl FromInner<OwnedFd> for WasiFd {
+ fn from_inner(owned_fd: OwnedFd) -> Self {
+ Self { fd: owned_fd }
+ }
+}
+
+impl AsFd for WasiFd {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.fd.as_fd()
+ }
+}
+
+impl AsRawFd for WasiFd {
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for WasiFd {
+ fn into_raw_fd(self) -> RawFd {
+ self.fd.into_raw_fd()
+ }
+}
+
+impl FromRawFd for WasiFd {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ unsafe { Self { fd: FromRawFd::from_raw_fd(raw_fd) } }
+ }
+}
diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs
new file mode 100644
index 000000000..6614ae397
--- /dev/null
+++ b/library/std/src/sys/wasi/fs.rs
@@ -0,0 +1,798 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use super::fd::WasiFd;
+use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::fmt;
+use crate::io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::iter;
+use crate::mem::{self, ManuallyDrop};
+use crate::os::raw::c_int;
+use crate::os::wasi::ffi::{OsStrExt, OsStringExt};
+use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
+use crate::path::{Path, PathBuf};
+use crate::ptr;
+use crate::sync::Arc;
+use crate::sys::time::SystemTime;
+use crate::sys::unsupported;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+pub use crate::sys_common::fs::try_exists;
+
+pub struct File {
+ fd: WasiFd,
+}
+
+#[derive(Clone)]
+pub struct FileAttr {
+ meta: wasi::Filestat,
+}
+
+pub struct ReadDir {
+ inner: Arc<ReadDirInner>,
+ cookie: Option<wasi::Dircookie>,
+ buf: Vec<u8>,
+ offset: usize,
+ cap: usize,
+}
+
+struct ReadDirInner {
+ root: PathBuf,
+ dir: File,
+}
+
+pub struct DirEntry {
+ meta: wasi::Dirent,
+ name: Vec<u8>,
+ inner: Arc<ReadDirInner>,
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct OpenOptions {
+ read: bool,
+ write: bool,
+ append: bool,
+ dirflags: wasi::Lookupflags,
+ fdflags: wasi::Fdflags,
+ oflags: wasi::Oflags,
+ rights_base: Option<wasi::Rights>,
+ rights_inheriting: Option<wasi::Rights>,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct FilePermissions {
+ readonly: bool,
+}
+
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {
+ accessed: Option<wasi::Timestamp>,
+ modified: Option<wasi::Timestamp>,
+}
+
+#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)]
+pub struct FileType {
+ bits: wasi::Filetype,
+}
+
+#[derive(Debug)]
+pub struct DirBuilder {}
+
+impl FileAttr {
+ pub fn size(&self) -> u64 {
+ self.meta.size
+ }
+
+ pub fn perm(&self) -> FilePermissions {
+ // not currently implemented in wasi
+ FilePermissions { readonly: false }
+ }
+
+ pub fn file_type(&self) -> FileType {
+ FileType { bits: self.meta.filetype }
+ }
+
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from_wasi_timestamp(self.meta.mtim))
+ }
+
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from_wasi_timestamp(self.meta.atim))
+ }
+
+ pub fn created(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from_wasi_timestamp(self.meta.ctim))
+ }
+
+ pub fn as_wasi(&self) -> &wasi::Filestat {
+ &self.meta
+ }
+}
+
+impl FilePermissions {
+ pub fn readonly(&self) -> bool {
+ self.readonly
+ }
+
+ pub fn set_readonly(&mut self, readonly: bool) {
+ self.readonly = readonly;
+ }
+}
+
+impl FileTimes {
+ pub fn set_accessed(&mut self, t: SystemTime) {
+ self.accessed = Some(t.to_wasi_timestamp_or_panic());
+ }
+
+ pub fn set_modified(&mut self, t: SystemTime) {
+ self.modified = Some(t.to_wasi_timestamp_or_panic());
+ }
+}
+
+impl FileType {
+ pub fn is_dir(&self) -> bool {
+ self.bits == wasi::FILETYPE_DIRECTORY
+ }
+
+ pub fn is_file(&self) -> bool {
+ self.bits == wasi::FILETYPE_REGULAR_FILE
+ }
+
+ pub fn is_symlink(&self) -> bool {
+ self.bits == wasi::FILETYPE_SYMBOLIC_LINK
+ }
+
+ pub fn bits(&self) -> wasi::Filetype {
+ self.bits
+ }
+}
+
+impl ReadDir {
+ fn new(dir: File, root: PathBuf) -> ReadDir {
+ ReadDir {
+ cookie: Some(0),
+ buf: vec![0; 128],
+ offset: 0,
+ cap: 0,
+ inner: Arc::new(ReadDirInner { dir, root }),
+ }
+ }
+}
+
+impl fmt::Debug for ReadDir {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ReadDir").finish_non_exhaustive()
+ }
+}
+
+impl Iterator for ReadDir {
+ type Item = io::Result<DirEntry>;
+
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ loop {
+ // If we've reached the capacity of our buffer then we need to read
+ // some more from the OS, otherwise we pick up at our old offset.
+ let offset = if self.offset == self.cap {
+ let cookie = self.cookie.take()?;
+ match self.inner.dir.fd.readdir(&mut self.buf, cookie) {
+ Ok(bytes) => self.cap = bytes,
+ Err(e) => return Some(Err(e)),
+ }
+ self.offset = 0;
+ self.cookie = Some(cookie);
+
+ // If we didn't actually read anything, this is in theory the
+ // end of the directory.
+ if self.cap == 0 {
+ self.cookie = None;
+ return None;
+ }
+
+ 0
+ } else {
+ self.offset
+ };
+ let data = &self.buf[offset..self.cap];
+
+ // If we're not able to read a directory entry then that means it
+ // must have been truncated at the end of the buffer, so reset our
+ // offset so we can go back and reread into the buffer, picking up
+ // where we last left off.
+ let dirent_size = mem::size_of::<wasi::Dirent>();
+ if data.len() < dirent_size {
+ assert!(self.cookie.is_some());
+ assert!(self.buf.len() >= dirent_size);
+ self.offset = self.cap;
+ continue;
+ }
+ let (dirent, data) = data.split_at(dirent_size);
+ let dirent = unsafe { ptr::read_unaligned(dirent.as_ptr() as *const wasi::Dirent) };
+
+ // If the file name was truncated, then we need to reinvoke
+ // `readdir`, so reset our offset to the end of the buffer to start
+ // over and reread this entry. Note that if our offset is 0 that means
+ // the file name is larger than the whole buffer and we need a bigger buffer.
+ if data.len() < dirent.d_namlen as usize {
+ if offset == 0 {
+ let amt_to_add = self.buf.capacity();
+ self.buf.extend(iter::repeat(0).take(amt_to_add));
+ }
+ assert!(self.cookie.is_some());
+ self.offset = self.cap;
+ continue;
+ }
+ self.cookie = Some(dirent.d_next);
+ self.offset = offset + dirent_size + dirent.d_namlen as usize;
+
+ let name = &data[..(dirent.d_namlen as usize)];
+
+ // These names are skipped on all other platforms, so let's skip
+ // them here too
+ if name == b"." || name == b".." {
+ continue;
+ }
+
+ return Some(Ok(DirEntry {
+ meta: dirent,
+ name: name.to_vec(),
+ inner: self.inner.clone(),
+ }));
+ }
+ }
+}
+
+impl DirEntry {
+ pub fn path(&self) -> PathBuf {
+ let name = OsStr::from_bytes(&self.name);
+ self.inner.root.join(name)
+ }
+
+ pub fn file_name(&self) -> OsString {
+ OsString::from_vec(self.name.clone())
+ }
+
+ pub fn metadata(&self) -> io::Result<FileAttr> {
+ metadata_at(&self.inner.dir.fd, 0, OsStr::from_bytes(&self.name).as_ref())
+ }
+
+ pub fn file_type(&self) -> io::Result<FileType> {
+ Ok(FileType { bits: self.meta.d_type })
+ }
+
+ pub fn ino(&self) -> wasi::Inode {
+ self.meta.d_ino
+ }
+}
+
+impl OpenOptions {
+ pub fn new() -> OpenOptions {
+ let mut base = OpenOptions::default();
+ base.dirflags = wasi::LOOKUPFLAGS_SYMLINK_FOLLOW;
+ return base;
+ }
+
+ pub fn read(&mut self, read: bool) {
+ self.read = read;
+ }
+
+ pub fn write(&mut self, write: bool) {
+ self.write = write;
+ }
+
+ pub fn truncate(&mut self, truncate: bool) {
+ self.oflag(wasi::OFLAGS_TRUNC, truncate);
+ }
+
+ pub fn create(&mut self, create: bool) {
+ self.oflag(wasi::OFLAGS_CREAT, create);
+ }
+
+ pub fn create_new(&mut self, create_new: bool) {
+ self.oflag(wasi::OFLAGS_EXCL, create_new);
+ self.oflag(wasi::OFLAGS_CREAT, create_new);
+ }
+
+ pub fn directory(&mut self, directory: bool) {
+ self.oflag(wasi::OFLAGS_DIRECTORY, directory);
+ }
+
+ fn oflag(&mut self, bit: wasi::Oflags, set: bool) {
+ if set {
+ self.oflags |= bit;
+ } else {
+ self.oflags &= !bit;
+ }
+ }
+
+ pub fn append(&mut self, append: bool) {
+ self.append = append;
+ self.fdflag(wasi::FDFLAGS_APPEND, append);
+ }
+
+ pub fn dsync(&mut self, set: bool) {
+ self.fdflag(wasi::FDFLAGS_DSYNC, set);
+ }
+
+ pub fn nonblock(&mut self, set: bool) {
+ self.fdflag(wasi::FDFLAGS_NONBLOCK, set);
+ }
+
+ pub fn rsync(&mut self, set: bool) {
+ self.fdflag(wasi::FDFLAGS_RSYNC, set);
+ }
+
+ pub fn sync(&mut self, set: bool) {
+ self.fdflag(wasi::FDFLAGS_SYNC, set);
+ }
+
+ fn fdflag(&mut self, bit: wasi::Fdflags, set: bool) {
+ if set {
+ self.fdflags |= bit;
+ } else {
+ self.fdflags &= !bit;
+ }
+ }
+
+ pub fn fs_rights_base(&mut self, rights: wasi::Rights) {
+ self.rights_base = Some(rights);
+ }
+
+ pub fn fs_rights_inheriting(&mut self, rights: wasi::Rights) {
+ self.rights_inheriting = Some(rights);
+ }
+
+ fn rights_base(&self) -> wasi::Rights {
+ if let Some(rights) = self.rights_base {
+ return rights;
+ }
+
+ // If rights haven't otherwise been specified, try to pick a reasonable
+ // set. This can always be overridden by users via extension traits, and
+ // implementations may silently give us fewer rights than we ask for. So
+ // given that, just look at `read` and `write` and bucket permissions
+ // based on that.
+ let mut base = 0;
+ if self.read {
+ base |= wasi::RIGHTS_FD_READ;
+ base |= wasi::RIGHTS_FD_READDIR;
+ }
+ if self.write || self.append {
+ base |= wasi::RIGHTS_FD_WRITE;
+ base |= wasi::RIGHTS_FD_DATASYNC;
+ base |= wasi::RIGHTS_FD_ALLOCATE;
+ base |= wasi::RIGHTS_FD_FILESTAT_SET_SIZE;
+ }
+
+ // FIXME: some of these should probably be read-only or write-only...
+ base |= wasi::RIGHTS_FD_ADVISE;
+ base |= wasi::RIGHTS_FD_FDSTAT_SET_FLAGS;
+ base |= wasi::RIGHTS_FD_FILESTAT_GET;
+ base |= wasi::RIGHTS_FD_FILESTAT_SET_TIMES;
+ base |= wasi::RIGHTS_FD_SEEK;
+ base |= wasi::RIGHTS_FD_SYNC;
+ base |= wasi::RIGHTS_FD_TELL;
+ base |= wasi::RIGHTS_PATH_CREATE_DIRECTORY;
+ base |= wasi::RIGHTS_PATH_CREATE_FILE;
+ base |= wasi::RIGHTS_PATH_FILESTAT_GET;
+ base |= wasi::RIGHTS_PATH_LINK_SOURCE;
+ base |= wasi::RIGHTS_PATH_LINK_TARGET;
+ base |= wasi::RIGHTS_PATH_OPEN;
+ base |= wasi::RIGHTS_PATH_READLINK;
+ base |= wasi::RIGHTS_PATH_REMOVE_DIRECTORY;
+ base |= wasi::RIGHTS_PATH_RENAME_SOURCE;
+ base |= wasi::RIGHTS_PATH_RENAME_TARGET;
+ base |= wasi::RIGHTS_PATH_SYMLINK;
+ base |= wasi::RIGHTS_PATH_UNLINK_FILE;
+ base |= wasi::RIGHTS_POLL_FD_READWRITE;
+
+ return base;
+ }
+
+ fn rights_inheriting(&self) -> wasi::Rights {
+ self.rights_inheriting.unwrap_or_else(|| self.rights_base())
+ }
+
+ pub fn lookup_flags(&mut self, flags: wasi::Lookupflags) {
+ self.dirflags = flags;
+ }
+}
+
+impl File {
+ pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
+ let (dir, file) = open_parent(path)?;
+ open_at(&dir, &file, opts)
+ }
+
+ pub fn open_at(&self, path: &Path, opts: &OpenOptions) -> io::Result<File> {
+ open_at(&self.fd, path, opts)
+ }
+
+ pub fn file_attr(&self) -> io::Result<FileAttr> {
+ self.fd.filestat_get().map(|meta| FileAttr { meta })
+ }
+
+ pub fn metadata_at(&self, flags: wasi::Lookupflags, path: &Path) -> io::Result<FileAttr> {
+ metadata_at(&self.fd, flags, path)
+ }
+
+ pub fn fsync(&self) -> io::Result<()> {
+ self.fd.sync()
+ }
+
+ pub fn datasync(&self) -> io::Result<()> {
+ self.fd.datasync()
+ }
+
+ pub fn truncate(&self, size: u64) -> io::Result<()> {
+ self.fd.filestat_set_size(size)
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.read_vectored(&mut [IoSliceMut::new(buf)])
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.fd.read(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ crate::io::default_read_buf(|buf| self.read(buf), buf)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.write_vectored(&[IoSlice::new(buf)])
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.fd.write(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ Ok(())
+ }
+
+ pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
+ self.fd.seek(pos)
+ }
+
+ pub fn duplicate(&self) -> io::Result<File> {
+ // https://github.com/CraneStation/wasmtime/blob/master/docs/WASI-rationale.md#why-no-dup
+ unsupported()
+ }
+
+ pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
+ // Permissions haven't been fully figured out in wasi yet, so this is
+ // likely temporary
+ unsupported()
+ }
+
+ pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ self.fd.filestat_set_times(
+ times.accessed.unwrap_or(0),
+ times.modified.unwrap_or(0),
+ times.accessed.map_or(0, |_| wasi::FSTFLAGS_ATIM)
+ | times.modified.map_or(0, |_| wasi::FSTFLAGS_MTIM),
+ )
+ }
+
+ pub fn read_link(&self, file: &Path) -> io::Result<PathBuf> {
+ read_link(&self.fd, file)
+ }
+}
+
+impl AsInner<WasiFd> for File {
+ fn as_inner(&self) -> &WasiFd {
+ &self.fd
+ }
+}
+
+impl IntoInner<WasiFd> for File {
+ fn into_inner(self) -> WasiFd {
+ self.fd
+ }
+}
+
+impl FromInner<WasiFd> for File {
+ fn from_inner(fd: WasiFd) -> File {
+ File { fd }
+ }
+}
+
+impl AsFd for File {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.fd.as_fd()
+ }
+}
+
+impl AsRawFd for File {
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for File {
+ fn into_raw_fd(self) -> RawFd {
+ self.fd.into_raw_fd()
+ }
+}
+
+impl FromRawFd for File {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ unsafe { Self { fd: FromRawFd::from_raw_fd(raw_fd) } }
+ }
+}
+
+impl DirBuilder {
+ pub fn new() -> DirBuilder {
+ DirBuilder {}
+ }
+
+ pub fn mkdir(&self, p: &Path) -> io::Result<()> {
+ let (dir, file) = open_parent(p)?;
+ dir.create_directory(osstr2str(file.as_ref())?)
+ }
+}
+
+impl fmt::Debug for File {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("File").field("fd", &self.as_raw_fd()).finish()
+ }
+}
+
+pub fn readdir(p: &Path) -> io::Result<ReadDir> {
+ let mut opts = OpenOptions::new();
+ opts.directory(true);
+ opts.read(true);
+ let dir = File::open(p, &opts)?;
+ Ok(ReadDir::new(dir, p.to_path_buf()))
+}
+
+pub fn unlink(p: &Path) -> io::Result<()> {
+ let (dir, file) = open_parent(p)?;
+ dir.unlink_file(osstr2str(file.as_ref())?)
+}
+
+pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
+ let (old, old_file) = open_parent(old)?;
+ let (new, new_file) = open_parent(new)?;
+ old.rename(osstr2str(old_file.as_ref())?, &new, osstr2str(new_file.as_ref())?)
+}
+
+pub fn set_perm(_p: &Path, _perm: FilePermissions) -> io::Result<()> {
+ // Permissions haven't been fully figured out in wasi yet, so this is
+ // likely temporary
+ unsupported()
+}
+
+pub fn rmdir(p: &Path) -> io::Result<()> {
+ let (dir, file) = open_parent(p)?;
+ dir.remove_directory(osstr2str(file.as_ref())?)
+}
+
+pub fn readlink(p: &Path) -> io::Result<PathBuf> {
+ let (dir, file) = open_parent(p)?;
+ read_link(&dir, &file)
+}
+
+fn read_link(fd: &WasiFd, file: &Path) -> io::Result<PathBuf> {
+ // Try to get a best-effort initial capacity for the vector we're going to
+ // fill. Note that if the target isn't a symlink we don't use its file size,
+ // to avoid allocating gigabytes if you `read_link` a huge movie file by
+ // accident. Additionally we add 1 to the initial size so that, if the size
+ // hasn't changed by the time we call `readlink`, the returned length will be
+ // less than the capacity, guaranteeing that we got all the data.
+ let meta = metadata_at(fd, 0, file)?;
+ let initial_size = if meta.file_type().is_symlink() {
+ (meta.size() as usize).saturating_add(1)
+ } else {
+ 1 // this'll fail in just a moment
+ };
+
+ // Now that we have an initial guess of how big to make our buffer, call
+ // `readlink` in a loop until it fails or reports it filled fewer bytes than
+ // we asked for, indicating we got everything.
+ let file = osstr2str(file.as_ref())?;
+ let mut destination = vec![0u8; initial_size];
+ loop {
+ let len = fd.readlink(file, &mut destination)?;
+ if len < destination.len() {
+ destination.truncate(len);
+ destination.shrink_to_fit();
+ return Ok(PathBuf::from(OsString::from_vec(destination)));
+ }
+ let amt_to_add = destination.len();
+ destination.extend(iter::repeat(0).take(amt_to_add));
+ }
+}
+
+pub fn symlink(original: &Path, link: &Path) -> io::Result<()> {
+ let (link, link_file) = open_parent(link)?;
+ link.symlink(osstr2str(original.as_ref())?, osstr2str(link_file.as_ref())?)
+}
+
+pub fn link(original: &Path, link: &Path) -> io::Result<()> {
+ let (original, original_file) = open_parent(original)?;
+ let (link, link_file) = open_parent(link)?;
+ // Pass 0 as the flags argument, meaning don't follow symlinks.
+ original.link(0, osstr2str(original_file.as_ref())?, &link, osstr2str(link_file.as_ref())?)
+}
+
+pub fn stat(p: &Path) -> io::Result<FileAttr> {
+ let (dir, file) = open_parent(p)?;
+ metadata_at(&dir, wasi::LOOKUPFLAGS_SYMLINK_FOLLOW, &file)
+}
+
+pub fn lstat(p: &Path) -> io::Result<FileAttr> {
+ let (dir, file) = open_parent(p)?;
+ metadata_at(&dir, 0, &file)
+}
+
+fn metadata_at(fd: &WasiFd, flags: wasi::Lookupflags, path: &Path) -> io::Result<FileAttr> {
+ let meta = fd.path_filestat_get(flags, osstr2str(path.as_ref())?)?;
+ Ok(FileAttr { meta })
+}
+
+pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> {
+ // This doesn't seem to be in WASI's API yet, and we may end up needing to
+ // emulate it ourselves. For now just return an error.
+ unsupported()
+}
+
+fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> {
+ let fd = fd.open(
+ opts.dirflags,
+ osstr2str(path.as_ref())?,
+ opts.oflags,
+ opts.rights_base(),
+ opts.rights_inheriting(),
+ opts.fdflags,
+ )?;
+ Ok(File { fd })
+}
+
+/// Attempts to open a bare path `p`.
+///
+/// WASI has no fundamental capability to do this. All syscalls and operations
+/// are relative to already-open file descriptors. The C library, however,
+/// maintains a map from pre-opened file descriptors to their paths, and
+/// provides an API to query it. In other words, when you want to open a path
+/// `p`, you have to find a previously opened file descriptor in a global table
+/// and then see if `p` is relative to that file descriptor.
+///
+/// This function, if successful, will return two items:
+///
+/// * The first is a `ManuallyDrop<WasiFd>`. This represents a pre-opened file
+/// descriptor which we don't have ownership of, but we can use. You shouldn't
+/// actually drop the `fd`.
+///
+/// * The second is a path that should be a part of `p` and represents a
+/// relative traversal from the file descriptor specified to the desired
+/// location `p`.
+///
+/// If successful you can use the returned file descriptor to perform
+/// file-descriptor-relative operations on the path returned as well.
+///
+/// Note that this can fail if `p` doesn't look like it can be opened relative
+/// to any pre-opened file descriptor.
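+///
+/// For example (purely illustrative): if the runtime pre-opened `/foo`, a
+/// request to open `/foo/bar.txt` might resolve to the pre-opened descriptor
+/// for `/foo` plus the relative path `bar.txt`.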
+fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> {
+ let p = CString::new(p.as_os_str().as_bytes())?;
+ let mut buf = Vec::<u8>::with_capacity(512);
+ loop {
+ unsafe {
+ let mut relative_path = buf.as_ptr().cast();
+ let mut abs_prefix = ptr::null();
+ let fd = __wasilibc_find_relpath(
+ p.as_ptr(),
+ &mut abs_prefix,
+ &mut relative_path,
+ buf.capacity(),
+ );
+ if fd == -1 {
+ if io::Error::last_os_error().raw_os_error() == Some(libc::ENOMEM) {
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity.
+ let cap = buf.capacity();
+ buf.set_len(cap);
+ buf.reserve(1);
+ continue;
+ }
+ let msg = format!(
+ "failed to find a pre-opened file descriptor \
+ through which {:?} could be opened",
+ p
+ );
+ return Err(io::Error::new(io::ErrorKind::Uncategorized, msg));
+ }
+ let relative = CStr::from_ptr(relative_path).to_bytes().to_vec();
+
+ return Ok((
+ ManuallyDrop::new(WasiFd::from_raw_fd(fd as c_int)),
+ PathBuf::from(OsString::from_vec(relative)),
+ ));
+ }
+ }
+
+ extern "C" {
+ pub fn __wasilibc_find_relpath(
+ path: *const libc::c_char,
+ abs_prefix: *mut *const libc::c_char,
+ relative_path: *mut *const libc::c_char,
+ relative_path_len: libc::size_t,
+ ) -> libc::c_int;
+ }
+}
+
+pub fn osstr2str(f: &OsStr) -> io::Result<&str> {
+ f.to_str()
+ .ok_or_else(|| io::const_io_error!(io::ErrorKind::Uncategorized, "input must be utf-8"))
+}
+
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ use crate::fs::File;
+
+ let mut reader = File::open(from)?;
+ let mut writer = File::create(to)?;
+
+ io::copy(&mut reader, &mut writer)
+}
+
+pub fn remove_dir_all(path: &Path) -> io::Result<()> {
+ let (parent, path) = open_parent(path)?;
+ remove_dir_all_recursive(&parent, &path)
+}
+
+fn remove_dir_all_recursive(parent: &WasiFd, path: &Path) -> io::Result<()> {
+ // Open up a file descriptor for the directory itself. Note that we don't
+ // follow symlinks here and we specifically open directories.
+ //
+ // At the root invocation of this function this will correctly handle
+ // symlinks passed to the top-level `remove_dir_all`. In recursive
+ // invocations it double-checks that what the `readdir` call reported as a
+ // directory is still a directory by the time we open it up.
+ //
+ // If the opened file was actually a symlink then the symlink is deleted,
+ // not the directory recursively.
+ let mut opts = OpenOptions::new();
+ opts.lookup_flags(0);
+ opts.directory(true);
+ opts.read(true);
+ let fd = open_at(parent, path, &opts)?;
+ if fd.file_attr()?.file_type().is_symlink() {
+ return parent.unlink_file(osstr2str(path.as_ref())?);
+ }
+
+ // This "root" is only used by `DirEntry::path`, which we don't use below,
+ // so it's ok for this to be a bogus value.
+ let dummy_root = PathBuf::new();
+
+ // Iterate over all the entries in this directory, and travel recursively if
+ // necessary
+ for entry in ReadDir::new(fd, dummy_root) {
+ let entry = entry?;
+ let path = crate::str::from_utf8(&entry.name).map_err(|_| {
+ io::const_io_error!(io::ErrorKind::Uncategorized, "invalid utf-8 file name found")
+ })?;
+
+ if entry.file_type()?.is_dir() {
+ remove_dir_all_recursive(&entry.inner.dir.fd, path.as_ref())?;
+ } else {
+ entry.inner.dir.fd.unlink_file(path)?;
+ }
+ }
+
+ // Once all this directory's contents are deleted it should be safe to
+ // delete the directory itself.
+ parent.remove_directory(osstr2str(path.as_ref())?)
+}
diff --git a/library/std/src/sys/wasi/io.rs b/library/std/src/sys/wasi/io.rs
new file mode 100644
index 000000000..ee017d13a
--- /dev/null
+++ b/library/std/src/sys/wasi/io.rs
@@ -0,0 +1,73 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::marker::PhantomData;
+use crate::slice;
+
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct IoSlice<'a> {
+ vec: wasi::Ciovec,
+ _p: PhantomData<&'a [u8]>,
+}
+
+impl<'a> IoSlice<'a> {
+ #[inline]
+ pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
+ IoSlice { vec: wasi::Ciovec { buf: buf.as_ptr(), buf_len: buf.len() }, _p: PhantomData }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if self.vec.buf_len < n {
+ panic!("advancing IoSlice beyond its length");
+ }
+
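+ // The bounds check above guarantees `n <= buf_len`, so both the pointer
+ // bump and the length update stay within the original buffer.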
+ unsafe {
+ self.vec.buf_len -= n;
+ self.vec.buf = self.vec.buf.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.buf as *const u8, self.vec.buf_len) }
+ }
+}
+
+#[repr(transparent)]
+pub struct IoSliceMut<'a> {
+ vec: wasi::Iovec,
+ _p: PhantomData<&'a mut [u8]>,
+}
+
+impl<'a> IoSliceMut<'a> {
+ #[inline]
+ pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+ IoSliceMut {
+ vec: wasi::Iovec { buf: buf.as_mut_ptr(), buf_len: buf.len() },
+ _p: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if self.vec.buf_len < n {
+ panic!("advancing IoSliceMut beyond its length");
+ }
+
+ unsafe {
+ self.vec.buf_len -= n;
+ self.vec.buf = self.vec.buf.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.buf as *const u8, self.vec.buf_len) }
+ }
+
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut [u8] {
+ unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.buf_len) }
+ }
+}
diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs
new file mode 100644
index 000000000..683a07a34
--- /dev/null
+++ b/library/std/src/sys/wasi/mod.rs
@@ -0,0 +1,100 @@
+//! System bindings for the WASI platform
+//!
+//! This module contains the facade (aka platform-specific) implementations of
+//! OS level functionality for WASI. Note that this target is *not* the
+//! emscripten flavor of wasm, so we have no emscripten runtime here.
+//!
+//! This is all highly experimental and not actually intended for
+//! wide/production use yet; it's still in the experimental category and will
+//! likely change over time.
+//!
+//! Many functions here are still stubs that immediately return errors, though
+//! filesystem, environment, and clock support is implemented on top of the
+//! WASI syscalls. The hope is that with a portability lint we can eventually
+//! just remove the remaining stubs and omit those parts of the standard
+//! library when compiling for this target. That way it's a compile-time error
+//! for something that's guaranteed to be a runtime error!
+
+use crate::io as std_io;
+use crate::mem;
+
+#[path = "../unix/alloc.rs"]
+pub mod alloc;
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+pub mod fd;
+pub mod fs;
+pub mod io;
+#[path = "../unsupported/locks/mod.rs"]
+pub mod locks;
+pub mod net;
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+#[path = "../unix/path.rs"]
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+pub mod stdio;
+pub mod thread;
+#[path = "../unsupported/thread_local_dtor.rs"]
+pub mod thread_local_dtor;
+#[path = "../unsupported/thread_local_key.rs"]
+pub mod thread_local_key;
+pub mod time;
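+ // Subscribe to a one-shot monotonic-clock timeout; `tag: 0` selects the
+ // clock variant of the subscription union in this version of the `wasi`
+ // crate.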
+
+#[path = "../unsupported/common.rs"]
+#[deny(unsafe_op_in_unsafe_fn)]
+#[allow(unused)]
+mod common;
+pub use common::*;
+
+pub fn decode_error_kind(errno: i32) -> std_io::ErrorKind {
+ use std_io::ErrorKind::*;
+ if errno > u16::MAX as i32 || errno < 0 {
+ return Uncategorized;
+ }
+
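+ // `wasi::Errno` values aren't plain integer constants, so compare against
+ // their raw values via match guards rather than literal patterns.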
+ match errno {
+ e if e == wasi::ERRNO_CONNREFUSED.raw().into() => ConnectionRefused,
+ e if e == wasi::ERRNO_CONNRESET.raw().into() => ConnectionReset,
+ e if e == wasi::ERRNO_PERM.raw().into() || e == wasi::ERRNO_ACCES.raw().into() => {
+ PermissionDenied
+ }
+ e if e == wasi::ERRNO_PIPE.raw().into() => BrokenPipe,
+ e if e == wasi::ERRNO_NOTCONN.raw().into() => NotConnected,
+ e if e == wasi::ERRNO_CONNABORTED.raw().into() => ConnectionAborted,
+ e if e == wasi::ERRNO_ADDRNOTAVAIL.raw().into() => AddrNotAvailable,
+ e if e == wasi::ERRNO_ADDRINUSE.raw().into() => AddrInUse,
+ e if e == wasi::ERRNO_NOENT.raw().into() => NotFound,
+ e if e == wasi::ERRNO_INTR.raw().into() => Interrupted,
+ e if e == wasi::ERRNO_INVAL.raw().into() => InvalidInput,
+ e if e == wasi::ERRNO_TIMEDOUT.raw().into() => TimedOut,
+ e if e == wasi::ERRNO_EXIST.raw().into() => AlreadyExists,
+ e if e == wasi::ERRNO_AGAIN.raw().into() => WouldBlock,
+ e if e == wasi::ERRNO_NOSYS.raw().into() => Unsupported,
+ e if e == wasi::ERRNO_NOMEM.raw().into() => OutOfMemory,
+ _ => Uncategorized,
+ }
+}
+
+pub fn abort_internal() -> ! {
+ unsafe { libc::abort() }
+}
+
+pub fn hashmap_random_keys() -> (u64, u64) {
+ let mut ret = (0u64, 0u64);
+ unsafe {
+ let base = &mut ret as *mut (u64, u64) as *mut u8;
+ let len = mem::size_of_val(&ret);
+ wasi::random_get(base, len).expect("random_get failure");
+ }
+ return ret;
+}
+
+fn err2io(err: wasi::Errno) -> std_io::Error {
+ std_io::Error::from_raw_os_error(err.raw().into())
+}
diff --git a/library/std/src/sys/wasi/net.rs b/library/std/src/sys/wasi/net.rs
new file mode 100644
index 000000000..590d268c3
--- /dev/null
+++ b/library/std/src/sys/wasi/net.rs
@@ -0,0 +1,527 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use super::err2io;
+use super::fd::WasiFd;
+use crate::fmt;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
+use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
+use crate::sys::unsupported;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::time::Duration;
+
+pub struct Socket(WasiFd);
+
+pub struct TcpStream {
+ inner: Socket,
+}
+
+impl AsInner<WasiFd> for Socket {
+ fn as_inner(&self) -> &WasiFd {
+ &self.0
+ }
+}
+
+impl IntoInner<WasiFd> for Socket {
+ fn into_inner(self) -> WasiFd {
+ self.0
+ }
+}
+
+impl FromInner<WasiFd> for Socket {
+ fn from_inner(inner: WasiFd) -> Socket {
+ Socket(inner)
+ }
+}
+
+impl AsFd for Socket {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
+ }
+}
+
+impl AsRawFd for Socket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+}
+
+impl IntoRawFd for Socket {
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+}
+
+impl FromRawFd for Socket {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ unsafe { Self(FromRawFd::from_raw_fd(raw_fd)) }
+ }
+}
+
+impl TcpStream {
+ pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> {
+ unsupported()
+ }
+
+ pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> {
+ unsupported()
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.read_vectored(&mut [IoSliceMut::new(buf)])
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.socket().as_inner().read(bufs)
+ }
+
+ pub fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.write_vectored(&[IoSlice::new(buf)])
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.socket().as_inner().write(bufs)
+ }
+
+ pub fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpStream> {
+ unsupported()
+ }
+
+ pub fn set_linger(&self, _: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn set_nodelay(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unsupported()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unsupported()
+ }
+
+ pub fn set_nonblocking(&self, state: bool) -> io::Result<()> {
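+ // Read the current fd flags, toggle `FDFLAGS_NONBLOCK`, and write the
+ // updated set back with `fd_fdstat_set_flags`.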
+ let fdstat = unsafe {
+ wasi::fd_fdstat_get(self.socket().as_inner().as_raw_fd() as wasi::Fd).map_err(err2io)?
+ };
+
+ let mut flags = fdstat.fs_flags;
+
+ if state {
+ flags |= wasi::FDFLAGS_NONBLOCK;
+ } else {
+ flags &= !wasi::FDFLAGS_NONBLOCK;
+ }
+
+ unsafe {
+ wasi::fd_fdstat_set_flags(self.socket().as_inner().as_raw_fd() as wasi::Fd, flags)
+ .map_err(err2io)
+ }
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+}
+
+impl FromInner<Socket> for TcpStream {
+ fn from_inner(socket: Socket) -> TcpStream {
+ TcpStream { inner: socket }
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TcpStream").field("fd", &self.inner.as_raw_fd()).finish()
+ }
+}
+
+pub struct TcpListener {
+ inner: Socket,
+}
+
+impl TcpListener {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<TcpListener> {
+ unsupported()
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ let fd = unsafe {
+ wasi::sock_accept(self.as_inner().as_inner().as_raw_fd() as _, 0).map_err(err2io)?
+ };
+
+ Ok((
+ TcpStream::from_inner(unsafe { Socket::from_raw_fd(fd as _) }),
+ // WASI has no concept of a `SocketAddr` yet, so
+ // return an unspecified IPv4 address.
+ SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0),
+ ))
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpListener> {
+ unsupported()
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unsupported()
+ }
+
+ pub fn set_only_v6(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unsupported()
+ }
+
+ pub fn set_nonblocking(&self, state: bool) -> io::Result<()> {
+ let fdstat = unsafe {
+ wasi::fd_fdstat_get(self.socket().as_inner().as_raw_fd() as wasi::Fd).map_err(err2io)?
+ };
+
+ let mut flags = fdstat.fs_flags;
+
+ if state {
+ flags |= wasi::FDFLAGS_NONBLOCK;
+ } else {
+ flags &= !wasi::FDFLAGS_NONBLOCK;
+ }
+
+ unsafe {
+ wasi::fd_fdstat_set_flags(self.socket().as_inner().as_raw_fd() as wasi::Fd, flags)
+ .map_err(err2io)
+ }
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+}
+
+impl AsInner<Socket> for TcpListener {
+ fn as_inner(&self) -> &Socket {
+ &self.inner
+ }
+}
+
+impl IntoInner<Socket> for TcpListener {
+ fn into_inner(self) -> Socket {
+ self.inner
+ }
+}
+
+impl FromInner<Socket> for TcpListener {
+ fn from_inner(inner: Socket) -> TcpListener {
+ TcpListener { inner }
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TcpListener").field("fd", &self.inner.as_raw_fd()).finish()
+ }
+}
+
+pub struct UdpSocket {
+ inner: Socket,
+}
+
+impl UdpSocket {
+ pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> {
+ unsupported()
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ unsupported()
+ }
+
+ pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unsupported()
+ }
+
+ pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ unsupported()
+ }
+
+ pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn duplicate(&self) -> io::Result<UdpSocket> {
+ unsupported()
+ }
+
+ pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ unsupported()
+ }
+
+ pub fn set_broadcast(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ unsupported()
+ }
+
+ pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ unsupported()
+ }
+
+ pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn set_ttl(&self, _: u32) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ unsupported()
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ unsupported()
+ }
+
+ pub fn set_nonblocking(&self, _: bool) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn send(&self, _: &[u8]) -> io::Result<usize> {
+ unsupported()
+ }
+
+ pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> {
+ unsupported()
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+}
+
+impl AsInner<Socket> for UdpSocket {
+ fn as_inner(&self) -> &Socket {
+ &self.inner
+ }
+}
+
+impl IntoInner<Socket> for UdpSocket {
+ fn into_inner(self) -> Socket {
+ self.inner
+ }
+}
+
+impl FromInner<Socket> for UdpSocket {
+ fn from_inner(inner: Socket) -> UdpSocket {
+ UdpSocket { inner }
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("UdpSocket").field("fd", &self.inner.as_raw_fd()).finish()
+ }
+}
+
+pub struct LookupHost(!);
+
+impl LookupHost {
+ pub fn port(&self) -> u16 {
+ self.0
+ }
+}
+
+impl Iterator for LookupHost {
+ type Item = SocketAddr;
+ fn next(&mut self) -> Option<SocketAddr> {
+ self.0
+ }
+}
+
+impl<'a> TryFrom<&'a str> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: &'a str) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> {
+ unsupported()
+ }
+}
+
+#[allow(nonstandard_style)]
+pub mod netc {
+ pub const AF_INET: u8 = 0;
+ pub const AF_INET6: u8 = 1;
+ pub type sa_family_t = u8;
+
+ #[derive(Copy, Clone)]
+ pub struct in_addr {
+ pub s_addr: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in {
+ pub sin_family: sa_family_t,
+ pub sin_port: u16,
+ pub sin_addr: in_addr,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct in6_addr {
+ pub s6_addr: [u8; 16],
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr_in6 {
+ pub sin6_family: sa_family_t,
+ pub sin6_port: u16,
+ pub sin6_addr: in6_addr,
+ pub sin6_flowinfo: u32,
+ pub sin6_scope_id: u32,
+ }
+
+ #[derive(Copy, Clone)]
+ pub struct sockaddr {}
+}
diff --git a/library/std/src/sys/wasi/os.rs b/library/std/src/sys/wasi/os.rs
new file mode 100644
index 000000000..c5229a188
--- /dev/null
+++ b/library/std/src/sys/wasi/os.rs
@@ -0,0 +1,243 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::any::Any;
+use crate::error::Error as StdError;
+use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::os::wasi::prelude::*;
+use crate::path::{self, PathBuf};
+use crate::str;
+use crate::sys::memchr;
+use crate::sys::unsupported;
+use crate::vec;
+
+// Add a few symbols not in upstream `libc` just yet.
+mod libc {
+ pub use libc::*;
+
+ extern "C" {
+ pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char;
+ pub fn chdir(dir: *const c_char) -> c_int;
+ }
+}
+
+#[cfg(not(target_feature = "atomics"))]
+pub unsafe fn env_lock() -> impl Any {
+ // No need for a lock if we're single-threaded, but this function will need
+ // to be implemented for multi-threaded scenarios
+}
+
+pub fn errno() -> i32 {
+ extern "C" {
+ #[thread_local]
+ static errno: libc::c_int;
+ }
+
+ unsafe { errno as i32 }
+}
+
+pub fn error_string(errno: i32) -> String {
+ let mut buf = [0 as libc::c_char; 1024];
+
+ let p = buf.as_mut_ptr();
+ unsafe {
+ if libc::strerror_r(errno as libc::c_int, p, buf.len()) < 0 {
+ panic!("strerror_r failure");
+ }
+ str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned()
+ }
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ let mut buf = Vec::with_capacity(512);
+ loop {
+ unsafe {
+ let ptr = buf.as_mut_ptr() as *mut libc::c_char;
+ if !libc::getcwd(ptr, buf.capacity()).is_null() {
+ let len = CStr::from_ptr(buf.as_ptr() as *const libc::c_char).to_bytes().len();
+ buf.set_len(len);
+ buf.shrink_to_fit();
+ return Ok(PathBuf::from(OsString::from_vec(buf)));
+ } else {
+ let error = io::Error::last_os_error();
+ if error.raw_os_error() != Some(libc::ERANGE) {
+ return Err(error);
+ }
+ }
+
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity.
+ let cap = buf.capacity();
+ buf.set_len(cap);
+ buf.reserve(1);
+ }
+ }
+}
+
+pub fn chdir(p: &path::Path) -> io::Result<()> {
+ let p: &OsStr = p.as_ref();
+ let p = CString::new(p.as_bytes())?;
+ unsafe {
+ match libc::chdir(p.as_ptr()) == (0 as libc::c_int) {
+ true => Ok(()),
+ false => Err(io::Error::last_os_error()),
+ }
+ }
+}
+
+pub struct SplitPaths<'a>(!, PhantomData<&'a ()>);
+
+pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> {
+ panic!("unsupported")
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ self.0
+ }
+}
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ Err(JoinPathsError)
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "not supported on wasm yet".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "not supported on wasm yet"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ unsupported()
+}
+pub struct Env {
+ iter: vec::IntoIter<(OsString, OsString)>,
+}
+
+impl !Send for Env {}
+impl !Sync for Env {}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+pub fn env() -> Env {
+ unsafe {
+ let _guard = env_lock();
+ let mut environ = libc::environ;
+ let mut result = Vec::new();
+ if !environ.is_null() {
+ while !(*environ).is_null() {
+ if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) {
+ result.push(key_value);
+ }
+ environ = environ.add(1);
+ }
+ }
+ return Env { iter: result.into_iter() };
+ }
+
+ // Same parsing logic as in src/libstd/sys/unix/os.rs.
+ fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
+ if input.is_empty() {
+ return None;
+ }
+ let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1);
+ pos.map(|p| {
+ (
+ OsStringExt::from_vec(input[..p].to_vec()),
+ OsStringExt::from_vec(input[p + 1..].to_vec()),
+ )
+ })
+ }
+}
+
+pub fn getenv(k: &OsStr) -> Option<OsString> {
+ let k = CString::new(k.as_bytes()).ok()?;
+ unsafe {
+ let _guard = env_lock();
+ let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
+ }
+ }
+}
+
+pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
+ let k = CString::new(k.as_bytes())?;
+ let v = CString::new(v.as_bytes())?;
+
+ unsafe {
+ let _guard = env_lock();
+ cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
+ }
+}
+
+pub fn unsetenv(n: &OsStr) -> io::Result<()> {
+ let nbuf = CString::new(n.as_bytes())?;
+
+ unsafe {
+ let _guard = env_lock();
+ cvt(libc::unsetenv(nbuf.as_ptr())).map(drop)
+ }
+}
+
+pub fn temp_dir() -> PathBuf {
+ panic!("no filesystem on wasm")
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ None
+}
+
+pub fn exit(code: i32) -> ! {
+ unsafe { libc::exit(code) }
+}
+
+pub fn getpid() -> u32 {
+ panic!("unsupported");
+}
+
+#[doc(hidden)]
+pub trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+macro_rules! impl_is_minus_one {
+ ($($t:ident)*) => ($(impl IsMinusOne for $t {
+ fn is_minus_one(&self) -> bool {
+ *self == -1
+ }
+ })*)
+}
+
+impl_is_minus_one! { i8 i16 i32 i64 isize }
+
+fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> {
+ if t.is_minus_one() { Err(io::Error::last_os_error()) } else { Ok(t) }
+}
diff --git a/library/std/src/sys/wasi/stdio.rs b/library/std/src/sys/wasi/stdio.rs
new file mode 100644
index 000000000..4cc0e4ed5
--- /dev/null
+++ b/library/std/src/sys/wasi/stdio.rs
@@ -0,0 +1,112 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use super::fd::WasiFd;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::mem::ManuallyDrop;
+use crate::os::raw;
+use crate::os::wasi::io::{AsRawFd, FromRawFd};
+
+pub struct Stdin;
+pub struct Stdout;
+pub struct Stderr;
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin
+ }
+}
+
+impl AsRawFd for Stdin {
+ #[inline]
+ fn as_raw_fd(&self) -> raw::c_int {
+ 0
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, data: &mut [u8]) -> io::Result<usize> {
+ self.read_vectored(&mut [IoSliceMut::new(data)])
+ }
+
+ fn read_vectored(&mut self, data: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ ManuallyDrop::new(unsafe { WasiFd::from_raw_fd(self.as_raw_fd()) }).read(data)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ true
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout
+ }
+}
+
+impl AsRawFd for Stdout {
+ #[inline]
+ fn as_raw_fd(&self) -> raw::c_int {
+ 1
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+ self.write_vectored(&[IoSlice::new(data)])
+ }
+
+ fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> {
+ ManuallyDrop::new(unsafe { WasiFd::from_raw_fd(self.as_raw_fd()) }).write(data)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl AsRawFd for Stderr {
+ #[inline]
+ fn as_raw_fd(&self) -> raw::c_int {
+ 2
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, data: &[u8]) -> io::Result<usize> {
+ self.write_vectored(&[IoSlice::new(data)])
+ }
+
+ fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> {
+ ManuallyDrop::new(unsafe { WasiFd::from_raw_fd(self.as_raw_fd()) }).write(data)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
+
+pub fn is_ebadf(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(wasi::ERRNO_BADF.raw().into())
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ Some(Stderr::new())
+}
diff --git a/library/std/src/sys/wasi/thread.rs b/library/std/src/sys/wasi/thread.rs
new file mode 100644
index 000000000..e7a6ab4be
--- /dev/null
+++ b/library/std/src/sys/wasi/thread.rs
@@ -0,0 +1,81 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::ffi::CStr;
+use crate::io;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct Thread(!);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ unsupported()
+ }
+
+ pub fn yield_now() {
+ let ret = unsafe { wasi::sched_yield() };
+ debug_assert_eq!(ret, Ok(()));
+ }
+
+ pub fn set_name(_name: &CStr) {
+ // nope
+ }
+
+ pub fn sleep(dur: Duration) {
+ let nanos = dur.as_nanos();
+ assert!(nanos <= u64::MAX as u128);
+
+ const USERDATA: wasi::Userdata = 0x0123_45678;
+
+ let clock = wasi::SubscriptionClock {
+ id: wasi::CLOCKID_MONOTONIC,
+ timeout: nanos as u64,
+ precision: 0,
+ flags: 0,
+ };
+
+ let in_ = wasi::Subscription {
+ userdata: USERDATA,
+ u: wasi::SubscriptionU { tag: 0, u: wasi::SubscriptionUU { clock } },
+ };
+ unsafe {
+ let mut event: wasi::Event = mem::zeroed();
+ let res = wasi::poll_oneoff(&in_, &mut event, 1);
+ match (res, event) {
+ (
+ Ok(1),
+ wasi::Event {
+ userdata: USERDATA,
+ error: wasi::ERRNO_SUCCESS,
+ type_: wasi::EVENTTYPE_CLOCK,
+ ..
+ },
+ ) => {}
+ _ => panic!("thread::sleep(): unexpected result of poll_oneoff"),
+ }
+ }
+ }
+
+ pub fn join(self) {
+ self.0
+ }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ unsupported()
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
diff --git a/library/std/src/sys/wasi/time.rs b/library/std/src/sys/wasi/time.rs
new file mode 100644
index 000000000..3d326e491
--- /dev/null
+++ b/library/std/src/sys/wasi/time.rs
@@ -0,0 +1,65 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::time::Duration;
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct Instant(Duration);
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct SystemTime(Duration);
+
+pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
+
+fn current_time(clock: wasi::Clockid) -> Duration {
+ let ts = unsafe {
+ wasi::clock_time_get(
+ clock, 1, // precision... seems ignored though?
+ )
+ .unwrap()
+ };
+ Duration::new((ts / 1_000_000_000) as u64, (ts % 1_000_000_000) as u32)
+}
+
+impl Instant {
+ pub fn now() -> Instant {
+ Instant(current_time(wasi::CLOCKID_MONOTONIC))
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ self.0.checked_sub(other.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant(self.0.checked_add(*other)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant(self.0.checked_sub(*other)?))
+ }
+}
+
+impl SystemTime {
+ pub fn now() -> SystemTime {
+ SystemTime(current_time(wasi::CLOCKID_REALTIME))
+ }
+
+ pub fn from_wasi_timestamp(ts: wasi::Timestamp) -> SystemTime {
+ SystemTime(Duration::from_nanos(ts))
+ }
+
+ pub fn to_wasi_timestamp_or_panic(&self) -> wasi::Timestamp {
+ self.0.as_nanos().try_into().expect("time does not fit in WASI timestamp")
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_add(*other)?))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ Some(SystemTime(self.0.checked_sub(*other)?))
+ }
+}
diff --git a/library/std/src/sys/wasm/alloc.rs b/library/std/src/sys/wasm/alloc.rs
new file mode 100644
index 000000000..6dceb1689
--- /dev/null
+++ b/library/std/src/sys/wasm/alloc.rs
@@ -0,0 +1,166 @@
+//! This is an implementation of a global allocator on wasm targets when
+//! emscripten is not in use. In that situation there's no actual runtime for us
+//! to lean on for allocation, so instead we provide our own!
+//!
+//! The wasm instruction set has two instructions for getting the current
+//! amount of memory and growing the amount of memory. These instructions are the
+//! foundation on which we're able to build an allocator, so we do so! Note that
+//! the instructions are also pretty "global" and this is the "global" allocator
+//! after all!
+//!
+//! The current allocator here is the `dlmalloc` crate, which is included in
+//! the rust-lang/rust repository as a submodule. The crate is a port of
+//! dlmalloc.c from C to Rust, and exists mainly so we can stay "pure Rust"
+//! for now, which is currently technically required (we can't link with C yet).
+//!
+//! The crate itself provides a global allocator which on wasm has no
+//! synchronization as there are no threads!
+
+use crate::alloc::{GlobalAlloc, Layout, System};
+
+static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new();
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling malloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.malloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling calloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.calloc(layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling free() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) }
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
+ // Calling realloc() is safe because preconditions on this function match the trait method preconditions.
+ let _lock = lock::lock();
+ unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) }
+ }
+}
+
+#[cfg(target_feature = "atomics")]
+mod lock {
+ use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+
+ static LOCKED: AtomicI32 = AtomicI32::new(0);
+
+ pub struct DropLock;
+
+ pub fn lock() -> DropLock {
+ loop {
+ if LOCKED.swap(1, SeqCst) == 0 {
+ return DropLock;
+ }
+ // Ok so here's where things get a little depressing. At this point
+ // in time we need to synchronously acquire a lock, but we're
+ // contending with some other thread. Typically we'd execute some
+ // form of `i32.atomic.wait` like so:
+ //
+ // unsafe {
+ // let r = core::arch::wasm32::i32_atomic_wait(
+ // LOCKED.as_mut_ptr(),
+ // 1, // expected value
+ // -1, // timeout
+ // );
+ // debug_assert!(r == 0 || r == 1);
+ // }
+ //
+ // Unfortunately though in doing so we would cause issues for the
+ // main thread. The main thread in a web browser *cannot ever
+ // block*, no exceptions. This means that the main thread can't
+ // actually execute the `i32.atomic.wait` instruction.
+ //
+ // As a result if we want to work within the context of browsers we
+ // need to figure out some sort of allocation scheme for the main
+ // thread where when there's contention on the global malloc lock we
+ // do... something.
+ //
+ // Possible ideas include:
+ //
+ // 1. Attempt to acquire the global lock. If it fails, fall back to
+ // memory allocation via `memory.grow`. Later just ... somehow
+ // ... inject this raw page back into the main allocator as it
+ // gets sliced up over time. This strategy has the downside of
+ // forcing allocation of a page to happen whenever the main
+ // thread contends with other threads, which is unfortunate.
+ //
+ // 2. Maintain a form of "two level" allocator scheme where the main
+ // thread has its own allocator. Somehow this allocator would
+ // also be balanced with a global allocator, not only to have
+ // allocations cross between threads but also to ensure that the
+ // two allocators stay "balanced" in terms of free'd memory and
+ // such. This, however, seems significantly complicated.
+ //
+ // For lack of other ideas, the current strategy implemented here is
+ // to simply spin. Typical spin loops issue some form of "hint"
+ // instruction to tell the CPU what we're doing so it doesn't run too
+ // hot, but wasm doesn't have such an instruction.
+ //
+ // To be clear, spinning here is not a great solution.
+ // Another thread with the lock may take quite a long time to wake
+ // up. For example it could be in `memory.grow` or it could be
+ // evicted from the CPU for a timeslice like 10ms. For these periods
+ // of time our thread will "helpfully" sit here and eat CPU time
+ // until it itself is evicted or the lock holder finishes. This
+ // means we're just burning and wasting CPU time to no one's
+ // benefit.
+ //
+ // Spinning does have the nice properties, though, of being
+ // semantically correct, being fair to all threads for memory
+ // allocation, and being simple enough to implement.
+ //
+ // This will surely (hopefully) be replaced in the future with a
+ // real memory allocator that can handle the restriction of the main
+ // thread.
+ //
+ //
+ // FIXME: We could also add an optimization here to detect whether a
+ // thread is the main thread or not and block on all non-main-thread
+ // threads. Currently, however, we have no way of knowing which wasm
+ // thread is on the browser's main thread; if we could figure that out
+ // we could at least somewhat mitigate the cost of this spinning.
+ }
+ }
+
+ impl Drop for DropLock {
+ fn drop(&mut self) {
+ let r = LOCKED.swap(0, SeqCst);
+ debug_assert_eq!(r, 1);
+
+ // Note that due to the above logic we don't actually need to wake
+ // anyone up, but if we did it'd likely look something like this:
+ //
+ // unsafe {
+ // core::arch::wasm32::atomic_notify(
+ // LOCKED.as_mut_ptr(),
+ // 1, // only one thread
+ // );
+ // }
+ }
+ }
+}
+
+#[cfg(not(target_feature = "atomics"))]
+mod lock {
+ #[inline]
+ pub fn lock() {} // no atomics, no threads, that's easy!
+}
diff --git a/library/std/src/sys/wasm/atomics/futex.rs b/library/std/src/sys/wasm/atomics/futex.rs
new file mode 100644
index 000000000..f4fbe9f48
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/futex.rs
@@ -0,0 +1,34 @@
+use crate::arch::wasm32;
+use crate::sync::atomic::AtomicU32;
+use crate::time::Duration;
+
+/// Wait for a futex_wake operation to wake us.
+///
+/// Returns directly if the futex doesn't hold the expected value.
+///
+/// Returns false on timeout, and true in all other cases.
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1);
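+ // `memory.atomic.wait32` returns 0 (woken), 1 (value mismatch), or 2
+ // (timed out); anything below 2 therefore counts as "not a timeout".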
+ unsafe {
+ wasm32::memory_atomic_wait32(
+ futex as *const AtomicU32 as *mut i32,
+ expected as i32,
+ timeout,
+ ) < 2
+ }
+}
+
+/// Wake up one thread that's blocked on futex_wait on this futex.
+///
+/// Returns true if this actually woke up such a thread,
+/// or false if no thread was waiting on this futex.
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ unsafe { wasm32::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, 1) > 0 }
+}
+
+/// Wake up all threads that are waiting on futex_wait on this futex.
+pub fn futex_wake_all(futex: &AtomicU32) {
+ unsafe {
+ wasm32::memory_atomic_notify(futex as *const AtomicU32 as *mut i32, i32::MAX as u32);
+ }
+}
diff --git a/library/std/src/sys/wasm/atomics/thread.rs b/library/std/src/sys/wasm/atomics/thread.rs
new file mode 100644
index 000000000..714b70492
--- /dev/null
+++ b/library/std/src/sys/wasm/atomics/thread.rs
@@ -0,0 +1,55 @@
+use crate::ffi::CStr;
+use crate::io;
+use crate::num::NonZeroUsize;
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct Thread(!);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ unsupported()
+ }
+
+ pub fn yield_now() {}
+
+ pub fn set_name(_name: &CStr) {}
+
+ pub fn sleep(dur: Duration) {
+ use crate::arch::wasm32;
+ use crate::cmp;
+
+ // Use an atomic wait to artificially block the current thread with the
+ // given timeout. Note that we should never be notified (return value
+ // 0) and our comparison should never fail (return value 1), so we
+ // should always resume execution through a timeout (return value 2).
+ let mut nanos = dur.as_nanos();
+ while nanos > 0 {
+ let amt = cmp::min(i64::MAX as u128, nanos);
+ let mut x = 0;
+ let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
+ debug_assert_eq!(val, 2);
+ nanos -= amt;
+ }
+ }
+
+ pub fn join(self) {}
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ unsupported()
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
diff --git a/library/std/src/sys/wasm/env.rs b/library/std/src/sys/wasm/env.rs
new file mode 100644
index 000000000..730e356d7
--- /dev/null
+++ b/library/std/src/sys/wasm/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "";
+ pub const OS: &str = "";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = ".wasm";
+ pub const DLL_EXTENSION: &str = "wasm";
+ pub const EXE_SUFFIX: &str = ".wasm";
+ pub const EXE_EXTENSION: &str = "wasm";
+}
diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
new file mode 100644
index 000000000..4159efe2a
--- /dev/null
+++ b/library/std/src/sys/wasm/mod.rs
@@ -0,0 +1,77 @@
+//! System bindings for the wasm/web platform
+//!
+//! This module contains the facade (aka platform-specific) implementations of
+//! OS level functionality for wasm. Note that this wasm is *not* the emscripten
+//! wasm, so we have no runtime here.
+//!
+//! This is all highly experimental and not actually intended for
+//! wide/production use yet; it's still in the experimental category and will
+//! likely change over time.
+//!
+//! Currently all functions here are basically stubs that immediately return
+//! errors. The hope is that with a portability lint we can eventually just
+//! remove all this and omit parts of the standard library if we're compiling
+//! for wasm. That way it's a compile-time error for something that's
+//! guaranteed to be a runtime error!
+
+#![deny(unsafe_op_in_unsafe_fn)]
+
+pub mod alloc;
+#[path = "../unsupported/args.rs"]
+pub mod args;
+#[path = "../unix/cmath.rs"]
+pub mod cmath;
+pub mod env;
+#[path = "../unsupported/fs.rs"]
+pub mod fs;
+#[path = "../unsupported/io.rs"]
+pub mod io;
+#[path = "../unsupported/net.rs"]
+pub mod net;
+#[path = "../unsupported/os.rs"]
+pub mod os;
+#[path = "../unix/os_str.rs"]
+pub mod os_str;
+#[path = "../unix/path.rs"]
+pub mod path;
+#[path = "../unsupported/pipe.rs"]
+pub mod pipe;
+#[path = "../unsupported/process.rs"]
+pub mod process;
+#[path = "../unsupported/stdio.rs"]
+pub mod stdio;
+#[path = "../unsupported/thread_local_dtor.rs"]
+pub mod thread_local_dtor;
+#[path = "../unsupported/thread_local_key.rs"]
+pub mod thread_local_key;
+#[path = "../unsupported/time.rs"]
+pub mod time;
+
+cfg_if::cfg_if! {
+ if #[cfg(target_feature = "atomics")] {
+ #[path = "../unix/locks"]
+ pub mod locks {
+ #![allow(unsafe_op_in_unsafe_fn)]
+ mod futex_condvar;
+ mod futex_mutex;
+ mod futex_rwlock;
+ pub(crate) use futex_condvar::{Condvar, MovableCondvar};
+ pub(crate) use futex_mutex::{Mutex, MovableMutex};
+ pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ }
+ #[path = "atomics/futex.rs"]
+ pub mod futex;
+ #[path = "atomics/thread.rs"]
+ pub mod thread;
+ } else {
+ #[path = "../unsupported/locks/mod.rs"]
+ pub mod locks;
+ #[path = "../unsupported/thread.rs"]
+ pub mod thread;
+ }
+}
+
+#[path = "../unsupported/common.rs"]
+#[deny(unsafe_op_in_unsafe_fn)]
+mod common;
+pub use common::*;
diff --git a/library/std/src/sys/windows/alloc.rs b/library/std/src/sys/windows/alloc.rs
new file mode 100644
index 000000000..fdc81cdea
--- /dev/null
+++ b/library/std/src/sys/windows/alloc.rs
@@ -0,0 +1,246 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::ffi::c_void;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering};
+use crate::sys::c;
+use crate::sys::common::alloc::{realloc_fallback, MIN_ALIGN};
+
+#[cfg(test)]
+mod tests;
+
+// Heap memory management on Windows is done by using the system Heap API (heapapi.h)
+// See https://docs.microsoft.com/windows/win32/api/heapapi/
+
+// Flag to indicate that the memory returned by `HeapAlloc` should be zeroed.
+const HEAP_ZERO_MEMORY: c::DWORD = 0x00000008;
+
+extern "system" {
+ // Get a handle to the default heap of the current process, or null if the operation fails.
+ //
+ // SAFETY: Successful calls to this function within the same process are assumed to
+ // always return the same handle, which remains valid for the entire lifetime of the process.
+ //
+ // See https://docs.microsoft.com/windows/win32/api/heapapi/nf-heapapi-getprocessheap
+ fn GetProcessHeap() -> c::HANDLE;
+
+ // Allocate a block of `dwBytes` bytes of memory from a given heap `hHeap`.
+ // The allocated memory may be uninitialized, or zeroed if `dwFlags` is
+ // set to `HEAP_ZERO_MEMORY`.
+ //
+ // Returns a pointer to the newly-allocated memory or null if the operation fails.
+ // The returned pointer will be aligned to at least `MIN_ALIGN`.
+ //
+ // SAFETY:
+ // - `hHeap` must be a non-null handle returned by `GetProcessHeap`.
+ // - `dwFlags` must be set to either zero or `HEAP_ZERO_MEMORY`.
+ //
+ // Note that `dwBytes` is allowed to be zero, contrary to some other allocators.
+ //
+ // See https://docs.microsoft.com/windows/win32/api/heapapi/nf-heapapi-heapalloc
+ fn HeapAlloc(hHeap: c::HANDLE, dwFlags: c::DWORD, dwBytes: c::SIZE_T) -> c::LPVOID;
+
+ // Reallocate a block of memory behind a given pointer `lpMem` from a given heap `hHeap`,
+ // to a block of at least `dwBytes` bytes, either shrinking the block in place,
+ // or allocating at a new location, copying memory, and freeing the original location.
+ //
+ // Returns a pointer to the reallocated memory or null if the operation fails.
+ // The returned pointer will be aligned to at least `MIN_ALIGN`.
+ // If the operation fails the given block will never have been freed.
+ //
+ // SAFETY:
+ // - `hHeap` must be a non-null handle returned by `GetProcessHeap`.
+ // - `dwFlags` must be set to zero.
+ // - `lpMem` must be a non-null pointer to an allocated block returned by `HeapAlloc` or
+ // `HeapReAlloc`, that has not already been freed.
+ // If the block was successfully reallocated at a new location, pointers pointing to
+ // the freed memory, such as `lpMem`, must not be dereferenced ever again.
+ //
+ // Note that `dwBytes` is allowed to be zero, contrary to some other allocators.
+ //
+ // See https://docs.microsoft.com/windows/win32/api/heapapi/nf-heapapi-heaprealloc
+ fn HeapReAlloc(
+ hHeap: c::HANDLE,
+ dwFlags: c::DWORD,
+ lpMem: c::LPVOID,
+ dwBytes: c::SIZE_T,
+ ) -> c::LPVOID;
+
+ // Free a block of memory behind a given pointer `lpMem` from a given heap `hHeap`.
+ // Returns a nonzero value if the operation is successful, and zero if the operation fails.
+ //
+ // SAFETY:
+ // - `hHeap` must be a non-null handle returned by `GetProcessHeap`.
+ // - `dwFlags` must be set to zero.
+ // - `lpMem` must be a pointer to an allocated block returned by `HeapAlloc` or `HeapReAlloc`,
+ // that has not already been freed.
+ // If the block was successfully freed, pointers pointing to the freed memory, such as `lpMem`,
+ // must not be dereferenced ever again.
+ //
+ // Note that `lpMem` is allowed to be null, which will not cause the operation to fail.
+ //
+ // See https://docs.microsoft.com/windows/win32/api/heapapi/nf-heapapi-heapfree
+ fn HeapFree(hHeap: c::HANDLE, dwFlags: c::DWORD, lpMem: c::LPVOID) -> c::BOOL;
+}
+
+// Cached handle to the default heap of the current process.
+// Either a non-null handle returned by `GetProcessHeap`, or null when not yet initialized or `GetProcessHeap` failed.
+static HEAP: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
+
+// Get a handle to the default heap of the current process, or null if the operation fails.
+// If this operation is successful, `HEAP` will be successfully initialized and contain
+// a non-null handle returned by `GetProcessHeap`.
+#[inline]
+fn init_or_get_process_heap() -> c::HANDLE {
+ let heap = HEAP.load(Ordering::Relaxed);
+ if heap.is_null() {
+ // `HEAP` has not yet been successfully initialized
+ let heap = unsafe { GetProcessHeap() };
+ if !heap.is_null() {
+ // SAFETY: No locking is needed because within the same process,
+ // successful calls to `GetProcessHeap` will always return the same value, even on different threads.
+ HEAP.store(heap, Ordering::Release);
+
+ // SAFETY: `HEAP` contains a non-null handle returned by `GetProcessHeap`
+ heap
+ } else {
+ // Could not get the current process heap.
+ ptr::null_mut()
+ }
+ } else {
+ // SAFETY: `HEAP` contains a non-null handle returned by `GetProcessHeap`
+ heap
+ }
+}
+
+// Get a non-null handle to the default heap of the current process.
+// SAFETY: `HEAP` must have been successfully initialized.
+#[inline]
+unsafe fn get_process_heap() -> c::HANDLE {
+ HEAP.load(Ordering::Acquire)
+}
+
+// Header containing a pointer to the start of an allocated block.
+// SAFETY: Size and alignment must be <= `MIN_ALIGN`.
+#[repr(C)]
+struct Header(*mut u8);
+
+// Allocate a block of optionally zeroed memory for a given `layout`.
+// SAFETY: Returns a pointer satisfying the guarantees of `System` about allocated pointers,
+// or null if the operation fails. If this returns non-null `HEAP` will have been successfully
+// initialized.
+#[inline]
+unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
+ let heap = init_or_get_process_heap();
+ if heap.is_null() {
+ // Allocation has failed, could not get the current process heap.
+ return ptr::null_mut();
+ }
+
+ // Allocated memory will be either zeroed or uninitialized.
+ let flags = if zeroed { HEAP_ZERO_MEMORY } else { 0 };
+
+ if layout.align() <= MIN_ALIGN {
+ // SAFETY: `heap` is a non-null handle returned by `GetProcessHeap`.
+ // The returned pointer points to the start of an allocated block.
+ unsafe { HeapAlloc(heap, flags, layout.size()) as *mut u8 }
+ } else {
+ // Allocate extra padding in order to be able to satisfy the alignment.
+ let total = layout.align() + layout.size();
+
+ // SAFETY: `heap` is a non-null handle returned by `GetProcessHeap`.
+ let ptr = unsafe { HeapAlloc(heap, flags, total) as *mut u8 };
+ if ptr.is_null() {
+ // Allocation has failed.
+ return ptr::null_mut();
+ }
+
+ // Create a correctly aligned pointer offset from the start of the allocated block,
+ // and write a header before it.
+
+ let offset = layout.align() - (ptr.addr() & (layout.align() - 1));
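+ // For example, if `MIN_ALIGN` were 16, `layout.align()` were 64 and `ptr.addr() % 64 == 48`,
+ // then `offset` would be 16 and `aligned` would land on the next 64-byte boundary.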
+ // SAFETY: `MIN_ALIGN` <= `offset` <= `layout.align()` and the size of the allocated
+ // block is `layout.align() + layout.size()`. `aligned` will thus be a correctly aligned
+ // pointer inside the allocated block with at least `layout.size()` bytes after it and at
+ // least `MIN_ALIGN` bytes of padding before it.
+ let aligned = unsafe { ptr.add(offset) };
+ // SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned`
+ // is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before
+ // it, it is safe to write a header directly before it.
+ unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) };
+
+ // SAFETY: The returned pointer does not point to the start of the allocated block,
+ // but there is a header readable directly before it containing the location of the start
+ // of the block.
+ aligned
+ }
+}
+
+// All pointers returned by this allocator have, in addition to the guarantees of `GlobalAlloc`, the
+// following properties:
+//
+// If the pointer was allocated or reallocated with a `layout` specifying an alignment <= `MIN_ALIGN`
+// the pointer will be aligned to at least `MIN_ALIGN` and point to the start of the allocated block.
+//
+// If the pointer was allocated or reallocated with a `layout` specifying an alignment > `MIN_ALIGN`
+// the pointer will be aligned to the specified alignment and not point to the start of the allocated block.
+// Instead there will be a header readable directly before the returned pointer, containing the actual
+// location of the start of the block.
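+//
+// As a rough illustration (assuming, for instance, that `MIN_ALIGN` is 16): for a `layout` with
+// alignment 64, `allocate` requests `64 + layout.size()` bytes from `HeapAlloc`, returns the first
+// 64-byte boundary strictly past the start of that block, and stores a `Header` holding the block's
+// true start in the (at least `MIN_ALIGN` bytes of) padding directly before the returned pointer;
+// `dealloc` reads that header back to recover the start of the block.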
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: Pointers returned by `allocate` satisfy the guarantees of `System`
+ let zeroed = false;
+ unsafe { allocate(layout, zeroed) }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: Pointers returned by `allocate` satisfy the guarantees of `System`
+ let zeroed = true;
+ unsafe { allocate(layout, zeroed) }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ let block = {
+ if layout.align() <= MIN_ALIGN {
+ ptr
+ } else {
+ // The location of the start of the block is stored in the padding before `ptr`.
+
+ // SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null
+ // and have a header readable directly before it.
+ unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 }
+ }
+ };
+
+ // SAFETY: because `ptr` has been successfully allocated with this allocator,
+ // `HEAP` must have been successfully initialized.
+ let heap = unsafe { get_process_heap() };
+
+ // SAFETY: `heap` is a non-null handle returned by `GetProcessHeap`,
+ // `block` is a pointer to the start of an allocated block.
+ unsafe { HeapFree(heap, 0, block as c::LPVOID) };
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN {
+ // SAFETY: because `ptr` has been successfully allocated with this allocator,
+ // `HEAP` must have been successfully initialized.
+ let heap = unsafe { get_process_heap() };
+
+ // SAFETY: `heap` is a non-null handle returned by `GetProcessHeap`,
+ // `ptr` is a pointer to the start of an allocated block.
+ // The returned pointer points to the start of an allocated block.
+ unsafe { HeapReAlloc(heap, 0, ptr as c::LPVOID, new_size) as *mut u8 }
+ } else {
+ // SAFETY: `realloc_fallback` is implemented using `dealloc` and `alloc`, which will
+ // correctly handle `ptr` and return a pointer satisfying the guarantees of `System`
+ unsafe { realloc_fallback(self, ptr, layout, new_size) }
+ }
+ }
+}
diff --git a/library/std/src/sys/windows/alloc/tests.rs b/library/std/src/sys/windows/alloc/tests.rs
new file mode 100644
index 000000000..674a3e1d9
--- /dev/null
+++ b/library/std/src/sys/windows/alloc/tests.rs
@@ -0,0 +1,9 @@
+use super::{Header, MIN_ALIGN};
+use crate::mem;
+
+#[test]
+fn alloc_header() {
+ // Header must fit in the padding before an aligned pointer
+ assert!(mem::size_of::<Header>() <= MIN_ALIGN);
+ assert!(mem::align_of::<Header>() <= MIN_ALIGN);
+}
diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs
new file mode 100644
index 000000000..01f262982
--- /dev/null
+++ b/library/std/src/sys/windows/args.rs
@@ -0,0 +1,406 @@
+//! The Windows command line is just a string
+//! <https://docs.microsoft.com/en-us/archive/blogs/larryosterman/the-windows-command-line-is-just-a-string>
+//!
+//! This module implements the parsing necessary to turn that string into a list of arguments.
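+//!
+//! For example (see `tests.rs` below), the command line `EXE "a b c" d e` is parsed into the
+//! four arguments `EXE`, `a b c`, `d` and `e`.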
+
+#[cfg(test)]
+mod tests;
+
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::num::NonZeroU16;
+use crate::os::windows::prelude::*;
+use crate::path::PathBuf;
+use crate::ptr::NonNull;
+use crate::sys::c;
+use crate::sys::process::ensure_no_nuls;
+use crate::sys::windows::os::current_exe;
+use crate::vec;
+
+use core::iter;
+
+/// This is the const equivalent to `NonZeroU16::new(n).unwrap()`
+///
+/// FIXME: This can be removed once `Option::unwrap` is stably const.
+/// See the `const_option` feature (#67441).
+const fn non_zero_u16(n: u16) -> NonZeroU16 {
+ match NonZeroU16::new(n) {
+ Some(n) => n,
+ None => panic!("called `unwrap` on a `None` value"),
+ }
+}
+
+pub fn args() -> Args {
+ // SAFETY: `GetCommandLineW` returns a pointer to a null terminated UTF-16
+ // string so it's safe for `WStrUnits` to use.
+ unsafe {
+ let lp_cmd_line = c::GetCommandLineW();
+ let parsed_args_list = parse_lp_cmd_line(WStrUnits::new(lp_cmd_line), || {
+ current_exe().map(PathBuf::into_os_string).unwrap_or_else(|_| OsString::new())
+ });
+
+ Args { parsed_args_list: parsed_args_list.into_iter() }
+ }
+}
+
+/// Implements the Windows command-line argument parsing algorithm.
+///
+/// Microsoft's documentation for the Windows CLI argument format can be found at
+/// <https://docs.microsoft.com/en-us/cpp/cpp/main-function-command-line-args?view=msvc-160#parsing-c-command-line-arguments>
+///
+/// A more in-depth explanation is here:
+/// <https://daviddeley.com/autohotkey/parameters/parameters.htm#WIN>
+///
+/// Windows includes a function to do command line parsing in shell32.dll.
+/// However, this is not used for two reasons:
+///
+/// 1. Linking with that DLL causes the process to be registered as a GUI application.
+/// GUI applications add a bunch of overhead, even if no windows are drawn. See
+/// <https://randomascii.wordpress.com/2018/12/03/a-not-called-function-can-cause-a-5x-slowdown/>.
+///
+/// 2. It does not follow the modern C/C++ argv rules outlined in the first two links above.
+///
+/// This function was tested for equivalence to the C/C++ parsing rules using an
+/// extensive test suite available at
+/// <https://github.com/ChrisDenton/winarg/tree/std>.
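+///
+/// Note that quotes in the program name itself cannot be escaped; for example, the command line
+/// `"EXE check"` yields the single argument `EXE check` (see `args/tests.rs`).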
+fn parse_lp_cmd_line<'a, F: Fn() -> OsString>(
+ lp_cmd_line: Option<WStrUnits<'a>>,
+ exe_name: F,
+) -> Vec<OsString> {
+ const BACKSLASH: NonZeroU16 = non_zero_u16(b'\\' as u16);
+ const QUOTE: NonZeroU16 = non_zero_u16(b'"' as u16);
+ const TAB: NonZeroU16 = non_zero_u16(b'\t' as u16);
+ const SPACE: NonZeroU16 = non_zero_u16(b' ' as u16);
+
+ let mut ret_val = Vec::new();
+ // If the cmd line pointer is null or it points to an empty string then
+ // return the name of the executable as argv[0].
+ if lp_cmd_line.as_ref().and_then(|cmd| cmd.peek()).is_none() {
+ ret_val.push(exe_name());
+ return ret_val;
+ }
+ let mut code_units = lp_cmd_line.unwrap();
+
+ // The executable name at the beginning is special.
+ let mut in_quotes = false;
+ let mut cur = Vec::new();
+ for w in &mut code_units {
+ match w {
+ // A quote mark always toggles `in_quotes` no matter what because
+ // there are no escape characters when parsing the executable name.
+ QUOTE => in_quotes = !in_quotes,
+ // If not `in_quotes` then whitespace ends argv[0].
+ SPACE | TAB if !in_quotes => break,
+ // In all other cases the code unit is taken literally.
+ _ => cur.push(w.get()),
+ }
+ }
+ // Skip whitespace.
+ code_units.advance_while(|w| w == SPACE || w == TAB);
+ ret_val.push(OsString::from_wide(&cur));
+
+ // Parse the arguments according to these rules:
+ // * All code units are taken literally except space, tab, quote and backslash.
+ // * When not `in_quotes`, space and tab separate arguments. Consecutive spaces and tabs are
+ // treated as a single separator.
+ // * A space or tab `in_quotes` is taken literally.
+ // * A quote toggles `in_quotes` mode unless it's escaped. An escaped quote is taken literally.
+ // * A quote can be escaped if preceded by an odd number of backslashes.
+ // * If any number of backslashes is immediately followed by a quote then the number of
+ // backslashes is halved (rounding down).
+ // * Backslashes not followed by a quote are all taken literally.
+ // * If `in_quotes` then a quote can also be escaped using another quote
+ // (i.e. two consecutive quotes become one literal quote).
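+ //
+ // Worked example (one of the official examples, also exercised by the tests): after the
+ // executable name, `a\\\"b c d` yields the arguments `a\"b`, `c` and `d`; the three backslashes
+ // before the quote collapse to one literal backslash plus an escaped, literal quote.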
+ let mut cur = Vec::new();
+ let mut in_quotes = false;
+ while let Some(w) = code_units.next() {
+ match w {
+ // If not `in_quotes`, a space or tab ends the argument.
+ SPACE | TAB if !in_quotes => {
+ ret_val.push(OsString::from_wide(&cur[..]));
+ cur.truncate(0);
+
+ // Skip whitespace.
+ code_units.advance_while(|w| w == SPACE || w == TAB);
+ }
+ // Backslashes can escape quotes or backslashes but only if consecutive backslashes are followed by a quote.
+ BACKSLASH => {
+ let backslash_count = code_units.advance_while(|w| w == BACKSLASH) + 1;
+ if code_units.peek() == Some(QUOTE) {
+ cur.extend(iter::repeat(BACKSLASH.get()).take(backslash_count / 2));
+ // The quote is escaped if there are an odd number of backslashes.
+ if backslash_count % 2 == 1 {
+ code_units.next();
+ cur.push(QUOTE.get());
+ }
+ } else {
+ // If there is no quote on the end then there is no escaping.
+ cur.extend(iter::repeat(BACKSLASH.get()).take(backslash_count));
+ }
+ }
+ // If `in_quotes` and not backslash escaped (see above) then a quote either
+ // unsets `in_quotes` or is escaped by another quote.
+ QUOTE if in_quotes => match code_units.peek() {
+ // Two consecutive quotes when `in_quotes` produces one literal quote.
+ Some(QUOTE) => {
+ cur.push(QUOTE.get());
+ code_units.next();
+ }
+ // Otherwise unset `in_quotes`.
+ Some(_) => in_quotes = false,
+ // The end of the command line.
+ // Push `cur` even if empty, which we do by breaking while `in_quotes` is still set.
+ None => break,
+ },
+ // If not `in_quotes` and not BACKSLASH escaped (see above) then a quote sets `in_quotes`.
+ QUOTE => in_quotes = true,
+ // Everything else is always taken literally.
+ _ => cur.push(w.get()),
+ }
+ }
+ // Push the final argument, if any.
+ if !cur.is_empty() || in_quotes {
+ ret_val.push(OsString::from_wide(&cur[..]));
+ }
+ ret_val
+}
+
+pub struct Args {
+ parsed_args_list: vec::IntoIter<OsString>,
+}
+
+impl fmt::Debug for Args {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.parsed_args_list.as_slice().fmt(f)
+ }
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ self.parsed_args_list.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.parsed_args_list.size_hint()
+ }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.parsed_args_list.next_back()
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize {
+ self.parsed_args_list.len()
+ }
+}
+
+/// A safe iterator over a LPWSTR
+/// (aka a pointer to a series of UTF-16 code units terminated by a NULL).
+struct WStrUnits<'a> {
+ // The pointer must never be null...
+ lpwstr: NonNull<u16>,
+ // ...and the memory it points to must be valid for this lifetime.
+ lifetime: PhantomData<&'a [u16]>,
+}
+impl WStrUnits<'_> {
+ /// Create the iterator. Returns `None` if `lpwstr` is null.
+ ///
+ /// SAFETY: `lpwstr` must point to a null-terminated wide string that lives
+ /// at least as long as the lifetime of this struct.
+ unsafe fn new(lpwstr: *const u16) -> Option<Self> {
+ Some(Self { lpwstr: NonNull::new(lpwstr as _)?, lifetime: PhantomData })
+ }
+ fn peek(&self) -> Option<NonZeroU16> {
+ // SAFETY: It's always safe to read the current item because we don't
+ // ever move out of the array's bounds.
+ unsafe { NonZeroU16::new(*self.lpwstr.as_ptr()) }
+ }
+ /// Advance the iterator while `predicate` returns true.
+ /// Returns the number of items it advanced by.
+ fn advance_while<P: FnMut(NonZeroU16) -> bool>(&mut self, mut predicate: P) -> usize {
+ let mut counter = 0;
+ while let Some(w) = self.peek() {
+ if !predicate(w) {
+ break;
+ }
+ counter += 1;
+ self.next();
+ }
+ counter
+ }
+}
+impl Iterator for WStrUnits<'_> {
+ // This can never return zero as that marks the end of the string.
+ type Item = NonZeroU16;
+ fn next(&mut self) -> Option<NonZeroU16> {
+ // SAFETY: If NULL is reached we immediately return.
+ // Therefore it's safe to advance the pointer after that.
+ unsafe {
+ let next = self.peek()?;
+ self.lpwstr = NonNull::new_unchecked(self.lpwstr.as_ptr().add(1));
+ Some(next)
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(crate) enum Arg {
+ /// Add quotes (if needed)
+ Regular(OsString),
+ /// Append raw string without quoting
+ Raw(OsString),
+}
+
+enum Quote {
+ // Every arg is quoted
+ Always,
+ // Whitespace and empty args are quoted
+ Auto,
+ // Arg appended without any changes (#29494)
+ Never,
+}
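+
+// For example, under `Quote::Auto` an argument containing whitespace, such as `ab"c d`, is
+// appended as `"ab\"c d"` (quoted, with the interior quote escaped by a backslash), and an
+// empty argument is appended as `""` so that it is not lost when re-parsed.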
+
+pub(crate) fn append_arg(cmd: &mut Vec<u16>, arg: &Arg, force_quotes: bool) -> io::Result<()> {
+ let (arg, quote) = match arg {
+ Arg::Regular(arg) => (arg, if force_quotes { Quote::Always } else { Quote::Auto }),
+ Arg::Raw(arg) => (arg, Quote::Never),
+ };
+
+ // If an argument has 0 characters then we need to quote it to ensure
+ // that it actually gets passed through on the command line. Otherwise
+ // it will be dropped entirely when parsed on the other end.
+ ensure_no_nuls(arg)?;
+ let arg_bytes = arg.bytes();
+ let (quote, escape) = match quote {
+ Quote::Always => (true, true),
+ Quote::Auto => {
+ (arg_bytes.iter().any(|c| *c == b' ' || *c == b'\t') || arg_bytes.is_empty(), true)
+ }
+ Quote::Never => (false, false),
+ };
+ if quote {
+ cmd.push('"' as u16);
+ }
+
+ let mut backslashes: usize = 0;
+ for x in arg.encode_wide() {
+ if escape {
+ if x == '\\' as u16 {
+ backslashes += 1;
+ } else {
+ if x == '"' as u16 {
+ // Add n+1 backslashes to total 2n+1 before internal '"'.
+ cmd.extend((0..=backslashes).map(|_| '\\' as u16));
+ }
+ backslashes = 0;
+ }
+ }
+ cmd.push(x);
+ }
+
+ if quote {
+ // Add n backslashes to total 2n before ending '"'.
+ cmd.extend((0..backslashes).map(|_| '\\' as u16));
+ cmd.push('"' as u16);
+ }
+ Ok(())
+}
+
+pub(crate) fn make_bat_command_line(
+ script: &[u16],
+ args: &[Arg],
+ force_quotes: bool,
+) -> io::Result<Vec<u16>> {
+ // Set the start of the command line to `cmd.exe /c "`
+ // It is necessary to surround the command in an extra pair of quotes,
+ // hence the trailing quote here. It will be closed after all arguments
+ // have been added.
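+ //
+ // For example, for a script `C:\run.bat` with the single regular argument `hello world`, the
+ // resulting command line is `cmd.exe /c ""C:\run.bat" "hello world""`.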
+ let mut cmd: Vec<u16> = "cmd.exe /c \"".encode_utf16().collect();
+
+ // Push the script name surrounded by its quote pair.
+ cmd.push(b'"' as u16);
+ // Windows file names cannot contain a `"` character or end with `\`.
+ // If the script name does, return an error.
+ if script.contains(&(b'"' as u16)) || script.last() == Some(&(b'\\' as u16)) {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "Windows file names may not contain `\"` or end with `\\`"
+ ));
+ }
+ cmd.extend_from_slice(script.strip_suffix(&[0]).unwrap_or(script));
+ cmd.push(b'"' as u16);
+
+ // Append the arguments.
+ // FIXME: This needs tests to ensure that the arguments are properly
+ // reconstructed by the batch script by default.
+ for arg in args {
+ cmd.push(' ' as u16);
+ append_arg(&mut cmd, arg, force_quotes)?;
+ }
+
+ // Close the quote we left opened earlier.
+ cmd.push(b'"' as u16);
+
+ Ok(cmd)
+}
+
+/// Takes a path and tries to return a non-verbatim path.
+///
+/// This is necessary because cmd.exe does not support verbatim paths.
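+///
+/// For example, the verbatim path `\\?\C:\Temp\test.bat` is returned as `C:\Temp\test.bat`,
+/// provided the shorter form still refers to the same location.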
+pub(crate) fn to_user_path(mut path: Vec<u16>) -> io::Result<Vec<u16>> {
+ use crate::ptr;
+ use crate::sys::windows::fill_utf16_buf;
+
+ // UTF-16 encoded code points, used in parsing and building UTF-16 paths.
+ // All of these are in the ASCII range so they can be cast directly to `u16`.
+ const SEP: u16 = b'\\' as _;
+ const QUERY: u16 = b'?' as _;
+ const COLON: u16 = b':' as _;
+ const U: u16 = b'U' as _;
+ const N: u16 = b'N' as _;
+ const C: u16 = b'C' as _;
+
+ // Early return if the path is too long to remove the verbatim prefix.
+ const LEGACY_MAX_PATH: usize = 260;
+ if path.len() > LEGACY_MAX_PATH {
+ return Ok(path);
+ }
+
+ match &path[..] {
+ // `\\?\C:\...` => `C:\...`
+ [SEP, SEP, QUERY, SEP, _, COLON, SEP, ..] => unsafe {
+ let lpfilename = path[4..].as_ptr();
+ fill_utf16_buf(
+ |buffer, size| c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut()),
+ |full_path: &[u16]| {
+ if full_path == &path[4..path.len() - 1] { full_path.into() } else { path }
+ },
+ )
+ },
+ // `\\?\UNC\...` => `\\...`
+ [SEP, SEP, QUERY, SEP, U, N, C, SEP, ..] => unsafe {
+ // Change the `C` in `UNC\` to `\` so we can get a slice that starts with `\\`.
+ path[6] = b'\\' as u16;
+ let lpfilename = path[6..].as_ptr();
+ fill_utf16_buf(
+ |buffer, size| c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut()),
+ |full_path: &[u16]| {
+ if full_path == &path[6..path.len() - 1] {
+ full_path.into()
+ } else {
+ // Restore the 'C' in "UNC".
+ path[6] = b'C' as u16;
+ path
+ }
+ },
+ )
+ },
+ // For everything else, leave the path unchanged.
+ _ => Ok(path),
+ }
+}
diff --git a/library/std/src/sys/windows/args/tests.rs b/library/std/src/sys/windows/args/tests.rs
new file mode 100644
index 000000000..82c32d08c
--- /dev/null
+++ b/library/std/src/sys/windows/args/tests.rs
@@ -0,0 +1,91 @@
+use crate::ffi::OsString;
+use crate::sys::windows::args::*;
+
+fn chk(string: &str, parts: &[&str]) {
+ let mut wide: Vec<u16> = OsString::from(string).encode_wide().collect();
+ wide.push(0);
+ let parsed =
+ unsafe { parse_lp_cmd_line(WStrUnits::new(wide.as_ptr()), || OsString::from("TEST.EXE")) };
+ let expected: Vec<OsString> = parts.iter().map(|k| OsString::from(k)).collect();
+ assert_eq!(parsed.as_slice(), expected.as_slice(), "{:?}", string);
+}
+
+#[test]
+fn empty() {
+ chk("", &["TEST.EXE"]);
+ chk("\0", &["TEST.EXE"]);
+}
+
+#[test]
+fn single_words() {
+ chk("EXE one_word", &["EXE", "one_word"]);
+ chk("EXE a", &["EXE", "a"]);
+ chk("EXE 😅", &["EXE", "😅"]);
+ chk("EXE 😅🤦", &["EXE", "😅🤦"]);
+}
+
+#[test]
+fn official_examples() {
+ chk(r#"EXE "abc" d e"#, &["EXE", "abc", "d", "e"]);
+ chk(r#"EXE a\\\b d"e f"g h"#, &["EXE", r"a\\\b", "de fg", "h"]);
+ chk(r#"EXE a\\\"b c d"#, &["EXE", r#"a\"b"#, "c", "d"]);
+ chk(r#"EXE a\\\\"b c" d e"#, &["EXE", r"a\\b c", "d", "e"]);
+}
+
+#[test]
+fn whitespace_behavior() {
+ chk(" test", &["", "test"]);
+ chk(" test", &["", "test"]);
+ chk(" test test2", &["", "test", "test2"]);
+ chk(" test test2", &["", "test", "test2"]);
+ chk("test test2 ", &["test", "test2"]);
+ chk("test test2 ", &["test", "test2"]);
+ chk("test ", &["test"]);
+}
+
+#[test]
+fn genius_quotes() {
+ chk(r#"EXE "" """#, &["EXE", "", ""]);
+ chk(r#"EXE "" """"#, &["EXE", "", r#"""#]);
+ chk(
+ r#"EXE "this is """all""" in the same argument""#,
+ &["EXE", r#"this is "all" in the same argument"#],
+ );
+ chk(r#"EXE "a"""#, &["EXE", r#"a""#]);
+ chk(r#"EXE "a"" a"#, &["EXE", r#"a" a"#]);
+ // quotes cannot be escaped in command names
+ chk(r#""EXE" check"#, &["EXE", "check"]);
+ chk(r#""EXE check""#, &["EXE check"]);
+ chk(r#""EXE """for""" check"#, &["EXE for check"]);
+ chk(r#""EXE \"for\" check"#, &[r"EXE \for\ check"]);
+ chk(r#""EXE \" for \" check"#, &[r"EXE \", "for", r#"""#, "check"]);
+ chk(r#"E"X"E test"#, &["EXE", "test"]);
+ chk(r#"EX""E test"#, &["EXE", "test"]);
+}
+
+// from https://daviddeley.com/autohotkey/parameters/parameters.htm#WINCRULESEX
+#[test]
+fn post_2008() {
+ chk("EXE CallMeIshmael", &["EXE", "CallMeIshmael"]);
+ chk(r#"EXE "Call Me Ishmael""#, &["EXE", "Call Me Ishmael"]);
+ chk(r#"EXE Cal"l Me I"shmael"#, &["EXE", "Call Me Ishmael"]);
+ chk(r#"EXE CallMe\"Ishmael"#, &["EXE", r#"CallMe"Ishmael"#]);
+ chk(r#"EXE "CallMe\"Ishmael""#, &["EXE", r#"CallMe"Ishmael"#]);
+ chk(r#"EXE "Call Me Ishmael\\""#, &["EXE", r"Call Me Ishmael\"]);
+ chk(r#"EXE "CallMe\\\"Ishmael""#, &["EXE", r#"CallMe\"Ishmael"#]);
+ chk(r#"EXE a\\\b"#, &["EXE", r"a\\\b"]);
+ chk(r#"EXE "a\\\b""#, &["EXE", r"a\\\b"]);
+ chk(r#"EXE "\"Call Me Ishmael\"""#, &["EXE", r#""Call Me Ishmael""#]);
+ chk(r#"EXE "C:\TEST A\\""#, &["EXE", r"C:\TEST A\"]);
+ chk(r#"EXE "\"C:\TEST A\\\"""#, &["EXE", r#""C:\TEST A\""#]);
+ chk(r#"EXE "a b c" d e"#, &["EXE", "a b c", "d", "e"]);
+ chk(r#"EXE "ab\"c" "\\" d"#, &["EXE", r#"ab"c"#, r"\", "d"]);
+ chk(r#"EXE a\\\b d"e f"g h"#, &["EXE", r"a\\\b", "de fg", "h"]);
+ chk(r#"EXE a\\\"b c d"#, &["EXE", r#"a\"b"#, "c", "d"]);
+ chk(r#"EXE a\\\\"b c" d e"#, &["EXE", r"a\\b c", "d", "e"]);
+ // Double Double Quotes
+ chk(r#"EXE "a b c"""#, &["EXE", r#"a b c""#]);
+ chk(r#"EXE """CallMeIshmael""" b c"#, &["EXE", r#""CallMeIshmael""#, "b", "c"]);
+ chk(r#"EXE """Call Me Ishmael""""#, &["EXE", r#""Call Me Ishmael""#]);
+ chk(r#"EXE """"Call Me Ishmael"" b c"#, &["EXE", r#""Call"#, "Me", "Ishmael", "b", "c"]);
+}
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
new file mode 100644
index 000000000..478068c73
--- /dev/null
+++ b/library/std/src/sys/windows/c.rs
@@ -0,0 +1,1340 @@
+//! C definitions used by libnative that don't belong in liblibc
+
+#![allow(nonstandard_style)]
+#![cfg_attr(test, allow(dead_code))]
+#![unstable(issue = "none", feature = "windows_c")]
+
+use crate::ffi::CStr;
+use crate::mem;
+use crate::os::raw::{c_char, c_int, c_long, c_longlong, c_uint, c_ulong, c_ushort};
+use crate::os::windows::io::{BorrowedHandle, HandleOrInvalid, HandleOrNull};
+use crate::ptr;
+use core::ffi::NonZero_c_ulong;
+
+use libc::{c_void, size_t, wchar_t};
+
+#[path = "c/errors.rs"] // c.rs is included from two places so we need to specify this
+mod errors;
+pub use errors::*;
+
+pub use self::EXCEPTION_DISPOSITION::*;
+pub use self::FILE_INFO_BY_HANDLE_CLASS::*;
+
+pub type DWORD_PTR = ULONG_PTR;
+pub type DWORD = c_ulong;
+pub type NonZeroDWORD = NonZero_c_ulong;
+pub type HANDLE = LPVOID;
+pub type HINSTANCE = HANDLE;
+pub type HMODULE = HINSTANCE;
+pub type HRESULT = LONG;
+pub type BOOL = c_int;
+pub type BYTE = u8;
+pub type BOOLEAN = BYTE;
+pub type GROUP = c_uint;
+pub type LARGE_INTEGER = c_longlong;
+pub type LONG = c_long;
+pub type UINT = c_uint;
+pub type WCHAR = u16;
+pub type USHORT = c_ushort;
+pub type SIZE_T = usize;
+pub type WORD = u16;
+pub type CHAR = c_char;
+pub type CCHAR = c_char;
+pub type ULONG_PTR = usize;
+pub type ULONG = c_ulong;
+pub type NTSTATUS = LONG;
+pub type ACCESS_MASK = DWORD;
+
+pub type LPBOOL = *mut BOOL;
+pub type LPBYTE = *mut BYTE;
+pub type LPCSTR = *const CHAR;
+pub type LPCWSTR = *const WCHAR;
+pub type LPDWORD = *mut DWORD;
+pub type LPHANDLE = *mut HANDLE;
+pub type LPOVERLAPPED = *mut OVERLAPPED;
+pub type LPPROCESS_INFORMATION = *mut PROCESS_INFORMATION;
+pub type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
+pub type LPSTARTUPINFO = *mut STARTUPINFO;
+pub type LPVOID = *mut c_void;
+pub type LPWCH = *mut WCHAR;
+pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
+pub type LPWSADATA = *mut WSADATA;
+pub type LPWSAPROTOCOL_INFO = *mut WSAPROTOCOL_INFO;
+pub type LPWSTR = *mut WCHAR;
+pub type LPFILETIME = *mut FILETIME;
+pub type LPSYSTEM_INFO = *mut SYSTEM_INFO;
+pub type LPWSABUF = *mut WSABUF;
+pub type LPWSAOVERLAPPED = *mut c_void;
+pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *mut c_void;
+
+pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
+pub type PLARGE_INTEGER = *mut c_longlong;
+pub type PSRWLOCK = *mut SRWLOCK;
+
+pub type SOCKET = crate::os::windows::raw::SOCKET;
+pub type socklen_t = c_int;
+pub type ADDRESS_FAMILY = USHORT;
+
+pub const TRUE: BOOL = 1;
+pub const FALSE: BOOL = 0;
+
+pub const CSTR_LESS_THAN: c_int = 1;
+pub const CSTR_EQUAL: c_int = 2;
+pub const CSTR_GREATER_THAN: c_int = 3;
+
+pub const FILE_ATTRIBUTE_READONLY: DWORD = 0x1;
+pub const FILE_ATTRIBUTE_DIRECTORY: DWORD = 0x10;
+pub const FILE_ATTRIBUTE_REPARSE_POINT: DWORD = 0x400;
+pub const INVALID_FILE_ATTRIBUTES: DWORD = DWORD::MAX;
+
+pub const FILE_SHARE_DELETE: DWORD = 0x4;
+pub const FILE_SHARE_READ: DWORD = 0x1;
+pub const FILE_SHARE_WRITE: DWORD = 0x2;
+
+pub const FILE_OPEN: ULONG = 0x00000001;
+pub const FILE_OPEN_REPARSE_POINT: ULONG = 0x200000;
+pub const OBJ_DONT_REPARSE: ULONG = 0x1000;
+
+pub const CREATE_ALWAYS: DWORD = 2;
+pub const CREATE_NEW: DWORD = 1;
+pub const OPEN_ALWAYS: DWORD = 4;
+pub const OPEN_EXISTING: DWORD = 3;
+pub const TRUNCATE_EXISTING: DWORD = 5;
+
+pub const FILE_LIST_DIRECTORY: DWORD = 0x1;
+pub const FILE_WRITE_DATA: DWORD = 0x00000002;
+pub const FILE_APPEND_DATA: DWORD = 0x00000004;
+pub const FILE_WRITE_EA: DWORD = 0x00000010;
+pub const FILE_WRITE_ATTRIBUTES: DWORD = 0x00000100;
+pub const DELETE: DWORD = 0x10000;
+pub const READ_CONTROL: DWORD = 0x00020000;
+pub const SYNCHRONIZE: DWORD = 0x00100000;
+pub const GENERIC_READ: DWORD = 0x80000000;
+pub const GENERIC_WRITE: DWORD = 0x40000000;
+pub const STANDARD_RIGHTS_WRITE: DWORD = READ_CONTROL;
+pub const FILE_GENERIC_WRITE: DWORD = STANDARD_RIGHTS_WRITE
+ | FILE_WRITE_DATA
+ | FILE_WRITE_ATTRIBUTES
+ | FILE_WRITE_EA
+ | FILE_APPEND_DATA
+ | SYNCHRONIZE;
+
+pub const FILE_FLAG_OPEN_REPARSE_POINT: DWORD = 0x00200000;
+pub const FILE_FLAG_BACKUP_SEMANTICS: DWORD = 0x02000000;
+pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000;
+
+pub const FIONBIO: c_ulong = 0x8004667e;
+
+#[repr(C)]
+#[derive(Copy)]
+pub struct WIN32_FIND_DATAW {
+ pub dwFileAttributes: DWORD,
+ pub ftCreationTime: FILETIME,
+ pub ftLastAccessTime: FILETIME,
+ pub ftLastWriteTime: FILETIME,
+ pub nFileSizeHigh: DWORD,
+ pub nFileSizeLow: DWORD,
+ pub dwReserved0: DWORD,
+ pub dwReserved1: DWORD,
+ pub cFileName: [wchar_t; 260], // #define MAX_PATH 260
+ pub cAlternateFileName: [wchar_t; 14],
+}
+impl Clone for WIN32_FIND_DATAW {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+pub const WSA_FLAG_OVERLAPPED: DWORD = 0x01;
+pub const WSA_FLAG_NO_HANDLE_INHERIT: DWORD = 0x80;
+
+pub const WSADESCRIPTION_LEN: usize = 256;
+pub const WSASYS_STATUS_LEN: usize = 128;
+pub const WSAPROTOCOL_LEN: DWORD = 255;
+pub const INVALID_SOCKET: SOCKET = !0;
+
+pub const MAX_PROTOCOL_CHAIN: DWORD = 7;
+
+pub const MAXIMUM_REPARSE_DATA_BUFFER_SIZE: usize = 16 * 1024;
+pub const FSCTL_GET_REPARSE_POINT: DWORD = 0x900a8;
+pub const IO_REPARSE_TAG_SYMLINK: DWORD = 0xa000000c;
+pub const IO_REPARSE_TAG_MOUNT_POINT: DWORD = 0xa0000003;
+pub const SYMLINK_FLAG_RELATIVE: DWORD = 0x00000001;
+pub const FSCTL_SET_REPARSE_POINT: DWORD = 0x900a4;
+
+pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1;
+pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2;
+
+// Note that these are not actually HANDLEs, just values to pass to GetStdHandle
+pub const STD_INPUT_HANDLE: DWORD = -10i32 as DWORD;
+pub const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
+pub const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD;
+
+pub const PROGRESS_CONTINUE: DWORD = 0;
+
+pub const E_NOTIMPL: HRESULT = 0x80004001u32 as HRESULT;
+
+pub const INVALID_HANDLE_VALUE: HANDLE = ptr::invalid_mut(!0);
+
+pub const FACILITY_NT_BIT: DWORD = 0x1000_0000;
+
+pub const FORMAT_MESSAGE_FROM_SYSTEM: DWORD = 0x00001000;
+pub const FORMAT_MESSAGE_FROM_HMODULE: DWORD = 0x00000800;
+pub const FORMAT_MESSAGE_IGNORE_INSERTS: DWORD = 0x00000200;
+
+pub const TLS_OUT_OF_INDEXES: DWORD = 0xFFFFFFFF;
+
+pub const DLL_THREAD_DETACH: DWORD = 3;
+pub const DLL_PROCESS_DETACH: DWORD = 0;
+
+pub const INFINITE: DWORD = !0;
+
+pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;
+
+pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { ptr: ptr::null_mut() };
+pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() };
+
+pub const DETACHED_PROCESS: DWORD = 0x00000008;
+pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200;
+pub const CREATE_UNICODE_ENVIRONMENT: DWORD = 0x00000400;
+pub const STARTF_USESTDHANDLES: DWORD = 0x00000100;
+
+pub const AF_INET: c_int = 2;
+pub const AF_INET6: c_int = 23;
+pub const SD_BOTH: c_int = 2;
+pub const SD_RECEIVE: c_int = 0;
+pub const SD_SEND: c_int = 1;
+pub const SOCK_DGRAM: c_int = 2;
+pub const SOCK_STREAM: c_int = 1;
+pub const SOCKET_ERROR: c_int = -1;
+pub const SOL_SOCKET: c_int = 0xffff;
+pub const SO_LINGER: c_int = 0x0080;
+pub const SO_RCVTIMEO: c_int = 0x1006;
+pub const SO_SNDTIMEO: c_int = 0x1005;
+pub const IPPROTO_IP: c_int = 0;
+pub const IPPROTO_TCP: c_int = 6;
+pub const IPPROTO_IPV6: c_int = 41;
+pub const TCP_NODELAY: c_int = 0x0001;
+pub const IP_TTL: c_int = 4;
+pub const IPV6_V6ONLY: c_int = 27;
+pub const SO_ERROR: c_int = 0x1007;
+pub const SO_BROADCAST: c_int = 0x0020;
+pub const IP_MULTICAST_LOOP: c_int = 11;
+pub const IPV6_MULTICAST_LOOP: c_int = 11;
+pub const IP_MULTICAST_TTL: c_int = 10;
+pub const IP_ADD_MEMBERSHIP: c_int = 12;
+pub const IP_DROP_MEMBERSHIP: c_int = 13;
+pub const IPV6_ADD_MEMBERSHIP: c_int = 12;
+pub const IPV6_DROP_MEMBERSHIP: c_int = 13;
+pub const MSG_PEEK: c_int = 0x2;
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct linger {
+ pub l_onoff: c_ushort,
+ pub l_linger: c_ushort,
+}
+
+#[repr(C)]
+pub struct ip_mreq {
+ pub imr_multiaddr: in_addr,
+ pub imr_interface: in_addr,
+}
+
+#[repr(C)]
+pub struct ipv6_mreq {
+ pub ipv6mr_multiaddr: in6_addr,
+ pub ipv6mr_interface: c_uint,
+}
+
+pub const VOLUME_NAME_DOS: DWORD = 0x0;
+pub const MOVEFILE_REPLACE_EXISTING: DWORD = 1;
+
+pub const FILE_BEGIN: DWORD = 0;
+pub const FILE_CURRENT: DWORD = 1;
+pub const FILE_END: DWORD = 2;
+
+pub const WAIT_OBJECT_0: DWORD = 0x00000000;
+pub const WAIT_TIMEOUT: DWORD = 258;
+pub const WAIT_FAILED: DWORD = 0xFFFFFFFF;
+
+pub const PIPE_ACCESS_INBOUND: DWORD = 0x00000001;
+pub const PIPE_ACCESS_OUTBOUND: DWORD = 0x00000002;
+pub const FILE_FLAG_FIRST_PIPE_INSTANCE: DWORD = 0x00080000;
+pub const FILE_FLAG_OVERLAPPED: DWORD = 0x40000000;
+pub const PIPE_WAIT: DWORD = 0x00000000;
+pub const PIPE_TYPE_BYTE: DWORD = 0x00000000;
+pub const PIPE_REJECT_REMOTE_CLIENTS: DWORD = 0x00000008;
+pub const PIPE_READMODE_BYTE: DWORD = 0x00000000;
+
+pub const FD_SETSIZE: usize = 64;
+
+pub const STACK_SIZE_PARAM_IS_A_RESERVATION: DWORD = 0x00010000;
+
+pub const STATUS_SUCCESS: NTSTATUS = 0x00000000;
+pub const STATUS_DELETE_PENDING: NTSTATUS = 0xc0000056_u32 as _;
+pub const STATUS_INVALID_PARAMETER: NTSTATUS = 0xc000000d_u32 as _;
+
+pub const STATUS_PENDING: NTSTATUS = 0x103 as _;
+pub const STATUS_END_OF_FILE: NTSTATUS = 0xC0000011_u32 as _;
+pub const STATUS_NOT_IMPLEMENTED: NTSTATUS = 0xC0000002_u32 as _;
+
+// Equivalent to the `NT_SUCCESS` C preprocessor macro.
+// See: https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/using-ntstatus-values
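+// For example, `STATUS_SUCCESS` and informational values such as `STATUS_PENDING` count as
+// success, while error codes like `STATUS_END_OF_FILE` have the sign bit set and do not.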
+pub fn nt_success(status: NTSTATUS) -> bool {
+ status >= 0
+}
+
+pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002;
+
+#[repr(C)]
+pub struct UNICODE_STRING {
+ pub Length: u16,
+ pub MaximumLength: u16,
+ pub Buffer: *mut u16,
+}
+impl UNICODE_STRING {
+ pub fn from_ref(slice: &[u16]) -> Self {
+ let len = slice.len() * mem::size_of::<u16>();
+ Self { Length: len as _, MaximumLength: len as _, Buffer: slice.as_ptr() as _ }
+ }
+}
+#[repr(C)]
+pub struct OBJECT_ATTRIBUTES {
+ pub Length: ULONG,
+ pub RootDirectory: HANDLE,
+ pub ObjectName: *const UNICODE_STRING,
+ pub Attributes: ULONG,
+ pub SecurityDescriptor: *mut c_void,
+ pub SecurityQualityOfService: *mut c_void,
+}
+impl Default for OBJECT_ATTRIBUTES {
+ fn default() -> Self {
+ Self {
+ Length: mem::size_of::<Self>() as _,
+ RootDirectory: ptr::null_mut(),
+ ObjectName: ptr::null_mut(),
+ Attributes: 0,
+ SecurityDescriptor: ptr::null_mut(),
+ SecurityQualityOfService: ptr::null_mut(),
+ }
+ }
+}
+#[repr(C)]
+union IO_STATUS_BLOCK_union {
+ Status: NTSTATUS,
+ Pointer: *mut c_void,
+}
+impl Default for IO_STATUS_BLOCK_union {
+ fn default() -> Self {
+ let mut this = Self { Pointer: ptr::null_mut() };
+ this.Status = STATUS_PENDING;
+ this
+ }
+}
+#[repr(C)]
+#[derive(Default)]
+pub struct IO_STATUS_BLOCK {
+ u: IO_STATUS_BLOCK_union,
+ pub Information: usize,
+}
+impl IO_STATUS_BLOCK {
+ pub fn status(&self) -> NTSTATUS {
+ // SAFETY: If `self.u.Status` was set then this is obviously safe.
+ // If `self.u.Pointer` was set then this is the equivalent to converting
+ // the pointer to an integer, which is also safe.
+ // Currently the only safe way to construct `IO_STATUS_BLOCK` outside of
+ // this module is to call the `default` method, which sets the `Status`.
+ unsafe { self.u.Status }
+ }
+}
+
+pub type LPOVERLAPPED_COMPLETION_ROUTINE = unsafe extern "system" fn(
+ dwErrorCode: DWORD,
+ dwNumberOfBytesTransfered: DWORD,
+ lpOverlapped: *mut OVERLAPPED,
+);
+
+type IO_APC_ROUTINE = unsafe extern "system" fn(
+ ApcContext: *mut c_void,
+ IoStatusBlock: *mut IO_STATUS_BLOCK,
+ Reserved: ULONG,
+);
+
+#[repr(C)]
+#[cfg(not(target_pointer_width = "64"))]
+pub struct WSADATA {
+ pub wVersion: WORD,
+ pub wHighVersion: WORD,
+ pub szDescription: [u8; WSADESCRIPTION_LEN + 1],
+ pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1],
+ pub iMaxSockets: u16,
+ pub iMaxUdpDg: u16,
+ pub lpVendorInfo: *mut u8,
+}
+#[repr(C)]
+#[cfg(target_pointer_width = "64")]
+pub struct WSADATA {
+ pub wVersion: WORD,
+ pub wHighVersion: WORD,
+ pub iMaxSockets: u16,
+ pub iMaxUdpDg: u16,
+ pub lpVendorInfo: *mut u8,
+ pub szDescription: [u8; WSADESCRIPTION_LEN + 1],
+ pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1],
+}
+
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct WSABUF {
+ pub len: ULONG,
+ pub buf: *mut CHAR,
+}
+
+#[repr(C)]
+pub struct WSAPROTOCOL_INFO {
+ pub dwServiceFlags1: DWORD,
+ pub dwServiceFlags2: DWORD,
+ pub dwServiceFlags3: DWORD,
+ pub dwServiceFlags4: DWORD,
+ pub dwProviderFlags: DWORD,
+ pub ProviderId: GUID,
+ pub dwCatalogEntryId: DWORD,
+ pub ProtocolChain: WSAPROTOCOLCHAIN,
+ pub iVersion: c_int,
+ pub iAddressFamily: c_int,
+ pub iMaxSockAddr: c_int,
+ pub iMinSockAddr: c_int,
+ pub iSocketType: c_int,
+ pub iProtocol: c_int,
+ pub iProtocolMaxOffset: c_int,
+ pub iNetworkByteOrder: c_int,
+ pub iSecurityScheme: c_int,
+ pub dwMessageSize: DWORD,
+ pub dwProviderReserved: DWORD,
+ pub szProtocol: [u16; (WSAPROTOCOL_LEN as usize) + 1],
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct WIN32_FILE_ATTRIBUTE_DATA {
+ pub dwFileAttributes: DWORD,
+ pub ftCreationTime: FILETIME,
+ pub ftLastAccessTime: FILETIME,
+ pub ftLastWriteTime: FILETIME,
+ pub nFileSizeHigh: DWORD,
+ pub nFileSizeLow: DWORD,
+}
+
+#[repr(C)]
+#[allow(dead_code)] // we only use some variants
+pub enum FILE_INFO_BY_HANDLE_CLASS {
+ FileBasicInfo = 0,
+ FileStandardInfo = 1,
+ FileNameInfo = 2,
+ FileRenameInfo = 3,
+ FileDispositionInfo = 4,
+ FileAllocationInfo = 5,
+ FileEndOfFileInfo = 6,
+ FileStreamInfo = 7,
+ FileCompressionInfo = 8,
+ FileAttributeTagInfo = 9,
+ FileIdBothDirectoryInfo = 10, // 0xA
+ FileIdBothDirectoryRestartInfo = 11, // 0xB
+ FileIoPriorityHintInfo = 12, // 0xC
+ FileRemoteProtocolInfo = 13, // 0xD
+ FileFullDirectoryInfo = 14, // 0xE
+ FileFullDirectoryRestartInfo = 15, // 0xF
+ FileStorageInfo = 16, // 0x10
+ FileAlignmentInfo = 17, // 0x11
+ FileIdInfo = 18, // 0x12
+ FileIdExtdDirectoryInfo = 19, // 0x13
+ FileIdExtdDirectoryRestartInfo = 20, // 0x14
+ FileDispositionInfoEx = 21, // 0x15, Windows 10 version 1607
+ MaximumFileInfoByHandlesClass,
+}
+
+#[repr(C)]
+pub struct FILE_DISPOSITION_INFO {
+ pub DeleteFile: BOOLEAN,
+}
+
+pub const FILE_DISPOSITION_DELETE: DWORD = 0x1;
+pub const FILE_DISPOSITION_POSIX_SEMANTICS: DWORD = 0x2;
+pub const FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE: DWORD = 0x10;
+
+#[repr(C)]
+pub struct FILE_DISPOSITION_INFO_EX {
+ pub Flags: DWORD,
+}
+
+#[repr(C)]
+#[derive(Default)]
+pub struct FILE_ID_BOTH_DIR_INFO {
+ pub NextEntryOffset: DWORD,
+ pub FileIndex: DWORD,
+ pub CreationTime: LARGE_INTEGER,
+ pub LastAccessTime: LARGE_INTEGER,
+ pub LastWriteTime: LARGE_INTEGER,
+ pub ChangeTime: LARGE_INTEGER,
+ pub EndOfFile: LARGE_INTEGER,
+ pub AllocationSize: LARGE_INTEGER,
+ pub FileAttributes: DWORD,
+ pub FileNameLength: DWORD,
+ pub EaSize: DWORD,
+ pub ShortNameLength: CCHAR,
+ pub ShortName: [WCHAR; 12],
+ pub FileId: LARGE_INTEGER,
+ pub FileName: [WCHAR; 1],
+}
+#[repr(C)]
+pub struct FILE_BASIC_INFO {
+ pub CreationTime: LARGE_INTEGER,
+ pub LastAccessTime: LARGE_INTEGER,
+ pub LastWriteTime: LARGE_INTEGER,
+ pub ChangeTime: LARGE_INTEGER,
+ pub FileAttributes: DWORD,
+}
+
+#[repr(C)]
+pub struct FILE_END_OF_FILE_INFO {
+ pub EndOfFile: LARGE_INTEGER,
+}
+
+#[repr(C)]
+pub struct REPARSE_DATA_BUFFER {
+ pub ReparseTag: c_uint,
+ pub ReparseDataLength: c_ushort,
+ pub Reserved: c_ushort,
+ pub rest: (),
+}
+
+#[repr(C)]
+pub struct SYMBOLIC_LINK_REPARSE_BUFFER {
+ pub SubstituteNameOffset: c_ushort,
+ pub SubstituteNameLength: c_ushort,
+ pub PrintNameOffset: c_ushort,
+ pub PrintNameLength: c_ushort,
+ pub Flags: c_ulong,
+ pub PathBuffer: WCHAR,
+}
+
+#[repr(C)]
+pub struct MOUNT_POINT_REPARSE_BUFFER {
+ pub SubstituteNameOffset: c_ushort,
+ pub SubstituteNameLength: c_ushort,
+ pub PrintNameOffset: c_ushort,
+ pub PrintNameLength: c_ushort,
+ pub PathBuffer: WCHAR,
+}
+
+pub type LPPROGRESS_ROUTINE = crate::option::Option<
+ unsafe extern "system" fn(
+ TotalFileSize: LARGE_INTEGER,
+ TotalBytesTransferred: LARGE_INTEGER,
+ StreamSize: LARGE_INTEGER,
+ StreamBytesTransferred: LARGE_INTEGER,
+ dwStreamNumber: DWORD,
+ dwCallbackReason: DWORD,
+ hSourceFile: HANDLE,
+ hDestinationFile: HANDLE,
+ lpData: LPVOID,
+ ) -> DWORD,
+>;
+
+#[repr(C)]
+pub struct CONDITION_VARIABLE {
+ pub ptr: LPVOID,
+}
+#[repr(C)]
+pub struct SRWLOCK {
+ pub ptr: LPVOID,
+}
+
+#[repr(C)]
+pub struct REPARSE_MOUNTPOINT_DATA_BUFFER {
+ pub ReparseTag: DWORD,
+ pub ReparseDataLength: DWORD,
+ pub Reserved: WORD,
+ pub ReparseTargetLength: WORD,
+ pub ReparseTargetMaximumLength: WORD,
+ pub Reserved1: WORD,
+ pub ReparseTarget: WCHAR,
+}
+
+#[repr(C)]
+pub struct GUID {
+ pub Data1: DWORD,
+ pub Data2: WORD,
+ pub Data3: WORD,
+ pub Data4: [BYTE; 8],
+}
+
+#[repr(C)]
+pub struct WSAPROTOCOLCHAIN {
+ pub ChainLen: c_int,
+ pub ChainEntries: [DWORD; MAX_PROTOCOL_CHAIN as usize],
+}
+
+#[repr(C)]
+pub struct SECURITY_ATTRIBUTES {
+ pub nLength: DWORD,
+ pub lpSecurityDescriptor: LPVOID,
+ pub bInheritHandle: BOOL,
+}
+
+#[repr(C)]
+pub struct PROCESS_INFORMATION {
+ pub hProcess: HANDLE,
+ pub hThread: HANDLE,
+ pub dwProcessId: DWORD,
+ pub dwThreadId: DWORD,
+}
+
+#[repr(C)]
+pub struct STARTUPINFO {
+ pub cb: DWORD,
+ pub lpReserved: LPWSTR,
+ pub lpDesktop: LPWSTR,
+ pub lpTitle: LPWSTR,
+ pub dwX: DWORD,
+ pub dwY: DWORD,
+ pub dwXSize: DWORD,
+ pub dwYSize: DWORD,
+ pub dwXCountChars: DWORD,
+ pub dwYCountCharts: DWORD,
+ pub dwFillAttribute: DWORD,
+ pub dwFlags: DWORD,
+ pub wShowWindow: WORD,
+ pub cbReserved2: WORD,
+ pub lpReserved2: LPBYTE,
+ pub hStdInput: HANDLE,
+ pub hStdOutput: HANDLE,
+ pub hStdError: HANDLE,
+}
+
+#[repr(C)]
+pub struct SOCKADDR {
+ pub sa_family: ADDRESS_FAMILY,
+ pub sa_data: [CHAR; 14],
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FILETIME {
+ pub dwLowDateTime: DWORD,
+ pub dwHighDateTime: DWORD,
+}
+
+#[repr(C)]
+pub struct SYSTEM_INFO {
+ pub wProcessorArchitecture: WORD,
+ pub wReserved: WORD,
+ pub dwPageSize: DWORD,
+ pub lpMinimumApplicationAddress: LPVOID,
+ pub lpMaximumApplicationAddress: LPVOID,
+ pub dwActiveProcessorMask: DWORD_PTR,
+ pub dwNumberOfProcessors: DWORD,
+ pub dwProcessorType: DWORD,
+ pub dwAllocationGranularity: DWORD,
+ pub wProcessorLevel: WORD,
+ pub wProcessorRevision: WORD,
+}
+
+#[repr(C)]
+pub struct OVERLAPPED {
+ pub Internal: *mut c_ulong,
+ pub InternalHigh: *mut c_ulong,
+ pub Offset: DWORD,
+ pub OffsetHigh: DWORD,
+ pub hEvent: HANDLE,
+}
+
+#[repr(C)]
+#[allow(dead_code)] // we only use some variants
+pub enum ADDRESS_MODE {
+ AddrMode1616,
+ AddrMode1632,
+ AddrModeReal,
+ AddrModeFlat,
+}
+
+#[repr(C)]
+pub struct SOCKADDR_STORAGE_LH {
+ pub ss_family: ADDRESS_FAMILY,
+ pub __ss_pad1: [CHAR; 6],
+ pub __ss_align: i64,
+ pub __ss_pad2: [CHAR; 112],
+}
+
+#[repr(C)]
+pub struct ADDRINFOA {
+ pub ai_flags: c_int,
+ pub ai_family: c_int,
+ pub ai_socktype: c_int,
+ pub ai_protocol: c_int,
+ pub ai_addrlen: size_t,
+ pub ai_canonname: *mut c_char,
+ pub ai_addr: *mut SOCKADDR,
+ pub ai_next: *mut ADDRINFOA,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct sockaddr_in {
+ pub sin_family: ADDRESS_FAMILY,
+ pub sin_port: USHORT,
+ pub sin_addr: in_addr,
+ pub sin_zero: [CHAR; 8],
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct sockaddr_in6 {
+ pub sin6_family: ADDRESS_FAMILY,
+ pub sin6_port: USHORT,
+ pub sin6_flowinfo: c_ulong,
+ pub sin6_addr: in6_addr,
+ pub sin6_scope_id: c_ulong,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct in_addr {
+ pub s_addr: u32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct in6_addr {
+ pub s6_addr: [u8; 16],
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+#[allow(dead_code)] // we only use some variants
+pub enum EXCEPTION_DISPOSITION {
+ ExceptionContinueExecution,
+ ExceptionContinueSearch,
+ ExceptionNestedException,
+ ExceptionCollidedUnwind,
+}
+
+#[repr(C)]
+#[derive(Copy)]
+pub struct fd_set {
+ pub fd_count: c_uint,
+ pub fd_array: [SOCKET; FD_SETSIZE],
+}
+
+impl Clone for fd_set {
+ fn clone(&self) -> fd_set {
+ *self
+ }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct timeval {
+ pub tv_sec: c_long,
+ pub tv_usec: c_long,
+}
+
+// Desktop specific functions & types
+cfg_if::cfg_if! {
+if #[cfg(not(target_vendor = "uwp"))] {
+ pub const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
+ pub const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd;
+ pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15;
+
+ #[repr(C)]
+ pub struct EXCEPTION_RECORD {
+ pub ExceptionCode: DWORD,
+ pub ExceptionFlags: DWORD,
+ pub ExceptionRecord: *mut EXCEPTION_RECORD,
+ pub ExceptionAddress: LPVOID,
+ pub NumberParameters: DWORD,
+ pub ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS],
+ }
+
+ pub enum CONTEXT {}
+
+ #[repr(C)]
+ pub struct EXCEPTION_POINTERS {
+ pub ExceptionRecord: *mut EXCEPTION_RECORD,
+ pub ContextRecord: *mut CONTEXT,
+ }
+
+ pub type PVECTORED_EXCEPTION_HANDLER =
+ extern "system" fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
+
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct CONSOLE_READCONSOLE_CONTROL {
+ pub nLength: ULONG,
+ pub nInitialChars: ULONG,
+ pub dwCtrlWakeupMask: ULONG,
+ pub dwControlKeyState: ULONG,
+ }
+
+ pub type PCONSOLE_READCONSOLE_CONTROL = *mut CONSOLE_READCONSOLE_CONTROL;
+
+ #[repr(C)]
+ pub struct BY_HANDLE_FILE_INFORMATION {
+ pub dwFileAttributes: DWORD,
+ pub ftCreationTime: FILETIME,
+ pub ftLastAccessTime: FILETIME,
+ pub ftLastWriteTime: FILETIME,
+ pub dwVolumeSerialNumber: DWORD,
+ pub nFileSizeHigh: DWORD,
+ pub nFileSizeLow: DWORD,
+ pub nNumberOfLinks: DWORD,
+ pub nFileIndexHigh: DWORD,
+ pub nFileIndexLow: DWORD,
+ }
+
+ pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
+ pub type LPCVOID = *const c_void;
+
+ pub const HANDLE_FLAG_INHERIT: DWORD = 0x00000001;
+
+ pub const TOKEN_READ: DWORD = 0x20008;
+
+ #[link(name = "advapi32")]
+ extern "system" {
+ // Forbidden when targeting UWP
+ #[link_name = "SystemFunction036"]
+ pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN;
+
+ // Allowed but unused by UWP
+ pub fn OpenProcessToken(
+ ProcessHandle: HANDLE,
+ DesiredAccess: DWORD,
+ TokenHandle: *mut HANDLE,
+ ) -> BOOL;
+ }
+
+ #[link(name = "userenv")]
+ extern "system" {
+ // Allowed but unused by UWP
+ pub fn GetUserProfileDirectoryW(
+ hToken: HANDLE,
+ lpProfileDir: LPWSTR,
+ lpcchSize: *mut DWORD,
+ ) -> BOOL;
+ }
+
+ #[link(name = "kernel32")]
+ extern "system" {
+ // Functions forbidden when targeting UWP
+ pub fn ReadConsoleW(
+ hConsoleInput: HANDLE,
+ lpBuffer: LPVOID,
+ nNumberOfCharsToRead: DWORD,
+ lpNumberOfCharsRead: LPDWORD,
+ pInputControl: PCONSOLE_READCONSOLE_CONTROL,
+ ) -> BOOL;
+
+ pub fn WriteConsoleW(
+ hConsoleOutput: HANDLE,
+ lpBuffer: LPCVOID,
+ nNumberOfCharsToWrite: DWORD,
+ lpNumberOfCharsWritten: LPDWORD,
+ lpReserved: LPVOID,
+ ) -> BOOL;
+
+ pub fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
+ // Allowed but unused by UWP
+ pub fn GetFileInformationByHandle(
+ hFile: HANDLE,
+ lpFileInformation: LPBY_HANDLE_FILE_INFORMATION,
+ ) -> BOOL;
+ pub fn SetHandleInformation(hObject: HANDLE, dwMask: DWORD, dwFlags: DWORD) -> BOOL;
+ pub fn AddVectoredExceptionHandler(
+ FirstHandler: ULONG,
+ VectoredHandler: PVECTORED_EXCEPTION_HANDLER,
+ ) -> LPVOID;
+ pub fn CreateHardLinkW(
+ lpSymlinkFileName: LPCWSTR,
+ lpTargetFileName: LPCWSTR,
+ lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
+ ) -> BOOL;
+ pub fn SetThreadStackGuarantee(_size: *mut c_ulong) -> BOOL;
+ pub fn GetWindowsDirectoryW(lpBuffer: LPWSTR, uSize: UINT) -> UINT;
+ }
+}
+}
+
+// UWP specific functions & types
+cfg_if::cfg_if! {
+if #[cfg(target_vendor = "uwp")] {
+ #[repr(C)]
+ pub struct FILE_STANDARD_INFO {
+ pub AllocationSize: LARGE_INTEGER,
+ pub EndOfFile: LARGE_INTEGER,
+ pub NumberOfLinks: DWORD,
+ pub DeletePending: BOOLEAN,
+ pub Directory: BOOLEAN,
+ }
+}
+}
+
+// Shared between Desktop & UWP
+
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn GetCurrentProcessId() -> DWORD;
+
+ pub fn GetSystemDirectoryW(lpBuffer: LPWSTR, uSize: UINT) -> UINT;
+ pub fn RemoveDirectoryW(lpPathName: LPCWSTR) -> BOOL;
+ pub fn SetFileAttributesW(lpFileName: LPCWSTR, dwFileAttributes: DWORD) -> BOOL;
+ pub fn SetFileTime(
+ hFile: BorrowedHandle<'_>,
+ lpCreationTime: Option<&FILETIME>,
+ lpLastAccessTime: Option<&FILETIME>,
+ lpLastWriteTime: Option<&FILETIME>,
+ ) -> BOOL;
+ pub fn SetLastError(dwErrCode: DWORD);
+ pub fn GetCommandLineW() -> LPWSTR;
+ pub fn GetTempPathW(nBufferLength: DWORD, lpBuffer: LPCWSTR) -> DWORD;
+ pub fn GetCurrentProcess() -> HANDLE;
+ pub fn GetCurrentThread() -> HANDLE;
+ pub fn GetStdHandle(which: DWORD) -> HANDLE;
+ pub fn ExitProcess(uExitCode: c_uint) -> !;
+ pub fn DeviceIoControl(
+ hDevice: HANDLE,
+ dwIoControlCode: DWORD,
+ lpInBuffer: LPVOID,
+ nInBufferSize: DWORD,
+ lpOutBuffer: LPVOID,
+ nOutBufferSize: DWORD,
+ lpBytesReturned: LPDWORD,
+ lpOverlapped: LPOVERLAPPED,
+ ) -> BOOL;
+ pub fn CreateThread(
+ lpThreadAttributes: LPSECURITY_ATTRIBUTES,
+ dwStackSize: SIZE_T,
+ lpStartAddress: extern "system" fn(*mut c_void) -> DWORD,
+ lpParameter: LPVOID,
+ dwCreationFlags: DWORD,
+ lpThreadId: LPDWORD,
+ ) -> HandleOrNull;
+ pub fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
+ pub fn SwitchToThread() -> BOOL;
+ pub fn Sleep(dwMilliseconds: DWORD);
+ pub fn SleepEx(dwMilliseconds: DWORD, bAlertable: BOOL) -> DWORD;
+ pub fn GetProcessId(handle: HANDLE) -> DWORD;
+ pub fn CopyFileExW(
+ lpExistingFileName: LPCWSTR,
+ lpNewFileName: LPCWSTR,
+ lpProgressRoutine: LPPROGRESS_ROUTINE,
+ lpData: LPVOID,
+ pbCancel: LPBOOL,
+ dwCopyFlags: DWORD,
+ ) -> BOOL;
+ pub fn FormatMessageW(
+ flags: DWORD,
+ lpSrc: LPVOID,
+ msgId: DWORD,
+ langId: DWORD,
+ buf: LPWSTR,
+ nsize: DWORD,
+ args: *const c_void,
+ ) -> DWORD;
+ pub fn TlsAlloc() -> DWORD;
+ pub fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID;
+ pub fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL;
+ pub fn GetLastError() -> DWORD;
+ pub fn QueryPerformanceFrequency(lpFrequency: *mut LARGE_INTEGER) -> BOOL;
+ pub fn QueryPerformanceCounter(lpPerformanceCount: *mut LARGE_INTEGER) -> BOOL;
+ pub fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: LPDWORD) -> BOOL;
+ pub fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) -> BOOL;
+ pub fn CreateProcessW(
+ lpApplicationName: LPCWSTR,
+ lpCommandLine: LPWSTR,
+ lpProcessAttributes: LPSECURITY_ATTRIBUTES,
+ lpThreadAttributes: LPSECURITY_ATTRIBUTES,
+ bInheritHandles: BOOL,
+ dwCreationFlags: DWORD,
+ lpEnvironment: LPVOID,
+ lpCurrentDirectory: LPCWSTR,
+ lpStartupInfo: LPSTARTUPINFO,
+ lpProcessInformation: LPPROCESS_INFORMATION,
+ ) -> BOOL;
+ pub fn GetEnvironmentVariableW(n: LPCWSTR, v: LPWSTR, nsize: DWORD) -> DWORD;
+ pub fn SetEnvironmentVariableW(n: LPCWSTR, v: LPCWSTR) -> BOOL;
+ pub fn GetEnvironmentStringsW() -> LPWCH;
+ pub fn FreeEnvironmentStringsW(env_ptr: LPWCH) -> BOOL;
+ pub fn GetModuleFileNameW(hModule: HMODULE, lpFilename: LPWSTR, nSize: DWORD) -> DWORD;
+ pub fn CreateDirectoryW(
+ lpPathName: LPCWSTR,
+ lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
+ ) -> BOOL;
+ pub fn DeleteFileW(lpPathName: LPCWSTR) -> BOOL;
+ pub fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: LPWSTR) -> DWORD;
+ pub fn SetCurrentDirectoryW(lpPathName: LPCWSTR) -> BOOL;
+ pub fn DuplicateHandle(
+ hSourceProcessHandle: HANDLE,
+ hSourceHandle: HANDLE,
+ hTargetProcessHandle: HANDLE,
+ lpTargetHandle: LPHANDLE,
+ dwDesiredAccess: DWORD,
+ bInheritHandle: BOOL,
+ dwOptions: DWORD,
+ ) -> BOOL;
+ pub fn ReadFile(
+ hFile: BorrowedHandle<'_>,
+ lpBuffer: LPVOID,
+ nNumberOfBytesToRead: DWORD,
+ lpNumberOfBytesRead: LPDWORD,
+ lpOverlapped: LPOVERLAPPED,
+ ) -> BOOL;
+ pub fn ReadFileEx(
+ hFile: BorrowedHandle<'_>,
+ lpBuffer: LPVOID,
+ nNumberOfBytesToRead: DWORD,
+ lpOverlapped: LPOVERLAPPED,
+ lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
+ ) -> BOOL;
+ pub fn WriteFileEx(
+ hFile: BorrowedHandle<'_>,
+ lpBuffer: LPVOID,
+ nNumberOfBytesToWrite: DWORD,
+ lpOverlapped: LPOVERLAPPED,
+ lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
+ ) -> BOOL;
+ pub fn CloseHandle(hObject: HANDLE) -> BOOL;
+ pub fn MoveFileExW(lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, dwFlags: DWORD)
+ -> BOOL;
+ pub fn SetFilePointerEx(
+ hFile: HANDLE,
+ liDistanceToMove: LARGE_INTEGER,
+ lpNewFilePointer: PLARGE_INTEGER,
+ dwMoveMethod: DWORD,
+ ) -> BOOL;
+ pub fn FlushFileBuffers(hFile: HANDLE) -> BOOL;
+ pub fn CreateFileW(
+ lpFileName: LPCWSTR,
+ dwDesiredAccess: DWORD,
+ dwShareMode: DWORD,
+ lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
+ dwCreationDisposition: DWORD,
+ dwFlagsAndAttributes: DWORD,
+ hTemplateFile: HANDLE,
+ ) -> HandleOrInvalid;
+
+ pub fn FindFirstFileW(fileName: LPCWSTR, findFileData: LPWIN32_FIND_DATAW) -> HANDLE;
+ pub fn FindNextFileW(findFile: HANDLE, findFileData: LPWIN32_FIND_DATAW) -> BOOL;
+ pub fn FindClose(findFile: HANDLE) -> BOOL;
+
+ pub fn GetProcAddress(handle: HMODULE, name: LPCSTR) -> *mut c_void;
+ pub fn GetModuleHandleA(lpModuleName: LPCSTR) -> HMODULE;
+ pub fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
+
+ pub fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: LPFILETIME);
+ pub fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO);
+
+ pub fn CreateEventW(
+ lpEventAttributes: LPSECURITY_ATTRIBUTES,
+ bManualReset: BOOL,
+ bInitialState: BOOL,
+ lpName: LPCWSTR,
+ ) -> HANDLE;
+ pub fn WaitForMultipleObjects(
+ nCount: DWORD,
+ lpHandles: *const HANDLE,
+ bWaitAll: BOOL,
+ dwMilliseconds: DWORD,
+ ) -> DWORD;
+ pub fn CreateNamedPipeW(
+ lpName: LPCWSTR,
+ dwOpenMode: DWORD,
+ dwPipeMode: DWORD,
+ nMaxInstances: DWORD,
+ nOutBufferSize: DWORD,
+ nInBufferSize: DWORD,
+ nDefaultTimeOut: DWORD,
+ lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
+ ) -> HANDLE;
+ pub fn CancelIo(handle: HANDLE) -> BOOL;
+ pub fn GetOverlappedResult(
+ hFile: HANDLE,
+ lpOverlapped: LPOVERLAPPED,
+ lpNumberOfBytesTransferred: LPDWORD,
+ bWait: BOOL,
+ ) -> BOOL;
+ pub fn CreateSymbolicLinkW(
+ lpSymlinkFileName: LPCWSTR,
+ lpTargetFileName: LPCWSTR,
+ dwFlags: DWORD,
+ ) -> BOOLEAN;
+ pub fn GetFinalPathNameByHandleW(
+ hFile: HANDLE,
+ lpszFilePath: LPCWSTR,
+ cchFilePath: DWORD,
+ dwFlags: DWORD,
+ ) -> DWORD;
+ pub fn GetFileInformationByHandleEx(
+ hFile: HANDLE,
+ fileInfoClass: FILE_INFO_BY_HANDLE_CLASS,
+ lpFileInformation: LPVOID,
+ dwBufferSize: DWORD,
+ ) -> BOOL;
+ pub fn SetFileInformationByHandle(
+ hFile: HANDLE,
+ FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
+ lpFileInformation: LPVOID,
+ dwBufferSize: DWORD,
+ ) -> BOOL;
+ pub fn SleepConditionVariableSRW(
+ ConditionVariable: PCONDITION_VARIABLE,
+ SRWLock: PSRWLOCK,
+ dwMilliseconds: DWORD,
+ Flags: ULONG,
+ ) -> BOOL;
+
+ pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
+ pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
+
+ pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
+ pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
+ pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
+ pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
+ pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
+ pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
+
+ pub fn CompareStringOrdinal(
+ lpString1: LPCWSTR,
+ cchCount1: c_int,
+ lpString2: LPCWSTR,
+ cchCount2: c_int,
+ bIgnoreCase: BOOL,
+ ) -> c_int;
+ pub fn GetFullPathNameW(
+ lpFileName: LPCWSTR,
+ nBufferLength: DWORD,
+ lpBuffer: LPWSTR,
+ lpFilePart: *mut LPWSTR,
+ ) -> DWORD;
+ pub fn GetFileAttributesW(lpFileName: LPCWSTR) -> DWORD;
+}
+
+#[link(name = "ws2_32")]
+extern "system" {
+ pub fn WSAStartup(wVersionRequested: WORD, lpWSAData: LPWSADATA) -> c_int;
+ pub fn WSACleanup() -> c_int;
+ pub fn WSAGetLastError() -> c_int;
+ pub fn WSADuplicateSocketW(
+ s: SOCKET,
+ dwProcessId: DWORD,
+ lpProtocolInfo: LPWSAPROTOCOL_INFO,
+ ) -> c_int;
+ pub fn WSASend(
+ s: SOCKET,
+ lpBuffers: LPWSABUF,
+ dwBufferCount: DWORD,
+ lpNumberOfBytesSent: LPDWORD,
+ dwFlags: DWORD,
+ lpOverlapped: LPWSAOVERLAPPED,
+ lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
+ ) -> c_int;
+ pub fn WSARecv(
+ s: SOCKET,
+ lpBuffers: LPWSABUF,
+ dwBufferCount: DWORD,
+ lpNumberOfBytesRecvd: LPDWORD,
+ lpFlags: LPDWORD,
+ lpOverlapped: LPWSAOVERLAPPED,
+ lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
+ ) -> c_int;
+ pub fn WSASocketW(
+ af: c_int,
+ kind: c_int,
+ protocol: c_int,
+ lpProtocolInfo: LPWSAPROTOCOL_INFO,
+ g: GROUP,
+ dwFlags: DWORD,
+ ) -> SOCKET;
+ pub fn ioctlsocket(s: SOCKET, cmd: c_long, argp: *mut c_ulong) -> c_int;
+ pub fn closesocket(socket: SOCKET) -> c_int;
+ pub fn recv(socket: SOCKET, buf: *mut c_void, len: c_int, flags: c_int) -> c_int;
+ pub fn send(socket: SOCKET, buf: *const c_void, len: c_int, flags: c_int) -> c_int;
+ pub fn recvfrom(
+ socket: SOCKET,
+ buf: *mut c_void,
+ len: c_int,
+ flags: c_int,
+ addr: *mut SOCKADDR,
+ addrlen: *mut c_int,
+ ) -> c_int;
+ pub fn sendto(
+ socket: SOCKET,
+ buf: *const c_void,
+ len: c_int,
+ flags: c_int,
+ addr: *const SOCKADDR,
+ addrlen: c_int,
+ ) -> c_int;
+ pub fn shutdown(socket: SOCKET, how: c_int) -> c_int;
+ pub fn accept(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> SOCKET;
+ pub fn getsockopt(
+ s: SOCKET,
+ level: c_int,
+ optname: c_int,
+ optval: *mut c_char,
+ optlen: *mut c_int,
+ ) -> c_int;
+ pub fn setsockopt(
+ s: SOCKET,
+ level: c_int,
+ optname: c_int,
+ optval: *const c_void,
+ optlen: c_int,
+ ) -> c_int;
+ pub fn getsockname(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> c_int;
+ pub fn getpeername(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> c_int;
+ pub fn bind(socket: SOCKET, address: *const SOCKADDR, address_len: socklen_t) -> c_int;
+ pub fn listen(socket: SOCKET, backlog: c_int) -> c_int;
+ pub fn connect(socket: SOCKET, address: *const SOCKADDR, len: c_int) -> c_int;
+ pub fn getaddrinfo(
+ node: *const c_char,
+ service: *const c_char,
+ hints: *const ADDRINFOA,
+ res: *mut *mut ADDRINFOA,
+ ) -> c_int;
+ pub fn freeaddrinfo(res: *mut ADDRINFOA);
+ pub fn select(
+ nfds: c_int,
+ readfds: *mut fd_set,
+ writefds: *mut fd_set,
+ exceptfds: *mut fd_set,
+ timeout: *const timeval,
+ ) -> c_int;
+}
+
+#[link(name = "bcrypt")]
+extern "system" {
+ // >= Vista / Server 2008
+ // https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
+ pub fn BCryptGenRandom(
+ hAlgorithm: LPVOID,
+ pBuffer: *mut u8,
+ cbBuffer: ULONG,
+ dwFlags: ULONG,
+ ) -> NTSTATUS;
+}
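
A minimal sketch of how a caller could drive this binding to fill a buffer from the system RNG. The BCRYPT_USE_SYSTEM_PREFERRED_RNG flag value (0x00000002) comes from the documentation linked above; the helper name and the error handling here are illustrative, not what the standard library actually does.

// Sketch only: request `buf.len()` random bytes from the system-preferred RNG
// by passing a null algorithm handle plus BCRYPT_USE_SYSTEM_PREFERRED_RNG.
const BCRYPT_USE_SYSTEM_PREFERRED_RNG: ULONG = 0x0000_0002; // documented flag value

fn fill_random(buf: &mut [u8]) -> Result<(), NTSTATUS> {
    // SAFETY: the pointer/length pair describes writable memory we own.
    let status = unsafe {
        BCryptGenRandom(
            crate::ptr::null_mut(),          // hAlgorithm: none, use the system RNG
            buf.as_mut_ptr(),                // pBuffer
            buf.len() as ULONG,              // cbBuffer
            BCRYPT_USE_SYSTEM_PREFERRED_RNG, // dwFlags
        )
    };
    if status >= 0 { Ok(()) } else { Err(status) } // NTSTATUS >= 0 means success
}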
+
+// Functions that aren't available on every version of Windows that we support,
+// but that we still use; the bodies below provide some form of fallback
+// implementation for the versions where they're missing.
+compat_fn_with_fallback! {
+ pub static KERNEL32: &CStr = ansi_str!("kernel32");
+
+ // >= Win10 1607
+ // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription
+ pub fn SetThreadDescription(hThread: HANDLE,
+ lpThreadDescription: LPCWSTR) -> HRESULT {
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); E_NOTIMPL
+ }
+
+ // >= Win8 / Server 2012
+ // https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimepreciseasfiletime
+ pub fn GetSystemTimePreciseAsFileTime(lpSystemTimeAsFileTime: LPFILETIME)
+ -> () {
+ GetSystemTimeAsFileTime(lpSystemTimeAsFileTime)
+ }
+
+ // >= Win11 / Server 2022
+ // https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-gettemppath2a
+ pub fn GetTempPath2W(nBufferLength: DWORD, lpBuffer: LPCWSTR) -> DWORD {
+ GetTempPathW(nBufferLength, lpBuffer)
+ }
+}
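
What the macro above provides, conceptually, is delay loading: resolve the export by name at runtime and run the written-out fallback body when it is absent. Below is a hand-written sketch of that idea for SetThreadDescription, assuming the usual GetModuleHandleA/GetProcAddress kernel32 bindings are in scope (they are not part of this hunk) and omitting the caching the real macro performs.

// Sketch only; the macro expansion differs. Resolve the symbol at runtime and
// fall back exactly as the body above does when the export is missing.
type SetThreadDescriptionFn = unsafe extern "system" fn(HANDLE, LPCWSTR) -> HRESULT;

unsafe fn set_thread_description(thread: HANDLE, name: LPCWSTR) -> HRESULT {
    // `GetModuleHandleA`/`GetProcAddress` are assumed to be declared elsewhere.
    let kernel32 = GetModuleHandleA(b"kernel32\0".as_ptr().cast());
    let func = GetProcAddress(kernel32, b"SetThreadDescription\0".as_ptr().cast());
    if func.is_null() {
        // Same fallback as the macro body above: report "not implemented".
        SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD);
        return E_NOTIMPL;
    }
    let func: SetThreadDescriptionFn = crate::mem::transmute(func);
    func(thread, name)
}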
+
+compat_fn_optional! {
+ pub static SYNCH_API: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
+
+ // >= Windows 8 / Server 2012
+ // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitonaddress
+ pub fn WaitOnAddress(
+ Address: LPVOID,
+ CompareAddress: LPVOID,
+ AddressSize: SIZE_T,
+ dwMilliseconds: DWORD
+ ) -> BOOL;
+ pub fn WakeByAddressSingle(Address: LPVOID) -> ();
+}
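
These two are the platform's futex-style primitives: WaitOnAddress blocks while the value at Address still equals the value at CompareAddress, and WakeByAddressSingle wakes one waiter. A minimal park/unpark sketch over an AtomicU32, using the signatures exactly as declared above and spelling INFINITE as DWORD::MAX; real callers must also handle the case where this optional API is unavailable.

use crate::sync::atomic::{AtomicU32, Ordering};

// Sketch only: block until `flag` becomes non-zero, then wake one waiter from
// another thread. Spurious wakeups are handled by re-checking in the loop.
pub fn park(flag: &AtomicU32) {
    while flag.load(Ordering::Acquire) == 0 {
        let undesired: u32 = 0;
        unsafe {
            WaitOnAddress(
                flag as *const AtomicU32 as LPVOID,
                &undesired as *const u32 as LPVOID,
                crate::mem::size_of::<u32>() as SIZE_T,
                DWORD::MAX, // INFINITE
            );
        }
    }
}

pub fn unpark(flag: &AtomicU32) {
    flag.store(1, Ordering::Release);
    unsafe { WakeByAddressSingle(flag as *const AtomicU32 as LPVOID) };
}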
+
+compat_fn_with_fallback! {
+ pub static NTDLL: &CStr = ansi_str!("ntdll");
+
+ pub fn NtCreateFile(
+ FileHandle: *mut HANDLE,
+ DesiredAccess: ACCESS_MASK,
+ ObjectAttributes: *const OBJECT_ATTRIBUTES,
+ IoStatusBlock: *mut IO_STATUS_BLOCK,
+ AllocationSize: *mut i64,
+ FileAttributes: ULONG,
+ ShareAccess: ULONG,
+ CreateDisposition: ULONG,
+ CreateOptions: ULONG,
+ EaBuffer: *mut c_void,
+ EaLength: ULONG
+ ) -> NTSTATUS {
+ STATUS_NOT_IMPLEMENTED
+ }
+ pub fn NtReadFile(
+ FileHandle: BorrowedHandle<'_>,
+ Event: HANDLE,
+ ApcRoutine: Option<IO_APC_ROUTINE>,
+ ApcContext: *mut c_void,
+ IoStatusBlock: &mut IO_STATUS_BLOCK,
+ Buffer: *mut crate::mem::MaybeUninit<u8>,
+ Length: ULONG,
+ ByteOffset: Option<&LARGE_INTEGER>,
+ Key: Option<&ULONG>
+ ) -> NTSTATUS {
+ STATUS_NOT_IMPLEMENTED
+ }
+ pub fn NtWriteFile(
+ FileHandle: BorrowedHandle<'_>,
+ Event: HANDLE,
+ ApcRoutine: Option<IO_APC_ROUTINE>,
+ ApcContext: *mut c_void,
+ IoStatusBlock: &mut IO_STATUS_BLOCK,
+ Buffer: *const u8,
+ Length: ULONG,
+ ByteOffset: Option<&LARGE_INTEGER>,
+ Key: Option<&ULONG>
+ ) -> NTSTATUS {
+ STATUS_NOT_IMPLEMENTED
+ }
+ pub fn RtlNtStatusToDosError(
+ Status: NTSTATUS
+ ) -> ULONG {
+ Status as ULONG
+ }
+ pub fn NtCreateKeyedEvent(
+ KeyedEventHandle: LPHANDLE,
+ DesiredAccess: ACCESS_MASK,
+ ObjectAttributes: LPVOID,
+ Flags: ULONG
+ ) -> NTSTATUS {
+ panic!("keyed events not available")
+ }
+ pub fn NtReleaseKeyedEvent(
+ EventHandle: HANDLE,
+ Key: LPVOID,
+ Alertable: BOOLEAN,
+ Timeout: PLARGE_INTEGER
+ ) -> NTSTATUS {
+ panic!("keyed events not available")
+ }
+ pub fn NtWaitForKeyedEvent(
+ EventHandle: HANDLE,
+ Key: LPVOID,
+ Alertable: BOOLEAN,
+ Timeout: PLARGE_INTEGER
+ ) -> NTSTATUS {
+ panic!("keyed events not available")
+ }
+}
diff --git a/library/std/src/sys/windows/c/errors.rs b/library/std/src/sys/windows/c/errors.rs
new file mode 100644
index 000000000..23dcc119d
--- /dev/null
+++ b/library/std/src/sys/windows/c/errors.rs
@@ -0,0 +1,1883 @@
+// List of Windows system error codes with descriptions:
+// https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes#system-error-codes
+
+#![allow(dead_code)]
+
+use super::{c_int, DWORD};
+
+pub const ERROR_DIRECTORY_NOT_SUPPORTED: DWORD = 336;
+pub const ERROR_DRIVER_CANCEL_TIMEOUT: DWORD = 594;
+pub const ERROR_DISK_QUOTA_EXCEEDED: DWORD = 1295;
+pub const ERROR_RESOURCE_CALL_TIMED_OUT: DWORD = 5910;
+pub const FRS_ERR_SYSVOL_POPULATE_TIMEOUT: DWORD = 8014;
+pub const DNS_ERROR_RECORD_TIMED_OUT: DWORD = 9705;
+
+// The following list was obtained from
+// `/usr/x86_64-w64-mingw32/include/winerror.h`
+// in the Debian package
+// mingw-w64_6.0.0-3_all.deb
+//
+// The header of that file says:
+// * This file has no copyright assigned and is placed in the Public Domain.
+// * This file is part of the mingw-w64 runtime package.
+// * No warranty is given; refer to the file DISCLAIMER.PD within this package.
+//
+// The text here is the result of the following rune:
+// grep -P '#define ERROR' /usr/x86_64-w64-mingw32/include/winerror.h >>library/std/src/sys/windows/c/errors.rs
+// grep -P '#define WSA' /usr/x86_64-w64-mingw32/include/winerror.h >>library/std/src/sys/windows/c/errors.rs
+// and then using some manually-invented but rather obvious editor search-and-replace
+// invocations, plus some straightforward manual fixups, to turn it into Rust syntax
+// and remove all the duplicates from the manual table above.
+
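
Downstream these are just raw DWORDs compared against OS-reported error values; the sketch below shows the sort of match a consumer might write over a handful of the constants defined in this file (the grouping is illustrative, not the standard library's actual mapping).

// Illustrative only: coarse classification of a raw OS error code.
fn classify(code: DWORD) -> &'static str {
    match code {
        ERROR_SUCCESS => "ok",
        ERROR_FILE_NOT_FOUND | ERROR_PATH_NOT_FOUND => "not found",
        ERROR_ACCESS_DENIED => "permission denied",
        ERROR_ALREADY_EXISTS => "already exists",
        ERROR_TIMEOUT => "timed out",
        _ => "other",
    }
}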
+pub const ERROR_SUCCESS: DWORD = 0;
+pub const ERROR_INVALID_FUNCTION: DWORD = 1;
+pub const ERROR_FILE_NOT_FOUND: DWORD = 2;
+pub const ERROR_PATH_NOT_FOUND: DWORD = 3;
+pub const ERROR_TOO_MANY_OPEN_FILES: DWORD = 4;
+pub const ERROR_ACCESS_DENIED: DWORD = 5;
+pub const ERROR_INVALID_HANDLE: DWORD = 6;
+pub const ERROR_ARENA_TRASHED: DWORD = 7;
+pub const ERROR_NOT_ENOUGH_MEMORY: DWORD = 8;
+pub const ERROR_INVALID_BLOCK: DWORD = 9;
+pub const ERROR_BAD_ENVIRONMENT: DWORD = 10;
+pub const ERROR_BAD_FORMAT: DWORD = 11;
+pub const ERROR_INVALID_ACCESS: DWORD = 12;
+pub const ERROR_INVALID_DATA: DWORD = 13;
+pub const ERROR_OUTOFMEMORY: DWORD = 14;
+pub const ERROR_INVALID_DRIVE: DWORD = 15;
+pub const ERROR_CURRENT_DIRECTORY: DWORD = 16;
+pub const ERROR_NOT_SAME_DEVICE: DWORD = 17;
+pub const ERROR_NO_MORE_FILES: DWORD = 18;
+pub const ERROR_WRITE_PROTECT: DWORD = 19;
+pub const ERROR_BAD_UNIT: DWORD = 20;
+pub const ERROR_NOT_READY: DWORD = 21;
+pub const ERROR_BAD_COMMAND: DWORD = 22;
+pub const ERROR_CRC: DWORD = 23;
+pub const ERROR_BAD_LENGTH: DWORD = 24;
+pub const ERROR_SEEK: DWORD = 25;
+pub const ERROR_NOT_DOS_DISK: DWORD = 26;
+pub const ERROR_SECTOR_NOT_FOUND: DWORD = 27;
+pub const ERROR_OUT_OF_PAPER: DWORD = 28;
+pub const ERROR_WRITE_FAULT: DWORD = 29;
+pub const ERROR_READ_FAULT: DWORD = 30;
+pub const ERROR_GEN_FAILURE: DWORD = 31;
+pub const ERROR_SHARING_VIOLATION: DWORD = 32;
+pub const ERROR_LOCK_VIOLATION: DWORD = 33;
+pub const ERROR_WRONG_DISK: DWORD = 34;
+pub const ERROR_SHARING_BUFFER_EXCEEDED: DWORD = 36;
+pub const ERROR_HANDLE_EOF: DWORD = 38;
+pub const ERROR_HANDLE_DISK_FULL: DWORD = 39;
+pub const ERROR_NOT_SUPPORTED: DWORD = 50;
+pub const ERROR_REM_NOT_LIST: DWORD = 51;
+pub const ERROR_DUP_NAME: DWORD = 52;
+pub const ERROR_BAD_NETPATH: DWORD = 53;
+pub const ERROR_NETWORK_BUSY: DWORD = 54;
+pub const ERROR_DEV_NOT_EXIST: DWORD = 55;
+pub const ERROR_TOO_MANY_CMDS: DWORD = 56;
+pub const ERROR_ADAP_HDW_ERR: DWORD = 57;
+pub const ERROR_BAD_NET_RESP: DWORD = 58;
+pub const ERROR_UNEXP_NET_ERR: DWORD = 59;
+pub const ERROR_BAD_REM_ADAP: DWORD = 60;
+pub const ERROR_PRINTQ_FULL: DWORD = 61;
+pub const ERROR_NO_SPOOL_SPACE: DWORD = 62;
+pub const ERROR_PRINT_CANCELLED: DWORD = 63;
+pub const ERROR_NETNAME_DELETED: DWORD = 64;
+pub const ERROR_NETWORK_ACCESS_DENIED: DWORD = 65;
+pub const ERROR_BAD_DEV_TYPE: DWORD = 66;
+pub const ERROR_BAD_NET_NAME: DWORD = 67;
+pub const ERROR_TOO_MANY_NAMES: DWORD = 68;
+pub const ERROR_TOO_MANY_SESS: DWORD = 69;
+pub const ERROR_SHARING_PAUSED: DWORD = 70;
+pub const ERROR_REQ_NOT_ACCEP: DWORD = 71;
+pub const ERROR_REDIR_PAUSED: DWORD = 72;
+pub const ERROR_FILE_EXISTS: DWORD = 80;
+pub const ERROR_CANNOT_MAKE: DWORD = 82;
+pub const ERROR_FAIL_I24: DWORD = 83;
+pub const ERROR_OUT_OF_STRUCTURES: DWORD = 84;
+pub const ERROR_ALREADY_ASSIGNED: DWORD = 85;
+pub const ERROR_INVALID_PASSWORD: DWORD = 86;
+pub const ERROR_INVALID_PARAMETER: DWORD = 87;
+pub const ERROR_NET_WRITE_FAULT: DWORD = 88;
+pub const ERROR_NO_PROC_SLOTS: DWORD = 89;
+pub const ERROR_TOO_MANY_SEMAPHORES: DWORD = 100;
+pub const ERROR_EXCL_SEM_ALREADY_OWNED: DWORD = 101;
+pub const ERROR_SEM_IS_SET: DWORD = 102;
+pub const ERROR_TOO_MANY_SEM_REQUESTS: DWORD = 103;
+pub const ERROR_INVALID_AT_INTERRUPT_TIME: DWORD = 104;
+pub const ERROR_SEM_OWNER_DIED: DWORD = 105;
+pub const ERROR_SEM_USER_LIMIT: DWORD = 106;
+pub const ERROR_DISK_CHANGE: DWORD = 107;
+pub const ERROR_DRIVE_LOCKED: DWORD = 108;
+pub const ERROR_BROKEN_PIPE: DWORD = 109;
+pub const ERROR_OPEN_FAILED: DWORD = 110;
+pub const ERROR_BUFFER_OVERFLOW: DWORD = 111;
+pub const ERROR_DISK_FULL: DWORD = 112;
+pub const ERROR_NO_MORE_SEARCH_HANDLES: DWORD = 113;
+pub const ERROR_INVALID_TARGET_HANDLE: DWORD = 114;
+pub const ERROR_INVALID_CATEGORY: DWORD = 117;
+pub const ERROR_INVALID_VERIFY_SWITCH: DWORD = 118;
+pub const ERROR_BAD_DRIVER_LEVEL: DWORD = 119;
+pub const ERROR_CALL_NOT_IMPLEMENTED: DWORD = 120;
+pub const ERROR_SEM_TIMEOUT: DWORD = 121;
+pub const ERROR_INSUFFICIENT_BUFFER: DWORD = 122;
+pub const ERROR_INVALID_NAME: DWORD = 123;
+pub const ERROR_INVALID_LEVEL: DWORD = 124;
+pub const ERROR_NO_VOLUME_LABEL: DWORD = 125;
+pub const ERROR_MOD_NOT_FOUND: DWORD = 126;
+pub const ERROR_PROC_NOT_FOUND: DWORD = 127;
+pub const ERROR_WAIT_NO_CHILDREN: DWORD = 128;
+pub const ERROR_CHILD_NOT_COMPLETE: DWORD = 129;
+pub const ERROR_DIRECT_ACCESS_HANDLE: DWORD = 130;
+pub const ERROR_NEGATIVE_SEEK: DWORD = 131;
+pub const ERROR_SEEK_ON_DEVICE: DWORD = 132;
+pub const ERROR_IS_JOIN_TARGET: DWORD = 133;
+pub const ERROR_IS_JOINED: DWORD = 134;
+pub const ERROR_IS_SUBSTED: DWORD = 135;
+pub const ERROR_NOT_JOINED: DWORD = 136;
+pub const ERROR_NOT_SUBSTED: DWORD = 137;
+pub const ERROR_JOIN_TO_JOIN: DWORD = 138;
+pub const ERROR_SUBST_TO_SUBST: DWORD = 139;
+pub const ERROR_JOIN_TO_SUBST: DWORD = 140;
+pub const ERROR_SUBST_TO_JOIN: DWORD = 141;
+pub const ERROR_BUSY_DRIVE: DWORD = 142;
+pub const ERROR_SAME_DRIVE: DWORD = 143;
+pub const ERROR_DIR_NOT_ROOT: DWORD = 144;
+pub const ERROR_DIR_NOT_EMPTY: DWORD = 145;
+pub const ERROR_IS_SUBST_PATH: DWORD = 146;
+pub const ERROR_IS_JOIN_PATH: DWORD = 147;
+pub const ERROR_PATH_BUSY: DWORD = 148;
+pub const ERROR_IS_SUBST_TARGET: DWORD = 149;
+pub const ERROR_SYSTEM_TRACE: DWORD = 150;
+pub const ERROR_INVALID_EVENT_COUNT: DWORD = 151;
+pub const ERROR_TOO_MANY_MUXWAITERS: DWORD = 152;
+pub const ERROR_INVALID_LIST_FORMAT: DWORD = 153;
+pub const ERROR_LABEL_TOO_LONG: DWORD = 154;
+pub const ERROR_TOO_MANY_TCBS: DWORD = 155;
+pub const ERROR_SIGNAL_REFUSED: DWORD = 156;
+pub const ERROR_DISCARDED: DWORD = 157;
+pub const ERROR_NOT_LOCKED: DWORD = 158;
+pub const ERROR_BAD_THREADID_ADDR: DWORD = 159;
+pub const ERROR_BAD_ARGUMENTS: DWORD = 160;
+pub const ERROR_BAD_PATHNAME: DWORD = 161;
+pub const ERROR_SIGNAL_PENDING: DWORD = 162;
+pub const ERROR_MAX_THRDS_REACHED: DWORD = 164;
+pub const ERROR_LOCK_FAILED: DWORD = 167;
+pub const ERROR_BUSY: DWORD = 170;
+pub const ERROR_CANCEL_VIOLATION: DWORD = 173;
+pub const ERROR_ATOMIC_LOCKS_NOT_SUPPORTED: DWORD = 174;
+pub const ERROR_INVALID_SEGMENT_NUMBER: DWORD = 180;
+pub const ERROR_INVALID_ORDINAL: DWORD = 182;
+pub const ERROR_ALREADY_EXISTS: DWORD = 183;
+pub const ERROR_INVALID_FLAG_NUMBER: DWORD = 186;
+pub const ERROR_SEM_NOT_FOUND: DWORD = 187;
+pub const ERROR_INVALID_STARTING_CODESEG: DWORD = 188;
+pub const ERROR_INVALID_STACKSEG: DWORD = 189;
+pub const ERROR_INVALID_MODULETYPE: DWORD = 190;
+pub const ERROR_INVALID_EXE_SIGNATURE: DWORD = 191;
+pub const ERROR_EXE_MARKED_INVALID: DWORD = 192;
+pub const ERROR_BAD_EXE_FORMAT: DWORD = 193;
+pub const ERROR_ITERATED_DATA_EXCEEDS_64k: DWORD = 194;
+pub const ERROR_INVALID_MINALLOCSIZE: DWORD = 195;
+pub const ERROR_DYNLINK_FROM_INVALID_RING: DWORD = 196;
+pub const ERROR_IOPL_NOT_ENABLED: DWORD = 197;
+pub const ERROR_INVALID_SEGDPL: DWORD = 198;
+pub const ERROR_AUTODATASEG_EXCEEDS_64k: DWORD = 199;
+pub const ERROR_RING2SEG_MUST_BE_MOVABLE: DWORD = 200;
+pub const ERROR_RELOC_CHAIN_XEEDS_SEGLIM: DWORD = 201;
+pub const ERROR_INFLOOP_IN_RELOC_CHAIN: DWORD = 202;
+pub const ERROR_ENVVAR_NOT_FOUND: DWORD = 203;
+pub const ERROR_NO_SIGNAL_SENT: DWORD = 205;
+pub const ERROR_FILENAME_EXCED_RANGE: DWORD = 206;
+pub const ERROR_RING2_STACK_IN_USE: DWORD = 207;
+pub const ERROR_META_EXPANSION_TOO_LONG: DWORD = 208;
+pub const ERROR_INVALID_SIGNAL_NUMBER: DWORD = 209;
+pub const ERROR_THREAD_1_INACTIVE: DWORD = 210;
+pub const ERROR_LOCKED: DWORD = 212;
+pub const ERROR_TOO_MANY_MODULES: DWORD = 214;
+pub const ERROR_NESTING_NOT_ALLOWED: DWORD = 215;
+pub const ERROR_EXE_MACHINE_TYPE_MISMATCH: DWORD = 216;
+pub const ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY: DWORD = 217;
+pub const ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY: DWORD = 218;
+pub const ERROR_FILE_CHECKED_OUT: DWORD = 220;
+pub const ERROR_CHECKOUT_REQUIRED: DWORD = 221;
+pub const ERROR_BAD_FILE_TYPE: DWORD = 222;
+pub const ERROR_FILE_TOO_LARGE: DWORD = 223;
+pub const ERROR_FORMS_AUTH_REQUIRED: DWORD = 224;
+pub const ERROR_PIPE_LOCAL: DWORD = 229;
+pub const ERROR_BAD_PIPE: DWORD = 230;
+pub const ERROR_PIPE_BUSY: DWORD = 231;
+pub const ERROR_NO_DATA: DWORD = 232;
+pub const ERROR_PIPE_NOT_CONNECTED: DWORD = 233;
+pub const ERROR_MORE_DATA: DWORD = 234;
+pub const ERROR_VC_DISCONNECTED: DWORD = 240;
+pub const ERROR_INVALID_EA_NAME: DWORD = 254;
+pub const ERROR_EA_LIST_INCONSISTENT: DWORD = 255;
+pub const ERROR_NO_MORE_ITEMS: DWORD = 259;
+pub const ERROR_CANNOT_COPY: DWORD = 266;
+pub const ERROR_DIRECTORY: DWORD = 267;
+pub const ERROR_EAS_DIDNT_FIT: DWORD = 275;
+pub const ERROR_EA_FILE_CORRUPT: DWORD = 276;
+pub const ERROR_EA_TABLE_FULL: DWORD = 277;
+pub const ERROR_INVALID_EA_HANDLE: DWORD = 278;
+pub const ERROR_EAS_NOT_SUPPORTED: DWORD = 282;
+pub const ERROR_NOT_OWNER: DWORD = 288;
+pub const ERROR_TOO_MANY_POSTS: DWORD = 298;
+pub const ERROR_PARTIAL_COPY: DWORD = 299;
+pub const ERROR_OPLOCK_NOT_GRANTED: DWORD = 300;
+pub const ERROR_INVALID_OPLOCK_PROTOCOL: DWORD = 301;
+pub const ERROR_DISK_TOO_FRAGMENTED: DWORD = 302;
+pub const ERROR_DELETE_PENDING: DWORD = 303;
+pub const ERROR_INVALID_TOKEN: DWORD = 315;
+pub const ERROR_MR_MID_NOT_FOUND: DWORD = 317;
+pub const ERROR_SCOPE_NOT_FOUND: DWORD = 318;
+pub const ERROR_INVALID_ADDRESS: DWORD = 487;
+pub const ERROR_ARITHMETIC_OVERFLOW: DWORD = 534;
+pub const ERROR_PIPE_CONNECTED: DWORD = 535;
+pub const ERROR_PIPE_LISTENING: DWORD = 536;
+pub const ERROR_WAKE_SYSTEM: DWORD = 730;
+pub const ERROR_WAIT_1: DWORD = 731;
+pub const ERROR_WAIT_2: DWORD = 732;
+pub const ERROR_WAIT_3: DWORD = 733;
+pub const ERROR_WAIT_63: DWORD = 734;
+pub const ERROR_ABANDONED_WAIT_0: DWORD = 735;
+pub const ERROR_ABANDONED_WAIT_63: DWORD = 736;
+pub const ERROR_USER_APC: DWORD = 737;
+pub const ERROR_KERNEL_APC: DWORD = 738;
+pub const ERROR_ALERTED: DWORD = 739;
+pub const ERROR_EA_ACCESS_DENIED: DWORD = 994;
+pub const ERROR_OPERATION_ABORTED: DWORD = 995;
+pub const ERROR_IO_INCOMPLETE: DWORD = 996;
+pub const ERROR_IO_PENDING: DWORD = 997;
+pub const ERROR_NOACCESS: DWORD = 998;
+pub const ERROR_SWAPERROR: DWORD = 999;
+pub const ERROR_STACK_OVERFLOW: DWORD = 1001;
+pub const ERROR_INVALID_MESSAGE: DWORD = 1002;
+pub const ERROR_CAN_NOT_COMPLETE: DWORD = 1003;
+pub const ERROR_INVALID_FLAGS: DWORD = 1004;
+pub const ERROR_UNRECOGNIZED_VOLUME: DWORD = 1005;
+pub const ERROR_FILE_INVALID: DWORD = 1006;
+pub const ERROR_FULLSCREEN_MODE: DWORD = 1007;
+pub const ERROR_NO_TOKEN: DWORD = 1008;
+pub const ERROR_BADDB: DWORD = 1009;
+pub const ERROR_BADKEY: DWORD = 1010;
+pub const ERROR_CANTOPEN: DWORD = 1011;
+pub const ERROR_CANTREAD: DWORD = 1012;
+pub const ERROR_CANTWRITE: DWORD = 1013;
+pub const ERROR_REGISTRY_RECOVERED: DWORD = 1014;
+pub const ERROR_REGISTRY_CORRUPT: DWORD = 1015;
+pub const ERROR_REGISTRY_IO_FAILED: DWORD = 1016;
+pub const ERROR_NOT_REGISTRY_FILE: DWORD = 1017;
+pub const ERROR_KEY_DELETED: DWORD = 1018;
+pub const ERROR_NO_LOG_SPACE: DWORD = 1019;
+pub const ERROR_KEY_HAS_CHILDREN: DWORD = 1020;
+pub const ERROR_CHILD_MUST_BE_VOLATILE: DWORD = 1021;
+pub const ERROR_NOTIFY_ENUM_DIR: DWORD = 1022;
+pub const ERROR_DEPENDENT_SERVICES_RUNNING: DWORD = 1051;
+pub const ERROR_INVALID_SERVICE_CONTROL: DWORD = 1052;
+pub const ERROR_SERVICE_REQUEST_TIMEOUT: DWORD = 1053;
+pub const ERROR_SERVICE_NO_THREAD: DWORD = 1054;
+pub const ERROR_SERVICE_DATABASE_LOCKED: DWORD = 1055;
+pub const ERROR_SERVICE_ALREADY_RUNNING: DWORD = 1056;
+pub const ERROR_INVALID_SERVICE_ACCOUNT: DWORD = 1057;
+pub const ERROR_SERVICE_DISABLED: DWORD = 1058;
+pub const ERROR_CIRCULAR_DEPENDENCY: DWORD = 1059;
+pub const ERROR_SERVICE_DOES_NOT_EXIST: DWORD = 1060;
+pub const ERROR_SERVICE_CANNOT_ACCEPT_CTRL: DWORD = 1061;
+pub const ERROR_SERVICE_NOT_ACTIVE: DWORD = 1062;
+pub const ERROR_FAILED_SERVICE_CONTROLLER_CONNECT: DWORD = 1063;
+pub const ERROR_EXCEPTION_IN_SERVICE: DWORD = 1064;
+pub const ERROR_DATABASE_DOES_NOT_EXIST: DWORD = 1065;
+pub const ERROR_SERVICE_SPECIFIC_ERROR: DWORD = 1066;
+pub const ERROR_PROCESS_ABORTED: DWORD = 1067;
+pub const ERROR_SERVICE_DEPENDENCY_FAIL: DWORD = 1068;
+pub const ERROR_SERVICE_LOGON_FAILED: DWORD = 1069;
+pub const ERROR_SERVICE_START_HANG: DWORD = 1070;
+pub const ERROR_INVALID_SERVICE_LOCK: DWORD = 1071;
+pub const ERROR_SERVICE_MARKED_FOR_DELETE: DWORD = 1072;
+pub const ERROR_SERVICE_EXISTS: DWORD = 1073;
+pub const ERROR_ALREADY_RUNNING_LKG: DWORD = 1074;
+pub const ERROR_SERVICE_DEPENDENCY_DELETED: DWORD = 1075;
+pub const ERROR_BOOT_ALREADY_ACCEPTED: DWORD = 1076;
+pub const ERROR_SERVICE_NEVER_STARTED: DWORD = 1077;
+pub const ERROR_DUPLICATE_SERVICE_NAME: DWORD = 1078;
+pub const ERROR_DIFFERENT_SERVICE_ACCOUNT: DWORD = 1079;
+pub const ERROR_CANNOT_DETECT_DRIVER_FAILURE: DWORD = 1080;
+pub const ERROR_CANNOT_DETECT_PROCESS_ABORT: DWORD = 1081;
+pub const ERROR_NO_RECOVERY_PROGRAM: DWORD = 1082;
+pub const ERROR_SERVICE_NOT_IN_EXE: DWORD = 1083;
+pub const ERROR_NOT_SAFEBOOT_SERVICE: DWORD = 1084;
+pub const ERROR_END_OF_MEDIA: DWORD = 1100;
+pub const ERROR_FILEMARK_DETECTED: DWORD = 1101;
+pub const ERROR_BEGINNING_OF_MEDIA: DWORD = 1102;
+pub const ERROR_SETMARK_DETECTED: DWORD = 1103;
+pub const ERROR_NO_DATA_DETECTED: DWORD = 1104;
+pub const ERROR_PARTITION_FAILURE: DWORD = 1105;
+pub const ERROR_INVALID_BLOCK_LENGTH: DWORD = 1106;
+pub const ERROR_DEVICE_NOT_PARTITIONED: DWORD = 1107;
+pub const ERROR_UNABLE_TO_LOCK_MEDIA: DWORD = 1108;
+pub const ERROR_UNABLE_TO_UNLOAD_MEDIA: DWORD = 1109;
+pub const ERROR_MEDIA_CHANGED: DWORD = 1110;
+pub const ERROR_BUS_RESET: DWORD = 1111;
+pub const ERROR_NO_MEDIA_IN_DRIVE: DWORD = 1112;
+pub const ERROR_NO_UNICODE_TRANSLATION: DWORD = 1113;
+pub const ERROR_DLL_INIT_FAILED: DWORD = 1114;
+pub const ERROR_SHUTDOWN_IN_PROGRESS: DWORD = 1115;
+pub const ERROR_NO_SHUTDOWN_IN_PROGRESS: DWORD = 1116;
+pub const ERROR_IO_DEVICE: DWORD = 1117;
+pub const ERROR_SERIAL_NO_DEVICE: DWORD = 1118;
+pub const ERROR_IRQ_BUSY: DWORD = 1119;
+pub const ERROR_MORE_WRITES: DWORD = 1120;
+pub const ERROR_COUNTER_TIMEOUT: DWORD = 1121;
+pub const ERROR_FLOPPY_ID_MARK_NOT_FOUND: DWORD = 1122;
+pub const ERROR_FLOPPY_WRONG_CYLINDER: DWORD = 1123;
+pub const ERROR_FLOPPY_UNKNOWN_ERROR: DWORD = 1124;
+pub const ERROR_FLOPPY_BAD_REGISTERS: DWORD = 1125;
+pub const ERROR_DISK_RECALIBRATE_FAILED: DWORD = 1126;
+pub const ERROR_DISK_OPERATION_FAILED: DWORD = 1127;
+pub const ERROR_DISK_RESET_FAILED: DWORD = 1128;
+pub const ERROR_EOM_OVERFLOW: DWORD = 1129;
+pub const ERROR_NOT_ENOUGH_SERVER_MEMORY: DWORD = 1130;
+pub const ERROR_POSSIBLE_DEADLOCK: DWORD = 1131;
+pub const ERROR_MAPPED_ALIGNMENT: DWORD = 1132;
+pub const ERROR_SET_POWER_STATE_VETOED: DWORD = 1140;
+pub const ERROR_SET_POWER_STATE_FAILED: DWORD = 1141;
+pub const ERROR_TOO_MANY_LINKS: DWORD = 1142;
+pub const ERROR_OLD_WIN_VERSION: DWORD = 1150;
+pub const ERROR_APP_WRONG_OS: DWORD = 1151;
+pub const ERROR_SINGLE_INSTANCE_APP: DWORD = 1152;
+pub const ERROR_RMODE_APP: DWORD = 1153;
+pub const ERROR_INVALID_DLL: DWORD = 1154;
+pub const ERROR_NO_ASSOCIATION: DWORD = 1155;
+pub const ERROR_DDE_FAIL: DWORD = 1156;
+pub const ERROR_DLL_NOT_FOUND: DWORD = 1157;
+pub const ERROR_NO_MORE_USER_HANDLES: DWORD = 1158;
+pub const ERROR_MESSAGE_SYNC_ONLY: DWORD = 1159;
+pub const ERROR_SOURCE_ELEMENT_EMPTY: DWORD = 1160;
+pub const ERROR_DESTINATION_ELEMENT_FULL: DWORD = 1161;
+pub const ERROR_ILLEGAL_ELEMENT_ADDRESS: DWORD = 1162;
+pub const ERROR_MAGAZINE_NOT_PRESENT: DWORD = 1163;
+pub const ERROR_DEVICE_REINITIALIZATION_NEEDED: DWORD = 1164;
+pub const ERROR_DEVICE_REQUIRES_CLEANING: DWORD = 1165;
+pub const ERROR_DEVICE_DOOR_OPEN: DWORD = 1166;
+pub const ERROR_DEVICE_NOT_CONNECTED: DWORD = 1167;
+pub const ERROR_NOT_FOUND: DWORD = 1168;
+pub const ERROR_NO_MATCH: DWORD = 1169;
+pub const ERROR_SET_NOT_FOUND: DWORD = 1170;
+pub const ERROR_POINT_NOT_FOUND: DWORD = 1171;
+pub const ERROR_NO_TRACKING_SERVICE: DWORD = 1172;
+pub const ERROR_NO_VOLUME_ID: DWORD = 1173;
+pub const ERROR_UNABLE_TO_REMOVE_REPLACED: DWORD = 1175;
+pub const ERROR_UNABLE_TO_MOVE_REPLACEMENT: DWORD = 1176;
+pub const ERROR_UNABLE_TO_MOVE_REPLACEMENT_2: DWORD = 1177;
+pub const ERROR_JOURNAL_DELETE_IN_PROGRESS: DWORD = 1178;
+pub const ERROR_JOURNAL_NOT_ACTIVE: DWORD = 1179;
+pub const ERROR_POTENTIAL_FILE_FOUND: DWORD = 1180;
+pub const ERROR_JOURNAL_ENTRY_DELETED: DWORD = 1181;
+pub const ERROR_BAD_DEVICE: DWORD = 1200;
+pub const ERROR_CONNECTION_UNAVAIL: DWORD = 1201;
+pub const ERROR_DEVICE_ALREADY_REMEMBERED: DWORD = 1202;
+pub const ERROR_NO_NET_OR_BAD_PATH: DWORD = 1203;
+pub const ERROR_BAD_PROVIDER: DWORD = 1204;
+pub const ERROR_CANNOT_OPEN_PROFILE: DWORD = 1205;
+pub const ERROR_BAD_PROFILE: DWORD = 1206;
+pub const ERROR_NOT_CONTAINER: DWORD = 1207;
+pub const ERROR_EXTENDED_ERROR: DWORD = 1208;
+pub const ERROR_INVALID_GROUPNAME: DWORD = 1209;
+pub const ERROR_INVALID_COMPUTERNAME: DWORD = 1210;
+pub const ERROR_INVALID_EVENTNAME: DWORD = 1211;
+pub const ERROR_INVALID_DOMAINNAME: DWORD = 1212;
+pub const ERROR_INVALID_SERVICENAME: DWORD = 1213;
+pub const ERROR_INVALID_NETNAME: DWORD = 1214;
+pub const ERROR_INVALID_SHARENAME: DWORD = 1215;
+pub const ERROR_INVALID_PASSWORDNAME: DWORD = 1216;
+pub const ERROR_INVALID_MESSAGENAME: DWORD = 1217;
+pub const ERROR_INVALID_MESSAGEDEST: DWORD = 1218;
+pub const ERROR_SESSION_CREDENTIAL_CONFLICT: DWORD = 1219;
+pub const ERROR_REMOTE_SESSION_LIMIT_EXCEEDED: DWORD = 1220;
+pub const ERROR_DUP_DOMAINNAME: DWORD = 1221;
+pub const ERROR_NO_NETWORK: DWORD = 1222;
+pub const ERROR_CANCELLED: DWORD = 1223;
+pub const ERROR_USER_MAPPED_FILE: DWORD = 1224;
+pub const ERROR_CONNECTION_REFUSED: DWORD = 1225;
+pub const ERROR_GRACEFUL_DISCONNECT: DWORD = 1226;
+pub const ERROR_ADDRESS_ALREADY_ASSOCIATED: DWORD = 1227;
+pub const ERROR_ADDRESS_NOT_ASSOCIATED: DWORD = 1228;
+pub const ERROR_CONNECTION_INVALID: DWORD = 1229;
+pub const ERROR_CONNECTION_ACTIVE: DWORD = 1230;
+pub const ERROR_NETWORK_UNREACHABLE: DWORD = 1231;
+pub const ERROR_HOST_UNREACHABLE: DWORD = 1232;
+pub const ERROR_PROTOCOL_UNREACHABLE: DWORD = 1233;
+pub const ERROR_PORT_UNREACHABLE: DWORD = 1234;
+pub const ERROR_REQUEST_ABORTED: DWORD = 1235;
+pub const ERROR_CONNECTION_ABORTED: DWORD = 1236;
+pub const ERROR_RETRY: DWORD = 1237;
+pub const ERROR_CONNECTION_COUNT_LIMIT: DWORD = 1238;
+pub const ERROR_LOGIN_TIME_RESTRICTION: DWORD = 1239;
+pub const ERROR_LOGIN_WKSTA_RESTRICTION: DWORD = 1240;
+pub const ERROR_INCORRECT_ADDRESS: DWORD = 1241;
+pub const ERROR_ALREADY_REGISTERED: DWORD = 1242;
+pub const ERROR_SERVICE_NOT_FOUND: DWORD = 1243;
+pub const ERROR_NOT_AUTHENTICATED: DWORD = 1244;
+pub const ERROR_NOT_LOGGED_ON: DWORD = 1245;
+pub const ERROR_CONTINUE: DWORD = 1246;
+pub const ERROR_ALREADY_INITIALIZED: DWORD = 1247;
+pub const ERROR_NO_MORE_DEVICES: DWORD = 1248;
+pub const ERROR_NO_SUCH_SITE: DWORD = 1249;
+pub const ERROR_DOMAIN_CONTROLLER_EXISTS: DWORD = 1250;
+pub const ERROR_ONLY_IF_CONNECTED: DWORD = 1251;
+pub const ERROR_OVERRIDE_NOCHANGES: DWORD = 1252;
+pub const ERROR_BAD_USER_PROFILE: DWORD = 1253;
+pub const ERROR_NOT_SUPPORTED_ON_SBS: DWORD = 1254;
+pub const ERROR_SERVER_SHUTDOWN_IN_PROGRESS: DWORD = 1255;
+pub const ERROR_HOST_DOWN: DWORD = 1256;
+pub const ERROR_NON_ACCOUNT_SID: DWORD = 1257;
+pub const ERROR_NON_DOMAIN_SID: DWORD = 1258;
+pub const ERROR_APPHELP_BLOCK: DWORD = 1259;
+pub const ERROR_ACCESS_DISABLED_BY_POLICY: DWORD = 1260;
+pub const ERROR_REG_NAT_CONSUMPTION: DWORD = 1261;
+pub const ERROR_CSCSHARE_OFFLINE: DWORD = 1262;
+pub const ERROR_PKINIT_FAILURE: DWORD = 1263;
+pub const ERROR_SMARTCARD_SUBSYSTEM_FAILURE: DWORD = 1264;
+pub const ERROR_DOWNGRADE_DETECTED: DWORD = 1265;
+pub const ERROR_MACHINE_LOCKED: DWORD = 1271;
+pub const ERROR_CALLBACK_SUPPLIED_INVALID_DATA: DWORD = 1273;
+pub const ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED: DWORD = 1274;
+pub const ERROR_DRIVER_BLOCKED: DWORD = 1275;
+pub const ERROR_INVALID_IMPORT_OF_NON_DLL: DWORD = 1276;
+pub const ERROR_ACCESS_DISABLED_WEBBLADE: DWORD = 1277;
+pub const ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER: DWORD = 1278;
+pub const ERROR_RECOVERY_FAILURE: DWORD = 1279;
+pub const ERROR_ALREADY_FIBER: DWORD = 1280;
+pub const ERROR_ALREADY_THREAD: DWORD = 1281;
+pub const ERROR_STACK_BUFFER_OVERRUN: DWORD = 1282;
+pub const ERROR_PARAMETER_QUOTA_EXCEEDED: DWORD = 1283;
+pub const ERROR_DEBUGGER_INACTIVE: DWORD = 1284;
+pub const ERROR_DELAY_LOAD_FAILED: DWORD = 1285;
+pub const ERROR_VDM_DISALLOWED: DWORD = 1286;
+pub const ERROR_UNIDENTIFIED_ERROR: DWORD = 1287;
+pub const ERROR_NOT_ALL_ASSIGNED: DWORD = 1300;
+pub const ERROR_SOME_NOT_MAPPED: DWORD = 1301;
+pub const ERROR_NO_QUOTAS_FOR_ACCOUNT: DWORD = 1302;
+pub const ERROR_LOCAL_USER_SESSION_KEY: DWORD = 1303;
+pub const ERROR_NULL_LM_PASSWORD: DWORD = 1304;
+pub const ERROR_UNKNOWN_REVISION: DWORD = 1305;
+pub const ERROR_REVISION_MISMATCH: DWORD = 1306;
+pub const ERROR_INVALID_OWNER: DWORD = 1307;
+pub const ERROR_INVALID_PRIMARY_GROUP: DWORD = 1308;
+pub const ERROR_NO_IMPERSONATION_TOKEN: DWORD = 1309;
+pub const ERROR_CANT_DISABLE_MANDATORY: DWORD = 1310;
+pub const ERROR_NO_LOGON_SERVERS: DWORD = 1311;
+pub const ERROR_NO_SUCH_LOGON_SESSION: DWORD = 1312;
+pub const ERROR_NO_SUCH_PRIVILEGE: DWORD = 1313;
+pub const ERROR_PRIVILEGE_NOT_HELD: DWORD = 1314;
+pub const ERROR_INVALID_ACCOUNT_NAME: DWORD = 1315;
+pub const ERROR_USER_EXISTS: DWORD = 1316;
+pub const ERROR_NO_SUCH_USER: DWORD = 1317;
+pub const ERROR_GROUP_EXISTS: DWORD = 1318;
+pub const ERROR_NO_SUCH_GROUP: DWORD = 1319;
+pub const ERROR_MEMBER_IN_GROUP: DWORD = 1320;
+pub const ERROR_MEMBER_NOT_IN_GROUP: DWORD = 1321;
+pub const ERROR_LAST_ADMIN: DWORD = 1322;
+pub const ERROR_WRONG_PASSWORD: DWORD = 1323;
+pub const ERROR_ILL_FORMED_PASSWORD: DWORD = 1324;
+pub const ERROR_PASSWORD_RESTRICTION: DWORD = 1325;
+pub const ERROR_LOGON_FAILURE: DWORD = 1326;
+pub const ERROR_ACCOUNT_RESTRICTION: DWORD = 1327;
+pub const ERROR_INVALID_LOGON_HOURS: DWORD = 1328;
+pub const ERROR_INVALID_WORKSTATION: DWORD = 1329;
+pub const ERROR_PASSWORD_EXPIRED: DWORD = 1330;
+pub const ERROR_ACCOUNT_DISABLED: DWORD = 1331;
+pub const ERROR_NONE_MAPPED: DWORD = 1332;
+pub const ERROR_TOO_MANY_LUIDS_REQUESTED: DWORD = 1333;
+pub const ERROR_LUIDS_EXHAUSTED: DWORD = 1334;
+pub const ERROR_INVALID_SUB_AUTHORITY: DWORD = 1335;
+pub const ERROR_INVALID_ACL: DWORD = 1336;
+pub const ERROR_INVALID_SID: DWORD = 1337;
+pub const ERROR_INVALID_SECURITY_DESCR: DWORD = 1338;
+pub const ERROR_BAD_INHERITANCE_ACL: DWORD = 1340;
+pub const ERROR_SERVER_DISABLED: DWORD = 1341;
+pub const ERROR_SERVER_NOT_DISABLED: DWORD = 1342;
+pub const ERROR_INVALID_ID_AUTHORITY: DWORD = 1343;
+pub const ERROR_ALLOTTED_SPACE_EXCEEDED: DWORD = 1344;
+pub const ERROR_INVALID_GROUP_ATTRIBUTES: DWORD = 1345;
+pub const ERROR_BAD_IMPERSONATION_LEVEL: DWORD = 1346;
+pub const ERROR_CANT_OPEN_ANONYMOUS: DWORD = 1347;
+pub const ERROR_BAD_VALIDATION_CLASS: DWORD = 1348;
+pub const ERROR_BAD_TOKEN_TYPE: DWORD = 1349;
+pub const ERROR_NO_SECURITY_ON_OBJECT: DWORD = 1350;
+pub const ERROR_CANT_ACCESS_DOMAIN_INFO: DWORD = 1351;
+pub const ERROR_INVALID_SERVER_STATE: DWORD = 1352;
+pub const ERROR_INVALID_DOMAIN_STATE: DWORD = 1353;
+pub const ERROR_INVALID_DOMAIN_ROLE: DWORD = 1354;
+pub const ERROR_NO_SUCH_DOMAIN: DWORD = 1355;
+pub const ERROR_DOMAIN_EXISTS: DWORD = 1356;
+pub const ERROR_DOMAIN_LIMIT_EXCEEDED: DWORD = 1357;
+pub const ERROR_INTERNAL_DB_CORRUPTION: DWORD = 1358;
+pub const ERROR_INTERNAL_ERROR: DWORD = 1359;
+pub const ERROR_GENERIC_NOT_MAPPED: DWORD = 1360;
+pub const ERROR_BAD_DESCRIPTOR_FORMAT: DWORD = 1361;
+pub const ERROR_NOT_LOGON_PROCESS: DWORD = 1362;
+pub const ERROR_LOGON_SESSION_EXISTS: DWORD = 1363;
+pub const ERROR_NO_SUCH_PACKAGE: DWORD = 1364;
+pub const ERROR_BAD_LOGON_SESSION_STATE: DWORD = 1365;
+pub const ERROR_LOGON_SESSION_COLLISION: DWORD = 1366;
+pub const ERROR_INVALID_LOGON_TYPE: DWORD = 1367;
+pub const ERROR_CANNOT_IMPERSONATE: DWORD = 1368;
+pub const ERROR_RXACT_INVALID_STATE: DWORD = 1369;
+pub const ERROR_RXACT_COMMIT_FAILURE: DWORD = 1370;
+pub const ERROR_SPECIAL_ACCOUNT: DWORD = 1371;
+pub const ERROR_SPECIAL_GROUP: DWORD = 1372;
+pub const ERROR_SPECIAL_USER: DWORD = 1373;
+pub const ERROR_MEMBERS_PRIMARY_GROUP: DWORD = 1374;
+pub const ERROR_TOKEN_ALREADY_IN_USE: DWORD = 1375;
+pub const ERROR_NO_SUCH_ALIAS: DWORD = 1376;
+pub const ERROR_MEMBER_NOT_IN_ALIAS: DWORD = 1377;
+pub const ERROR_MEMBER_IN_ALIAS: DWORD = 1378;
+pub const ERROR_ALIAS_EXISTS: DWORD = 1379;
+pub const ERROR_LOGON_NOT_GRANTED: DWORD = 1380;
+pub const ERROR_TOO_MANY_SECRETS: DWORD = 1381;
+pub const ERROR_SECRET_TOO_LONG: DWORD = 1382;
+pub const ERROR_INTERNAL_DB_ERROR: DWORD = 1383;
+pub const ERROR_TOO_MANY_CONTEXT_IDS: DWORD = 1384;
+pub const ERROR_LOGON_TYPE_NOT_GRANTED: DWORD = 1385;
+pub const ERROR_NT_CROSS_ENCRYPTION_REQUIRED: DWORD = 1386;
+pub const ERROR_NO_SUCH_MEMBER: DWORD = 1387;
+pub const ERROR_INVALID_MEMBER: DWORD = 1388;
+pub const ERROR_TOO_MANY_SIDS: DWORD = 1389;
+pub const ERROR_LM_CROSS_ENCRYPTION_REQUIRED: DWORD = 1390;
+pub const ERROR_NO_INHERITANCE: DWORD = 1391;
+pub const ERROR_FILE_CORRUPT: DWORD = 1392;
+pub const ERROR_DISK_CORRUPT: DWORD = 1393;
+pub const ERROR_NO_USER_SESSION_KEY: DWORD = 1394;
+pub const ERROR_LICENSE_QUOTA_EXCEEDED: DWORD = 1395;
+pub const ERROR_WRONG_TARGET_NAME: DWORD = 1396;
+pub const ERROR_MUTUAL_AUTH_FAILED: DWORD = 1397;
+pub const ERROR_TIME_SKEW: DWORD = 1398;
+pub const ERROR_CURRENT_DOMAIN_NOT_ALLOWED: DWORD = 1399;
+pub const ERROR_INVALID_WINDOW_HANDLE: DWORD = 1400;
+pub const ERROR_INVALID_MENU_HANDLE: DWORD = 1401;
+pub const ERROR_INVALID_CURSOR_HANDLE: DWORD = 1402;
+pub const ERROR_INVALID_ACCEL_HANDLE: DWORD = 1403;
+pub const ERROR_INVALID_HOOK_HANDLE: DWORD = 1404;
+pub const ERROR_INVALID_DWP_HANDLE: DWORD = 1405;
+pub const ERROR_TLW_WITH_WSCHILD: DWORD = 1406;
+pub const ERROR_CANNOT_FIND_WND_CLASS: DWORD = 1407;
+pub const ERROR_WINDOW_OF_OTHER_THREAD: DWORD = 1408;
+pub const ERROR_HOTKEY_ALREADY_REGISTERED: DWORD = 1409;
+pub const ERROR_CLASS_ALREADY_EXISTS: DWORD = 1410;
+pub const ERROR_CLASS_DOES_NOT_EXIST: DWORD = 1411;
+pub const ERROR_CLASS_HAS_WINDOWS: DWORD = 1412;
+pub const ERROR_INVALID_INDEX: DWORD = 1413;
+pub const ERROR_INVALID_ICON_HANDLE: DWORD = 1414;
+pub const ERROR_PRIVATE_DIALOG_INDEX: DWORD = 1415;
+pub const ERROR_LISTBOX_ID_NOT_FOUND: DWORD = 1416;
+pub const ERROR_NO_WILDCARD_CHARACTERS: DWORD = 1417;
+pub const ERROR_CLIPBOARD_NOT_OPEN: DWORD = 1418;
+pub const ERROR_HOTKEY_NOT_REGISTERED: DWORD = 1419;
+pub const ERROR_WINDOW_NOT_DIALOG: DWORD = 1420;
+pub const ERROR_CONTROL_ID_NOT_FOUND: DWORD = 1421;
+pub const ERROR_INVALID_COMBOBOX_MESSAGE: DWORD = 1422;
+pub const ERROR_WINDOW_NOT_COMBOBOX: DWORD = 1423;
+pub const ERROR_INVALID_EDIT_HEIGHT: DWORD = 1424;
+pub const ERROR_DC_NOT_FOUND: DWORD = 1425;
+pub const ERROR_INVALID_HOOK_FILTER: DWORD = 1426;
+pub const ERROR_INVALID_FILTER_PROC: DWORD = 1427;
+pub const ERROR_HOOK_NEEDS_HMOD: DWORD = 1428;
+pub const ERROR_GLOBAL_ONLY_HOOK: DWORD = 1429;
+pub const ERROR_JOURNAL_HOOK_SET: DWORD = 1430;
+pub const ERROR_HOOK_NOT_INSTALLED: DWORD = 1431;
+pub const ERROR_INVALID_LB_MESSAGE: DWORD = 1432;
+pub const ERROR_SETCOUNT_ON_BAD_LB: DWORD = 1433;
+pub const ERROR_LB_WITHOUT_TABSTOPS: DWORD = 1434;
+pub const ERROR_DESTROY_OBJECT_OF_OTHER_THREAD: DWORD = 1435;
+pub const ERROR_CHILD_WINDOW_MENU: DWORD = 1436;
+pub const ERROR_NO_SYSTEM_MENU: DWORD = 1437;
+pub const ERROR_INVALID_MSGBOX_STYLE: DWORD = 1438;
+pub const ERROR_INVALID_SPI_VALUE: DWORD = 1439;
+pub const ERROR_SCREEN_ALREADY_LOCKED: DWORD = 1440;
+pub const ERROR_HWNDS_HAVE_DIFF_PARENT: DWORD = 1441;
+pub const ERROR_NOT_CHILD_WINDOW: DWORD = 1442;
+pub const ERROR_INVALID_GW_COMMAND: DWORD = 1443;
+pub const ERROR_INVALID_THREAD_ID: DWORD = 1444;
+pub const ERROR_NON_MDICHILD_WINDOW: DWORD = 1445;
+pub const ERROR_POPUP_ALREADY_ACTIVE: DWORD = 1446;
+pub const ERROR_NO_SCROLLBARS: DWORD = 1447;
+pub const ERROR_INVALID_SCROLLBAR_RANGE: DWORD = 1448;
+pub const ERROR_INVALID_SHOWWIN_COMMAND: DWORD = 1449;
+pub const ERROR_NO_SYSTEM_RESOURCES: DWORD = 1450;
+pub const ERROR_NONPAGED_SYSTEM_RESOURCES: DWORD = 1451;
+pub const ERROR_PAGED_SYSTEM_RESOURCES: DWORD = 1452;
+pub const ERROR_WORKING_SET_QUOTA: DWORD = 1453;
+pub const ERROR_PAGEFILE_QUOTA: DWORD = 1454;
+pub const ERROR_COMMITMENT_LIMIT: DWORD = 1455;
+pub const ERROR_MENU_ITEM_NOT_FOUND: DWORD = 1456;
+pub const ERROR_INVALID_KEYBOARD_HANDLE: DWORD = 1457;
+pub const ERROR_HOOK_TYPE_NOT_ALLOWED: DWORD = 1458;
+pub const ERROR_REQUIRES_INTERACTIVE_WINDOWSTATION: DWORD = 1459;
+pub const ERROR_TIMEOUT: DWORD = 1460;
+pub const ERROR_INVALID_MONITOR_HANDLE: DWORD = 1461;
+pub const ERROR_INCORRECT_SIZE: DWORD = 1462;
+pub const ERROR_SYMLINK_CLASS_DISABLED: DWORD = 1463;
+pub const ERROR_SYMLINK_NOT_SUPPORTED: DWORD = 1464;
+pub const ERROR_XML_PARSE_ERROR: DWORD = 1465;
+pub const ERROR_XMLDSIG_ERROR: DWORD = 1466;
+pub const ERROR_RESTART_APPLICATION: DWORD = 1467;
+pub const ERROR_WRONG_COMPARTMENT: DWORD = 1468;
+pub const ERROR_AUTHIP_FAILURE: DWORD = 1469;
+pub const ERROR_NO_NVRAM_RESOURCES: DWORD = 1470;
+pub const ERROR_NOT_GUI_PROCESS: DWORD = 1471;
+pub const ERROR_EVENTLOG_FILE_CORRUPT: DWORD = 1500;
+pub const ERROR_EVENTLOG_CANT_START: DWORD = 1501;
+pub const ERROR_LOG_FILE_FULL: DWORD = 1502;
+pub const ERROR_EVENTLOG_FILE_CHANGED: DWORD = 1503;
+pub const ERROR_INSTALL_SERVICE_FAILURE: DWORD = 1601;
+pub const ERROR_INSTALL_USEREXIT: DWORD = 1602;
+pub const ERROR_INSTALL_FAILURE: DWORD = 1603;
+pub const ERROR_INSTALL_SUSPEND: DWORD = 1604;
+pub const ERROR_UNKNOWN_PRODUCT: DWORD = 1605;
+pub const ERROR_UNKNOWN_FEATURE: DWORD = 1606;
+pub const ERROR_UNKNOWN_COMPONENT: DWORD = 1607;
+pub const ERROR_UNKNOWN_PROPERTY: DWORD = 1608;
+pub const ERROR_INVALID_HANDLE_STATE: DWORD = 1609;
+pub const ERROR_BAD_CONFIGURATION: DWORD = 1610;
+pub const ERROR_INDEX_ABSENT: DWORD = 1611;
+pub const ERROR_INSTALL_SOURCE_ABSENT: DWORD = 1612;
+pub const ERROR_INSTALL_PACKAGE_VERSION: DWORD = 1613;
+pub const ERROR_PRODUCT_UNINSTALLED: DWORD = 1614;
+pub const ERROR_BAD_QUERY_SYNTAX: DWORD = 1615;
+pub const ERROR_INVALID_FIELD: DWORD = 1616;
+pub const ERROR_DEVICE_REMOVED: DWORD = 1617;
+pub const ERROR_INSTALL_ALREADY_RUNNING: DWORD = 1618;
+pub const ERROR_INSTALL_PACKAGE_OPEN_FAILED: DWORD = 1619;
+pub const ERROR_INSTALL_PACKAGE_INVALID: DWORD = 1620;
+pub const ERROR_INSTALL_UI_FAILURE: DWORD = 1621;
+pub const ERROR_INSTALL_LOG_FAILURE: DWORD = 1622;
+pub const ERROR_INSTALL_LANGUAGE_UNSUPPORTED: DWORD = 1623;
+pub const ERROR_INSTALL_TRANSFORM_FAILURE: DWORD = 1624;
+pub const ERROR_INSTALL_PACKAGE_REJECTED: DWORD = 1625;
+pub const ERROR_FUNCTION_NOT_CALLED: DWORD = 1626;
+pub const ERROR_FUNCTION_FAILED: DWORD = 1627;
+pub const ERROR_INVALID_TABLE: DWORD = 1628;
+pub const ERROR_DATATYPE_MISMATCH: DWORD = 1629;
+pub const ERROR_UNSUPPORTED_TYPE: DWORD = 1630;
+pub const ERROR_CREATE_FAILED: DWORD = 1631;
+pub const ERROR_INSTALL_TEMP_UNWRITABLE: DWORD = 1632;
+pub const ERROR_INSTALL_PLATFORM_UNSUPPORTED: DWORD = 1633;
+pub const ERROR_INSTALL_NOTUSED: DWORD = 1634;
+pub const ERROR_PATCH_PACKAGE_OPEN_FAILED: DWORD = 1635;
+pub const ERROR_PATCH_PACKAGE_INVALID: DWORD = 1636;
+pub const ERROR_PATCH_PACKAGE_UNSUPPORTED: DWORD = 1637;
+pub const ERROR_PRODUCT_VERSION: DWORD = 1638;
+pub const ERROR_INVALID_COMMAND_LINE: DWORD = 1639;
+pub const ERROR_INSTALL_REMOTE_DISALLOWED: DWORD = 1640;
+pub const ERROR_SUCCESS_REBOOT_INITIATED: DWORD = 1641;
+pub const ERROR_PATCH_TARGET_NOT_FOUND: DWORD = 1642;
+pub const ERROR_PATCH_PACKAGE_REJECTED: DWORD = 1643;
+pub const ERROR_INSTALL_TRANSFORM_REJECTED: DWORD = 1644;
+pub const ERROR_INSTALL_REMOTE_PROHIBITED: DWORD = 1645;
+pub const ERROR_INVALID_USER_BUFFER: DWORD = 1784;
+pub const ERROR_UNRECOGNIZED_MEDIA: DWORD = 1785;
+pub const ERROR_NO_TRUST_LSA_SECRET: DWORD = 1786;
+pub const ERROR_NO_TRUST_SAM_ACCOUNT: DWORD = 1787;
+pub const ERROR_TRUSTED_DOMAIN_FAILURE: DWORD = 1788;
+pub const ERROR_TRUSTED_RELATIONSHIP_FAILURE: DWORD = 1789;
+pub const ERROR_TRUST_FAILURE: DWORD = 1790;
+pub const ERROR_NETLOGON_NOT_STARTED: DWORD = 1792;
+pub const ERROR_ACCOUNT_EXPIRED: DWORD = 1793;
+pub const ERROR_REDIRECTOR_HAS_OPEN_HANDLES: DWORD = 1794;
+pub const ERROR_PRINTER_DRIVER_ALREADY_INSTALLED: DWORD = 1795;
+pub const ERROR_UNKNOWN_PORT: DWORD = 1796;
+pub const ERROR_UNKNOWN_PRINTER_DRIVER: DWORD = 1797;
+pub const ERROR_UNKNOWN_PRINTPROCESSOR: DWORD = 1798;
+pub const ERROR_INVALID_SEPARATOR_FILE: DWORD = 1799;
+pub const ERROR_INVALID_PRIORITY: DWORD = 1800;
+pub const ERROR_INVALID_PRINTER_NAME: DWORD = 1801;
+pub const ERROR_PRINTER_ALREADY_EXISTS: DWORD = 1802;
+pub const ERROR_INVALID_PRINTER_COMMAND: DWORD = 1803;
+pub const ERROR_INVALID_DATATYPE: DWORD = 1804;
+pub const ERROR_INVALID_ENVIRONMENT: DWORD = 1805;
+pub const ERROR_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT: DWORD = 1807;
+pub const ERROR_NOLOGON_WORKSTATION_TRUST_ACCOUNT: DWORD = 1808;
+pub const ERROR_NOLOGON_SERVER_TRUST_ACCOUNT: DWORD = 1809;
+pub const ERROR_DOMAIN_TRUST_INCONSISTENT: DWORD = 1810;
+pub const ERROR_SERVER_HAS_OPEN_HANDLES: DWORD = 1811;
+pub const ERROR_RESOURCE_DATA_NOT_FOUND: DWORD = 1812;
+pub const ERROR_RESOURCE_TYPE_NOT_FOUND: DWORD = 1813;
+pub const ERROR_RESOURCE_NAME_NOT_FOUND: DWORD = 1814;
+pub const ERROR_RESOURCE_LANG_NOT_FOUND: DWORD = 1815;
+pub const ERROR_NOT_ENOUGH_QUOTA: DWORD = 1816;
+pub const ERROR_INVALID_TIME: DWORD = 1901;
+pub const ERROR_INVALID_FORM_NAME: DWORD = 1902;
+pub const ERROR_INVALID_FORM_SIZE: DWORD = 1903;
+pub const ERROR_ALREADY_WAITING: DWORD = 1904;
+pub const ERROR_PRINTER_DELETED: DWORD = 1905;
+pub const ERROR_INVALID_PRINTER_STATE: DWORD = 1906;
+pub const ERROR_PASSWORD_MUST_CHANGE: DWORD = 1907;
+pub const ERROR_DOMAIN_CONTROLLER_NOT_FOUND: DWORD = 1908;
+pub const ERROR_ACCOUNT_LOCKED_OUT: DWORD = 1909;
+pub const ERROR_NO_SITENAME: DWORD = 1919;
+pub const ERROR_CANT_ACCESS_FILE: DWORD = 1920;
+pub const ERROR_CANT_RESOLVE_FILENAME: DWORD = 1921;
+pub const ERROR_KM_DRIVER_BLOCKED: DWORD = 1930;
+pub const ERROR_CONTEXT_EXPIRED: DWORD = 1931;
+pub const ERROR_PER_USER_TRUST_QUOTA_EXCEEDED: DWORD = 1932;
+pub const ERROR_ALL_USER_TRUST_QUOTA_EXCEEDED: DWORD = 1933;
+pub const ERROR_USER_DELETE_TRUST_QUOTA_EXCEEDED: DWORD = 1934;
+pub const ERROR_AUTHENTICATION_FIREWALL_FAILED: DWORD = 1935;
+pub const ERROR_REMOTE_PRINT_CONNECTIONS_BLOCKED: DWORD = 1936;
+pub const ERROR_INVALID_PIXEL_FORMAT: DWORD = 2000;
+pub const ERROR_BAD_DRIVER: DWORD = 2001;
+pub const ERROR_INVALID_WINDOW_STYLE: DWORD = 2002;
+pub const ERROR_METAFILE_NOT_SUPPORTED: DWORD = 2003;
+pub const ERROR_TRANSFORM_NOT_SUPPORTED: DWORD = 2004;
+pub const ERROR_CLIPPING_NOT_SUPPORTED: DWORD = 2005;
+pub const ERROR_INVALID_CMM: DWORD = 2010;
+pub const ERROR_INVALID_PROFILE: DWORD = 2011;
+pub const ERROR_TAG_NOT_FOUND: DWORD = 2012;
+pub const ERROR_TAG_NOT_PRESENT: DWORD = 2013;
+pub const ERROR_DUPLICATE_TAG: DWORD = 2014;
+pub const ERROR_PROFILE_NOT_ASSOCIATED_WITH_DEVICE: DWORD = 2015;
+pub const ERROR_PROFILE_NOT_FOUND: DWORD = 2016;
+pub const ERROR_INVALID_COLORSPACE: DWORD = 2017;
+pub const ERROR_ICM_NOT_ENABLED: DWORD = 2018;
+pub const ERROR_DELETING_ICM_XFORM: DWORD = 2019;
+pub const ERROR_INVALID_TRANSFORM: DWORD = 2020;
+pub const ERROR_COLORSPACE_MISMATCH: DWORD = 2021;
+pub const ERROR_INVALID_COLORINDEX: DWORD = 2022;
+pub const ERROR_CONNECTED_OTHER_PASSWORD: DWORD = 2108;
+pub const ERROR_CONNECTED_OTHER_PASSWORD_DEFAULT: DWORD = 2109;
+pub const ERROR_BAD_USERNAME: DWORD = 2202;
+pub const ERROR_NOT_CONNECTED: DWORD = 2250;
+pub const ERROR_OPEN_FILES: DWORD = 2401;
+pub const ERROR_ACTIVE_CONNECTIONS: DWORD = 2402;
+pub const ERROR_DEVICE_IN_USE: DWORD = 2404;
+pub const ERROR_UNKNOWN_PRINT_MONITOR: DWORD = 3000;
+pub const ERROR_PRINTER_DRIVER_IN_USE: DWORD = 3001;
+pub const ERROR_SPOOL_FILE_NOT_FOUND: DWORD = 3002;
+pub const ERROR_SPL_NO_STARTDOC: DWORD = 3003;
+pub const ERROR_SPL_NO_ADDJOB: DWORD = 3004;
+pub const ERROR_PRINT_PROCESSOR_ALREADY_INSTALLED: DWORD = 3005;
+pub const ERROR_PRINT_MONITOR_ALREADY_INSTALLED: DWORD = 3006;
+pub const ERROR_INVALID_PRINT_MONITOR: DWORD = 3007;
+pub const ERROR_PRINT_MONITOR_IN_USE: DWORD = 3008;
+pub const ERROR_PRINTER_HAS_JOBS_QUEUED: DWORD = 3009;
+pub const ERROR_SUCCESS_REBOOT_REQUIRED: DWORD = 3010;
+pub const ERROR_SUCCESS_RESTART_REQUIRED: DWORD = 3011;
+pub const ERROR_PRINTER_NOT_FOUND: DWORD = 3012;
+pub const ERROR_PRINTER_DRIVER_WARNED: DWORD = 3013;
+pub const ERROR_PRINTER_DRIVER_BLOCKED: DWORD = 3014;
+pub const ERROR_WINS_INTERNAL: DWORD = 4000;
+pub const ERROR_CAN_NOT_DEL_LOCAL_WINS: DWORD = 4001;
+pub const ERROR_STATIC_INIT: DWORD = 4002;
+pub const ERROR_INC_BACKUP: DWORD = 4003;
+pub const ERROR_FULL_BACKUP: DWORD = 4004;
+pub const ERROR_REC_NON_EXISTENT: DWORD = 4005;
+pub const ERROR_RPL_NOT_ALLOWED: DWORD = 4006;
+pub const ERROR_DHCP_ADDRESS_CONFLICT: DWORD = 4100;
+pub const ERROR_WMI_GUID_NOT_FOUND: DWORD = 4200;
+pub const ERROR_WMI_INSTANCE_NOT_FOUND: DWORD = 4201;
+pub const ERROR_WMI_ITEMID_NOT_FOUND: DWORD = 4202;
+pub const ERROR_WMI_TRY_AGAIN: DWORD = 4203;
+pub const ERROR_WMI_DP_NOT_FOUND: DWORD = 4204;
+pub const ERROR_WMI_UNRESOLVED_INSTANCE_REF: DWORD = 4205;
+pub const ERROR_WMI_ALREADY_ENABLED: DWORD = 4206;
+pub const ERROR_WMI_GUID_DISCONNECTED: DWORD = 4207;
+pub const ERROR_WMI_SERVER_UNAVAILABLE: DWORD = 4208;
+pub const ERROR_WMI_DP_FAILED: DWORD = 4209;
+pub const ERROR_WMI_INVALID_MOF: DWORD = 4210;
+pub const ERROR_WMI_INVALID_REGINFO: DWORD = 4211;
+pub const ERROR_WMI_ALREADY_DISABLED: DWORD = 4212;
+pub const ERROR_WMI_READ_ONLY: DWORD = 4213;
+pub const ERROR_WMI_SET_FAILURE: DWORD = 4214;
+pub const ERROR_INVALID_MEDIA: DWORD = 4300;
+pub const ERROR_INVALID_LIBRARY: DWORD = 4301;
+pub const ERROR_INVALID_MEDIA_POOL: DWORD = 4302;
+pub const ERROR_DRIVE_MEDIA_MISMATCH: DWORD = 4303;
+pub const ERROR_MEDIA_OFFLINE: DWORD = 4304;
+pub const ERROR_LIBRARY_OFFLINE: DWORD = 4305;
+pub const ERROR_EMPTY: DWORD = 4306;
+pub const ERROR_NOT_EMPTY: DWORD = 4307;
+pub const ERROR_MEDIA_UNAVAILABLE: DWORD = 4308;
+pub const ERROR_RESOURCE_DISABLED: DWORD = 4309;
+pub const ERROR_INVALID_CLEANER: DWORD = 4310;
+pub const ERROR_UNABLE_TO_CLEAN: DWORD = 4311;
+pub const ERROR_OBJECT_NOT_FOUND: DWORD = 4312;
+pub const ERROR_DATABASE_FAILURE: DWORD = 4313;
+pub const ERROR_DATABASE_FULL: DWORD = 4314;
+pub const ERROR_MEDIA_INCOMPATIBLE: DWORD = 4315;
+pub const ERROR_RESOURCE_NOT_PRESENT: DWORD = 4316;
+pub const ERROR_INVALID_OPERATION: DWORD = 4317;
+pub const ERROR_MEDIA_NOT_AVAILABLE: DWORD = 4318;
+pub const ERROR_DEVICE_NOT_AVAILABLE: DWORD = 4319;
+pub const ERROR_REQUEST_REFUSED: DWORD = 4320;
+pub const ERROR_INVALID_DRIVE_OBJECT: DWORD = 4321;
+pub const ERROR_LIBRARY_FULL: DWORD = 4322;
+pub const ERROR_MEDIUM_NOT_ACCESSIBLE: DWORD = 4323;
+pub const ERROR_UNABLE_TO_LOAD_MEDIUM: DWORD = 4324;
+pub const ERROR_UNABLE_TO_INVENTORY_DRIVE: DWORD = 4325;
+pub const ERROR_UNABLE_TO_INVENTORY_SLOT: DWORD = 4326;
+pub const ERROR_UNABLE_TO_INVENTORY_TRANSPORT: DWORD = 4327;
+pub const ERROR_TRANSPORT_FULL: DWORD = 4328;
+pub const ERROR_CONTROLLING_IEPORT: DWORD = 4329;
+pub const ERROR_UNABLE_TO_EJECT_MOUNTED_MEDIA: DWORD = 4330;
+pub const ERROR_CLEANER_SLOT_SET: DWORD = 4331;
+pub const ERROR_CLEANER_SLOT_NOT_SET: DWORD = 4332;
+pub const ERROR_CLEANER_CARTRIDGE_SPENT: DWORD = 4333;
+pub const ERROR_UNEXPECTED_OMID: DWORD = 4334;
+pub const ERROR_CANT_DELETE_LAST_ITEM: DWORD = 4335;
+pub const ERROR_MESSAGE_EXCEEDS_MAX_SIZE: DWORD = 4336;
+pub const ERROR_VOLUME_CONTAINS_SYS_FILES: DWORD = 4337;
+pub const ERROR_INDIGENOUS_TYPE: DWORD = 4338;
+pub const ERROR_NO_SUPPORTING_DRIVES: DWORD = 4339;
+pub const ERROR_CLEANER_CARTRIDGE_INSTALLED: DWORD = 4340;
+pub const ERROR_IEPORT_FULL: DWORD = 4341;
+pub const ERROR_FILE_OFFLINE: DWORD = 4350;
+pub const ERROR_REMOTE_STORAGE_NOT_ACTIVE: DWORD = 4351;
+pub const ERROR_REMOTE_STORAGE_MEDIA_ERROR: DWORD = 4352;
+pub const ERROR_NOT_A_REPARSE_POINT: DWORD = 4390;
+pub const ERROR_REPARSE_ATTRIBUTE_CONFLICT: DWORD = 4391;
+pub const ERROR_INVALID_REPARSE_DATA: DWORD = 4392;
+pub const ERROR_REPARSE_TAG_INVALID: DWORD = 4393;
+pub const ERROR_REPARSE_TAG_MISMATCH: DWORD = 4394;
+pub const ERROR_VOLUME_NOT_SIS_ENABLED: DWORD = 4500;
+pub const ERROR_DEPENDENT_RESOURCE_EXISTS: DWORD = 5001;
+pub const ERROR_DEPENDENCY_NOT_FOUND: DWORD = 5002;
+pub const ERROR_DEPENDENCY_ALREADY_EXISTS: DWORD = 5003;
+pub const ERROR_RESOURCE_NOT_ONLINE: DWORD = 5004;
+pub const ERROR_HOST_NODE_NOT_AVAILABLE: DWORD = 5005;
+pub const ERROR_RESOURCE_NOT_AVAILABLE: DWORD = 5006;
+pub const ERROR_RESOURCE_NOT_FOUND: DWORD = 5007;
+pub const ERROR_SHUTDOWN_CLUSTER: DWORD = 5008;
+pub const ERROR_CANT_EVICT_ACTIVE_NODE: DWORD = 5009;
+pub const ERROR_OBJECT_ALREADY_EXISTS: DWORD = 5010;
+pub const ERROR_OBJECT_IN_LIST: DWORD = 5011;
+pub const ERROR_GROUP_NOT_AVAILABLE: DWORD = 5012;
+pub const ERROR_GROUP_NOT_FOUND: DWORD = 5013;
+pub const ERROR_GROUP_NOT_ONLINE: DWORD = 5014;
+pub const ERROR_HOST_NODE_NOT_RESOURCE_OWNER: DWORD = 5015;
+pub const ERROR_HOST_NODE_NOT_GROUP_OWNER: DWORD = 5016;
+pub const ERROR_RESMON_CREATE_FAILED: DWORD = 5017;
+pub const ERROR_RESMON_ONLINE_FAILED: DWORD = 5018;
+pub const ERROR_RESOURCE_ONLINE: DWORD = 5019;
+pub const ERROR_QUORUM_RESOURCE: DWORD = 5020;
+pub const ERROR_NOT_QUORUM_CAPABLE: DWORD = 5021;
+pub const ERROR_CLUSTER_SHUTTING_DOWN: DWORD = 5022;
+pub const ERROR_INVALID_STATE: DWORD = 5023;
+pub const ERROR_RESOURCE_PROPERTIES_STORED: DWORD = 5024;
+pub const ERROR_NOT_QUORUM_CLASS: DWORD = 5025;
+pub const ERROR_CORE_RESOURCE: DWORD = 5026;
+pub const ERROR_QUORUM_RESOURCE_ONLINE_FAILED: DWORD = 5027;
+pub const ERROR_QUORUMLOG_OPEN_FAILED: DWORD = 5028;
+pub const ERROR_CLUSTERLOG_CORRUPT: DWORD = 5029;
+pub const ERROR_CLUSTERLOG_RECORD_EXCEEDS_MAXSIZE: DWORD = 5030;
+pub const ERROR_CLUSTERLOG_EXCEEDS_MAXSIZE: DWORD = 5031;
+pub const ERROR_CLUSTERLOG_CHKPOINT_NOT_FOUND: DWORD = 5032;
+pub const ERROR_CLUSTERLOG_NOT_ENOUGH_SPACE: DWORD = 5033;
+pub const ERROR_QUORUM_OWNER_ALIVE: DWORD = 5034;
+pub const ERROR_NETWORK_NOT_AVAILABLE: DWORD = 5035;
+pub const ERROR_NODE_NOT_AVAILABLE: DWORD = 5036;
+pub const ERROR_ALL_NODES_NOT_AVAILABLE: DWORD = 5037;
+pub const ERROR_RESOURCE_FAILED: DWORD = 5038;
+pub const ERROR_CLUSTER_INVALID_NODE: DWORD = 5039;
+pub const ERROR_CLUSTER_NODE_EXISTS: DWORD = 5040;
+pub const ERROR_CLUSTER_JOIN_IN_PROGRESS: DWORD = 5041;
+pub const ERROR_CLUSTER_NODE_NOT_FOUND: DWORD = 5042;
+pub const ERROR_CLUSTER_LOCAL_NODE_NOT_FOUND: DWORD = 5043;
+pub const ERROR_CLUSTER_NETWORK_EXISTS: DWORD = 5044;
+pub const ERROR_CLUSTER_NETWORK_NOT_FOUND: DWORD = 5045;
+pub const ERROR_CLUSTER_NETINTERFACE_EXISTS: DWORD = 5046;
+pub const ERROR_CLUSTER_NETINTERFACE_NOT_FOUND: DWORD = 5047;
+pub const ERROR_CLUSTER_INVALID_REQUEST: DWORD = 5048;
+pub const ERROR_CLUSTER_INVALID_NETWORK_PROVIDER: DWORD = 5049;
+pub const ERROR_CLUSTER_NODE_DOWN: DWORD = 5050;
+pub const ERROR_CLUSTER_NODE_UNREACHABLE: DWORD = 5051;
+pub const ERROR_CLUSTER_NODE_NOT_MEMBER: DWORD = 5052;
+pub const ERROR_CLUSTER_JOIN_NOT_IN_PROGRESS: DWORD = 5053;
+pub const ERROR_CLUSTER_INVALID_NETWORK: DWORD = 5054;
+pub const ERROR_CLUSTER_NODE_UP: DWORD = 5056;
+pub const ERROR_CLUSTER_IPADDR_IN_USE: DWORD = 5057;
+pub const ERROR_CLUSTER_NODE_NOT_PAUSED: DWORD = 5058;
+pub const ERROR_CLUSTER_NO_SECURITY_CONTEXT: DWORD = 5059;
+pub const ERROR_CLUSTER_NETWORK_NOT_INTERNAL: DWORD = 5060;
+pub const ERROR_CLUSTER_NODE_ALREADY_UP: DWORD = 5061;
+pub const ERROR_CLUSTER_NODE_ALREADY_DOWN: DWORD = 5062;
+pub const ERROR_CLUSTER_NETWORK_ALREADY_ONLINE: DWORD = 5063;
+pub const ERROR_CLUSTER_NETWORK_ALREADY_OFFLINE: DWORD = 5064;
+pub const ERROR_CLUSTER_NODE_ALREADY_MEMBER: DWORD = 5065;
+pub const ERROR_CLUSTER_LAST_INTERNAL_NETWORK: DWORD = 5066;
+pub const ERROR_CLUSTER_NETWORK_HAS_DEPENDENTS: DWORD = 5067;
+pub const ERROR_INVALID_OPERATION_ON_QUORUM: DWORD = 5068;
+pub const ERROR_DEPENDENCY_NOT_ALLOWED: DWORD = 5069;
+pub const ERROR_CLUSTER_NODE_PAUSED: DWORD = 5070;
+pub const ERROR_NODE_CANT_HOST_RESOURCE: DWORD = 5071;
+pub const ERROR_CLUSTER_NODE_NOT_READY: DWORD = 5072;
+pub const ERROR_CLUSTER_NODE_SHUTTING_DOWN: DWORD = 5073;
+pub const ERROR_CLUSTER_JOIN_ABORTED: DWORD = 5074;
+pub const ERROR_CLUSTER_INCOMPATIBLE_VERSIONS: DWORD = 5075;
+pub const ERROR_CLUSTER_MAXNUM_OF_RESOURCES_EXCEEDED: DWORD = 5076;
+pub const ERROR_CLUSTER_SYSTEM_CONFIG_CHANGED: DWORD = 5077;
+pub const ERROR_CLUSTER_RESOURCE_TYPE_NOT_FOUND: DWORD = 5078;
+pub const ERROR_CLUSTER_RESTYPE_NOT_SUPPORTED: DWORD = 5079;
+pub const ERROR_CLUSTER_RESNAME_NOT_FOUND: DWORD = 5080;
+pub const ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED: DWORD = 5081;
+pub const ERROR_CLUSTER_OWNER_NOT_IN_PREFLIST: DWORD = 5082;
+pub const ERROR_CLUSTER_DATABASE_SEQMISMATCH: DWORD = 5083;
+pub const ERROR_RESMON_INVALID_STATE: DWORD = 5084;
+pub const ERROR_CLUSTER_GUM_NOT_LOCKER: DWORD = 5085;
+pub const ERROR_QUORUM_DISK_NOT_FOUND: DWORD = 5086;
+pub const ERROR_DATABASE_BACKUP_CORRUPT: DWORD = 5087;
+pub const ERROR_CLUSTER_NODE_ALREADY_HAS_DFS_ROOT: DWORD = 5088;
+pub const ERROR_RESOURCE_PROPERTY_UNCHANGEABLE: DWORD = 5089;
+pub const ERROR_CLUSTER_MEMBERSHIP_INVALID_STATE: DWORD = 5890;
+pub const ERROR_CLUSTER_QUORUMLOG_NOT_FOUND: DWORD = 5891;
+pub const ERROR_CLUSTER_MEMBERSHIP_HALT: DWORD = 5892;
+pub const ERROR_CLUSTER_INSTANCE_ID_MISMATCH: DWORD = 5893;
+pub const ERROR_CLUSTER_NETWORK_NOT_FOUND_FOR_IP: DWORD = 5894;
+pub const ERROR_CLUSTER_PROPERTY_DATA_TYPE_MISMATCH: DWORD = 5895;
+pub const ERROR_CLUSTER_EVICT_WITHOUT_CLEANUP: DWORD = 5896;
+pub const ERROR_CLUSTER_PARAMETER_MISMATCH: DWORD = 5897;
+pub const ERROR_NODE_CANNOT_BE_CLUSTERED: DWORD = 5898;
+pub const ERROR_CLUSTER_WRONG_OS_VERSION: DWORD = 5899;
+pub const ERROR_CLUSTER_CANT_CREATE_DUP_CLUSTER_NAME: DWORD = 5900;
+pub const ERROR_CLUSCFG_ALREADY_COMMITTED: DWORD = 5901;
+pub const ERROR_CLUSCFG_ROLLBACK_FAILED: DWORD = 5902;
+pub const ERROR_CLUSCFG_SYSTEM_DISK_DRIVE_LETTER_CONFLICT: DWORD = 5903;
+pub const ERROR_CLUSTER_OLD_VERSION: DWORD = 5904;
+pub const ERROR_CLUSTER_MISMATCHED_COMPUTER_ACCT_NAME: DWORD = 5905;
+pub const ERROR_ENCRYPTION_FAILED: DWORD = 6000;
+pub const ERROR_DECRYPTION_FAILED: DWORD = 6001;
+pub const ERROR_FILE_ENCRYPTED: DWORD = 6002;
+pub const ERROR_NO_RECOVERY_POLICY: DWORD = 6003;
+pub const ERROR_NO_EFS: DWORD = 6004;
+pub const ERROR_WRONG_EFS: DWORD = 6005;
+pub const ERROR_NO_USER_KEYS: DWORD = 6006;
+pub const ERROR_FILE_NOT_ENCRYPTED: DWORD = 6007;
+pub const ERROR_NOT_EXPORT_FORMAT: DWORD = 6008;
+pub const ERROR_FILE_READ_ONLY: DWORD = 6009;
+pub const ERROR_DIR_EFS_DISALLOWED: DWORD = 6010;
+pub const ERROR_EFS_SERVER_NOT_TRUSTED: DWORD = 6011;
+pub const ERROR_BAD_RECOVERY_POLICY: DWORD = 6012;
+pub const ERROR_EFS_ALG_BLOB_TOO_BIG: DWORD = 6013;
+pub const ERROR_VOLUME_NOT_SUPPORT_EFS: DWORD = 6014;
+pub const ERROR_EFS_DISABLED: DWORD = 6015;
+pub const ERROR_EFS_VERSION_NOT_SUPPORT: DWORD = 6016;
+pub const ERROR_NO_BROWSER_SERVERS_FOUND: DWORD = 6118;
+pub const ERROR_CTX_WINSTATION_NAME_INVALID: DWORD = 7001;
+pub const ERROR_CTX_INVALID_PD: DWORD = 7002;
+pub const ERROR_CTX_PD_NOT_FOUND: DWORD = 7003;
+pub const ERROR_CTX_WD_NOT_FOUND: DWORD = 7004;
+pub const ERROR_CTX_CANNOT_MAKE_EVENTLOG_ENTRY: DWORD = 7005;
+pub const ERROR_CTX_SERVICE_NAME_COLLISION: DWORD = 7006;
+pub const ERROR_CTX_CLOSE_PENDING: DWORD = 7007;
+pub const ERROR_CTX_NO_OUTBUF: DWORD = 7008;
+pub const ERROR_CTX_MODEM_INF_NOT_FOUND: DWORD = 7009;
+pub const ERROR_CTX_INVALID_MODEMNAME: DWORD = 7010;
+pub const ERROR_CTX_MODEM_RESPONSE_ERROR: DWORD = 7011;
+pub const ERROR_CTX_MODEM_RESPONSE_TIMEOUT: DWORD = 7012;
+pub const ERROR_CTX_MODEM_RESPONSE_NO_CARRIER: DWORD = 7013;
+pub const ERROR_CTX_MODEM_RESPONSE_NO_DIALTONE: DWORD = 7014;
+pub const ERROR_CTX_MODEM_RESPONSE_BUSY: DWORD = 7015;
+pub const ERROR_CTX_MODEM_RESPONSE_VOICE: DWORD = 7016;
+pub const ERROR_CTX_TD_ERROR: DWORD = 7017;
+pub const ERROR_CTX_WINSTATION_NOT_FOUND: DWORD = 7022;
+pub const ERROR_CTX_WINSTATION_ALREADY_EXISTS: DWORD = 7023;
+pub const ERROR_CTX_WINSTATION_BUSY: DWORD = 7024;
+pub const ERROR_CTX_BAD_VIDEO_MODE: DWORD = 7025;
+pub const ERROR_CTX_GRAPHICS_INVALID: DWORD = 7035;
+pub const ERROR_CTX_LOGON_DISABLED: DWORD = 7037;
+pub const ERROR_CTX_NOT_CONSOLE: DWORD = 7038;
+pub const ERROR_CTX_CLIENT_QUERY_TIMEOUT: DWORD = 7040;
+pub const ERROR_CTX_CONSOLE_DISCONNECT: DWORD = 7041;
+pub const ERROR_CTX_CONSOLE_CONNECT: DWORD = 7042;
+pub const ERROR_CTX_SHADOW_DENIED: DWORD = 7044;
+pub const ERROR_CTX_WINSTATION_ACCESS_DENIED: DWORD = 7045;
+pub const ERROR_CTX_INVALID_WD: DWORD = 7049;
+pub const ERROR_CTX_SHADOW_INVALID: DWORD = 7050;
+pub const ERROR_CTX_SHADOW_DISABLED: DWORD = 7051;
+pub const ERROR_CTX_CLIENT_LICENSE_IN_USE: DWORD = 7052;
+pub const ERROR_CTX_CLIENT_LICENSE_NOT_SET: DWORD = 7053;
+pub const ERROR_CTX_LICENSE_NOT_AVAILABLE: DWORD = 7054;
+pub const ERROR_CTX_LICENSE_CLIENT_INVALID: DWORD = 7055;
+pub const ERROR_CTX_LICENSE_EXPIRED: DWORD = 7056;
+pub const ERROR_CTX_SHADOW_NOT_RUNNING: DWORD = 7057;
+pub const ERROR_CTX_SHADOW_ENDED_BY_MODE_CHANGE: DWORD = 7058;
+pub const ERROR_ACTIVATION_COUNT_EXCEEDED: DWORD = 7059;
+pub const ERROR_DS_NOT_INSTALLED: DWORD = 8200;
+pub const ERROR_DS_MEMBERSHIP_EVALUATED_LOCALLY: DWORD = 8201;
+pub const ERROR_DS_NO_ATTRIBUTE_OR_VALUE: DWORD = 8202;
+pub const ERROR_DS_INVALID_ATTRIBUTE_SYNTAX: DWORD = 8203;
+pub const ERROR_DS_ATTRIBUTE_TYPE_UNDEFINED: DWORD = 8204;
+pub const ERROR_DS_ATTRIBUTE_OR_VALUE_EXISTS: DWORD = 8205;
+pub const ERROR_DS_BUSY: DWORD = 8206;
+pub const ERROR_DS_UNAVAILABLE: DWORD = 8207;
+pub const ERROR_DS_NO_RIDS_ALLOCATED: DWORD = 8208;
+pub const ERROR_DS_NO_MORE_RIDS: DWORD = 8209;
+pub const ERROR_DS_INCORRECT_ROLE_OWNER: DWORD = 8210;
+pub const ERROR_DS_RIDMGR_INIT_ERROR: DWORD = 8211;
+pub const ERROR_DS_OBJ_CLASS_VIOLATION: DWORD = 8212;
+pub const ERROR_DS_CANT_ON_NON_LEAF: DWORD = 8213;
+pub const ERROR_DS_CANT_ON_RDN: DWORD = 8214;
+pub const ERROR_DS_CANT_MOD_OBJ_CLASS: DWORD = 8215;
+pub const ERROR_DS_CROSS_DOM_MOVE_ERROR: DWORD = 8216;
+pub const ERROR_DS_GC_NOT_AVAILABLE: DWORD = 8217;
+pub const ERROR_SHARED_POLICY: DWORD = 8218;
+pub const ERROR_POLICY_OBJECT_NOT_FOUND: DWORD = 8219;
+pub const ERROR_POLICY_ONLY_IN_DS: DWORD = 8220;
+pub const ERROR_PROMOTION_ACTIVE: DWORD = 8221;
+pub const ERROR_NO_PROMOTION_ACTIVE: DWORD = 8222;
+pub const ERROR_DS_OPERATIONS_ERROR: DWORD = 8224;
+pub const ERROR_DS_PROTOCOL_ERROR: DWORD = 8225;
+pub const ERROR_DS_TIMELIMIT_EXCEEDED: DWORD = 8226;
+pub const ERROR_DS_SIZELIMIT_EXCEEDED: DWORD = 8227;
+pub const ERROR_DS_ADMIN_LIMIT_EXCEEDED: DWORD = 8228;
+pub const ERROR_DS_COMPARE_FALSE: DWORD = 8229;
+pub const ERROR_DS_COMPARE_TRUE: DWORD = 8230;
+pub const ERROR_DS_AUTH_METHOD_NOT_SUPPORTED: DWORD = 8231;
+pub const ERROR_DS_STRONG_AUTH_REQUIRED: DWORD = 8232;
+pub const ERROR_DS_INAPPROPRIATE_AUTH: DWORD = 8233;
+pub const ERROR_DS_AUTH_UNKNOWN: DWORD = 8234;
+pub const ERROR_DS_REFERRAL: DWORD = 8235;
+pub const ERROR_DS_UNAVAILABLE_CRIT_EXTENSION: DWORD = 8236;
+pub const ERROR_DS_CONFIDENTIALITY_REQUIRED: DWORD = 8237;
+pub const ERROR_DS_INAPPROPRIATE_MATCHING: DWORD = 8238;
+pub const ERROR_DS_CONSTRAINT_VIOLATION: DWORD = 8239;
+pub const ERROR_DS_NO_SUCH_OBJECT: DWORD = 8240;
+pub const ERROR_DS_ALIAS_PROBLEM: DWORD = 8241;
+pub const ERROR_DS_INVALID_DN_SYNTAX: DWORD = 8242;
+pub const ERROR_DS_IS_LEAF: DWORD = 8243;
+pub const ERROR_DS_ALIAS_DEREF_PROBLEM: DWORD = 8244;
+pub const ERROR_DS_UNWILLING_TO_PERFORM: DWORD = 8245;
+pub const ERROR_DS_LOOP_DETECT: DWORD = 8246;
+pub const ERROR_DS_NAMING_VIOLATION: DWORD = 8247;
+pub const ERROR_DS_OBJECT_RESULTS_TOO_LARGE: DWORD = 8248;
+pub const ERROR_DS_AFFECTS_MULTIPLE_DSAS: DWORD = 8249;
+pub const ERROR_DS_SERVER_DOWN: DWORD = 8250;
+pub const ERROR_DS_LOCAL_ERROR: DWORD = 8251;
+pub const ERROR_DS_ENCODING_ERROR: DWORD = 8252;
+pub const ERROR_DS_DECODING_ERROR: DWORD = 8253;
+pub const ERROR_DS_FILTER_UNKNOWN: DWORD = 8254;
+pub const ERROR_DS_PARAM_ERROR: DWORD = 8255;
+pub const ERROR_DS_NOT_SUPPORTED: DWORD = 8256;
+pub const ERROR_DS_NO_RESULTS_RETURNED: DWORD = 8257;
+pub const ERROR_DS_CONTROL_NOT_FOUND: DWORD = 8258;
+pub const ERROR_DS_CLIENT_LOOP: DWORD = 8259;
+pub const ERROR_DS_REFERRAL_LIMIT_EXCEEDED: DWORD = 8260;
+pub const ERROR_DS_SORT_CONTROL_MISSING: DWORD = 8261;
+pub const ERROR_DS_OFFSET_RANGE_ERROR: DWORD = 8262;
+pub const ERROR_DS_ROOT_MUST_BE_NC: DWORD = 8301;
+pub const ERROR_DS_ADD_REPLICA_INHIBITED: DWORD = 8302;
+pub const ERROR_DS_ATT_NOT_DEF_IN_SCHEMA: DWORD = 8303;
+pub const ERROR_DS_MAX_OBJ_SIZE_EXCEEDED: DWORD = 8304;
+pub const ERROR_DS_OBJ_STRING_NAME_EXISTS: DWORD = 8305;
+pub const ERROR_DS_NO_RDN_DEFINED_IN_SCHEMA: DWORD = 8306;
+pub const ERROR_DS_RDN_DOESNT_MATCH_SCHEMA: DWORD = 8307;
+pub const ERROR_DS_NO_REQUESTED_ATTS_FOUND: DWORD = 8308;
+pub const ERROR_DS_USER_BUFFER_TO_SMALL: DWORD = 8309;
+pub const ERROR_DS_ATT_IS_NOT_ON_OBJ: DWORD = 8310;
+pub const ERROR_DS_ILLEGAL_MOD_OPERATION: DWORD = 8311;
+pub const ERROR_DS_OBJ_TOO_LARGE: DWORD = 8312;
+pub const ERROR_DS_BAD_INSTANCE_TYPE: DWORD = 8313;
+pub const ERROR_DS_MASTERDSA_REQUIRED: DWORD = 8314;
+pub const ERROR_DS_OBJECT_CLASS_REQUIRED: DWORD = 8315;
+pub const ERROR_DS_MISSING_REQUIRED_ATT: DWORD = 8316;
+pub const ERROR_DS_ATT_NOT_DEF_FOR_CLASS: DWORD = 8317;
+pub const ERROR_DS_ATT_ALREADY_EXISTS: DWORD = 8318;
+pub const ERROR_DS_CANT_ADD_ATT_VALUES: DWORD = 8320;
+pub const ERROR_DS_SINGLE_VALUE_CONSTRAINT: DWORD = 8321;
+pub const ERROR_DS_RANGE_CONSTRAINT: DWORD = 8322;
+pub const ERROR_DS_ATT_VAL_ALREADY_EXISTS: DWORD = 8323;
+pub const ERROR_DS_CANT_REM_MISSING_ATT: DWORD = 8324;
+pub const ERROR_DS_CANT_REM_MISSING_ATT_VAL: DWORD = 8325;
+pub const ERROR_DS_ROOT_CANT_BE_SUBREF: DWORD = 8326;
+pub const ERROR_DS_NO_CHAINING: DWORD = 8327;
+pub const ERROR_DS_NO_CHAINED_EVAL: DWORD = 8328;
+pub const ERROR_DS_NO_PARENT_OBJECT: DWORD = 8329;
+pub const ERROR_DS_PARENT_IS_AN_ALIAS: DWORD = 8330;
+pub const ERROR_DS_CANT_MIX_MASTER_AND_REPS: DWORD = 8331;
+pub const ERROR_DS_CHILDREN_EXIST: DWORD = 8332;
+pub const ERROR_DS_OBJ_NOT_FOUND: DWORD = 8333;
+pub const ERROR_DS_ALIASED_OBJ_MISSING: DWORD = 8334;
+pub const ERROR_DS_BAD_NAME_SYNTAX: DWORD = 8335;
+pub const ERROR_DS_ALIAS_POINTS_TO_ALIAS: DWORD = 8336;
+pub const ERROR_DS_CANT_DEREF_ALIAS: DWORD = 8337;
+pub const ERROR_DS_OUT_OF_SCOPE: DWORD = 8338;
+pub const ERROR_DS_OBJECT_BEING_REMOVED: DWORD = 8339;
+pub const ERROR_DS_CANT_DELETE_DSA_OBJ: DWORD = 8340;
+pub const ERROR_DS_GENERIC_ERROR: DWORD = 8341;
+pub const ERROR_DS_DSA_MUST_BE_INT_MASTER: DWORD = 8342;
+pub const ERROR_DS_CLASS_NOT_DSA: DWORD = 8343;
+pub const ERROR_DS_INSUFF_ACCESS_RIGHTS: DWORD = 8344;
+pub const ERROR_DS_ILLEGAL_SUPERIOR: DWORD = 8345;
+pub const ERROR_DS_ATTRIBUTE_OWNED_BY_SAM: DWORD = 8346;
+pub const ERROR_DS_NAME_TOO_MANY_PARTS: DWORD = 8347;
+pub const ERROR_DS_NAME_TOO_LONG: DWORD = 8348;
+pub const ERROR_DS_NAME_VALUE_TOO_LONG: DWORD = 8349;
+pub const ERROR_DS_NAME_UNPARSEABLE: DWORD = 8350;
+pub const ERROR_DS_NAME_TYPE_UNKNOWN: DWORD = 8351;
+pub const ERROR_DS_NOT_AN_OBJECT: DWORD = 8352;
+pub const ERROR_DS_SEC_DESC_TOO_SHORT: DWORD = 8353;
+pub const ERROR_DS_SEC_DESC_INVALID: DWORD = 8354;
+pub const ERROR_DS_NO_DELETED_NAME: DWORD = 8355;
+pub const ERROR_DS_SUBREF_MUST_HAVE_PARENT: DWORD = 8356;
+pub const ERROR_DS_NCNAME_MUST_BE_NC: DWORD = 8357;
+pub const ERROR_DS_CANT_ADD_SYSTEM_ONLY: DWORD = 8358;
+pub const ERROR_DS_CLASS_MUST_BE_CONCRETE: DWORD = 8359;
+pub const ERROR_DS_INVALID_DMD: DWORD = 8360;
+pub const ERROR_DS_OBJ_GUID_EXISTS: DWORD = 8361;
+pub const ERROR_DS_NOT_ON_BACKLINK: DWORD = 8362;
+pub const ERROR_DS_NO_CROSSREF_FOR_NC: DWORD = 8363;
+pub const ERROR_DS_SHUTTING_DOWN: DWORD = 8364;
+pub const ERROR_DS_UNKNOWN_OPERATION: DWORD = 8365;
+pub const ERROR_DS_INVALID_ROLE_OWNER: DWORD = 8366;
+pub const ERROR_DS_COULDNT_CONTACT_FSMO: DWORD = 8367;
+pub const ERROR_DS_CROSS_NC_DN_RENAME: DWORD = 8368;
+pub const ERROR_DS_CANT_MOD_SYSTEM_ONLY: DWORD = 8369;
+pub const ERROR_DS_REPLICATOR_ONLY: DWORD = 8370;
+pub const ERROR_DS_OBJ_CLASS_NOT_DEFINED: DWORD = 8371;
+pub const ERROR_DS_OBJ_CLASS_NOT_SUBCLASS: DWORD = 8372;
+pub const ERROR_DS_NAME_REFERENCE_INVALID: DWORD = 8373;
+pub const ERROR_DS_CROSS_REF_EXISTS: DWORD = 8374;
+pub const ERROR_DS_CANT_DEL_MASTER_CROSSREF: DWORD = 8375;
+pub const ERROR_DS_SUBTREE_NOTIFY_NOT_NC_HEAD: DWORD = 8376;
+pub const ERROR_DS_NOTIFY_FILTER_TOO_COMPLEX: DWORD = 8377;
+pub const ERROR_DS_DUP_RDN: DWORD = 8378;
+pub const ERROR_DS_DUP_OID: DWORD = 8379;
+pub const ERROR_DS_DUP_MAPI_ID: DWORD = 8380;
+pub const ERROR_DS_DUP_SCHEMA_ID_GUID: DWORD = 8381;
+pub const ERROR_DS_DUP_LDAP_DISPLAY_NAME: DWORD = 8382;
+pub const ERROR_DS_SEMANTIC_ATT_TEST: DWORD = 8383;
+pub const ERROR_DS_SYNTAX_MISMATCH: DWORD = 8384;
+pub const ERROR_DS_EXISTS_IN_MUST_HAVE: DWORD = 8385;
+pub const ERROR_DS_EXISTS_IN_MAY_HAVE: DWORD = 8386;
+pub const ERROR_DS_NONEXISTENT_MAY_HAVE: DWORD = 8387;
+pub const ERROR_DS_NONEXISTENT_MUST_HAVE: DWORD = 8388;
+pub const ERROR_DS_AUX_CLS_TEST_FAIL: DWORD = 8389;
+pub const ERROR_DS_NONEXISTENT_POSS_SUP: DWORD = 8390;
+pub const ERROR_DS_SUB_CLS_TEST_FAIL: DWORD = 8391;
+pub const ERROR_DS_BAD_RDN_ATT_ID_SYNTAX: DWORD = 8392;
+pub const ERROR_DS_EXISTS_IN_AUX_CLS: DWORD = 8393;
+pub const ERROR_DS_EXISTS_IN_SUB_CLS: DWORD = 8394;
+pub const ERROR_DS_EXISTS_IN_POSS_SUP: DWORD = 8395;
+pub const ERROR_DS_RECALCSCHEMA_FAILED: DWORD = 8396;
+pub const ERROR_DS_TREE_DELETE_NOT_FINISHED: DWORD = 8397;
+pub const ERROR_DS_CANT_DELETE: DWORD = 8398;
+pub const ERROR_DS_ATT_SCHEMA_REQ_ID: DWORD = 8399;
+pub const ERROR_DS_BAD_ATT_SCHEMA_SYNTAX: DWORD = 8400;
+pub const ERROR_DS_CANT_CACHE_ATT: DWORD = 8401;
+pub const ERROR_DS_CANT_CACHE_CLASS: DWORD = 8402;
+pub const ERROR_DS_CANT_REMOVE_ATT_CACHE: DWORD = 8403;
+pub const ERROR_DS_CANT_REMOVE_CLASS_CACHE: DWORD = 8404;
+pub const ERROR_DS_CANT_RETRIEVE_DN: DWORD = 8405;
+pub const ERROR_DS_MISSING_SUPREF: DWORD = 8406;
+pub const ERROR_DS_CANT_RETRIEVE_INSTANCE: DWORD = 8407;
+pub const ERROR_DS_CODE_INCONSISTENCY: DWORD = 8408;
+pub const ERROR_DS_DATABASE_ERROR: DWORD = 8409;
+pub const ERROR_DS_GOVERNSID_MISSING: DWORD = 8410;
+pub const ERROR_DS_MISSING_EXPECTED_ATT: DWORD = 8411;
+pub const ERROR_DS_NCNAME_MISSING_CR_REF: DWORD = 8412;
+pub const ERROR_DS_SECURITY_CHECKING_ERROR: DWORD = 8413;
+pub const ERROR_DS_SCHEMA_NOT_LOADED: DWORD = 8414;
+pub const ERROR_DS_SCHEMA_ALLOC_FAILED: DWORD = 8415;
+pub const ERROR_DS_ATT_SCHEMA_REQ_SYNTAX: DWORD = 8416;
+pub const ERROR_DS_GCVERIFY_ERROR: DWORD = 8417;
+pub const ERROR_DS_DRA_SCHEMA_MISMATCH: DWORD = 8418;
+pub const ERROR_DS_CANT_FIND_DSA_OBJ: DWORD = 8419;
+pub const ERROR_DS_CANT_FIND_EXPECTED_NC: DWORD = 8420;
+pub const ERROR_DS_CANT_FIND_NC_IN_CACHE: DWORD = 8421;
+pub const ERROR_DS_CANT_RETRIEVE_CHILD: DWORD = 8422;
+pub const ERROR_DS_SECURITY_ILLEGAL_MODIFY: DWORD = 8423;
+pub const ERROR_DS_CANT_REPLACE_HIDDEN_REC: DWORD = 8424;
+pub const ERROR_DS_BAD_HIERARCHY_FILE: DWORD = 8425;
+pub const ERROR_DS_BUILD_HIERARCHY_TABLE_FAILED: DWORD = 8426;
+pub const ERROR_DS_CONFIG_PARAM_MISSING: DWORD = 8427;
+pub const ERROR_DS_COUNTING_AB_INDICES_FAILED: DWORD = 8428;
+pub const ERROR_DS_HIERARCHY_TABLE_MALLOC_FAILED: DWORD = 8429;
+pub const ERROR_DS_INTERNAL_FAILURE: DWORD = 8430;
+pub const ERROR_DS_UNKNOWN_ERROR: DWORD = 8431;
+pub const ERROR_DS_ROOT_REQUIRES_CLASS_TOP: DWORD = 8432;
+pub const ERROR_DS_REFUSING_FSMO_ROLES: DWORD = 8433;
+pub const ERROR_DS_MISSING_FSMO_SETTINGS: DWORD = 8434;
+pub const ERROR_DS_UNABLE_TO_SURRENDER_ROLES: DWORD = 8435;
+pub const ERROR_DS_DRA_GENERIC: DWORD = 8436;
+pub const ERROR_DS_DRA_INVALID_PARAMETER: DWORD = 8437;
+pub const ERROR_DS_DRA_BUSY: DWORD = 8438;
+pub const ERROR_DS_DRA_BAD_DN: DWORD = 8439;
+pub const ERROR_DS_DRA_BAD_NC: DWORD = 8440;
+pub const ERROR_DS_DRA_DN_EXISTS: DWORD = 8441;
+pub const ERROR_DS_DRA_INTERNAL_ERROR: DWORD = 8442;
+pub const ERROR_DS_DRA_INCONSISTENT_DIT: DWORD = 8443;
+pub const ERROR_DS_DRA_CONNECTION_FAILED: DWORD = 8444;
+pub const ERROR_DS_DRA_BAD_INSTANCE_TYPE: DWORD = 8445;
+pub const ERROR_DS_DRA_OUT_OF_MEM: DWORD = 8446;
+pub const ERROR_DS_DRA_MAIL_PROBLEM: DWORD = 8447;
+pub const ERROR_DS_DRA_REF_ALREADY_EXISTS: DWORD = 8448;
+pub const ERROR_DS_DRA_REF_NOT_FOUND: DWORD = 8449;
+pub const ERROR_DS_DRA_OBJ_IS_REP_SOURCE: DWORD = 8450;
+pub const ERROR_DS_DRA_DB_ERROR: DWORD = 8451;
+pub const ERROR_DS_DRA_NO_REPLICA: DWORD = 8452;
+pub const ERROR_DS_DRA_ACCESS_DENIED: DWORD = 8453;
+pub const ERROR_DS_DRA_NOT_SUPPORTED: DWORD = 8454;
+pub const ERROR_DS_DRA_RPC_CANCELLED: DWORD = 8455;
+pub const ERROR_DS_DRA_SOURCE_DISABLED: DWORD = 8456;
+pub const ERROR_DS_DRA_SINK_DISABLED: DWORD = 8457;
+pub const ERROR_DS_DRA_NAME_COLLISION: DWORD = 8458;
+pub const ERROR_DS_DRA_SOURCE_REINSTALLED: DWORD = 8459;
+pub const ERROR_DS_DRA_MISSING_PARENT: DWORD = 8460;
+pub const ERROR_DS_DRA_PREEMPTED: DWORD = 8461;
+pub const ERROR_DS_DRA_ABANDON_SYNC: DWORD = 8462;
+pub const ERROR_DS_DRA_SHUTDOWN: DWORD = 8463;
+pub const ERROR_DS_DRA_INCOMPATIBLE_PARTIAL_SET: DWORD = 8464;
+pub const ERROR_DS_DRA_SOURCE_IS_PARTIAL_REPLICA: DWORD = 8465;
+pub const ERROR_DS_DRA_EXTN_CONNECTION_FAILED: DWORD = 8466;
+pub const ERROR_DS_INSTALL_SCHEMA_MISMATCH: DWORD = 8467;
+pub const ERROR_DS_DUP_LINK_ID: DWORD = 8468;
+pub const ERROR_DS_NAME_ERROR_RESOLVING: DWORD = 8469;
+pub const ERROR_DS_NAME_ERROR_NOT_FOUND: DWORD = 8470;
+pub const ERROR_DS_NAME_ERROR_NOT_UNIQUE: DWORD = 8471;
+pub const ERROR_DS_NAME_ERROR_NO_MAPPING: DWORD = 8472;
+pub const ERROR_DS_NAME_ERROR_DOMAIN_ONLY: DWORD = 8473;
+pub const ERROR_DS_NAME_ERROR_NO_SYNTACTICAL_MAPPING: DWORD = 8474;
+pub const ERROR_DS_CONSTRUCTED_ATT_MOD: DWORD = 8475;
+pub const ERROR_DS_WRONG_OM_OBJ_CLASS: DWORD = 8476;
+pub const ERROR_DS_DRA_REPL_PENDING: DWORD = 8477;
+pub const ERROR_DS_DS_REQUIRED: DWORD = 8478;
+pub const ERROR_DS_INVALID_LDAP_DISPLAY_NAME: DWORD = 8479;
+pub const ERROR_DS_NON_BASE_SEARCH: DWORD = 8480;
+pub const ERROR_DS_CANT_RETRIEVE_ATTS: DWORD = 8481;
+pub const ERROR_DS_BACKLINK_WITHOUT_LINK: DWORD = 8482;
+pub const ERROR_DS_EPOCH_MISMATCH: DWORD = 8483;
+pub const ERROR_DS_SRC_NAME_MISMATCH: DWORD = 8484;
+pub const ERROR_DS_SRC_AND_DST_NC_IDENTICAL: DWORD = 8485;
+pub const ERROR_DS_DST_NC_MISMATCH: DWORD = 8486;
+pub const ERROR_DS_NOT_AUTHORITIVE_FOR_DST_NC: DWORD = 8487;
+pub const ERROR_DS_SRC_GUID_MISMATCH: DWORD = 8488;
+pub const ERROR_DS_CANT_MOVE_DELETED_OBJECT: DWORD = 8489;
+pub const ERROR_DS_PDC_OPERATION_IN_PROGRESS: DWORD = 8490;
+pub const ERROR_DS_CROSS_DOMAIN_CLEANUP_REQD: DWORD = 8491;
+pub const ERROR_DS_ILLEGAL_XDOM_MOVE_OPERATION: DWORD = 8492;
+pub const ERROR_DS_CANT_WITH_ACCT_GROUP_MEMBERSHPS: DWORD = 8493;
+pub const ERROR_DS_NC_MUST_HAVE_NC_PARENT: DWORD = 8494;
+pub const ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE: DWORD = 8495;
+pub const ERROR_DS_DST_DOMAIN_NOT_NATIVE: DWORD = 8496;
+pub const ERROR_DS_MISSING_INFRASTRUCTURE_CONTAINER: DWORD = 8497;
+pub const ERROR_DS_CANT_MOVE_ACCOUNT_GROUP: DWORD = 8498;
+pub const ERROR_DS_CANT_MOVE_RESOURCE_GROUP: DWORD = 8499;
+pub const ERROR_DS_INVALID_SEARCH_FLAG: DWORD = 8500;
+pub const ERROR_DS_NO_TREE_DELETE_ABOVE_NC: DWORD = 8501;
+pub const ERROR_DS_COULDNT_LOCK_TREE_FOR_DELETE: DWORD = 8502;
+pub const ERROR_DS_COULDNT_IDENTIFY_OBJECTS_FOR_TREE_DELETE: DWORD = 8503;
+pub const ERROR_DS_SAM_INIT_FAILURE: DWORD = 8504;
+pub const ERROR_DS_SENSITIVE_GROUP_VIOLATION: DWORD = 8505;
+pub const ERROR_DS_CANT_MOD_PRIMARYGROUPID: DWORD = 8506;
+pub const ERROR_DS_ILLEGAL_BASE_SCHEMA_MOD: DWORD = 8507;
+pub const ERROR_DS_NONSAFE_SCHEMA_CHANGE: DWORD = 8508;
+pub const ERROR_DS_SCHEMA_UPDATE_DISALLOWED: DWORD = 8509;
+pub const ERROR_DS_CANT_CREATE_UNDER_SCHEMA: DWORD = 8510;
+pub const ERROR_DS_INSTALL_NO_SRC_SCH_VERSION: DWORD = 8511;
+pub const ERROR_DS_INSTALL_NO_SCH_VERSION_IN_INIFILE: DWORD = 8512;
+pub const ERROR_DS_INVALID_GROUP_TYPE: DWORD = 8513;
+pub const ERROR_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN: DWORD = 8514;
+pub const ERROR_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN: DWORD = 8515;
+pub const ERROR_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER: DWORD = 8516;
+pub const ERROR_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER: DWORD = 8517;
+pub const ERROR_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER: DWORD = 8518;
+pub const ERROR_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER: DWORD = 8519;
+pub const ERROR_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER: DWORD = 8520;
+pub const ERROR_DS_HAVE_PRIMARY_MEMBERS: DWORD = 8521;
+pub const ERROR_DS_STRING_SD_CONVERSION_FAILED: DWORD = 8522;
+pub const ERROR_DS_NAMING_MASTER_GC: DWORD = 8523;
+pub const ERROR_DS_DNS_LOOKUP_FAILURE: DWORD = 8524;
+pub const ERROR_DS_COULDNT_UPDATE_SPNS: DWORD = 8525;
+pub const ERROR_DS_CANT_RETRIEVE_SD: DWORD = 8526;
+pub const ERROR_DS_KEY_NOT_UNIQUE: DWORD = 8527;
+pub const ERROR_DS_WRONG_LINKED_ATT_SYNTAX: DWORD = 8528;
+pub const ERROR_DS_SAM_NEED_BOOTKEY_PASSWORD: DWORD = 8529;
+pub const ERROR_DS_SAM_NEED_BOOTKEY_FLOPPY: DWORD = 8530;
+pub const ERROR_DS_CANT_START: DWORD = 8531;
+pub const ERROR_DS_INIT_FAILURE: DWORD = 8532;
+pub const ERROR_DS_NO_PKT_PRIVACY_ON_CONNECTION: DWORD = 8533;
+pub const ERROR_DS_SOURCE_DOMAIN_IN_FOREST: DWORD = 8534;
+pub const ERROR_DS_DESTINATION_DOMAIN_NOT_IN_FOREST: DWORD = 8535;
+pub const ERROR_DS_DESTINATION_AUDITING_NOT_ENABLED: DWORD = 8536;
+pub const ERROR_DS_CANT_FIND_DC_FOR_SRC_DOMAIN: DWORD = 8537;
+pub const ERROR_DS_SRC_OBJ_NOT_GROUP_OR_USER: DWORD = 8538;
+pub const ERROR_DS_SRC_SID_EXISTS_IN_FOREST: DWORD = 8539;
+pub const ERROR_DS_SRC_AND_DST_OBJECT_CLASS_MISMATCH: DWORD = 8540;
+pub const ERROR_SAM_INIT_FAILURE: DWORD = 8541;
+pub const ERROR_DS_DRA_SCHEMA_INFO_SHIP: DWORD = 8542;
+pub const ERROR_DS_DRA_SCHEMA_CONFLICT: DWORD = 8543;
+pub const ERROR_DS_DRA_EARLIER_SCHEMA_CONFLICT: DWORD = 8544;
+pub const ERROR_DS_DRA_OBJ_NC_MISMATCH: DWORD = 8545;
+pub const ERROR_DS_NC_STILL_HAS_DSAS: DWORD = 8546;
+pub const ERROR_DS_GC_REQUIRED: DWORD = 8547;
+pub const ERROR_DS_LOCAL_MEMBER_OF_LOCAL_ONLY: DWORD = 8548;
+pub const ERROR_DS_NO_FPO_IN_UNIVERSAL_GROUPS: DWORD = 8549;
+pub const ERROR_DS_CANT_ADD_TO_GC: DWORD = 8550;
+pub const ERROR_DS_NO_CHECKPOINT_WITH_PDC: DWORD = 8551;
+pub const ERROR_DS_SOURCE_AUDITING_NOT_ENABLED: DWORD = 8552;
+pub const ERROR_DS_CANT_CREATE_IN_NONDOMAIN_NC: DWORD = 8553;
+pub const ERROR_DS_INVALID_NAME_FOR_SPN: DWORD = 8554;
+pub const ERROR_DS_FILTER_USES_CONTRUCTED_ATTRS: DWORD = 8555;
+pub const ERROR_DS_UNICODEPWD_NOT_IN_QUOTES: DWORD = 8556;
+pub const ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED: DWORD = 8557;
+pub const ERROR_DS_MUST_BE_RUN_ON_DST_DC: DWORD = 8558;
+pub const ERROR_DS_SRC_DC_MUST_BE_SP4_OR_GREATER: DWORD = 8559;
+pub const ERROR_DS_CANT_TREE_DELETE_CRITICAL_OBJ: DWORD = 8560;
+pub const ERROR_DS_INIT_FAILURE_CONSOLE: DWORD = 8561;
+pub const ERROR_DS_SAM_INIT_FAILURE_CONSOLE: DWORD = 8562;
+pub const ERROR_DS_FOREST_VERSION_TOO_HIGH: DWORD = 8563;
+pub const ERROR_DS_DOMAIN_VERSION_TOO_HIGH: DWORD = 8564;
+pub const ERROR_DS_FOREST_VERSION_TOO_LOW: DWORD = 8565;
+pub const ERROR_DS_DOMAIN_VERSION_TOO_LOW: DWORD = 8566;
+pub const ERROR_DS_INCOMPATIBLE_VERSION: DWORD = 8567;
+pub const ERROR_DS_LOW_DSA_VERSION: DWORD = 8568;
+pub const ERROR_DS_NO_BEHAVIOR_VERSION_IN_MIXEDDOMAIN: DWORD = 8569;
+pub const ERROR_DS_NOT_SUPPORTED_SORT_ORDER: DWORD = 8570;
+pub const ERROR_DS_NAME_NOT_UNIQUE: DWORD = 8571;
+pub const ERROR_DS_MACHINE_ACCOUNT_CREATED_PRENT4: DWORD = 8572;
+pub const ERROR_DS_OUT_OF_VERSION_STORE: DWORD = 8573;
+pub const ERROR_DS_INCOMPATIBLE_CONTROLS_USED: DWORD = 8574;
+pub const ERROR_DS_NO_REF_DOMAIN: DWORD = 8575;
+pub const ERROR_DS_RESERVED_LINK_ID: DWORD = 8576;
+pub const ERROR_DS_LINK_ID_NOT_AVAILABLE: DWORD = 8577;
+pub const ERROR_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER: DWORD = 8578;
+pub const ERROR_DS_MODIFYDN_DISALLOWED_BY_INSTANCE_TYPE: DWORD = 8579;
+pub const ERROR_DS_NO_OBJECT_MOVE_IN_SCHEMA_NC: DWORD = 8580;
+pub const ERROR_DS_MODIFYDN_DISALLOWED_BY_FLAG: DWORD = 8581;
+pub const ERROR_DS_MODIFYDN_WRONG_GRANDPARENT: DWORD = 8582;
+pub const ERROR_DS_NAME_ERROR_TRUST_REFERRAL: DWORD = 8583;
+pub const ERROR_NOT_SUPPORTED_ON_STANDARD_SERVER: DWORD = 8584;
+pub const ERROR_DS_CANT_ACCESS_REMOTE_PART_OF_AD: DWORD = 8585;
+pub const ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE_V2: DWORD = 8586;
+pub const ERROR_DS_THREAD_LIMIT_EXCEEDED: DWORD = 8587;
+pub const ERROR_DS_NOT_CLOSEST: DWORD = 8588;
+pub const ERROR_DS_CANT_DERIVE_SPN_WITHOUT_SERVER_REF: DWORD = 8589;
+pub const ERROR_DS_SINGLE_USER_MODE_FAILED: DWORD = 8590;
+pub const ERROR_DS_NTDSCRIPT_SYNTAX_ERROR: DWORD = 8591;
+pub const ERROR_DS_NTDSCRIPT_PROCESS_ERROR: DWORD = 8592;
+pub const ERROR_DS_DIFFERENT_REPL_EPOCHS: DWORD = 8593;
+pub const ERROR_DS_DRS_EXTENSIONS_CHANGED: DWORD = 8594;
+pub const ERROR_DS_REPLICA_SET_CHANGE_NOT_ALLOWED_ON_DISABLED_CR: DWORD = 8595;
+pub const ERROR_DS_NO_MSDS_INTID: DWORD = 8596;
+pub const ERROR_DS_DUP_MSDS_INTID: DWORD = 8597;
+pub const ERROR_DS_EXISTS_IN_RDNATTID: DWORD = 8598;
+pub const ERROR_DS_AUTHORIZATION_FAILED: DWORD = 8599;
+pub const ERROR_DS_INVALID_SCRIPT: DWORD = 8600;
+pub const ERROR_DS_REMOTE_CROSSREF_OP_FAILED: DWORD = 8601;
+pub const ERROR_DS_CROSS_REF_BUSY: DWORD = 8602;
+pub const ERROR_DS_CANT_DERIVE_SPN_FOR_DELETED_DOMAIN: DWORD = 8603;
+pub const ERROR_DS_CANT_DEMOTE_WITH_WRITEABLE_NC: DWORD = 8604;
+pub const ERROR_DS_DUPLICATE_ID_FOUND: DWORD = 8605;
+pub const ERROR_DS_INSUFFICIENT_ATTR_TO_CREATE_OBJECT: DWORD = 8606;
+pub const ERROR_DS_GROUP_CONVERSION_ERROR: DWORD = 8607;
+pub const ERROR_DS_CANT_MOVE_APP_BASIC_GROUP: DWORD = 8608;
+pub const ERROR_DS_CANT_MOVE_APP_QUERY_GROUP: DWORD = 8609;
+pub const ERROR_DS_ROLE_NOT_VERIFIED: DWORD = 8610;
+pub const ERROR_DS_WKO_CONTAINER_CANNOT_BE_SPECIAL: DWORD = 8611;
+pub const ERROR_DS_DOMAIN_RENAME_IN_PROGRESS: DWORD = 8612;
+pub const ERROR_DS_EXISTING_AD_CHILD_NC: DWORD = 8613;
+pub const ERROR_DS_REPL_LIFETIME_EXCEEDED: DWORD = 8614;
+pub const ERROR_DS_DISALLOWED_IN_SYSTEM_CONTAINER: DWORD = 8615;
+pub const ERROR_DS_LDAP_SEND_QUEUE_FULL: DWORD = 8616;
+pub const ERROR_DS_DRA_OUT_SCHEDULE_WINDOW: DWORD = 8617;
+pub const ERROR_SXS_SECTION_NOT_FOUND: DWORD = 14000;
+pub const ERROR_SXS_CANT_GEN_ACTCTX: DWORD = 14001;
+pub const ERROR_SXS_INVALID_ACTCTXDATA_FORMAT: DWORD = 14002;
+pub const ERROR_SXS_ASSEMBLY_NOT_FOUND: DWORD = 14003;
+pub const ERROR_SXS_MANIFEST_FORMAT_ERROR: DWORD = 14004;
+pub const ERROR_SXS_MANIFEST_PARSE_ERROR: DWORD = 14005;
+pub const ERROR_SXS_ACTIVATION_CONTEXT_DISABLED: DWORD = 14006;
+pub const ERROR_SXS_KEY_NOT_FOUND: DWORD = 14007;
+pub const ERROR_SXS_VERSION_CONFLICT: DWORD = 14008;
+pub const ERROR_SXS_WRONG_SECTION_TYPE: DWORD = 14009;
+pub const ERROR_SXS_THREAD_QUERIES_DISABLED: DWORD = 14010;
+pub const ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET: DWORD = 14011;
+pub const ERROR_SXS_UNKNOWN_ENCODING_GROUP: DWORD = 14012;
+pub const ERROR_SXS_UNKNOWN_ENCODING: DWORD = 14013;
+pub const ERROR_SXS_INVALID_XML_NAMESPACE_URI: DWORD = 14014;
+pub const ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED: DWORD = 14015;
+pub const ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED: DWORD = 14016;
+pub const ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE: DWORD = 14017;
+pub const ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE: DWORD = 14018;
+pub const ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE: DWORD = 14019;
+pub const ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT: DWORD = 14020;
+pub const ERROR_SXS_DUPLICATE_DLL_NAME: DWORD = 14021;
+pub const ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME: DWORD = 14022;
+pub const ERROR_SXS_DUPLICATE_CLSID: DWORD = 14023;
+pub const ERROR_SXS_DUPLICATE_IID: DWORD = 14024;
+pub const ERROR_SXS_DUPLICATE_TLBID: DWORD = 14025;
+pub const ERROR_SXS_DUPLICATE_PROGID: DWORD = 14026;
+pub const ERROR_SXS_DUPLICATE_ASSEMBLY_NAME: DWORD = 14027;
+pub const ERROR_SXS_FILE_HASH_MISMATCH: DWORD = 14028;
+pub const ERROR_SXS_POLICY_PARSE_ERROR: DWORD = 14029;
+pub const ERROR_SXS_XML_E_MISSINGQUOTE: DWORD = 14030;
+pub const ERROR_SXS_XML_E_COMMENTSYNTAX: DWORD = 14031;
+pub const ERROR_SXS_XML_E_BADSTARTNAMECHAR: DWORD = 14032;
+pub const ERROR_SXS_XML_E_BADNAMECHAR: DWORD = 14033;
+pub const ERROR_SXS_XML_E_BADCHARINSTRING: DWORD = 14034;
+pub const ERROR_SXS_XML_E_XMLDECLSYNTAX: DWORD = 14035;
+pub const ERROR_SXS_XML_E_BADCHARDATA: DWORD = 14036;
+pub const ERROR_SXS_XML_E_MISSINGWHITESPACE: DWORD = 14037;
+pub const ERROR_SXS_XML_E_EXPECTINGTAGEND: DWORD = 14038;
+pub const ERROR_SXS_XML_E_MISSINGSEMICOLON: DWORD = 14039;
+pub const ERROR_SXS_XML_E_UNBALANCEDPAREN: DWORD = 14040;
+pub const ERROR_SXS_XML_E_INTERNALERROR: DWORD = 14041;
+pub const ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE: DWORD = 14042;
+pub const ERROR_SXS_XML_E_INCOMPLETE_ENCODING: DWORD = 14043;
+pub const ERROR_SXS_XML_E_MISSING_PAREN: DWORD = 14044;
+pub const ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE: DWORD = 14045;
+pub const ERROR_SXS_XML_E_MULTIPLE_COLONS: DWORD = 14046;
+pub const ERROR_SXS_XML_E_INVALID_DECIMAL: DWORD = 14047;
+pub const ERROR_SXS_XML_E_INVALID_HEXIDECIMAL: DWORD = 14048;
+pub const ERROR_SXS_XML_E_INVALID_UNICODE: DWORD = 14049;
+pub const ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK: DWORD = 14050;
+pub const ERROR_SXS_XML_E_UNEXPECTEDENDTAG: DWORD = 14051;
+pub const ERROR_SXS_XML_E_UNCLOSEDTAG: DWORD = 14052;
+pub const ERROR_SXS_XML_E_DUPLICATEATTRIBUTE: DWORD = 14053;
+pub const ERROR_SXS_XML_E_MULTIPLEROOTS: DWORD = 14054;
+pub const ERROR_SXS_XML_E_INVALIDATROOTLEVEL: DWORD = 14055;
+pub const ERROR_SXS_XML_E_BADXMLDECL: DWORD = 14056;
+pub const ERROR_SXS_XML_E_MISSINGROOT: DWORD = 14057;
+pub const ERROR_SXS_XML_E_UNEXPECTEDEOF: DWORD = 14058;
+pub const ERROR_SXS_XML_E_BADPEREFINSUBSET: DWORD = 14059;
+pub const ERROR_SXS_XML_E_UNCLOSEDSTARTTAG: DWORD = 14060;
+pub const ERROR_SXS_XML_E_UNCLOSEDENDTAG: DWORD = 14061;
+pub const ERROR_SXS_XML_E_UNCLOSEDSTRING: DWORD = 14062;
+pub const ERROR_SXS_XML_E_UNCLOSEDCOMMENT: DWORD = 14063;
+pub const ERROR_SXS_XML_E_UNCLOSEDDECL: DWORD = 14064;
+pub const ERROR_SXS_XML_E_UNCLOSEDCDATA: DWORD = 14065;
+pub const ERROR_SXS_XML_E_RESERVEDNAMESPACE: DWORD = 14066;
+pub const ERROR_SXS_XML_E_INVALIDENCODING: DWORD = 14067;
+pub const ERROR_SXS_XML_E_INVALIDSWITCH: DWORD = 14068;
+pub const ERROR_SXS_XML_E_BADXMLCASE: DWORD = 14069;
+pub const ERROR_SXS_XML_E_INVALID_STANDALONE: DWORD = 14070;
+pub const ERROR_SXS_XML_E_UNEXPECTED_STANDALONE: DWORD = 14071;
+pub const ERROR_SXS_XML_E_INVALID_VERSION: DWORD = 14072;
+pub const ERROR_SXS_XML_E_MISSINGEQUALS: DWORD = 14073;
+pub const ERROR_SXS_PROTECTION_RECOVERY_FAILED: DWORD = 14074;
+pub const ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT: DWORD = 14075;
+pub const ERROR_SXS_PROTECTION_CATALOG_NOT_VALID: DWORD = 14076;
+pub const ERROR_SXS_UNTRANSLATABLE_HRESULT: DWORD = 14077;
+pub const ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING: DWORD = 14078;
+pub const ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE: DWORD = 14079;
+pub const ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME: DWORD = 14080;
+pub const ERROR_SXS_ASSEMBLY_MISSING: DWORD = 14081;
+pub const ERROR_SXS_CORRUPT_ACTIVATION_STACK: DWORD = 14082;
+pub const ERROR_SXS_CORRUPTION: DWORD = 14083;
+pub const ERROR_SXS_EARLY_DEACTIVATION: DWORD = 14084;
+pub const ERROR_SXS_INVALID_DEACTIVATION: DWORD = 14085;
+pub const ERROR_SXS_MULTIPLE_DEACTIVATION: DWORD = 14086;
+pub const ERROR_SXS_PROCESS_TERMINATION_REQUESTED: DWORD = 14087;
+pub const ERROR_SXS_RELEASE_ACTIVATION_CONTEXT: DWORD = 14088;
+pub const ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY: DWORD = 14089;
+pub const ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE: DWORD = 14090;
+pub const ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME: DWORD = 14091;
+pub const ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE: DWORD = 14092;
+pub const ERROR_SXS_IDENTITY_PARSE_ERROR: DWORD = 14093;
+pub const ERROR_MALFORMED_SUBSTITUTION_STRING: DWORD = 14094;
+pub const ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN: DWORD = 14095;
+pub const ERROR_UNMAPPED_SUBSTITUTION_STRING: DWORD = 14096;
+pub const ERROR_SXS_ASSEMBLY_NOT_LOCKED: DWORD = 14097;
+pub const ERROR_SXS_COMPONENT_STORE_CORRUPT: DWORD = 14098;
+pub const ERROR_ADVANCED_INSTALLER_FAILED: DWORD = 14099;
+pub const ERROR_XML_ENCODING_MISMATCH: DWORD = 14100;
+pub const ERROR_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT: DWORD = 14101;
+pub const ERROR_SXS_IDENTITIES_DIFFERENT: DWORD = 14102;
+pub const ERROR_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT: DWORD = 14103;
+pub const ERROR_SXS_FILE_NOT_PART_OF_ASSEMBLY: DWORD = 14104;
+pub const ERROR_SXS_MANIFEST_TOO_BIG: DWORD = 14105;
+pub const ERROR_SXS_SETTING_NOT_REGISTERED: DWORD = 14106;
+pub const ERROR_SXS_TRANSACTION_CLOSURE_INCOMPLETE: DWORD = 14107;
+pub const ERROR_SMI_PRIMITIVE_INSTALLER_FAILED: DWORD = 14108;
+pub const ERROR_GENERIC_COMMAND_FAILED: DWORD = 14109;
+pub const ERROR_SXS_FILE_HASH_MISSING: DWORD = 14110;
+pub const ERROR_IPSEC_QM_POLICY_EXISTS: DWORD = 13000;
+pub const ERROR_IPSEC_QM_POLICY_NOT_FOUND: DWORD = 13001;
+pub const ERROR_IPSEC_QM_POLICY_IN_USE: DWORD = 13002;
+pub const ERROR_IPSEC_MM_POLICY_EXISTS: DWORD = 13003;
+pub const ERROR_IPSEC_MM_POLICY_NOT_FOUND: DWORD = 13004;
+pub const ERROR_IPSEC_MM_POLICY_IN_USE: DWORD = 13005;
+pub const ERROR_IPSEC_MM_FILTER_EXISTS: DWORD = 13006;
+pub const ERROR_IPSEC_MM_FILTER_NOT_FOUND: DWORD = 13007;
+pub const ERROR_IPSEC_TRANSPORT_FILTER_EXISTS: DWORD = 13008;
+pub const ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND: DWORD = 13009;
+pub const ERROR_IPSEC_MM_AUTH_EXISTS: DWORD = 13010;
+pub const ERROR_IPSEC_MM_AUTH_NOT_FOUND: DWORD = 13011;
+pub const ERROR_IPSEC_MM_AUTH_IN_USE: DWORD = 13012;
+pub const ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND: DWORD = 13013;
+pub const ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND: DWORD = 13014;
+pub const ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND: DWORD = 13015;
+pub const ERROR_IPSEC_TUNNEL_FILTER_EXISTS: DWORD = 13016;
+pub const ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND: DWORD = 13017;
+pub const ERROR_IPSEC_MM_FILTER_PENDING_DELETION: DWORD = 13018;
+pub const ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION: DWORD = 13019;
+pub const ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION: DWORD = 13020;
+pub const ERROR_IPSEC_MM_POLICY_PENDING_DELETION: DWORD = 13021;
+pub const ERROR_IPSEC_MM_AUTH_PENDING_DELETION: DWORD = 13022;
+pub const ERROR_IPSEC_QM_POLICY_PENDING_DELETION: DWORD = 13023;
+pub const ERROR_IPSEC_IKE_NEG_STATUS_BEGIN: DWORD = 13800;
+pub const ERROR_IPSEC_IKE_AUTH_FAIL: DWORD = 13801;
+pub const ERROR_IPSEC_IKE_ATTRIB_FAIL: DWORD = 13802;
+pub const ERROR_IPSEC_IKE_NEGOTIATION_PENDING: DWORD = 13803;
+pub const ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR: DWORD = 13804;
+pub const ERROR_IPSEC_IKE_TIMED_OUT: DWORD = 13805;
+pub const ERROR_IPSEC_IKE_NO_CERT: DWORD = 13806;
+pub const ERROR_IPSEC_IKE_SA_DELETED: DWORD = 13807;
+pub const ERROR_IPSEC_IKE_SA_REAPED: DWORD = 13808;
+pub const ERROR_IPSEC_IKE_MM_ACQUIRE_DROP: DWORD = 13809;
+pub const ERROR_IPSEC_IKE_QM_ACQUIRE_DROP: DWORD = 13810;
+pub const ERROR_IPSEC_IKE_QUEUE_DROP_MM: DWORD = 13811;
+pub const ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM: DWORD = 13812;
+pub const ERROR_IPSEC_IKE_DROP_NO_RESPONSE: DWORD = 13813;
+pub const ERROR_IPSEC_IKE_MM_DELAY_DROP: DWORD = 13814;
+pub const ERROR_IPSEC_IKE_QM_DELAY_DROP: DWORD = 13815;
+pub const ERROR_IPSEC_IKE_ERROR: DWORD = 13816;
+pub const ERROR_IPSEC_IKE_CRL_FAILED: DWORD = 13817;
+pub const ERROR_IPSEC_IKE_INVALID_KEY_USAGE: DWORD = 13818;
+pub const ERROR_IPSEC_IKE_INVALID_CERT_TYPE: DWORD = 13819;
+pub const ERROR_IPSEC_IKE_NO_PRIVATE_KEY: DWORD = 13820;
+pub const ERROR_IPSEC_IKE_DH_FAIL: DWORD = 13822;
+pub const ERROR_IPSEC_IKE_INVALID_HEADER: DWORD = 13824;
+pub const ERROR_IPSEC_IKE_NO_POLICY: DWORD = 13825;
+pub const ERROR_IPSEC_IKE_INVALID_SIGNATURE: DWORD = 13826;
+pub const ERROR_IPSEC_IKE_KERBEROS_ERROR: DWORD = 13827;
+pub const ERROR_IPSEC_IKE_NO_PUBLIC_KEY: DWORD = 13828;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR: DWORD = 13829;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_SA: DWORD = 13830;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_PROP: DWORD = 13831;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_TRANS: DWORD = 13832;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_KE: DWORD = 13833;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_ID: DWORD = 13834;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_CERT: DWORD = 13835;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ: DWORD = 13836;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_HASH: DWORD = 13837;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_SIG: DWORD = 13838;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_NONCE: DWORD = 13839;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY: DWORD = 13840;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_DELETE: DWORD = 13841;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR: DWORD = 13842;
+pub const ERROR_IPSEC_IKE_INVALID_PAYLOAD: DWORD = 13843;
+pub const ERROR_IPSEC_IKE_LOAD_SOFT_SA: DWORD = 13844;
+pub const ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN: DWORD = 13845;
+pub const ERROR_IPSEC_IKE_INVALID_COOKIE: DWORD = 13846;
+pub const ERROR_IPSEC_IKE_NO_PEER_CERT: DWORD = 13847;
+pub const ERROR_IPSEC_IKE_PEER_CRL_FAILED: DWORD = 13848;
+pub const ERROR_IPSEC_IKE_POLICY_CHANGE: DWORD = 13849;
+pub const ERROR_IPSEC_IKE_NO_MM_POLICY: DWORD = 13850;
+pub const ERROR_IPSEC_IKE_NOTCBPRIV: DWORD = 13851;
+pub const ERROR_IPSEC_IKE_SECLOADFAIL: DWORD = 13852;
+pub const ERROR_IPSEC_IKE_FAILSSPINIT: DWORD = 13853;
+pub const ERROR_IPSEC_IKE_FAILQUERYSSP: DWORD = 13854;
+pub const ERROR_IPSEC_IKE_SRVACQFAIL: DWORD = 13855;
+pub const ERROR_IPSEC_IKE_SRVQUERYCRED: DWORD = 13856;
+pub const ERROR_IPSEC_IKE_GETSPIFAIL: DWORD = 13857;
+pub const ERROR_IPSEC_IKE_INVALID_FILTER: DWORD = 13858;
+pub const ERROR_IPSEC_IKE_OUT_OF_MEMORY: DWORD = 13859;
+pub const ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED: DWORD = 13860;
+pub const ERROR_IPSEC_IKE_INVALID_POLICY: DWORD = 13861;
+pub const ERROR_IPSEC_IKE_UNKNOWN_DOI: DWORD = 13862;
+pub const ERROR_IPSEC_IKE_INVALID_SITUATION: DWORD = 13863;
+pub const ERROR_IPSEC_IKE_DH_FAILURE: DWORD = 13864;
+pub const ERROR_IPSEC_IKE_INVALID_GROUP: DWORD = 13865;
+pub const ERROR_IPSEC_IKE_ENCRYPT: DWORD = 13866;
+pub const ERROR_IPSEC_IKE_DECRYPT: DWORD = 13867;
+pub const ERROR_IPSEC_IKE_POLICY_MATCH: DWORD = 13868;
+pub const ERROR_IPSEC_IKE_UNSUPPORTED_ID: DWORD = 13869;
+pub const ERROR_IPSEC_IKE_INVALID_HASH: DWORD = 13870;
+pub const ERROR_IPSEC_IKE_INVALID_HASH_ALG: DWORD = 13871;
+pub const ERROR_IPSEC_IKE_INVALID_HASH_SIZE: DWORD = 13872;
+pub const ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG: DWORD = 13873;
+pub const ERROR_IPSEC_IKE_INVALID_AUTH_ALG: DWORD = 13874;
+pub const ERROR_IPSEC_IKE_INVALID_SIG: DWORD = 13875;
+pub const ERROR_IPSEC_IKE_LOAD_FAILED: DWORD = 13876;
+pub const ERROR_IPSEC_IKE_RPC_DELETE: DWORD = 13877;
+pub const ERROR_IPSEC_IKE_BENIGN_REINIT: DWORD = 13878;
+pub const ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY: DWORD = 13879;
+pub const ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN: DWORD = 13881;
+pub const ERROR_IPSEC_IKE_MM_LIMIT: DWORD = 13882;
+pub const ERROR_IPSEC_IKE_NEGOTIATION_DISABLED: DWORD = 13883;
+/*pub const ERROR_IPSEC_IKE_NEG_STATUS_END: DWORD = 13884;*/
+pub const ERROR_IPSEC_IKE_QM_LIMIT: DWORD = 13884;
+pub const ERROR_IPSEC_IKE_MM_EXPIRED: DWORD = 13885;
+pub const ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID: DWORD = 13886;
+pub const ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH: DWORD = 13887;
+pub const ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID: DWORD = 13888;
+pub const ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD: DWORD = 13889;
+pub const ERROR_IPSEC_IKE_DOS_COOKIE_SENT: DWORD = 13890;
+pub const ERROR_IPSEC_IKE_SHUTTING_DOWN: DWORD = 13891;
+pub const ERROR_IPSEC_IKE_CGA_AUTH_FAILED: DWORD = 13892;
+pub const ERROR_IPSEC_IKE_PROCESS_ERR_NATOA: DWORD = 13893;
+pub const ERROR_IPSEC_IKE_INVALID_MM_FOR_QM: DWORD = 13894;
+pub const ERROR_IPSEC_IKE_QM_EXPIRED: DWORD = 13895;
+pub const ERROR_IPSEC_IKE_TOO_MANY_FILTERS: DWORD = 13896;
+pub const ERROR_IPSEC_IKE_NEG_STATUS_END: DWORD = 13897;
+pub const ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL: DWORD = 13898;
+pub const ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE: DWORD = 13899;
+pub const ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING: DWORD = 13900;
+pub const ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING: DWORD = 13901;
+pub const ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS: DWORD = 13902;
+pub const ERROR_IPSEC_IKE_RATELIMIT_DROP: DWORD = 13903;
+pub const ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE: DWORD = 13904;
+pub const ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE: DWORD = 13905;
+pub const ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE: DWORD = 13906;
+pub const ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY: DWORD = 13907;
+pub const ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE: DWORD = 13908;
+pub const ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END: DWORD = 13909;
+pub const ERROR_IPSEC_BAD_SPI: DWORD = 13910;
+pub const ERROR_IPSEC_SA_LIFETIME_EXPIRED: DWORD = 13911;
+pub const ERROR_IPSEC_WRONG_SA: DWORD = 13912;
+pub const ERROR_IPSEC_REPLAY_CHECK_FAILED: DWORD = 13913;
+pub const ERROR_IPSEC_INVALID_PACKET: DWORD = 13914;
+pub const ERROR_IPSEC_INTEGRITY_CHECK_FAILED: DWORD = 13915;
+pub const ERROR_IPSEC_CLEAR_TEXT_DROP: DWORD = 13916;
+pub const ERROR_IPSEC_AUTH_FIREWALL_DROP: DWORD = 13917;
+pub const ERROR_IPSEC_THROTTLE_DROP: DWORD = 13918;
+pub const ERROR_IPSEC_DOSP_BLOCK: DWORD = 13925;
+pub const ERROR_IPSEC_DOSP_RECEIVED_MULTICAST: DWORD = 13926;
+pub const ERROR_IPSEC_DOSP_INVALID_PACKET: DWORD = 13927;
+pub const ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED: DWORD = 13928;
+pub const ERROR_IPSEC_DOSP_MAX_ENTRIES: DWORD = 13929;
+pub const ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED: DWORD = 13930;
+pub const ERROR_IPSEC_DOSP_NOT_INSTALLED: DWORD = 13931;
+pub const ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES: DWORD = 13932;
+pub const ERROR_EVT_INVALID_CHANNEL_PATH: DWORD = 15000;
+pub const ERROR_EVT_INVALID_QUERY: DWORD = 15001;
+pub const ERROR_EVT_PUBLISHER_METADATA_NOT_FOUND: DWORD = 15002;
+pub const ERROR_EVT_EVENT_TEMPLATE_NOT_FOUND: DWORD = 15003;
+pub const ERROR_EVT_INVALID_PUBLISHER_NAME: DWORD = 15004;
+pub const ERROR_EVT_INVALID_EVENT_DATA: DWORD = 15005;
+pub const ERROR_EVT_CHANNEL_NOT_FOUND: DWORD = 15007;
+pub const ERROR_EVT_MALFORMED_XML_TEXT: DWORD = 15008;
+pub const ERROR_EVT_SUBSCRIPTION_TO_DIRECT_CHANNEL: DWORD = 15009;
+pub const ERROR_EVT_CONFIGURATION_ERROR: DWORD = 15010;
+pub const ERROR_EVT_QUERY_RESULT_STALE: DWORD = 15011;
+pub const ERROR_EVT_QUERY_RESULT_INVALID_POSITION: DWORD = 15012;
+pub const ERROR_EVT_NON_VALIDATING_MSXML: DWORD = 15013;
+pub const ERROR_EVT_FILTER_ALREADYSCOPED: DWORD = 15014;
+pub const ERROR_EVT_FILTER_NOTELTSET: DWORD = 15015;
+pub const ERROR_EVT_FILTER_INVARG: DWORD = 15016;
+pub const ERROR_EVT_FILTER_INVTEST: DWORD = 15017;
+pub const ERROR_EVT_FILTER_INVTYPE: DWORD = 15018;
+pub const ERROR_EVT_FILTER_PARSEERR: DWORD = 15019;
+pub const ERROR_EVT_FILTER_UNSUPPORTEDOP: DWORD = 15020;
+pub const ERROR_EVT_FILTER_UNEXPECTEDTOKEN: DWORD = 15021;
+pub const ERROR_EVT_INVALID_OPERATION_OVER_ENABLED_DIRECT_CHANNEL: DWORD = 15022;
+pub const ERROR_EVT_INVALID_CHANNEL_PROPERTY_VALUE: DWORD = 15023;
+pub const ERROR_EVT_INVALID_PUBLISHER_PROPERTY_VALUE: DWORD = 15024;
+pub const ERROR_EVT_CHANNEL_CANNOT_ACTIVATE: DWORD = 15025;
+pub const ERROR_EVT_FILTER_TOO_COMPLEX: DWORD = 15026;
+pub const ERROR_EVT_MESSAGE_NOT_FOUND: DWORD = 15027;
+pub const ERROR_EVT_MESSAGE_ID_NOT_FOUND: DWORD = 15028;
+pub const ERROR_EVT_UNRESOLVED_VALUE_INSERT: DWORD = 15029;
+pub const ERROR_EVT_UNRESOLVED_PARAMETER_INSERT: DWORD = 15030;
+pub const ERROR_EVT_MAX_INSERTS_REACHED: DWORD = 15031;
+pub const ERROR_EVT_EVENT_DEFINITION_NOT_FOUND: DWORD = 15032;
+pub const ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND: DWORD = 15033;
+pub const ERROR_EVT_VERSION_TOO_OLD: DWORD = 15034;
+pub const ERROR_EVT_VERSION_TOO_NEW: DWORD = 15035;
+pub const ERROR_EVT_CANNOT_OPEN_CHANNEL_OF_QUERY: DWORD = 15036;
+pub const ERROR_EVT_PUBLISHER_DISABLED: DWORD = 15037;
+pub const ERROR_EVT_FILTER_OUT_OF_RANGE: DWORD = 15038;
+pub const ERROR_EC_SUBSCRIPTION_CANNOT_ACTIVATE: DWORD = 15080;
+pub const ERROR_EC_LOG_DISABLED: DWORD = 15081;
+pub const ERROR_EC_CIRCULAR_FORWARDING: DWORD = 15082;
+pub const ERROR_EC_CREDSTORE_FULL: DWORD = 15083;
+pub const ERROR_EC_CRED_NOT_FOUND: DWORD = 15084;
+pub const ERROR_EC_NO_ACTIVE_CHANNEL: DWORD = 15085;
+pub const ERROR_MUI_FILE_NOT_FOUND: DWORD = 15100;
+pub const ERROR_MUI_INVALID_FILE: DWORD = 15101;
+pub const ERROR_MUI_INVALID_RC_CONFIG: DWORD = 15102;
+pub const ERROR_MUI_INVALID_LOCALE_NAME: DWORD = 15103;
+pub const ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME: DWORD = 15104;
+pub const ERROR_MUI_FILE_NOT_LOADED: DWORD = 15105;
+pub const ERROR_RESOURCE_ENUM_USER_STOP: DWORD = 15106;
+pub const ERROR_MUI_INTLSETTINGS_UILANG_NOT_INSTALLED: DWORD = 15107;
+pub const ERROR_MUI_INTLSETTINGS_INVALID_LOCALE_NAME: DWORD = 15108;
+pub const ERROR_MRM_RUNTIME_NO_DEFAULT_OR_NEUTRAL_RESOURCE: DWORD = 15110;
+pub const ERROR_MRM_INVALID_PRICONFIG: DWORD = 15111;
+pub const ERROR_MRM_INVALID_FILE_TYPE: DWORD = 15112;
+pub const ERROR_MRM_UNKNOWN_QUALIFIER: DWORD = 15113;
+pub const ERROR_MRM_INVALID_QUALIFIER_VALUE: DWORD = 15114;
+pub const ERROR_MRM_NO_CANDIDATE: DWORD = 15115;
+pub const ERROR_MRM_NO_MATCH_OR_DEFAULT_CANDIDATE: DWORD = 15116;
+pub const ERROR_MRM_RESOURCE_TYPE_MISMATCH: DWORD = 15117;
+pub const ERROR_MRM_DUPLICATE_MAP_NAME: DWORD = 15118;
+pub const ERROR_MRM_DUPLICATE_ENTRY: DWORD = 15119;
+pub const ERROR_MRM_INVALID_RESOURCE_IDENTIFIER: DWORD = 15120;
+pub const ERROR_MRM_FILEPATH_TOO_LONG: DWORD = 15121;
+pub const ERROR_MRM_UNSUPPORTED_DIRECTORY_TYPE: DWORD = 15122;
+pub const ERROR_MRM_INVALID_PRI_FILE: DWORD = 15126;
+pub const ERROR_MRM_NAMED_RESOURCE_NOT_FOUND: DWORD = 15127;
+pub const ERROR_MRM_MAP_NOT_FOUND: DWORD = 15135;
+pub const ERROR_MRM_UNSUPPORTED_PROFILE_TYPE: DWORD = 15136;
+pub const ERROR_MRM_INVALID_QUALIFIER_OPERATOR: DWORD = 15137;
+pub const ERROR_MRM_INDETERMINATE_QUALIFIER_VALUE: DWORD = 15138;
+pub const ERROR_MRM_AUTOMERGE_ENABLED: DWORD = 15139;
+pub const ERROR_MRM_TOO_MANY_RESOURCES: DWORD = 15140;
+pub const ERROR_MCA_INVALID_CAPABILITIES_STRING: DWORD = 15200;
+pub const ERROR_MCA_INVALID_VCP_VERSION: DWORD = 15201;
+pub const ERROR_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION: DWORD = 15202;
+pub const ERROR_MCA_MCCS_VERSION_MISMATCH: DWORD = 15203;
+pub const ERROR_MCA_UNSUPPORTED_MCCS_VERSION: DWORD = 15204;
+pub const ERROR_MCA_INTERNAL_ERROR: DWORD = 15205;
+pub const ERROR_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED: DWORD = 15206;
+pub const ERROR_MCA_UNSUPPORTED_COLOR_TEMPERATURE: DWORD = 15207;
+pub const ERROR_AMBIGUOUS_SYSTEM_DEVICE: DWORD = 15250;
+pub const ERROR_SYSTEM_DEVICE_NOT_FOUND: DWORD = 15299;
+pub const ERROR_HASH_NOT_SUPPORTED: DWORD = 15300;
+pub const ERROR_HASH_NOT_PRESENT: DWORD = 15301;
+pub const ERROR_SECONDARY_IC_PROVIDER_NOT_REGISTERED: DWORD = 15321;
+pub const ERROR_GPIO_CLIENT_INFORMATION_INVALID: DWORD = 15322;
+pub const ERROR_GPIO_VERSION_NOT_SUPPORTED: DWORD = 15323;
+pub const ERROR_GPIO_INVALID_REGISTRATION_PACKET: DWORD = 15324;
+pub const ERROR_GPIO_OPERATION_DENIED: DWORD = 15325;
+pub const ERROR_GPIO_INCOMPATIBLE_CONNECT_MODE: DWORD = 15326;
+pub const ERROR_GPIO_INTERRUPT_ALREADY_UNMASKED: DWORD = 15327;
+pub const ERROR_CANNOT_SWITCH_RUNLEVEL: DWORD = 15400;
+pub const ERROR_INVALID_RUNLEVEL_SETTING: DWORD = 15401;
+pub const ERROR_RUNLEVEL_SWITCH_TIMEOUT: DWORD = 15402;
+pub const ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT: DWORD = 15403;
+pub const ERROR_RUNLEVEL_SWITCH_IN_PROGRESS: DWORD = 15404;
+pub const ERROR_SERVICES_FAILED_AUTOSTART: DWORD = 15405;
+pub const ERROR_COM_TASK_STOP_PENDING: DWORD = 15501;
+pub const ERROR_INSTALL_OPEN_PACKAGE_FAILED: DWORD = 15600;
+pub const ERROR_INSTALL_PACKAGE_NOT_FOUND: DWORD = 15601;
+pub const ERROR_INSTALL_INVALID_PACKAGE: DWORD = 15602;
+pub const ERROR_INSTALL_RESOLVE_DEPENDENCY_FAILED: DWORD = 15603;
+pub const ERROR_INSTALL_OUT_OF_DISK_SPACE: DWORD = 15604;
+pub const ERROR_INSTALL_NETWORK_FAILURE: DWORD = 15605;
+pub const ERROR_INSTALL_REGISTRATION_FAILURE: DWORD = 15606;
+pub const ERROR_INSTALL_DEREGISTRATION_FAILURE: DWORD = 15607;
+pub const ERROR_INSTALL_CANCEL: DWORD = 15608;
+pub const ERROR_INSTALL_FAILED: DWORD = 15609;
+pub const ERROR_REMOVE_FAILED: DWORD = 15610;
+pub const ERROR_PACKAGE_ALREADY_EXISTS: DWORD = 15611;
+pub const ERROR_NEEDS_REMEDIATION: DWORD = 15612;
+pub const ERROR_INSTALL_PREREQUISITE_FAILED: DWORD = 15613;
+pub const ERROR_PACKAGE_REPOSITORY_CORRUPTED: DWORD = 15614;
+pub const ERROR_INSTALL_POLICY_FAILURE: DWORD = 15615;
+pub const ERROR_PACKAGE_UPDATING: DWORD = 15616;
+pub const ERROR_DEPLOYMENT_BLOCKED_BY_POLICY: DWORD = 15617;
+pub const ERROR_PACKAGES_IN_USE: DWORD = 15618;
+pub const ERROR_RECOVERY_FILE_CORRUPT: DWORD = 15619;
+pub const ERROR_INVALID_STAGED_SIGNATURE: DWORD = 15620;
+pub const ERROR_DELETING_EXISTING_APPLICATIONDATA_STORE_FAILED: DWORD = 15621;
+pub const ERROR_INSTALL_PACKAGE_DOWNGRADE: DWORD = 15622;
+pub const ERROR_SYSTEM_NEEDS_REMEDIATION: DWORD = 15623;
+pub const ERROR_APPX_INTEGRITY_FAILURE_CLR_NGEN: DWORD = 15624;
+pub const ERROR_RESILIENCY_FILE_CORRUPT: DWORD = 15625;
+pub const ERROR_INSTALL_FIREWALL_SERVICE_NOT_RUNNING: DWORD = 15626;
+pub const ERROR_STATE_LOAD_STORE_FAILED: DWORD = 15800;
+pub const ERROR_STATE_GET_VERSION_FAILED: DWORD = 15801;
+pub const ERROR_STATE_SET_VERSION_FAILED: DWORD = 15802;
+pub const ERROR_STATE_STRUCTURED_RESET_FAILED: DWORD = 15803;
+pub const ERROR_STATE_OPEN_CONTAINER_FAILED: DWORD = 15804;
+pub const ERROR_STATE_CREATE_CONTAINER_FAILED: DWORD = 15805;
+pub const ERROR_STATE_DELETE_CONTAINER_FAILED: DWORD = 15806;
+pub const ERROR_STATE_READ_SETTING_FAILED: DWORD = 15807;
+pub const ERROR_STATE_WRITE_SETTING_FAILED: DWORD = 15808;
+pub const ERROR_STATE_DELETE_SETTING_FAILED: DWORD = 15809;
+pub const ERROR_STATE_QUERY_SETTING_FAILED: DWORD = 15810;
+pub const ERROR_STATE_READ_COMPOSITE_SETTING_FAILED: DWORD = 15811;
+pub const ERROR_STATE_WRITE_COMPOSITE_SETTING_FAILED: DWORD = 15812;
+pub const ERROR_STATE_ENUMERATE_CONTAINER_FAILED: DWORD = 15813;
+pub const ERROR_STATE_ENUMERATE_SETTINGS_FAILED: DWORD = 15814;
+pub const ERROR_STATE_COMPOSITE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED: DWORD = 15815;
+pub const ERROR_STATE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED: DWORD = 15816;
+pub const ERROR_STATE_SETTING_NAME_SIZE_LIMIT_EXCEEDED: DWORD = 15817;
+pub const ERROR_STATE_CONTAINER_NAME_SIZE_LIMIT_EXCEEDED: DWORD = 15818;
+pub const ERROR_API_UNAVAILABLE: DWORD = 15841;
+pub const ERROR_AUDITING_DISABLED: DWORD = 0xC0090001;
+pub const ERROR_ALL_SIDS_FILTERED: DWORD = 0xC0090002;
+
+pub const WSABASEERR: c_int = 10000;
+pub const WSAEINTR: c_int = WSABASEERR + 4;
+pub const WSAEBADF: c_int = WSABASEERR + 9;
+pub const WSAEACCES: c_int = WSABASEERR + 13;
+pub const WSAEFAULT: c_int = WSABASEERR + 14;
+pub const WSAEINVAL: c_int = WSABASEERR + 22;
+pub const WSAEMFILE: c_int = WSABASEERR + 24;
+pub const WSAEWOULDBLOCK: c_int = WSABASEERR + 35;
+pub const WSAEINPROGRESS: c_int = WSABASEERR + 36;
+pub const WSAEALREADY: c_int = WSABASEERR + 37;
+pub const WSAENOTSOCK: c_int = WSABASEERR + 38;
+pub const WSAEDESTADDRREQ: c_int = WSABASEERR + 39;
+pub const WSAEMSGSIZE: c_int = WSABASEERR + 40;
+pub const WSAEPROTOTYPE: c_int = WSABASEERR + 41;
+pub const WSAENOPROTOOPT: c_int = WSABASEERR + 42;
+pub const WSAEPROTONOSUPPORT: c_int = WSABASEERR + 43;
+pub const WSAESOCKTNOSUPPORT: c_int = WSABASEERR + 44;
+pub const WSAEOPNOTSUPP: c_int = WSABASEERR + 45;
+pub const WSAEPFNOSUPPORT: c_int = WSABASEERR + 46;
+pub const WSAEAFNOSUPPORT: c_int = WSABASEERR + 47;
+pub const WSAEADDRINUSE: c_int = WSABASEERR + 48;
+pub const WSAEADDRNOTAVAIL: c_int = WSABASEERR + 49;
+pub const WSAENETDOWN: c_int = WSABASEERR + 50;
+pub const WSAENETUNREACH: c_int = WSABASEERR + 51;
+pub const WSAENETRESET: c_int = WSABASEERR + 52;
+pub const WSAECONNABORTED: c_int = WSABASEERR + 53;
+pub const WSAECONNRESET: c_int = WSABASEERR + 54;
+pub const WSAENOBUFS: c_int = WSABASEERR + 55;
+pub const WSAEISCONN: c_int = WSABASEERR + 56;
+pub const WSAENOTCONN: c_int = WSABASEERR + 57;
+pub const WSAESHUTDOWN: c_int = WSABASEERR + 58;
+pub const WSAETOOMANYREFS: c_int = WSABASEERR + 59;
+pub const WSAETIMEDOUT: c_int = WSABASEERR + 60;
+pub const WSAECONNREFUSED: c_int = WSABASEERR + 61;
+pub const WSAELOOP: c_int = WSABASEERR + 62;
+pub const WSAENAMETOOLONG: c_int = WSABASEERR + 63;
+pub const WSAEHOSTDOWN: c_int = WSABASEERR + 64;
+pub const WSAEHOSTUNREACH: c_int = WSABASEERR + 65;
+pub const WSAENOTEMPTY: c_int = WSABASEERR + 66;
+pub const WSAEPROCLIM: c_int = WSABASEERR + 67;
+pub const WSAEUSERS: c_int = WSABASEERR + 68;
+pub const WSAEDQUOT: c_int = WSABASEERR + 69;
+pub const WSAESTALE: c_int = WSABASEERR + 70;
+pub const WSAEREMOTE: c_int = WSABASEERR + 71;
+pub const WSASYSNOTREADY: c_int = WSABASEERR + 91;
+pub const WSAVERNOTSUPPORTED: c_int = WSABASEERR + 92;
+pub const WSANOTINITIALISED: c_int = WSABASEERR + 93;
+pub const WSAEDISCON: c_int = WSABASEERR + 101;
+pub const WSAENOMORE: c_int = WSABASEERR + 102;
+pub const WSAECANCELLED: c_int = WSABASEERR + 103;
+pub const WSAEINVALIDPROCTABLE: c_int = WSABASEERR + 104;
+pub const WSAEINVALIDPROVIDER: c_int = WSABASEERR + 105;
+pub const WSAEPROVIDERFAILEDINIT: c_int = WSABASEERR + 106;
+pub const WSASYSCALLFAILURE: c_int = WSABASEERR + 107;
+pub const WSASERVICE_NOT_FOUND: c_int = WSABASEERR + 108;
+pub const WSATYPE_NOT_FOUND: c_int = WSABASEERR + 109;
+pub const WSA_E_NO_MORE: c_int = WSABASEERR + 110;
+pub const WSA_E_CANCELLED: c_int = WSABASEERR + 111;
+pub const WSAEREFUSED: c_int = WSABASEERR + 112;
+pub const WSAHOST_NOT_FOUND: c_int = WSABASEERR + 1001;
+pub const WSATRY_AGAIN: c_int = WSABASEERR + 1002;
+pub const WSANO_RECOVERY: c_int = WSABASEERR + 1003;
+pub const WSANO_DATA: c_int = WSABASEERR + 1004;
+pub const WSA_QOS_RECEIVERS: c_int = WSABASEERR + 1005;
+pub const WSA_QOS_SENDERS: c_int = WSABASEERR + 1006;
+pub const WSA_QOS_NO_SENDERS: c_int = WSABASEERR + 1007;
+pub const WSA_QOS_NO_RECEIVERS: c_int = WSABASEERR + 1008;
+pub const WSA_QOS_REQUEST_CONFIRMED: c_int = WSABASEERR + 1009;
+pub const WSA_QOS_ADMISSION_FAILURE: c_int = WSABASEERR + 1010;
+pub const WSA_QOS_POLICY_FAILURE: c_int = WSABASEERR + 1011;
+pub const WSA_QOS_BAD_STYLE: c_int = WSABASEERR + 1012;
+pub const WSA_QOS_BAD_OBJECT: c_int = WSABASEERR + 1013;
+pub const WSA_QOS_TRAFFIC_CTRL_ERROR: c_int = WSABASEERR + 1014;
+pub const WSA_QOS_GENERIC_ERROR: c_int = WSABASEERR + 1015;
+pub const WSA_QOS_ESERVICETYPE: c_int = WSABASEERR + 1016;
+pub const WSA_QOS_EFLOWSPEC: c_int = WSABASEERR + 1017;
+pub const WSA_QOS_EPROVSPECBUF: c_int = WSABASEERR + 1018;
+pub const WSA_QOS_EFILTERSTYLE: c_int = WSABASEERR + 1019;
+pub const WSA_QOS_EFILTERTYPE: c_int = WSABASEERR + 1020;
+pub const WSA_QOS_EFILTERCOUNT: c_int = WSABASEERR + 1021;
+pub const WSA_QOS_EOBJLENGTH: c_int = WSABASEERR + 1022;
+pub const WSA_QOS_EFLOWCOUNT: c_int = WSABASEERR + 1023;
+pub const WSA_QOS_EUNKNOWNPSOBJ: c_int = WSABASEERR + 1024;
+pub const WSA_QOS_EUNKOWNPSOBJ: c_int = WSA_QOS_EUNKNOWNPSOBJ;
+pub const WSA_QOS_EPOLICYOBJ: c_int = WSABASEERR + 1025;
+pub const WSA_QOS_EFLOWDESC: c_int = WSABASEERR + 1026;
+pub const WSA_QOS_EPSFLOWSPEC: c_int = WSABASEERR + 1027;
+pub const WSA_QOS_EPSFILTERSPEC: c_int = WSABASEERR + 1028;
+pub const WSA_QOS_ESDMODEOBJ: c_int = WSABASEERR + 1029;
+pub const WSA_QOS_ESHAPERATEOBJ: c_int = WSABASEERR + 1030;
+pub const WSA_QOS_RESERVED_PETYPE: c_int = WSABASEERR + 1031;
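
The Winsock constants above are all fixed offsets from `WSABASEERR` (10000). As a rough, stand-alone illustration of how such codes can surface to users, the sketch below maps a few of them onto `std::io::ErrorKind`. The constant values are repeated locally so the snippet compiles on any platform, and `decode_wsa_error` is an invented helper name, not a function from this patch.

```rust
use std::io::ErrorKind;

// Values mirror the WSABASEERR + offset definitions above (WSABASEERR = 10000).
const WSAEWOULDBLOCK: i32 = 10035;
const WSAETIMEDOUT: i32 = 10060;
const WSAECONNREFUSED: i32 = 10061;

// Hypothetical helper: translate a raw Winsock error code into an ErrorKind.
fn decode_wsa_error(code: i32) -> ErrorKind {
    match code {
        WSAEWOULDBLOCK => ErrorKind::WouldBlock,
        WSAETIMEDOUT => ErrorKind::TimedOut,
        WSAECONNREFUSED => ErrorKind::ConnectionRefused,
        _ => ErrorKind::Other,
    }
}

fn main() {
    assert_eq!(decode_wsa_error(10061), ErrorKind::ConnectionRefused);
    assert_eq!(decode_wsa_error(10035), ErrorKind::WouldBlock);
}
```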
diff --git a/library/std/src/sys/windows/cmath.rs b/library/std/src/sys/windows/cmath.rs
new file mode 100644
index 000000000..1a5421fac
--- /dev/null
+++ b/library/std/src/sys/windows/cmath.rs
@@ -0,0 +1,92 @@
+#![cfg(not(test))]
+
+use libc::{c_double, c_float};
+
+extern "C" {
+ pub fn acos(n: c_double) -> c_double;
+ pub fn asin(n: c_double) -> c_double;
+ pub fn atan(n: c_double) -> c_double;
+ pub fn atan2(a: c_double, b: c_double) -> c_double;
+ pub fn cbrt(n: c_double) -> c_double;
+ pub fn cbrtf(n: c_float) -> c_float;
+ pub fn cosh(n: c_double) -> c_double;
+ pub fn expm1(n: c_double) -> c_double;
+ pub fn expm1f(n: c_float) -> c_float;
+ pub fn fdim(a: c_double, b: c_double) -> c_double;
+ pub fn fdimf(a: c_float, b: c_float) -> c_float;
+ #[cfg_attr(target_env = "msvc", link_name = "_hypot")]
+ pub fn hypot(x: c_double, y: c_double) -> c_double;
+ #[cfg_attr(target_env = "msvc", link_name = "_hypotf")]
+ pub fn hypotf(x: c_float, y: c_float) -> c_float;
+ pub fn log1p(n: c_double) -> c_double;
+ pub fn log1pf(n: c_float) -> c_float;
+ pub fn sinh(n: c_double) -> c_double;
+ pub fn tan(n: c_double) -> c_double;
+ pub fn tanh(n: c_double) -> c_double;
+}
+
+pub use self::shims::*;
+
+#[cfg(not(all(target_env = "msvc", target_arch = "x86")))]
+mod shims {
+ use libc::c_float;
+
+ extern "C" {
+ pub fn acosf(n: c_float) -> c_float;
+ pub fn asinf(n: c_float) -> c_float;
+ pub fn atan2f(a: c_float, b: c_float) -> c_float;
+ pub fn atanf(n: c_float) -> c_float;
+ pub fn coshf(n: c_float) -> c_float;
+ pub fn sinhf(n: c_float) -> c_float;
+ pub fn tanf(n: c_float) -> c_float;
+ pub fn tanhf(n: c_float) -> c_float;
+ }
+}
+
+// On 32-bit x86 MSVC these functions aren't defined, so we just define shims
+// which promote everything to f64, perform the calculation, and then demote
+// back to f32. While not precisely correct, these should be "correct enough" for now.
+#[cfg(all(target_env = "msvc", target_arch = "x86"))]
+mod shims {
+ use libc::c_float;
+
+ #[inline]
+ pub unsafe fn acosf(n: c_float) -> c_float {
+ f64::acos(n as f64) as c_float
+ }
+
+ #[inline]
+ pub unsafe fn asinf(n: c_float) -> c_float {
+ f64::asin(n as f64) as c_float
+ }
+
+ #[inline]
+ pub unsafe fn atan2f(n: c_float, b: c_float) -> c_float {
+ f64::atan2(n as f64, b as f64) as c_float
+ }
+
+ #[inline]
+ pub unsafe fn atanf(n: c_float) -> c_float {
+ f64::atan(n as f64) as c_float
+ }
+
+ #[inline]
+ pub unsafe fn coshf(n: c_float) -> c_float {
+ f64::cosh(n as f64) as c_float
+ }
+
+ #[inline]
+ pub unsafe fn sinhf(n: c_float) -> c_float {
+ f64::sinh(n as f64) as c_float
+ }
+
+ #[inline]
+ pub unsafe fn tanf(n: c_float) -> c_float {
+ f64::tan(n as f64) as c_float
+ }
+
+ #[inline]
+ pub unsafe fn tanhf(n: c_float) -> c_float {
+ f64::tanh(n as f64) as c_float
+ }
+}
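
The 32-bit MSVC shims above all follow the same promote-compute-demote pattern. A minimal stand-alone sketch of that technique, using safe `f64` methods instead of the `libc` externs and an invented `acosf_shim` name, looks like this:

```rust
// Promote the f32 argument to f64, compute there, and narrow the result back.
// This mirrors the shim pattern above: it trades a little precision for
// availability when no native f32 routine exists.
#[inline]
fn acosf_shim(n: f32) -> f32 {
    (n as f64).acos() as f32
}

fn main() {
    let x = acosf_shim(0.5);
    // acos(0.5) = pi/3 ~= 1.0471976
    assert!((x - std::f32::consts::FRAC_PI_3).abs() < 1e-6);
}
```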
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
new file mode 100644
index 000000000..ccc90177a
--- /dev/null
+++ b/library/std/src/sys/windows/compat.rs
@@ -0,0 +1,273 @@
+//! A "compatibility layer" for supporting older versions of Windows
+//!
+//! The standard library uses some Windows API functions that are not present
+//! on older versions of Windows. (Note that the oldest version of Windows
+//! that Rust supports is Windows 7 (client) and Windows Server 2008 (server).)
+//! This module implements a form of delayed DLL import binding, using
+//! `GetModuleHandle` and `GetProcAddress` to look up DLL entry points at
+//! runtime.
+//!
+//! This implementation uses a static initializer to look up the DLL entry
+//! points. The CRT (C runtime) executes static initializers before `main`
+//! is called (for binaries) and before `DllMain` is called (for DLLs).
+//! This is the ideal time to look up DLL imports, because we are guaranteed
+//! that no other threads will attempt to call these entry points. Thus,
+//! we can look up the imports and store them in `static mut` fields
+//! without any synchronization.
+//!
+//! This has an additional advantage: Because the DLL import lookup happens
+//! at module initialization, the cost of these lookups is deterministic,
+//! and is removed from the code paths that actually call the DLL imports.
+//! That is, there is no unpredictable "cache miss" that occurs when calling
+//! a DLL import. For applications that depend on predictable latency, this
+//! is an advantage. It also eliminates the comparison-and-branch
+//! from the hot path.
+//!
+//! Currently, the standard library uses only a small number of dynamic
+//! DLL imports. If this number grows substantially, then the cost of
+//! performing all of the lookups at initialization time might become
+//! substantial.
+//!
+//! The mechanism of registering a static initializer with the CRT is
+//! documented in
+//! [CRT Initialization](https://docs.microsoft.com/en-us/cpp/c-runtime-library/crt-initialization?view=msvc-160).
+//! It works by contributing a global symbol to the `.CRT$XCU` section.
+//! The linker builds a table of all static initializer functions.
+//! The CRT startup code then iterates that table, calling each
+//! initializer function.
+//!
+//! # **WARNING!**
+//! The environment that a static initializer function runs in is highly
+//! constrained. There are **many** restrictions on what static initializers
+//! can safely do. Static initializer functions **MUST NOT** do any of the
+//! following (this list is not comprehensive):
+//! * touch any other static field that is used by a different static
+//!   initializer, because the order that static initializers run in
+//!   is not defined.
+//! * call `LoadLibrary` or any other function that acquires the DLL
+//!   loader lock.
+//! * call any Rust function or CRT function that touches any static
+//!   (global) state.
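
The delayed binding described in this module comment ultimately reduces to a `GetModuleHandleA` lookup followed by `GetProcAddress` and a cast to the right function type. The following is a rough, Windows-only sketch of that mechanism under stated assumptions: `GetTickCount64` is chosen purely as an example target, and the FFI declarations are written out by hand here rather than taken from this module's `crate::sys::c` bindings.

```rust
// Windows-only sketch of a runtime symbol lookup; not part of this module.
#[cfg(windows)]
fn lookup_get_tick_count64() -> Option<unsafe extern "system" fn() -> u64> {
    use std::ffi::c_void;

    // Minimal hand-written bindings for the two loader entry points used above.
    extern "system" {
        fn GetModuleHandleA(name: *const u8) -> *mut c_void;
        fn GetProcAddress(module: *mut c_void, name: *const u8) -> *mut c_void;
    }

    unsafe {
        let module = GetModuleHandleA(b"kernel32\0".as_ptr());
        if module.is_null() {
            return None;
        }
        let proc = GetProcAddress(module, b"GetTickCount64\0".as_ptr());
        if proc.is_null() {
            return None;
        }
        // Reinterpret the raw pointer as the function's actual signature.
        Some(std::mem::transmute(proc))
    }
}

#[cfg(windows)]
fn main() {
    if let Some(get_tick_count) = lookup_get_tick_count64() {
        println!("uptime ms: {}", unsafe { get_tick_count() });
    }
}

#[cfg(not(windows))]
fn main() {}
```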
+
+use crate::ffi::{c_void, CStr};
+use crate::ptr::NonNull;
+use crate::sys::c;
+
+/// Helper macro for creating CStrs from literals and symbol names.
+macro_rules! ansi_str {
+ (sym $ident:ident) => {{
+ #[allow(unused_unsafe)]
+ crate::sys::compat::const_cstr_from_bytes(concat!(stringify!($ident), "\0").as_bytes())
+ }};
+ ($lit:literal) => {{ crate::sys::compat::const_cstr_from_bytes(concat!($lit, "\0").as_bytes()) }};
+}
+
+/// Creates a C string wrapper from a byte slice, in a constant context.
+///
+/// This is a utility function used by the [`ansi_str`] macro.
+///
+/// # Panics
+///
+/// Panics if the slice is not null-terminated or contains interior nulls (only the last byte may be null).
+pub(crate) const fn const_cstr_from_bytes(bytes: &'static [u8]) -> &'static CStr {
+ if !matches!(bytes.last(), Some(&0)) {
+ panic!("A CStr must be null terminated");
+ }
+ let mut i = 0;
+ // At this point `len()` is at least 1.
+ while i < bytes.len() - 1 {
+ if bytes[i] == 0 {
+ panic!("A CStr must not have interior nulls")
+ }
+ i += 1;
+ }
+ // SAFETY: The safety is ensured by the above checks.
+ unsafe { crate::ffi::CStr::from_bytes_with_nul_unchecked(bytes) }
+}
+
+#[used]
+#[link_section = ".CRT$XCU"]
+static INIT_TABLE_ENTRY: unsafe extern "C" fn() = init;
+
+/// This is where the magic preloading of symbols happens.
+///
+/// Note that any functions included here will be unconditionally included in
+/// the final binary, regardless of whether or not they're actually used.
+///
+/// Therefore, this is limited to `compat_fn_optional` functions, which must be
+/// preloaded, and to any functions that may be more time-sensitive, even for the first call.
+unsafe extern "C" fn init() {
+ // There is no locking here. This code is executed before main() is entered, and
+ // is guaranteed to be single-threaded.
+ //
+ // DO NOT do anything interesting or complicated in this function! DO NOT call
+ // any Rust functions or CRT functions if those functions touch any global state,
+ // because this function runs during global initialization. For example, DO NOT
+ // do any dynamic allocation, don't call LoadLibrary, etc.
+
+ if let Some(synch) = Module::new(c::SYNCH_API) {
+ // These are optional and so we must manually attempt to load them
+ // before they can be used.
+ c::WaitOnAddress::preload(synch);
+ c::WakeByAddressSingle::preload(synch);
+ }
+
+ if let Some(kernel32) = Module::new(c::KERNEL32) {
+ // Preloading this means getting a precise time will be as fast as possible.
+ c::GetSystemTimePreciseAsFileTime::preload(kernel32);
+ }
+}
+
+/// Represents a loaded module.
+///
+/// Note that the modules std depends on must not be unloaded.
+/// Therefore a `Module` is always valid for the lifetime of std.
+#[derive(Copy, Clone)]
+pub(in crate::sys) struct Module(NonNull<c_void>);
+impl Module {
+ /// Try to get a handle to a loaded module.
+ ///
+ /// # SAFETY
+ ///
+ /// This should only be used for modules that exist for the lifetime of std
+ /// (e.g. kernel32 and ntdll).
+ pub unsafe fn new(name: &CStr) -> Option<Self> {
+ // SAFETY: A CStr is always null terminated.
+ let module = c::GetModuleHandleA(name.as_ptr());
+ NonNull::new(module).map(Self)
+ }
+
+ /// Try to get the address of a function.
+ pub fn proc_address(self, name: &CStr) -> Option<NonNull<c_void>> {
+ // SAFETY:
+ // `self.0` will always be a valid module.
+ // A CStr is always null terminated.
+ let proc = unsafe { c::GetProcAddress(self.0.as_ptr(), name.as_ptr()) };
+ NonNull::new(proc)
+ }
+}
+
+/// Load a function or use a fallback implementation if that fails.
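+///
+/// An illustrative (not real) invocation showing the expected input shape;
+/// `EXAMPLE_DLL` and `ExampleFunctionW` are made-up names:
+///
+/// ```ignore (illustrative-only)
+/// compat_fn_with_fallback! {
+///     pub static EXAMPLE_DLL: &CStr = ansi_str!("example");
+///     // The block is the fallback body, used when the symbol is missing.
+///     pub fn ExampleFunctionW(value: u32) -> u32 {
+///         value
+///     }
+/// }
+/// // Expands to a module plus `pub use ExampleFunctionW::call as ExampleFunctionW;`,
+/// // so callers invoke `ExampleFunctionW(x)` directly.
+/// ```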
+macro_rules! compat_fn_with_fallback {
+ (pub static $module:ident: &CStr = $name:expr; $(
+ $(#[$meta:meta])*
+ pub fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty $fallback_body:block
+ )*) => (
+ pub static $module: &CStr = $name;
+ $(
+ $(#[$meta])*
+ pub mod $symbol {
+ #[allow(unused_imports)]
+ use super::*;
+ use crate::mem;
+ use crate::ffi::CStr;
+ use crate::sync::atomic::{AtomicPtr, Ordering};
+ use crate::sys::compat::Module;
+
+ type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
+
+ /// `PTR` contains a function pointer to one of three functions.
+ /// It starts with the `load` function.
+ /// When that is called it attempts to load the requested symbol.
+ /// If it succeeds, `PTR` is set to the address of that symbol.
+ /// If it fails, then `PTR` is set to `fallback`.
+ static PTR: AtomicPtr<c_void> = AtomicPtr::new(load as *mut _);
+
+ unsafe extern "system" fn load($($argname: $argtype),*) -> $rettype {
+ let func = load_from_module(Module::new($module));
+ func($($argname),*)
+ }
+
+ fn load_from_module(module: Option<Module>) -> F {
+ unsafe {
+ static SYMBOL_NAME: &CStr = ansi_str!(sym $symbol);
+ if let Some(f) = module.and_then(|m| m.proc_address(SYMBOL_NAME)) {
+ PTR.store(f.as_ptr(), Ordering::Relaxed);
+ mem::transmute(f)
+ } else {
+ PTR.store(fallback as *mut _, Ordering::Relaxed);
+ fallback
+ }
+ }
+ }
+
+ #[allow(unused_variables)]
+ unsafe extern "system" fn fallback($($argname: $argtype),*) -> $rettype {
+ $fallback_body
+ }
+
+ #[allow(unused)]
+ pub(in crate::sys) fn preload(module: Module) {
+ load_from_module(Some(module));
+ }
+
+ #[inline(always)]
+ pub unsafe fn call($($argname: $argtype),*) -> $rettype {
+ let func: F = mem::transmute(PTR.load(Ordering::Relaxed));
+ func($($argname),*)
+ }
+ }
+ $(#[$meta])*
+ pub use $symbol::call as $symbol;
+ )*)
+}
+
+/// A function that either exists or doesn't.
+///
+/// NOTE: Optional functions must be preloaded in the `init` function above, or they will always be None.
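+///
+/// An illustrative (not real) invocation; `EXAMPLE_DLL` and
+/// `ExampleOptionalFnW` are made-up names:
+///
+/// ```ignore (illustrative-only)
+/// compat_fn_optional! {
+///     pub static EXAMPLE_DLL: &CStr = ansi_str!("example");
+///     pub fn ExampleOptionalFnW(value: u32) -> u32;
+/// }
+///
+/// // Callers must check availability before use:
+/// if let Some(f) = ExampleOptionalFnW::option() {
+///     let _ = unsafe { f(1) };
+/// }
+/// ```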
+macro_rules! compat_fn_optional {
+ (pub static $module:ident: &CStr = $name:expr; $(
+ $(#[$meta:meta])*
+ pub fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty;
+ )*) => (
+ pub static $module: &CStr = $name;
+ $(
+ $(#[$meta])*
+ pub mod $symbol {
+ #[allow(unused_imports)]
+ use super::*;
+ use crate::mem;
+ use crate::sync::atomic::{AtomicPtr, Ordering};
+ use crate::sys::compat::Module;
+ use crate::ptr::{self, NonNull};
+
+ type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
+
+ /// `PTR` will either be `null()` or set to the loaded function.
+ static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
+
+ /// Only allow access to the function if it has loaded successfully.
+ #[inline(always)]
+ #[cfg(not(miri))]
+ pub fn option() -> Option<F> {
+ unsafe {
+ NonNull::new(PTR.load(Ordering::Relaxed)).map(|f| mem::transmute(f))
+ }
+ }
+
+ // Miri does not understand the way we do preloading
+ // therefore load the function here instead.
+ #[cfg(miri)]
+ pub fn option() -> Option<F> {
+ let mut func = NonNull::new(PTR.load(Ordering::Relaxed));
+ if func.is_none() {
+ unsafe { Module::new($module).map(preload) };
+ func = NonNull::new(PTR.load(Ordering::Relaxed));
+ }
+ unsafe {
+ func.map(|f| mem::transmute(f))
+ }
+ }
+
+ #[allow(unused)]
+ pub(in crate::sys) fn preload(module: Module) {
+ unsafe {
+ static SYMBOL_NAME: &CStr = ansi_str!(sym $symbol);
+ if let Some(f) = module.proc_address(SYMBOL_NAME) {
+ PTR.store(f.as_ptr(), Ordering::Relaxed);
+ }
+ }
+ }
+ }
+ )*)
+}
diff --git a/library/std/src/sys/windows/env.rs b/library/std/src/sys/windows/env.rs
new file mode 100644
index 000000000..f0a99d620
--- /dev/null
+++ b/library/std/src/sys/windows/env.rs
@@ -0,0 +1,9 @@
+pub mod os {
+ pub const FAMILY: &str = "windows";
+ pub const OS: &str = "windows";
+ pub const DLL_PREFIX: &str = "";
+ pub const DLL_SUFFIX: &str = ".dll";
+ pub const DLL_EXTENSION: &str = "dll";
+ pub const EXE_SUFFIX: &str = ".exe";
+ pub const EXE_EXTENSION: &str = "exe";
+}
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
new file mode 100644
index 000000000..aed082b3e
--- /dev/null
+++ b/library/std/src/sys/windows/fs.rs
@@ -0,0 +1,1399 @@
+use crate::os::windows::prelude::*;
+
+use crate::ffi::OsString;
+use crate::fmt;
+use crate::io::{self, Error, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::mem;
+use crate::os::windows::io::{AsHandle, BorrowedHandle};
+use crate::path::{Path, PathBuf};
+use crate::ptr;
+use crate::slice;
+use crate::sync::Arc;
+use crate::sys::handle::Handle;
+use crate::sys::time::SystemTime;
+use crate::sys::{c, cvt};
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::thread;
+
+use super::path::maybe_verbatim;
+use super::to_u16s;
+
+pub struct File {
+ handle: Handle,
+}
+
+#[derive(Clone)]
+pub struct FileAttr {
+ attributes: c::DWORD,
+ creation_time: c::FILETIME,
+ last_access_time: c::FILETIME,
+ last_write_time: c::FILETIME,
+ file_size: u64,
+ reparse_tag: c::DWORD,
+ volume_serial_number: Option<u32>,
+ number_of_links: Option<u32>,
+ file_index: Option<u64>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct FileType {
+ attributes: c::DWORD,
+ reparse_tag: c::DWORD,
+}
+
+pub struct ReadDir {
+ handle: FindNextFileHandle,
+ root: Arc<PathBuf>,
+ first: Option<c::WIN32_FIND_DATAW>,
+}
+
+struct FindNextFileHandle(c::HANDLE);
+
+unsafe impl Send for FindNextFileHandle {}
+unsafe impl Sync for FindNextFileHandle {}
+
+pub struct DirEntry {
+ root: Arc<PathBuf>,
+ data: c::WIN32_FIND_DATAW,
+}
+
+unsafe impl Send for OpenOptions {}
+unsafe impl Sync for OpenOptions {}
+
+#[derive(Clone, Debug)]
+pub struct OpenOptions {
+ // generic
+ read: bool,
+ write: bool,
+ append: bool,
+ truncate: bool,
+ create: bool,
+ create_new: bool,
+ // system-specific
+ custom_flags: u32,
+ access_mode: Option<c::DWORD>,
+ attributes: c::DWORD,
+ share_mode: c::DWORD,
+ security_qos_flags: c::DWORD,
+ security_attributes: c::LPSECURITY_ATTRIBUTES,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct FilePermissions {
+ attrs: c::DWORD,
+}
+
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {
+ accessed: Option<c::FILETIME>,
+ modified: Option<c::FILETIME>,
+}
+
+#[derive(Debug)]
+pub struct DirBuilder;
+
+impl fmt::Debug for ReadDir {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This will only be called from std::fs::ReadDir, which will add a "ReadDir()" frame.
+ // Thus the result will be e.g. 'ReadDir("C:\")'.
+ fmt::Debug::fmt(&*self.root, f)
+ }
+}
+
+impl Iterator for ReadDir {
+ type Item = io::Result<DirEntry>;
+ fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ if let Some(first) = self.first.take() {
+ if let Some(e) = DirEntry::new(&self.root, &first) {
+ return Some(Ok(e));
+ }
+ }
+ unsafe {
+ let mut wfd = mem::zeroed();
+ loop {
+ if c::FindNextFileW(self.handle.0, &mut wfd) == 0 {
+ if c::GetLastError() == c::ERROR_NO_MORE_FILES {
+ return None;
+ } else {
+ return Some(Err(Error::last_os_error()));
+ }
+ }
+ if let Some(e) = DirEntry::new(&self.root, &wfd) {
+ return Some(Ok(e));
+ }
+ }
+ }
+ }
+}
+
+impl Drop for FindNextFileHandle {
+ fn drop(&mut self) {
+ let r = unsafe { c::FindClose(self.0) };
+ debug_assert!(r != 0);
+ }
+}
+
+impl DirEntry {
+ fn new(root: &Arc<PathBuf>, wfd: &c::WIN32_FIND_DATAW) -> Option<DirEntry> {
+ match &wfd.cFileName[0..3] {
+ // check for '.' and '..'
+ &[46, 0, ..] | &[46, 46, 0, ..] => return None,
+ _ => {}
+ }
+
+ Some(DirEntry { root: root.clone(), data: *wfd })
+ }
+
+ pub fn path(&self) -> PathBuf {
+ self.root.join(&self.file_name())
+ }
+
+ pub fn file_name(&self) -> OsString {
+ let filename = super::truncate_utf16_at_nul(&self.data.cFileName);
+ OsString::from_wide(filename)
+ }
+
+ pub fn file_type(&self) -> io::Result<FileType> {
+ Ok(FileType::new(
+ self.data.dwFileAttributes,
+ /* reparse_tag = */ self.data.dwReserved0,
+ ))
+ }
+
+ pub fn metadata(&self) -> io::Result<FileAttr> {
+ Ok(self.data.into())
+ }
+}
+
+impl OpenOptions {
+ pub fn new() -> OpenOptions {
+ OpenOptions {
+ // generic
+ read: false,
+ write: false,
+ append: false,
+ truncate: false,
+ create: false,
+ create_new: false,
+ // system-specific
+ custom_flags: 0,
+ access_mode: None,
+ share_mode: c::FILE_SHARE_READ | c::FILE_SHARE_WRITE | c::FILE_SHARE_DELETE,
+ attributes: 0,
+ security_qos_flags: 0,
+ security_attributes: ptr::null_mut(),
+ }
+ }
+
+ pub fn read(&mut self, read: bool) {
+ self.read = read;
+ }
+ pub fn write(&mut self, write: bool) {
+ self.write = write;
+ }
+ pub fn append(&mut self, append: bool) {
+ self.append = append;
+ }
+ pub fn truncate(&mut self, truncate: bool) {
+ self.truncate = truncate;
+ }
+ pub fn create(&mut self, create: bool) {
+ self.create = create;
+ }
+ pub fn create_new(&mut self, create_new: bool) {
+ self.create_new = create_new;
+ }
+
+ pub fn custom_flags(&mut self, flags: u32) {
+ self.custom_flags = flags;
+ }
+ pub fn access_mode(&mut self, access_mode: u32) {
+ self.access_mode = Some(access_mode);
+ }
+ pub fn share_mode(&mut self, share_mode: u32) {
+ self.share_mode = share_mode;
+ }
+ pub fn attributes(&mut self, attrs: u32) {
+ self.attributes = attrs;
+ }
+ pub fn security_qos_flags(&mut self, flags: u32) {
+ // We have to set `SECURITY_SQOS_PRESENT` here, because one of the valid flags we can
+ // receive is `SECURITY_ANONYMOUS = 0x0`, which we can't check for later on.
+ self.security_qos_flags = flags | c::SECURITY_SQOS_PRESENT;
+ }
+ pub fn security_attributes(&mut self, attrs: c::LPSECURITY_ATTRIBUTES) {
+ self.security_attributes = attrs;
+ }
+
+ fn get_access_mode(&self) -> io::Result<c::DWORD> {
+ const ERROR_INVALID_PARAMETER: i32 = 87;
+
+ match (self.read, self.write, self.append, self.access_mode) {
+ (.., Some(mode)) => Ok(mode),
+ (true, false, false, None) => Ok(c::GENERIC_READ),
+ (false, true, false, None) => Ok(c::GENERIC_WRITE),
+ (true, true, false, None) => Ok(c::GENERIC_READ | c::GENERIC_WRITE),
+ (false, _, true, None) => Ok(c::FILE_GENERIC_WRITE & !c::FILE_WRITE_DATA),
+ (true, _, true, None) => {
+ Ok(c::GENERIC_READ | (c::FILE_GENERIC_WRITE & !c::FILE_WRITE_DATA))
+ }
+ (false, false, false, None) => Err(Error::from_raw_os_error(ERROR_INVALID_PARAMETER)),
+ }
+ }
+
+ fn get_creation_mode(&self) -> io::Result<c::DWORD> {
+ const ERROR_INVALID_PARAMETER: i32 = 87;
+
+ match (self.write, self.append) {
+ (true, false) => {}
+ (false, false) => {
+ if self.truncate || self.create || self.create_new {
+ return Err(Error::from_raw_os_error(ERROR_INVALID_PARAMETER));
+ }
+ }
+ (_, true) => {
+ if self.truncate && !self.create_new {
+ return Err(Error::from_raw_os_error(ERROR_INVALID_PARAMETER));
+ }
+ }
+ }
+
+ Ok(match (self.create, self.truncate, self.create_new) {
+ (false, false, false) => c::OPEN_EXISTING,
+ (true, false, false) => c::OPEN_ALWAYS,
+ (false, true, false) => c::TRUNCATE_EXISTING,
+ (true, true, false) => c::CREATE_ALWAYS,
+ (_, _, true) => c::CREATE_NEW,
+ })
+ }
+
+ fn get_flags_and_attributes(&self) -> c::DWORD {
+ self.custom_flags
+ | self.attributes
+ | self.security_qos_flags
+ | if self.create_new { c::FILE_FLAG_OPEN_REPARSE_POINT } else { 0 }
+ }
+}
+
+impl File {
+ pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
+ let path = maybe_verbatim(path)?;
+ let handle = unsafe {
+ c::CreateFileW(
+ path.as_ptr(),
+ opts.get_access_mode()?,
+ opts.share_mode,
+ opts.security_attributes,
+ opts.get_creation_mode()?,
+ opts.get_flags_and_attributes(),
+ ptr::null_mut(),
+ )
+ };
+ if let Ok(handle) = handle.try_into() {
+ Ok(File { handle: Handle::from_inner(handle) })
+ } else {
+ Err(Error::last_os_error())
+ }
+ }
+
+ pub fn fsync(&self) -> io::Result<()> {
+ cvt(unsafe { c::FlushFileBuffers(self.handle.as_raw_handle()) })?;
+ Ok(())
+ }
+
+ pub fn datasync(&self) -> io::Result<()> {
+ self.fsync()
+ }
+
+ pub fn truncate(&self, size: u64) -> io::Result<()> {
+ let mut info = c::FILE_END_OF_FILE_INFO { EndOfFile: size as c::LARGE_INTEGER };
+ let size = mem::size_of_val(&info);
+ cvt(unsafe {
+ c::SetFileInformationByHandle(
+ self.handle.as_raw_handle(),
+ c::FileEndOfFileInfo,
+ &mut info as *mut _ as *mut _,
+ size as c::DWORD,
+ )
+ })?;
+ Ok(())
+ }
+
+ #[cfg(not(target_vendor = "uwp"))]
+ pub fn file_attr(&self) -> io::Result<FileAttr> {
+ unsafe {
+ let mut info: c::BY_HANDLE_FILE_INFORMATION = mem::zeroed();
+ cvt(c::GetFileInformationByHandle(self.handle.as_raw_handle(), &mut info))?;
+ let mut reparse_tag = 0;
+ if info.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
+ let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ if let Ok((_, buf)) = self.reparse_point(&mut b) {
+ reparse_tag = buf.ReparseTag;
+ }
+ }
+ Ok(FileAttr {
+ attributes: info.dwFileAttributes,
+ creation_time: info.ftCreationTime,
+ last_access_time: info.ftLastAccessTime,
+ last_write_time: info.ftLastWriteTime,
+ file_size: (info.nFileSizeLow as u64) | ((info.nFileSizeHigh as u64) << 32),
+ reparse_tag,
+ volume_serial_number: Some(info.dwVolumeSerialNumber),
+ number_of_links: Some(info.nNumberOfLinks),
+ file_index: Some(
+ (info.nFileIndexLow as u64) | ((info.nFileIndexHigh as u64) << 32),
+ ),
+ })
+ }
+ }
+
+ #[cfg(target_vendor = "uwp")]
+ pub fn file_attr(&self) -> io::Result<FileAttr> {
+ unsafe {
+ let mut info: c::FILE_BASIC_INFO = mem::zeroed();
+ let size = mem::size_of_val(&info);
+ cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ c::FileBasicInfo,
+ &mut info as *mut _ as *mut libc::c_void,
+ size as c::DWORD,
+ ))?;
+ let mut attr = FileAttr {
+ attributes: info.FileAttributes,
+ creation_time: c::FILETIME {
+ dwLowDateTime: info.CreationTime as c::DWORD,
+ dwHighDateTime: (info.CreationTime >> 32) as c::DWORD,
+ },
+ last_access_time: c::FILETIME {
+ dwLowDateTime: info.LastAccessTime as c::DWORD,
+ dwHighDateTime: (info.LastAccessTime >> 32) as c::DWORD,
+ },
+ last_write_time: c::FILETIME {
+ dwLowDateTime: info.LastWriteTime as c::DWORD,
+ dwHighDateTime: (info.LastWriteTime >> 32) as c::DWORD,
+ },
+ file_size: 0,
+ reparse_tag: 0,
+ volume_serial_number: None,
+ number_of_links: None,
+ file_index: None,
+ };
+ let mut info: c::FILE_STANDARD_INFO = mem::zeroed();
+ let size = mem::size_of_val(&info);
+ cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ c::FileStandardInfo,
+ &mut info as *mut _ as *mut libc::c_void,
+ size as c::DWORD,
+ ))?;
+ attr.file_size = info.AllocationSize as u64;
+ attr.number_of_links = Some(info.NumberOfLinks);
+ if attr.file_type().is_reparse_point() {
+ let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ if let Ok((_, buf)) = self.reparse_point(&mut b) {
+ attr.reparse_tag = buf.ReparseTag;
+ }
+ }
+ Ok(attr)
+ }
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.handle.read(buf)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.handle.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.handle.is_read_vectored()
+ }
+
+ pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
+ self.handle.read_at(buf, offset)
+ }
+
+ pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ self.handle.read_buf(buf)
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.handle.write(buf)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.handle.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.handle.is_write_vectored()
+ }
+
+ pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ self.handle.write_at(buf, offset)
+ }
+
+ pub fn flush(&self) -> io::Result<()> {
+ Ok(())
+ }
+
+ pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
+ let (whence, pos) = match pos {
+ // Casting to `i64` is fine, `SetFilePointerEx` reinterprets this
+ // integer as `u64`.
+ SeekFrom::Start(n) => (c::FILE_BEGIN, n as i64),
+ SeekFrom::End(n) => (c::FILE_END, n),
+ SeekFrom::Current(n) => (c::FILE_CURRENT, n),
+ };
+ let pos = pos as c::LARGE_INTEGER;
+ let mut newpos = 0;
+ cvt(unsafe { c::SetFilePointerEx(self.handle.as_raw_handle(), pos, &mut newpos, whence) })?;
+ Ok(newpos as u64)
+ }
+
+ pub fn duplicate(&self) -> io::Result<File> {
+ Ok(Self { handle: self.handle.try_clone()? })
+ }
+
+ fn reparse_point<'a>(
+ &self,
+ space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE],
+ ) -> io::Result<(c::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
+ unsafe {
+ let mut bytes = 0;
+ cvt({
+ c::DeviceIoControl(
+ self.handle.as_raw_handle(),
+ c::FSCTL_GET_REPARSE_POINT,
+ ptr::null_mut(),
+ 0,
+ space.as_mut_ptr() as *mut _,
+ space.len() as c::DWORD,
+ &mut bytes,
+ ptr::null_mut(),
+ )
+ })?;
+ Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
+ }
+ }
+
+ fn readlink(&self) -> io::Result<PathBuf> {
+ let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ let (_bytes, buf) = self.reparse_point(&mut space)?;
+ unsafe {
+ let (path_buffer, subst_off, subst_len, relative) = match buf.ReparseTag {
+ c::IO_REPARSE_TAG_SYMLINK => {
+ let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
+ &buf.rest as *const _ as *const _;
+ (
+ &(*info).PathBuffer as *const _ as *const u16,
+ (*info).SubstituteNameOffset / 2,
+ (*info).SubstituteNameLength / 2,
+ (*info).Flags & c::SYMLINK_FLAG_RELATIVE != 0,
+ )
+ }
+ c::IO_REPARSE_TAG_MOUNT_POINT => {
+ let info: *const c::MOUNT_POINT_REPARSE_BUFFER =
+ &buf.rest as *const _ as *const _;
+ (
+ &(*info).PathBuffer as *const _ as *const u16,
+ (*info).SubstituteNameOffset / 2,
+ (*info).SubstituteNameLength / 2,
+ false,
+ )
+ }
+ _ => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Uncategorized,
+ "Unsupported reparse point type",
+ ));
+ }
+ };
+ let subst_ptr = path_buffer.offset(subst_off as isize);
+ let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
+ // Absolute paths start with an NT internal namespace prefix `\??\`
+ // We should not let it leak through.
+ if !relative && subst.starts_with(&[92u16, 63u16, 63u16, 92u16]) {
+ subst = &subst[4..];
+ }
+ Ok(PathBuf::from(OsString::from_wide(subst)))
+ }
+ }
+
+ pub fn set_permissions(&self, perm: FilePermissions) -> io::Result<()> {
+ let mut info = c::FILE_BASIC_INFO {
+ CreationTime: 0,
+ LastAccessTime: 0,
+ LastWriteTime: 0,
+ ChangeTime: 0,
+ FileAttributes: perm.attrs,
+ };
+ let size = mem::size_of_val(&info);
+ cvt(unsafe {
+ c::SetFileInformationByHandle(
+ self.handle.as_raw_handle(),
+ c::FileBasicInfo,
+ &mut info as *mut _ as *mut _,
+ size as c::DWORD,
+ )
+ })?;
+ Ok(())
+ }
+
+ pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ let is_zero = |t: c::FILETIME| t.dwLowDateTime == 0 && t.dwHighDateTime == 0;
+ if times.accessed.map_or(false, is_zero) || times.modified.map_or(false, is_zero) {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "Cannot set file timestamp to 0",
+ ));
+ }
+ cvt(unsafe {
+ c::SetFileTime(self.as_handle(), None, times.accessed.as_ref(), times.modified.as_ref())
+ })?;
+ Ok(())
+ }
+
+ /// Get only basic file information such as attributes and file times.
+ fn basic_info(&self) -> io::Result<c::FILE_BASIC_INFO> {
+ unsafe {
+ let mut info: c::FILE_BASIC_INFO = mem::zeroed();
+ let size = mem::size_of_val(&info);
+ cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ c::FileBasicInfo,
+ &mut info as *mut _ as *mut libc::c_void,
+ size as c::DWORD,
+ ))?;
+ Ok(info)
+ }
+ }
+ /// Delete using POSIX semantics.
+ ///
+ /// Files will be deleted as soon as the handle is closed. This is supported
+ /// for Windows 10 1607 (aka RS1) and later. However, some filesystem
+ /// drivers will not support it even then, e.g. FAT32.
+ ///
+ /// If the operation is not supported for this filesystem or OS version
+ /// then errors will be `ERROR_NOT_SUPPORTED` or `ERROR_INVALID_PARAMETER`.
+ fn posix_delete(&self) -> io::Result<()> {
+ let mut info = c::FILE_DISPOSITION_INFO_EX {
+ Flags: c::FILE_DISPOSITION_DELETE
+ | c::FILE_DISPOSITION_POSIX_SEMANTICS
+ | c::FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE,
+ };
+ let size = mem::size_of_val(&info);
+ cvt(unsafe {
+ c::SetFileInformationByHandle(
+ self.handle.as_raw_handle(),
+ c::FileDispositionInfoEx,
+ &mut info as *mut _ as *mut _,
+ size as c::DWORD,
+ )
+ })?;
+ Ok(())
+ }
+
+ /// Delete a file using win32 semantics. The file won't actually be deleted
+ /// until all file handles are closed. However, marking a file for deletion
+ /// will prevent anyone from opening a new handle to the file.
+ fn win32_delete(&self) -> io::Result<()> {
+ let mut info = c::FILE_DISPOSITION_INFO { DeleteFile: c::TRUE as _ };
+ let size = mem::size_of_val(&info);
+ cvt(unsafe {
+ c::SetFileInformationByHandle(
+ self.handle.as_raw_handle(),
+ c::FileDispositionInfo,
+ &mut info as *mut _ as *mut _,
+ size as c::DWORD,
+ )
+ })?;
+ Ok(())
+ }
+
+ /// Fill the given buffer with as many directory entries as will fit.
+ /// This will remember its position and continue from the last call unless
+ /// `restart` is set to `true`.
+ ///
+ /// The returned bool indicates if there are more entries or not.
+ /// It is an error if `self` is not a directory.
+ ///
+ /// # Symlinks and other reparse points
+ ///
+ /// On Windows a file is either a directory or a non-directory.
+ /// A symlink directory is simply an empty directory with some "reparse" metadata attached.
+ /// So if you open a link (not its target) and iterate the directory,
+ /// you will always iterate an empty directory regardless of the target.
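+ ///
+ /// A sketch of the intended call pattern (illustrative only):
+ ///
+ /// ```ignore (illustrative-only)
+ /// let mut buffer = DirBuff::new();
+ /// let mut restart = true;
+ /// loop {
+ ///     let more_data = dir.fill_dir_buff(&mut buffer, restart)?;
+ ///     restart = false;
+ ///     for (name, is_directory) in buffer.iter() {
+ ///         // `name` is a UTF-16 file name; `.` and `..` are already skipped.
+ ///     }
+ ///     if !more_data {
+ ///         break;
+ ///     }
+ /// }
+ /// ```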
+ fn fill_dir_buff(&self, buffer: &mut DirBuff, restart: bool) -> io::Result<bool> {
+ let class =
+ if restart { c::FileIdBothDirectoryRestartInfo } else { c::FileIdBothDirectoryInfo };
+
+ unsafe {
+ let result = cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ class,
+ buffer.as_mut_ptr().cast(),
+ buffer.capacity() as _,
+ ));
+ match result {
+ Ok(_) => Ok(true),
+ Err(e) if e.raw_os_error() == Some(c::ERROR_NO_MORE_FILES as _) => Ok(false),
+ Err(e) => Err(e),
+ }
+ }
+ }
+}
+
+/// A buffer for holding directory entries.
+struct DirBuff {
+ buffer: Vec<u8>,
+}
+impl DirBuff {
+ fn new() -> Self {
+ const BUFFER_SIZE: usize = 1024;
+ Self { buffer: vec![0_u8; BUFFER_SIZE] }
+ }
+ fn capacity(&self) -> usize {
+ self.buffer.len()
+ }
+ fn as_mut_ptr(&mut self) -> *mut u8 {
+ self.buffer.as_mut_ptr().cast()
+ }
+ /// Returns a `DirBuffIter`.
+ fn iter(&self) -> DirBuffIter<'_> {
+ DirBuffIter::new(self)
+ }
+}
+impl AsRef<[u8]> for DirBuff {
+ fn as_ref(&self) -> &[u8] {
+ &self.buffer
+ }
+}
+
+/// An iterator over entries stored in a `DirBuff`.
+///
+/// Currently only returns file names (UTF-16 encoded).
+struct DirBuffIter<'a> {
+ buffer: Option<&'a [u8]>,
+ cursor: usize,
+}
+impl<'a> DirBuffIter<'a> {
+ fn new(buffer: &'a DirBuff) -> Self {
+ Self { buffer: Some(buffer.as_ref()), cursor: 0 }
+ }
+}
+impl<'a> Iterator for DirBuffIter<'a> {
+ type Item = (&'a [u16], bool);
+ fn next(&mut self) -> Option<Self::Item> {
+ use crate::mem::size_of;
+ let buffer = &self.buffer?[self.cursor..];
+
+ // Get the name and next entry from the buffer.
+ // SAFETY: The buffer contains a `FILE_ID_BOTH_DIR_INFO` struct but the
+ // last field (the file name) is unsized. So an offset has to be
+ // used to get the file name slice.
+ let (name, is_directory, next_entry) = unsafe {
+ let info = buffer.as_ptr().cast::<c::FILE_ID_BOTH_DIR_INFO>();
+ let next_entry = (*info).NextEntryOffset as usize;
+ let name = crate::slice::from_raw_parts(
+ (*info).FileName.as_ptr().cast::<u16>(),
+ (*info).FileNameLength as usize / size_of::<u16>(),
+ );
+ let is_directory = ((*info).FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) != 0;
+
+ (name, is_directory, next_entry)
+ };
+
+ if next_entry == 0 {
+ self.buffer = None
+ } else {
+ self.cursor += next_entry
+ }
+
+ // Skip `.` and `..` pseudo entries.
+ const DOT: u16 = b'.' as u16;
+ match name {
+ [DOT] | [DOT, DOT] => self.next(),
+ _ => Some((name, is_directory)),
+ }
+ }
+}
+
+/// Open a link relative to the parent directory, ensure no symlinks are followed.
+fn open_link_no_reparse(parent: &File, name: &[u16], access: u32) -> io::Result<File> {
+ // This is implemented using the lower level `NtCreateFile` function as
+ // unfortunately opening a file relative to a parent is not supported by
+ // win32 functions. It is however a fundamental feature of the NT kernel.
+ //
+ // See https://docs.microsoft.com/en-us/windows/win32/api/winternl/nf-winternl-ntcreatefile
+ unsafe {
+ let mut handle = ptr::null_mut();
+ let mut io_status = c::IO_STATUS_BLOCK::default();
+ let name_str = c::UNICODE_STRING::from_ref(name);
+ use crate::sync::atomic::{AtomicU32, Ordering};
+ // The `OBJ_DONT_REPARSE` attribute ensures that we haven't been
+ // tricked into following a symlink. However, it may not be available in
+ // earlier versions of Windows.
+ static ATTRIBUTES: AtomicU32 = AtomicU32::new(c::OBJ_DONT_REPARSE);
+ let object = c::OBJECT_ATTRIBUTES {
+ ObjectName: &name_str,
+ RootDirectory: parent.as_raw_handle(),
+ Attributes: ATTRIBUTES.load(Ordering::Relaxed),
+ ..c::OBJECT_ATTRIBUTES::default()
+ };
+ let status = c::NtCreateFile(
+ &mut handle,
+ access,
+ &object,
+ &mut io_status,
+ crate::ptr::null_mut(),
+ 0,
+ c::FILE_SHARE_DELETE | c::FILE_SHARE_READ | c::FILE_SHARE_WRITE,
+ c::FILE_OPEN,
+ // If `name` is a symlink then open the link rather than the target.
+ c::FILE_OPEN_REPARSE_POINT,
+ crate::ptr::null_mut(),
+ 0,
+ );
+ // Convert an NTSTATUS to the more familiar Win32 error codes (aka "DosError")
+ if c::nt_success(status) {
+ Ok(File::from_raw_handle(handle))
+ } else if status == c::STATUS_DELETE_PENDING {
+ // We make a special exception for `STATUS_DELETE_PENDING` because
+ // otherwise this will be mapped to `ERROR_ACCESS_DENIED` which is
+ // very unhelpful.
+ Err(io::Error::from_raw_os_error(c::ERROR_DELETE_PENDING as _))
+ } else if status == c::STATUS_INVALID_PARAMETER
+ && ATTRIBUTES.load(Ordering::Relaxed) == c::OBJ_DONT_REPARSE
+ {
+ // Try without `OBJ_DONT_REPARSE`. See above.
+ ATTRIBUTES.store(0, Ordering::Relaxed);
+ open_link_no_reparse(parent, name, access)
+ } else {
+ Err(io::Error::from_raw_os_error(c::RtlNtStatusToDosError(status) as _))
+ }
+ }
+}
+
+impl AsInner<Handle> for File {
+ fn as_inner(&self) -> &Handle {
+ &self.handle
+ }
+}
+
+impl IntoInner<Handle> for File {
+ fn into_inner(self) -> Handle {
+ self.handle
+ }
+}
+
+impl FromInner<Handle> for File {
+ fn from_inner(handle: Handle) -> File {
+ File { handle }
+ }
+}
+
+impl AsHandle for File {
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ self.as_inner().as_handle()
+ }
+}
+
+impl AsRawHandle for File {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.as_inner().as_raw_handle()
+ }
+}
+
+impl IntoRawHandle for File {
+ fn into_raw_handle(self) -> RawHandle {
+ self.into_inner().into_raw_handle()
+ }
+}
+
+impl FromRawHandle for File {
+ unsafe fn from_raw_handle(raw_handle: RawHandle) -> Self {
+ Self { handle: FromInner::from_inner(FromRawHandle::from_raw_handle(raw_handle)) }
+ }
+}
+
+impl fmt::Debug for File {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // FIXME(#24570): add more info here (e.g., mode)
+ let mut b = f.debug_struct("File");
+ b.field("handle", &self.handle.as_raw_handle());
+ if let Ok(path) = get_path(&self) {
+ b.field("path", &path);
+ }
+ b.finish()
+ }
+}
+
+impl FileAttr {
+ pub fn size(&self) -> u64 {
+ self.file_size
+ }
+
+ pub fn perm(&self) -> FilePermissions {
+ FilePermissions { attrs: self.attributes }
+ }
+
+ pub fn attrs(&self) -> u32 {
+ self.attributes
+ }
+
+ pub fn file_type(&self) -> FileType {
+ FileType::new(self.attributes, self.reparse_tag)
+ }
+
+ pub fn modified(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from(self.last_write_time))
+ }
+
+ pub fn accessed(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from(self.last_access_time))
+ }
+
+ pub fn created(&self) -> io::Result<SystemTime> {
+ Ok(SystemTime::from(self.creation_time))
+ }
+
+ pub fn modified_u64(&self) -> u64 {
+ to_u64(&self.last_write_time)
+ }
+
+ pub fn accessed_u64(&self) -> u64 {
+ to_u64(&self.last_access_time)
+ }
+
+ pub fn created_u64(&self) -> u64 {
+ to_u64(&self.creation_time)
+ }
+
+ pub fn volume_serial_number(&self) -> Option<u32> {
+ self.volume_serial_number
+ }
+
+ pub fn number_of_links(&self) -> Option<u32> {
+ self.number_of_links
+ }
+
+ pub fn file_index(&self) -> Option<u64> {
+ self.file_index
+ }
+}
+impl From<c::WIN32_FIND_DATAW> for FileAttr {
+ fn from(wfd: c::WIN32_FIND_DATAW) -> Self {
+ FileAttr {
+ attributes: wfd.dwFileAttributes,
+ creation_time: wfd.ftCreationTime,
+ last_access_time: wfd.ftLastAccessTime,
+ last_write_time: wfd.ftLastWriteTime,
+ file_size: ((wfd.nFileSizeHigh as u64) << 32) | (wfd.nFileSizeLow as u64),
+ reparse_tag: if wfd.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
+ // reserved unless this is a reparse point
+ wfd.dwReserved0
+ } else {
+ 0
+ },
+ volume_serial_number: None,
+ number_of_links: None,
+ file_index: None,
+ }
+ }
+}
+
+fn to_u64(ft: &c::FILETIME) -> u64 {
+ (ft.dwLowDateTime as u64) | ((ft.dwHighDateTime as u64) << 32)
+}
+
+impl FilePermissions {
+ pub fn readonly(&self) -> bool {
+ self.attrs & c::FILE_ATTRIBUTE_READONLY != 0
+ }
+
+ pub fn set_readonly(&mut self, readonly: bool) {
+ if readonly {
+ self.attrs |= c::FILE_ATTRIBUTE_READONLY;
+ } else {
+ self.attrs &= !c::FILE_ATTRIBUTE_READONLY;
+ }
+ }
+}
+
+impl FileTimes {
+ pub fn set_accessed(&mut self, t: SystemTime) {
+ self.accessed = Some(t.into_inner());
+ }
+
+ pub fn set_modified(&mut self, t: SystemTime) {
+ self.modified = Some(t.into_inner());
+ }
+}
+
+impl FileType {
+ fn new(attrs: c::DWORD, reparse_tag: c::DWORD) -> FileType {
+ FileType { attributes: attrs, reparse_tag }
+ }
+ pub fn is_dir(&self) -> bool {
+ !self.is_symlink() && self.is_directory()
+ }
+ pub fn is_file(&self) -> bool {
+ !self.is_symlink() && !self.is_directory()
+ }
+ pub fn is_symlink(&self) -> bool {
+ self.is_reparse_point() && self.is_reparse_tag_name_surrogate()
+ }
+ pub fn is_symlink_dir(&self) -> bool {
+ self.is_symlink() && self.is_directory()
+ }
+ pub fn is_symlink_file(&self) -> bool {
+ self.is_symlink() && !self.is_directory()
+ }
+ fn is_directory(&self) -> bool {
+ self.attributes & c::FILE_ATTRIBUTE_DIRECTORY != 0
+ }
+ fn is_reparse_point(&self) -> bool {
+ self.attributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0
+ }
+ fn is_reparse_tag_name_surrogate(&self) -> bool {
+ self.reparse_tag & 0x20000000 != 0
+ }
+}
+
+impl DirBuilder {
+ pub fn new() -> DirBuilder {
+ DirBuilder
+ }
+
+ pub fn mkdir(&self, p: &Path) -> io::Result<()> {
+ let p = maybe_verbatim(p)?;
+ cvt(unsafe { c::CreateDirectoryW(p.as_ptr(), ptr::null_mut()) })?;
+ Ok(())
+ }
+}
+
+pub fn readdir(p: &Path) -> io::Result<ReadDir> {
+ let root = p.to_path_buf();
+ let star = p.join("*");
+ let path = maybe_verbatim(&star)?;
+
+ unsafe {
+ let mut wfd = mem::zeroed();
+ let find_handle = c::FindFirstFileW(path.as_ptr(), &mut wfd);
+ if find_handle != c::INVALID_HANDLE_VALUE {
+ Ok(ReadDir {
+ handle: FindNextFileHandle(find_handle),
+ root: Arc::new(root),
+ first: Some(wfd),
+ })
+ } else {
+ Err(Error::last_os_error())
+ }
+ }
+}
+
+pub fn unlink(p: &Path) -> io::Result<()> {
+ let p_u16s = maybe_verbatim(p)?;
+ cvt(unsafe { c::DeleteFileW(p_u16s.as_ptr()) })?;
+ Ok(())
+}
+
+pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
+ let old = maybe_verbatim(old)?;
+ let new = maybe_verbatim(new)?;
+ cvt(unsafe { c::MoveFileExW(old.as_ptr(), new.as_ptr(), c::MOVEFILE_REPLACE_EXISTING) })?;
+ Ok(())
+}
+
+pub fn rmdir(p: &Path) -> io::Result<()> {
+ let p = maybe_verbatim(p)?;
+ cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) })?;
+ Ok(())
+}
+
+/// Open a file or directory without following symlinks.
+fn open_link(path: &Path, access_mode: u32) -> io::Result<File> {
+ let mut opts = OpenOptions::new();
+ opts.access_mode(access_mode);
+ // `FILE_FLAG_BACKUP_SEMANTICS` allows opening directories.
+ // `FILE_FLAG_OPEN_REPARSE_POINT` opens a link instead of its target.
+ opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS | c::FILE_FLAG_OPEN_REPARSE_POINT);
+ File::open(path, &opts)
+}
+
+pub fn remove_dir_all(path: &Path) -> io::Result<()> {
+ let file = open_link(path, c::DELETE | c::FILE_LIST_DIRECTORY)?;
+
+ // Test if the file is not a directory or a symlink to a directory.
+ if (file.basic_info()?.FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) == 0 {
+ return Err(io::Error::from_raw_os_error(c::ERROR_DIRECTORY as _));
+ }
+
+ match remove_dir_all_iterative(&file, File::posix_delete) {
+ Err(e) => {
+ if let Some(code) = e.raw_os_error() {
+ match code as u32 {
+ // If POSIX delete is not supported for this filesystem then fallback to win32 delete.
+ c::ERROR_NOT_SUPPORTED
+ | c::ERROR_INVALID_FUNCTION
+ | c::ERROR_INVALID_PARAMETER => {
+ remove_dir_all_iterative(&file, File::win32_delete)
+ }
+ _ => Err(e),
+ }
+ } else {
+ Err(e)
+ }
+ }
+ ok => ok,
+ }
+}
+
+fn remove_dir_all_iterative(f: &File, delete: fn(&File) -> io::Result<()>) -> io::Result<()> {
+ // When deleting files we may loop this many times when certain error conditions occur.
+ // This allows remove_dir_all to succeed when the error is temporary.
+ const MAX_RETRIES: u32 = 10;
+
+ let mut buffer = DirBuff::new();
+ let mut dirlist = vec![f.duplicate()?];
+
+ // FIXME: This is a hack so we can push to the dirlist vec after borrowing from it.
+ fn copy_handle(f: &File) -> mem::ManuallyDrop<File> {
+ unsafe { mem::ManuallyDrop::new(File::from_raw_handle(f.as_raw_handle())) }
+ }
+
+ let mut restart = true;
+ while let Some(dir) = dirlist.last() {
+ let dir = copy_handle(dir);
+
+ // Fill the buffer and iterate the entries.
+ let more_data = dir.fill_dir_buff(&mut buffer, restart)?;
+ restart = false;
+ for (name, is_directory) in buffer.iter() {
+ if is_directory {
+ let child_dir = open_link_no_reparse(
+ &dir,
+ name,
+ c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY,
+ )?;
+ dirlist.push(child_dir);
+ } else {
+ for i in 1..=MAX_RETRIES {
+ let result = open_link_no_reparse(&dir, name, c::SYNCHRONIZE | c::DELETE);
+ match result {
+ Ok(f) => delete(&f)?,
+ // Already deleted, so skip.
+ Err(e) if e.kind() == io::ErrorKind::NotFound => break,
+ // Retry a few times if the file is locked or a delete is already in progress.
+ Err(e)
+ if i < MAX_RETRIES
+ && (e.raw_os_error() == Some(c::ERROR_DELETE_PENDING as _)
+ || e.raw_os_error()
+ == Some(c::ERROR_SHARING_VIOLATION as _)) => {}
+ // Otherwise return the error.
+ Err(e) => return Err(e),
+ }
+ thread::yield_now();
+ }
+ }
+ }
+ // If there were no more files then delete the directory.
+ if !more_data {
+ if let Some(dir) = dirlist.pop() {
+ // Retry deleting a few times in case we need to wait for a file to be deleted.
+ for i in 1..=MAX_RETRIES {
+ let result = delete(&dir);
+ if let Err(e) = result {
+ if i == MAX_RETRIES || e.kind() != io::ErrorKind::DirectoryNotEmpty {
+ return Err(e);
+ }
+ thread::yield_now();
+ } else {
+ break;
+ }
+ }
+ }
+ }
+ }
+ Ok(())
+}
+
+pub fn readlink(path: &Path) -> io::Result<PathBuf> {
+ // Open the link with no access mode, instead of generic read.
+ // By default FILE_LIST_DIRECTORY is denied for the junction "C:\Documents and Settings", so
+ // this is needed for a common case.
+ let mut opts = OpenOptions::new();
+ opts.access_mode(0);
+ opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT | c::FILE_FLAG_BACKUP_SEMANTICS);
+ let file = File::open(&path, &opts)?;
+ file.readlink()
+}
+
+pub fn symlink(original: &Path, link: &Path) -> io::Result<()> {
+ symlink_inner(original, link, false)
+}
+
+pub fn symlink_inner(original: &Path, link: &Path, dir: bool) -> io::Result<()> {
+ let original = to_u16s(original)?;
+ let link = maybe_verbatim(link)?;
+ let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
+ // Formerly, symlink creation required the SeCreateSymbolicLink privilege. For the Windows 10
+ // Creators Update, Microsoft loosened this to allow unprivileged symlink creation if the
+ // computer is in Developer Mode, but SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE must be
+ // added to dwFlags to opt into this behaviour.
+ let result = cvt(unsafe {
+ c::CreateSymbolicLinkW(
+ link.as_ptr(),
+ original.as_ptr(),
+ flags | c::SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE,
+ ) as c::BOOL
+ });
+ if let Err(err) = result {
+ if err.raw_os_error() == Some(c::ERROR_INVALID_PARAMETER as i32) {
+ // Older Windows objects to SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE,
+ // so if we encounter ERROR_INVALID_PARAMETER, retry without that flag.
+ cvt(unsafe {
+ c::CreateSymbolicLinkW(link.as_ptr(), original.as_ptr(), flags) as c::BOOL
+ })?;
+ } else {
+ return Err(err);
+ }
+ }
+ Ok(())
+}
+
+#[cfg(not(target_vendor = "uwp"))]
+pub fn link(original: &Path, link: &Path) -> io::Result<()> {
+ let original = maybe_verbatim(original)?;
+ let link = maybe_verbatim(link)?;
+ cvt(unsafe { c::CreateHardLinkW(link.as_ptr(), original.as_ptr(), ptr::null_mut()) })?;
+ Ok(())
+}
+
+#[cfg(target_vendor = "uwp")]
+pub fn link(_original: &Path, _link: &Path) -> io::Result<()> {
+ return Err(io::const_io_error!(
+ io::ErrorKind::Unsupported,
+ "hard link are not supported on UWP",
+ ));
+}
+
+pub fn stat(path: &Path) -> io::Result<FileAttr> {
+ metadata(path, ReparsePoint::Follow)
+}
+
+pub fn lstat(path: &Path) -> io::Result<FileAttr> {
+ metadata(path, ReparsePoint::Open)
+}
+
+#[repr(u32)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+enum ReparsePoint {
+ Follow = 0,
+ Open = c::FILE_FLAG_OPEN_REPARSE_POINT,
+}
+impl ReparsePoint {
+ fn as_flag(self) -> u32 {
+ self as u32
+ }
+}
+
+fn metadata(path: &Path, reparse: ReparsePoint) -> io::Result<FileAttr> {
+ let mut opts = OpenOptions::new();
+ // No read or write permissions are necessary
+ opts.access_mode(0);
+ opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS | reparse.as_flag());
+
+ // Attempt to open the file normally.
+ // If that fails with `ERROR_SHARING_VIOLATION` then retry using `FindFirstFileW`.
+ // If the fallback fails for any reason we return the original error.
+ match File::open(path, &opts) {
+ Ok(file) => file.file_attr(),
+ Err(e) if e.raw_os_error() == Some(c::ERROR_SHARING_VIOLATION as _) => {
+ // `ERROR_SHARING_VIOLATION` will almost never be returned.
+ // Usually if a file is locked you can still read some metadata.
+ // However, there are special system files, such as
+ // `C:\hiberfil.sys`, that are locked in a way that denies even that.
+ unsafe {
+ let path = maybe_verbatim(path)?;
+
+ // `FindFirstFileW` accepts wildcard file names.
+ // Fortunately wildcards are not valid file names and
+ // `ERROR_SHARING_VIOLATION` means the file exists (but is locked)
+ // therefore it's safe to assume the file name given does not
+ // include wildcards.
+ let mut wfd = mem::zeroed();
+ let handle = c::FindFirstFileW(path.as_ptr(), &mut wfd);
+
+ if handle == c::INVALID_HANDLE_VALUE {
+ // This can fail if the user does not have read access to the
+ // directory.
+ Err(e)
+ } else {
+ // We no longer need the find handle.
+ c::FindClose(handle);
+
+ // `FindFirstFileW` reads the cached file information from the
+ // directory. The downside is that this metadata may be outdated.
+ let attrs = FileAttr::from(wfd);
+ if reparse == ReparsePoint::Follow && attrs.file_type().is_symlink() {
+ Err(e)
+ } else {
+ Ok(attrs)
+ }
+ }
+ }
+ }
+ Err(e) => Err(e),
+ }
+}
+
+pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
+ let p = maybe_verbatim(p)?;
+ unsafe {
+ cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs))?;
+ Ok(())
+ }
+}
+
+fn get_path(f: &File) -> io::Result<PathBuf> {
+ super::fill_utf16_buf(
+ |buf, sz| unsafe {
+ c::GetFinalPathNameByHandleW(f.handle.as_raw_handle(), buf, sz, c::VOLUME_NAME_DOS)
+ },
+ |buf| PathBuf::from(OsString::from_wide(buf)),
+ )
+}
+
+pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
+ let mut opts = OpenOptions::new();
+ // No read or write permissions are necessary
+ opts.access_mode(0);
+ // This flag is so we can open directories too
+ opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS);
+ let f = File::open(p, &opts)?;
+ get_path(&f)
+}
+
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ unsafe extern "system" fn callback(
+ _TotalFileSize: c::LARGE_INTEGER,
+ _TotalBytesTransferred: c::LARGE_INTEGER,
+ _StreamSize: c::LARGE_INTEGER,
+ StreamBytesTransferred: c::LARGE_INTEGER,
+ dwStreamNumber: c::DWORD,
+ _dwCallbackReason: c::DWORD,
+ _hSourceFile: c::HANDLE,
+ _hDestinationFile: c::HANDLE,
+ lpData: c::LPVOID,
+ ) -> c::DWORD {
+ if dwStreamNumber == 1 {
+ *(lpData as *mut i64) = StreamBytesTransferred;
+ }
+ c::PROGRESS_CONTINUE
+ }
+ let pfrom = maybe_verbatim(from)?;
+ let pto = maybe_verbatim(to)?;
+ let mut size = 0i64;
+ cvt(unsafe {
+ c::CopyFileExW(
+ pfrom.as_ptr(),
+ pto.as_ptr(),
+ Some(callback),
+ &mut size as *mut _ as *mut _,
+ ptr::null_mut(),
+ 0,
+ )
+ })?;
+ Ok(size as u64)
+}
+
+#[allow(dead_code)]
+pub fn symlink_junction<P: AsRef<Path>, Q: AsRef<Path>>(
+ original: P,
+ junction: Q,
+) -> io::Result<()> {
+ symlink_junction_inner(original.as_ref(), junction.as_ref())
+}
+
+// Creating a directory junction on windows involves dealing with reparse
+// points and the DeviceIoControl function, and this code is a skeleton of
+// what can be found here:
+//
+// http://www.flexhex.com/docs/articles/hard-links.phtml
+#[allow(dead_code)]
+fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
+ let d = DirBuilder::new();
+ d.mkdir(&junction)?;
+
+ let mut opts = OpenOptions::new();
+ opts.write(true);
+ opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT | c::FILE_FLAG_BACKUP_SEMANTICS);
+ let f = File::open(junction, &opts)?;
+ let h = f.as_inner().as_raw_handle();
+
+ unsafe {
+ let mut data = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ let db = data.as_mut_ptr() as *mut c::REPARSE_MOUNTPOINT_DATA_BUFFER;
+ let buf = &mut (*db).ReparseTarget as *mut c::WCHAR;
+ let mut i = 0;
+ // FIXME: this conversion is very hacky
+ let v = br"\??\";
+ let v = v.iter().map(|x| *x as u16);
+ for c in v.chain(original.as_os_str().encode_wide()) {
+ *buf.offset(i) = c;
+ i += 1;
+ }
+ *buf.offset(i) = 0;
+ i += 1;
+ (*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
+ (*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;
+ (*db).ReparseTargetLength = ((i - 1) * 2) as c::WORD;
+ (*db).ReparseDataLength = (*db).ReparseTargetLength as c::DWORD + 12;
+
+ let mut ret = 0;
+ cvt(c::DeviceIoControl(
+ h as *mut _,
+ c::FSCTL_SET_REPARSE_POINT,
+ data.as_ptr() as *mut _,
+ (*db).ReparseDataLength + 8,
+ ptr::null_mut(),
+ 0,
+ &mut ret,
+ ptr::null_mut(),
+ ))
+ .map(drop)
+ }
+}
+
+// Try to see if a file exists but, unlike `exists`, report I/O errors.
+pub fn try_exists(path: &Path) -> io::Result<bool> {
+ // Open the file to ensure any symlinks are followed to their target.
+ let mut opts = OpenOptions::new();
+ // No read, write, etc access rights are needed.
+ opts.access_mode(0);
+ // Backup semantics enables opening directories as well as files.
+ opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS);
+ match File::open(path, &opts) {
+ Err(e) => match e.kind() {
+ // The file definitely does not exist
+ io::ErrorKind::NotFound => Ok(false),
+
+ // `ERROR_SHARING_VIOLATION` means that the file has been locked by
+ // another process. This is often temporary so we simply report it
+ // as the file existing.
+ _ if e.raw_os_error() == Some(c::ERROR_SHARING_VIOLATION as i32) => Ok(true),
+
+ // Other errors such as `ERROR_ACCESS_DENIED` may indicate that the
+ // file exists. However, these types of errors are usually more
+ // permanent so we report them here.
+ _ => Err(e),
+ },
+ // The file was opened successfully, therefore it must exist.
+ Ok(_) => Ok(true),
+ }
+}
diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs
new file mode 100644
index 000000000..e24b09cc9
--- /dev/null
+++ b/library/std/src/sys/windows/handle.rs
@@ -0,0 +1,335 @@
+#![unstable(issue = "none", feature = "windows_handle")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::cmp;
+use crate::io::{self, ErrorKind, IoSlice, IoSliceMut, Read, ReadBuf};
+use crate::mem;
+use crate::os::windows::io::{
+ AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle,
+};
+use crate::ptr;
+use crate::sys::c;
+use crate::sys::cvt;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+/// An owned container for a `HANDLE` object, closing it on Drop.
+///
+/// All methods are inherited through a `Deref` impl to `RawHandle`
+pub struct Handle(OwnedHandle);
+
+impl Handle {
+ pub fn new_event(manual: bool, init: bool) -> io::Result<Handle> {
+ unsafe {
+ let event =
+ c::CreateEventW(ptr::null_mut(), manual as c::BOOL, init as c::BOOL, ptr::null());
+ if event.is_null() {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(Handle::from_raw_handle(event))
+ }
+ }
+ }
+}
+
+impl AsInner<OwnedHandle> for Handle {
+ fn as_inner(&self) -> &OwnedHandle {
+ &self.0
+ }
+}
+
+impl IntoInner<OwnedHandle> for Handle {
+ fn into_inner(self) -> OwnedHandle {
+ self.0
+ }
+}
+
+impl FromInner<OwnedHandle> for Handle {
+ fn from_inner(file_desc: OwnedHandle) -> Self {
+ Self(file_desc)
+ }
+}
+
+impl AsHandle for Handle {
+ fn as_handle(&self) -> BorrowedHandle<'_> {
+ self.0.as_handle()
+ }
+}
+
+impl AsRawHandle for Handle {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.0.as_raw_handle()
+ }
+}
+
+impl IntoRawHandle for Handle {
+ fn into_raw_handle(self) -> RawHandle {
+ self.0.into_raw_handle()
+ }
+}
+
+impl FromRawHandle for Handle {
+ unsafe fn from_raw_handle(raw_handle: RawHandle) -> Self {
+ Self(FromRawHandle::from_raw_handle(raw_handle))
+ }
+}
+
+impl Handle {
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let res = unsafe { self.synchronous_read(buf.as_mut_ptr().cast(), buf.len(), None) };
+
+ match res {
+ Ok(read) => Ok(read as usize),
+
+ // The special treatment of BrokenPipe is to deal with Windows
+ // pipe semantics, which yields this error when *reading* from
+ // a pipe after the other end has closed; we interpret that as
+ // EOF on the pipe.
+ Err(ref e) if e.kind() == ErrorKind::BrokenPipe => Ok(0),
+
+ Err(e) => Err(e),
+ }
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ crate::io::default_read_vectored(|buf| self.read(buf), bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> {
+ let res =
+ unsafe { self.synchronous_read(buf.as_mut_ptr().cast(), buf.len(), Some(offset)) };
+
+ match res {
+ Ok(read) => Ok(read as usize),
+ Err(ref e) if e.raw_os_error() == Some(c::ERROR_HANDLE_EOF as i32) => Ok(0),
+ Err(e) => Err(e),
+ }
+ }
+
+ pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ let res = unsafe {
+ self.synchronous_read(buf.unfilled_mut().as_mut_ptr(), buf.remaining(), None)
+ };
+
+ match res {
+ Ok(read) => {
+ // Safety: `read` bytes were written to the initialized portion of the buffer
+ unsafe {
+ buf.assume_init(read as usize);
+ }
+ buf.add_filled(read as usize);
+ Ok(())
+ }
+
+ // The special treatment of BrokenPipe is to deal with Windows
+ // pipe semantics, which yields this error when *reading* from
+ // a pipe after the other end has closed; we interpret that as
+ // EOF on the pipe.
+ Err(ref e) if e.kind() == ErrorKind::BrokenPipe => Ok(()),
+
+ Err(e) => Err(e),
+ }
+ }
+
+ pub unsafe fn read_overlapped(
+ &self,
+ buf: &mut [u8],
+ overlapped: *mut c::OVERLAPPED,
+ ) -> io::Result<Option<usize>> {
+ let len = cmp::min(buf.len(), <c::DWORD>::MAX as usize) as c::DWORD;
+ let mut amt = 0;
+ let res = cvt(c::ReadFile(
+ self.as_handle(),
+ buf.as_ptr() as c::LPVOID,
+ len,
+ &mut amt,
+ overlapped,
+ ));
+ match res {
+ Ok(_) => Ok(Some(amt as usize)),
+ Err(e) => {
+ if e.raw_os_error() == Some(c::ERROR_IO_PENDING as i32) {
+ Ok(None)
+ } else if e.raw_os_error() == Some(c::ERROR_BROKEN_PIPE as i32) {
+ Ok(Some(0))
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+
+ pub fn overlapped_result(
+ &self,
+ overlapped: *mut c::OVERLAPPED,
+ wait: bool,
+ ) -> io::Result<usize> {
+ unsafe {
+ let mut bytes = 0;
+ let wait = if wait { c::TRUE } else { c::FALSE };
+ let res =
+ cvt(c::GetOverlappedResult(self.as_raw_handle(), overlapped, &mut bytes, wait));
+ match res {
+ Ok(_) => Ok(bytes as usize),
+ Err(e) => {
+ if e.raw_os_error() == Some(c::ERROR_HANDLE_EOF as i32)
+ || e.raw_os_error() == Some(c::ERROR_BROKEN_PIPE as i32)
+ {
+ Ok(0)
+ } else {
+ Err(e)
+ }
+ }
+ }
+ }
+ }
+
+ pub fn cancel_io(&self) -> io::Result<()> {
+ unsafe { cvt(c::CancelIo(self.as_raw_handle())).map(drop) }
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ self.synchronous_write(&buf, None)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ crate::io::default_write_vectored(|buf| self.write(buf), bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ false
+ }
+
+ pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ self.synchronous_write(&buf, Some(offset))
+ }
+
+ pub fn try_clone(&self) -> io::Result<Self> {
+ Ok(Self(self.0.try_clone()?))
+ }
+
+ pub fn duplicate(
+ &self,
+ access: c::DWORD,
+ inherit: bool,
+ options: c::DWORD,
+ ) -> io::Result<Self> {
+ Ok(Self(self.0.as_handle().duplicate(access, inherit, options)?))
+ }
+
+ /// Performs a synchronous read.
+ ///
+ /// If the handle is opened for asynchronous I/O then this aborts the process.
+ /// See #81357.
+ ///
+ /// If `offset` is `None` then the current file position is used.
+ unsafe fn synchronous_read(
+ &self,
+ buf: *mut mem::MaybeUninit<u8>,
+ len: usize,
+ offset: Option<u64>,
+ ) -> io::Result<usize> {
+ let mut io_status = c::IO_STATUS_BLOCK::default();
+
+ // The length is clamped at u32::MAX.
+ let len = cmp::min(len, c::DWORD::MAX as usize) as c::DWORD;
+ let status = c::NtReadFile(
+ self.as_handle(),
+ ptr::null_mut(),
+ None,
+ ptr::null_mut(),
+ &mut io_status,
+ buf,
+ len,
+ offset.map(|n| n as _).as_ref(),
+ None,
+ );
+
+ let status = if status == c::STATUS_PENDING {
+ c::WaitForSingleObject(self.as_raw_handle(), c::INFINITE);
+ io_status.status()
+ } else {
+ status
+ };
+ match status {
+ // If the operation has not completed then abort the process.
+ // Doing otherwise means that the buffer and stack may be written to
+ // after this function returns.
+ c::STATUS_PENDING => rtabort!("I/O error: operation failed to complete synchronously"),
+
+ // Return `Ok(0)` when there's nothing more to read.
+ c::STATUS_END_OF_FILE => Ok(0),
+
+ // Success!
+ status if c::nt_success(status) => Ok(io_status.Information),
+
+ status => {
+ let error = c::RtlNtStatusToDosError(status);
+ Err(io::Error::from_raw_os_error(error as _))
+ }
+ }
+ }
+
+ /// Performs a synchronous write.
+ ///
+ /// If the handle is opened for asynchronous I/O then this aborts the process.
+ /// See #81357.
+ ///
+ /// If `offset` is `None` then the current file position is used.
+ fn synchronous_write(&self, buf: &[u8], offset: Option<u64>) -> io::Result<usize> {
+ let mut io_status = c::IO_STATUS_BLOCK::default();
+
+ // The length is clamped at u32::MAX.
+ let len = cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
+ let status = unsafe {
+ c::NtWriteFile(
+ self.as_handle(),
+ ptr::null_mut(),
+ None,
+ ptr::null_mut(),
+ &mut io_status,
+ buf.as_ptr(),
+ len,
+ offset.map(|n| n as _).as_ref(),
+ None,
+ )
+ };
+ let status = if status == c::STATUS_PENDING {
+ unsafe { c::WaitForSingleObject(self.as_raw_handle(), c::INFINITE) };
+ io_status.status()
+ } else {
+ status
+ };
+ match status {
+ // If the operation has not completed then abort the process.
+ // Doing otherwise means that the buffer may be read and the stack
+ // written to after this function returns.
+ c::STATUS_PENDING => rtabort!("I/O error: operation failed to complete synchronously"),
+
+ // Success!
+ status if c::nt_success(status) => Ok(io_status.Information),
+
+ status => {
+ let error = unsafe { c::RtlNtStatusToDosError(status) };
+ Err(io::Error::from_raw_os_error(error as _))
+ }
+ }
+ }
+}
+
+impl<'a> Read for &'a Handle {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (**self).read(buf)
+ }
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ (**self).read_vectored(bufs)
+ }
+}
diff --git a/library/std/src/sys/windows/handle/tests.rs b/library/std/src/sys/windows/handle/tests.rs
new file mode 100644
index 000000000..d836dae4c
--- /dev/null
+++ b/library/std/src/sys/windows/handle/tests.rs
@@ -0,0 +1,22 @@
+use crate::sys::pipe::{anon_pipe, Pipes};
+use crate::{thread, time};
+
+/// Test the synchronous fallback for overlapped I/O.
+#[test]
+fn overlapped_handle_fallback() {
+ // Create some pipes. `ours` will be asynchronous.
+ let Pipes { ours, theirs } = anon_pipe(true, false).unwrap();
+
+ let async_readable = ours.into_handle();
+ let sync_writeable = theirs.into_handle();
+
+ thread::scope(|_| {
+ thread::sleep(time::Duration::from_millis(100));
+ sync_writeable.write(b"hello world!").unwrap();
+ });
+
+ // The pipe buffer starts empty so reading won't complete synchronously unless
+ // our fallback path works.
+ let mut buffer = [0u8; 1024];
+ async_readable.read(&mut buffer).unwrap();
+}
diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs
new file mode 100644
index 000000000..fb06df1f8
--- /dev/null
+++ b/library/std/src/sys/windows/io.rs
@@ -0,0 +1,80 @@
+use crate::marker::PhantomData;
+use crate::slice;
+use crate::sys::c;
+
+#[derive(Copy, Clone)]
+#[repr(transparent)]
+pub struct IoSlice<'a> {
+ vec: c::WSABUF,
+ _p: PhantomData<&'a [u8]>,
+}
+
+impl<'a> IoSlice<'a> {
+ #[inline]
+ pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
+ assert!(buf.len() <= c::ULONG::MAX as usize);
+ IoSlice {
+ vec: c::WSABUF {
+ len: buf.len() as c::ULONG,
+ buf: buf.as_ptr() as *mut u8 as *mut c::CHAR,
+ },
+ _p: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if (self.vec.len as usize) < n {
+ panic!("advancing IoSlice beyond its length");
+ }
+
+ unsafe {
+ self.vec.len -= n as c::ULONG;
+ self.vec.buf = self.vec.buf.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.buf as *mut u8, self.vec.len as usize) }
+ }
+}
+
+#[repr(transparent)]
+pub struct IoSliceMut<'a> {
+ vec: c::WSABUF,
+ _p: PhantomData<&'a mut [u8]>,
+}
+
+impl<'a> IoSliceMut<'a> {
+ #[inline]
+ pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+ assert!(buf.len() <= c::ULONG::MAX as usize);
+ IoSliceMut {
+ vec: c::WSABUF { len: buf.len() as c::ULONG, buf: buf.as_mut_ptr() as *mut c::CHAR },
+ _p: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub fn advance(&mut self, n: usize) {
+ if (self.vec.len as usize) < n {
+ panic!("advancing IoSliceMut beyond its length");
+ }
+
+ unsafe {
+ self.vec.len -= n as c::ULONG;
+ self.vec.buf = self.vec.buf.add(n);
+ }
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.vec.buf as *mut u8, self.vec.len as usize) }
+ }
+
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut [u8] {
+ unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.len as usize) }
+ }
+}
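+
+// Editor's note: an illustrative sketch, not part of the original change.
+// `advance` shrinks the underlying WSABUF from the front, so `as_slice`
+// afterwards only exposes the not-yet-written tail:
+#[cfg(test)]
+#[test]
+fn io_slice_advance_sketch() {
+ let buf = [1u8, 2, 3, 4, 5];
+ let mut slice = IoSlice::new(&buf);
+ // Pretend the first three bytes have already been transferred.
+ slice.advance(3);
+ assert_eq!(slice.as_slice(), &buf[3..]);
+}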
diff --git a/library/std/src/sys/windows/locks/condvar.rs b/library/std/src/sys/windows/locks/condvar.rs
new file mode 100644
index 000000000..be9a2abbe
--- /dev/null
+++ b/library/std/src/sys/windows/locks/condvar.rs
@@ -0,0 +1,52 @@
+use crate::cell::UnsafeCell;
+use crate::sys::c;
+use crate::sys::locks::{mutex, Mutex};
+use crate::sys::os;
+use crate::time::Duration;
+
+pub struct Condvar {
+ inner: UnsafeCell<c::CONDITION_VARIABLE>,
+}
+
+pub type MovableCondvar = Condvar;
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+impl Condvar {
+ #[inline]
+ pub const fn new() -> Condvar {
+ Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) }
+ }
+
+ #[inline]
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ let r = c::SleepConditionVariableSRW(self.inner.get(), mutex::raw(mutex), c::INFINITE, 0);
+ debug_assert!(r != 0);
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ let r = c::SleepConditionVariableSRW(
+ self.inner.get(),
+ mutex::raw(mutex),
+ crate::sys::windows::dur2timeout(dur),
+ 0,
+ );
+ if r == 0 {
+ debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize);
+ false
+ } else {
+ true
+ }
+ }
+
+ #[inline]
+ pub unsafe fn notify_one(&self) {
+ c::WakeConditionVariable(self.inner.get())
+ }
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {
+ c::WakeAllConditionVariable(self.inner.get())
+ }
+}
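+
+// Editor's note: an illustrative sketch, not part of the original change.
+// The raw API maps directly onto SleepConditionVariableSRW: the caller must
+// hold the paired lock, and a `false` return from `wait_timeout` means the
+// wait hit ERROR_TIMEOUT. Either way the predicate has to be re-checked,
+// since wakeups may be spurious.
+#[allow(dead_code)]
+unsafe fn wait_until_sketch(condvar: &Condvar, mutex: &Mutex, mut ready: impl FnMut() -> bool) {
+ mutex.lock();
+ while !ready() {
+ // Wait at most 10ms per iteration, then re-check `ready`.
+ let _timed_out = !condvar.wait_timeout(mutex, Duration::from_millis(10));
+ }
+ mutex.unlock();
+}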
diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs
new file mode 100644
index 000000000..d412ff152
--- /dev/null
+++ b/library/std/src/sys/windows/locks/mod.rs
@@ -0,0 +1,6 @@
+mod condvar;
+mod mutex;
+mod rwlock;
+pub use condvar::{Condvar, MovableCondvar};
+pub use mutex::{MovableMutex, Mutex};
+pub use rwlock::{MovableRwLock, RwLock};
diff --git a/library/std/src/sys/windows/locks/mutex.rs b/library/std/src/sys/windows/locks/mutex.rs
new file mode 100644
index 000000000..f91e8f9f5
--- /dev/null
+++ b/library/std/src/sys/windows/locks/mutex.rs
@@ -0,0 +1,57 @@
+//! System Mutexes
+//!
+//! The Windows implementation of mutexes is a little odd and it might not be
+//! immediately obvious what's going on. The primary oddness is that SRWLock is
+//! used instead of CriticalSection, and this is done because:
+//!
+//! 1. SRWLock is several times faster than CriticalSection according to
+//! benchmarks performed on both Windows 8 and Windows 7.
+//!
+//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
+//! Unix implementation deadlocks so consistency is preferred. See #19962 for
+//! more details.
+//!
+//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
+//! is that there are no guarantees of fairness.
+
+use crate::cell::UnsafeCell;
+use crate::sys::c;
+
+pub struct Mutex {
+ srwlock: UnsafeCell<c::SRWLOCK>,
+}
+
+// Windows SRW Locks are movable (while not borrowed).
+pub type MovableMutex = Mutex;
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+#[inline]
+pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
+ m.srwlock.get()
+}
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Mutex {
+ Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
+ }
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ c::AcquireSRWLockExclusive(raw(self));
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ c::TryAcquireSRWLockExclusive(raw(self)) != 0
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ c::ReleaseSRWLockExclusive(raw(self));
+ }
+}
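+
+// Editor's note: an illustrative sketch, not part of the original change.
+// The raw interface is an explicit lock/unlock pair over the SRW lock; the
+// public `std::sync::Mutex` layers a guard on top so the unlock cannot be
+// forgotten. Used directly, the contract looks like this:
+#[allow(dead_code)]
+unsafe fn lock_sketch(m: &Mutex) {
+ if m.try_lock() {
+ // Uncontended fast path: we now own the lock and must release it.
+ m.unlock();
+ } else {
+ m.lock();
+ // ... critical section ...
+ m.unlock();
+ }
+}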
diff --git a/library/std/src/sys/windows/locks/rwlock.rs b/library/std/src/sys/windows/locks/rwlock.rs
new file mode 100644
index 000000000..fa5ffe574
--- /dev/null
+++ b/library/std/src/sys/windows/locks/rwlock.rs
@@ -0,0 +1,42 @@
+use crate::cell::UnsafeCell;
+use crate::sys::c;
+
+pub struct RwLock {
+ inner: UnsafeCell<c::SRWLOCK>,
+}
+
+pub type MovableRwLock = RwLock;
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+impl RwLock {
+ #[inline]
+ pub const fn new() -> RwLock {
+ RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
+ }
+ #[inline]
+ pub unsafe fn read(&self) {
+ c::AcquireSRWLockShared(self.inner.get())
+ }
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ c::TryAcquireSRWLockShared(self.inner.get()) != 0
+ }
+ #[inline]
+ pub unsafe fn write(&self) {
+ c::AcquireSRWLockExclusive(self.inner.get())
+ }
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
+ }
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ c::ReleaseSRWLockShared(self.inner.get())
+ }
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ c::ReleaseSRWLockExclusive(self.inner.get())
+ }
+}
diff --git a/library/std/src/sys/windows/memchr.rs b/library/std/src/sys/windows/memchr.rs
new file mode 100644
index 000000000..b9e5bcc1b
--- /dev/null
+++ b/library/std/src/sys/windows/memchr.rs
@@ -0,0 +1,5 @@
+// Original implementation taken from rust-memchr.
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+// Fallback memchr is fastest on Windows.
+pub use core::slice::memchr::{memchr, memrchr};
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
new file mode 100644
index 000000000..b3f6d2d0a
--- /dev/null
+++ b/library/std/src/sys/windows/mod.rs
@@ -0,0 +1,323 @@
+#![allow(missing_docs, nonstandard_style)]
+
+use crate::ffi::{CStr, OsStr, OsString};
+use crate::io::ErrorKind;
+use crate::os::windows::ffi::{OsStrExt, OsStringExt};
+use crate::path::PathBuf;
+use crate::time::Duration;
+
+pub use self::rand::hashmap_random_keys;
+
+#[macro_use]
+pub mod compat;
+
+pub mod alloc;
+pub mod args;
+pub mod c;
+pub mod cmath;
+pub mod env;
+pub mod fs;
+pub mod handle;
+pub mod io;
+pub mod locks;
+pub mod memchr;
+pub mod net;
+pub mod os;
+pub mod os_str;
+pub mod path;
+pub mod pipe;
+pub mod process;
+pub mod rand;
+pub mod thread;
+pub mod thread_local_dtor;
+pub mod thread_local_key;
+pub mod thread_parker;
+pub mod time;
+cfg_if::cfg_if! {
+ if #[cfg(not(target_vendor = "uwp"))] {
+ pub mod stdio;
+ pub mod stack_overflow;
+ } else {
+ pub mod stdio_uwp;
+ pub mod stack_overflow_uwp;
+ pub use self::stdio_uwp as stdio;
+ pub use self::stack_overflow_uwp as stack_overflow;
+ }
+}
+
+// SAFETY: must be called only once during runtime initialization.
+// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
+pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
+ stack_overflow::init();
+
+ // Normally, `thread::spawn` will call `Thread::set_name` but since this thread already
+ // exists, we have to call it ourselves.
+ thread::Thread::set_name(&CStr::from_bytes_with_nul_unchecked(b"main\0"));
+}
+
+// SAFETY: must be called only once during runtime cleanup.
+// NOTE: this is not guaranteed to run, for example when the program aborts.
+pub unsafe fn cleanup() {
+ net::cleanup();
+}
+
+pub fn decode_error_kind(errno: i32) -> ErrorKind {
+ use ErrorKind::*;
+
+ match errno as c::DWORD {
+ c::ERROR_ACCESS_DENIED => return PermissionDenied,
+ c::ERROR_ALREADY_EXISTS => return AlreadyExists,
+ c::ERROR_FILE_EXISTS => return AlreadyExists,
+ c::ERROR_BROKEN_PIPE => return BrokenPipe,
+ c::ERROR_FILE_NOT_FOUND => return NotFound,
+ c::ERROR_PATH_NOT_FOUND => return NotFound,
+ c::ERROR_NO_DATA => return BrokenPipe,
+ c::ERROR_INVALID_NAME => return InvalidFilename,
+ c::ERROR_INVALID_PARAMETER => return InvalidInput,
+ c::ERROR_NOT_ENOUGH_MEMORY | c::ERROR_OUTOFMEMORY => return OutOfMemory,
+ c::ERROR_SEM_TIMEOUT
+ | c::WAIT_TIMEOUT
+ | c::ERROR_DRIVER_CANCEL_TIMEOUT
+ | c::ERROR_OPERATION_ABORTED
+ | c::ERROR_SERVICE_REQUEST_TIMEOUT
+ | c::ERROR_COUNTER_TIMEOUT
+ | c::ERROR_TIMEOUT
+ | c::ERROR_RESOURCE_CALL_TIMED_OUT
+ | c::ERROR_CTX_MODEM_RESPONSE_TIMEOUT
+ | c::ERROR_CTX_CLIENT_QUERY_TIMEOUT
+ | c::FRS_ERR_SYSVOL_POPULATE_TIMEOUT
+ | c::ERROR_DS_TIMELIMIT_EXCEEDED
+ | c::DNS_ERROR_RECORD_TIMED_OUT
+ | c::ERROR_IPSEC_IKE_TIMED_OUT
+ | c::ERROR_RUNLEVEL_SWITCH_TIMEOUT
+ | c::ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT => return TimedOut,
+ c::ERROR_CALL_NOT_IMPLEMENTED => return Unsupported,
+ c::ERROR_HOST_UNREACHABLE => return HostUnreachable,
+ c::ERROR_NETWORK_UNREACHABLE => return NetworkUnreachable,
+ c::ERROR_DIRECTORY => return NotADirectory,
+ c::ERROR_DIRECTORY_NOT_SUPPORTED => return IsADirectory,
+ c::ERROR_DIR_NOT_EMPTY => return DirectoryNotEmpty,
+ c::ERROR_WRITE_PROTECT => return ReadOnlyFilesystem,
+ c::ERROR_DISK_FULL | c::ERROR_HANDLE_DISK_FULL => return StorageFull,
+ c::ERROR_SEEK_ON_DEVICE => return NotSeekable,
+ c::ERROR_DISK_QUOTA_EXCEEDED => return FilesystemQuotaExceeded,
+ c::ERROR_FILE_TOO_LARGE => return FileTooLarge,
+ c::ERROR_BUSY => return ResourceBusy,
+ c::ERROR_POSSIBLE_DEADLOCK => return Deadlock,
+ c::ERROR_NOT_SAME_DEVICE => return CrossesDevices,
+ c::ERROR_TOO_MANY_LINKS => return TooManyLinks,
+ c::ERROR_FILENAME_EXCED_RANGE => return InvalidFilename,
+ _ => {}
+ }
+
+ match errno {
+ c::WSAEACCES => PermissionDenied,
+ c::WSAEADDRINUSE => AddrInUse,
+ c::WSAEADDRNOTAVAIL => AddrNotAvailable,
+ c::WSAECONNABORTED => ConnectionAborted,
+ c::WSAECONNREFUSED => ConnectionRefused,
+ c::WSAECONNRESET => ConnectionReset,
+ c::WSAEINVAL => InvalidInput,
+ c::WSAENOTCONN => NotConnected,
+ c::WSAEWOULDBLOCK => WouldBlock,
+ c::WSAETIMEDOUT => TimedOut,
+ c::WSAEHOSTUNREACH => HostUnreachable,
+ c::WSAENETDOWN => NetworkDown,
+ c::WSAENETUNREACH => NetworkUnreachable,
+
+ _ => Uncategorized,
+ }
+}
+
+pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
+ let ptr = haystack.as_ptr();
+ let mut start = &haystack[..];
+
+ // For performance reasons unroll the loop eight times.
+ while start.len() >= 8 {
+ macro_rules! if_return {
+ ($($n:literal,)+) => {
+ $(
+ if start[$n] == needle {
+ return Some(((&start[$n] as *const u16).addr() - ptr.addr()) / 2);
+ }
+ )+
+ }
+ }
+
+ if_return!(0, 1, 2, 3, 4, 5, 6, 7,);
+
+ start = &start[8..];
+ }
+
+ for c in start {
+ if *c == needle {
+ return Some(((c as *const u16).addr() - ptr.addr()) / 2);
+ }
+ }
+ None
+}
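+
+// Editor's note: an illustrative sketch, not part of the original change.
+// The returned value is the index of the first matching `u16`; this is how
+// `to_u16s` below rejects interior NULs and how `truncate_utf16_at_nul`
+// trims OS-filled buffers.
+#[cfg(test)]
+#[test]
+fn unrolled_find_u16s_sketch() {
+ let haystack: Vec<u16> = "C:\\temp\0junk".encode_utf16().collect();
+ assert_eq!(unrolled_find_u16s(0, &haystack), Some(7));
+ assert_eq!(unrolled_find_u16s(b'Z' as u16, &haystack), None);
+}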
+
+pub fn to_u16s<S: AsRef<OsStr>>(s: S) -> crate::io::Result<Vec<u16>> {
+ fn inner(s: &OsStr) -> crate::io::Result<Vec<u16>> {
+ // Most paths are ASCII, so reserve capacity equal to the number of bytes
+ // in the OsStr plus one for the null-terminating character. We are not
+ // wasting bytes here as paths created by this function are primarily used
+ // in an ephemeral fashion.
+ let mut maybe_result = Vec::with_capacity(s.len() + 1);
+ maybe_result.extend(s.encode_wide());
+
+ if unrolled_find_u16s(0, &maybe_result).is_some() {
+ return Err(crate::io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "strings passed to WinAPI cannot contain NULs",
+ ));
+ }
+ maybe_result.push(0);
+ Ok(maybe_result)
+ }
+ inner(s.as_ref())
+}
+
+// Many Windows APIs follow a pattern where we hand them a buffer and they
+// report back to us how large the buffer should be or how many bytes
+// currently reside in the buffer. This function is an abstraction over such
+// functions that makes them easier to call.
+//
+// The first callback, `f1`, is yielded a (pointer, len) pair which can be
+// passed to a syscall. The `ptr` is valid for `len` items (u16 in this case).
+// The closure is expected to return what the syscall returns which will be
+// interpreted by this function to determine if the syscall needs to be invoked
+// again (with more buffer space).
+//
+// Once the syscall has completed (errors bail out early) the second closure is
+// yielded the data which has been read from the syscall. The return value
+// from this closure is then the return value of the function.
+fn fill_utf16_buf<F1, F2, T>(mut f1: F1, f2: F2) -> crate::io::Result<T>
+where
+ F1: FnMut(*mut u16, c::DWORD) -> c::DWORD,
+ F2: FnOnce(&[u16]) -> T,
+{
+ // Start off with a stack buf but then spill over to the heap if we end up
+ // needing more space.
+ //
+ // This initial size also works around `GetFullPathNameW` returning
+ // incorrect size hints for some short paths:
+ // https://github.com/dylni/normpath/issues/5
+ let mut stack_buf = [0u16; 512];
+ let mut heap_buf = Vec::new();
+ unsafe {
+ let mut n = stack_buf.len();
+ loop {
+ let buf = if n <= stack_buf.len() {
+ &mut stack_buf[..]
+ } else {
+ let extra = n - heap_buf.len();
+ heap_buf.reserve(extra);
+ heap_buf.set_len(n);
+ &mut heap_buf[..]
+ };
+
+ // This function is typically used with Windows API functions which
+ // return the correct length of the string, but which also return
+ // `0` on error. In some cases, however, the
+ // returned "correct length" may actually be 0!
+ //
+ // To handle this case we call `SetLastError` to reset it to 0 and
+ // then check it again if we get the "0 error value". If the "last
+ // error" is still 0 then we interpret it as a 0 length buffer and
+ // not an actual error.
+ c::SetLastError(0);
+ let k = match f1(buf.as_mut_ptr(), n as c::DWORD) {
+ 0 if c::GetLastError() == 0 => 0,
+ 0 => return Err(crate::io::Error::last_os_error()),
+ n => n,
+ } as usize;
+ if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER {
+ n *= 2;
+ } else if k > n {
+ n = k;
+ } else if k == n {
+ // It is impossible to reach this point.
+ // On success, k is the returned string length excluding the null.
+ // On failure, k is the required buffer length including the null.
+ // Therefore k never equals n.
+ unreachable!();
+ } else {
+ return Ok(f2(&buf[..k]));
+ }
+ }
+ }
+}
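+
+// Editor's note: an illustrative sketch, not part of the original change.
+// A typical call site hands the raw syscall in as `f1` and a conversion as
+// `f2`; this mirrors how `os::getcwd` uses the helper elsewhere in this
+// change:
+#[allow(dead_code)]
+fn current_dir_sketch() -> crate::io::Result<PathBuf> {
+ fill_utf16_buf(
+ // `f1`: give GetCurrentDirectoryW the (buffer, capacity) pair; it
+ // returns either the length written or the size required.
+ |buf, sz| unsafe { c::GetCurrentDirectoryW(sz, buf) },
+ // `f2`: convert the now-valid UTF-16 slice into the final value.
+ os2path,
+ )
+}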
+
+fn os2path(s: &[u16]) -> PathBuf {
+ PathBuf::from(OsString::from_wide(s))
+}
+
+pub fn truncate_utf16_at_nul(v: &[u16]) -> &[u16] {
+ match unrolled_find_u16s(0, v) {
+ // don't include the 0
+ Some(i) => &v[..i],
+ None => v,
+ }
+}
+
+pub trait IsZero {
+ fn is_zero(&self) -> bool;
+}
+
+macro_rules! impl_is_zero {
+ ($($t:ident)*) => ($(impl IsZero for $t {
+ fn is_zero(&self) -> bool {
+ *self == 0
+ }
+ })*)
+}
+
+impl_is_zero! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
+
+pub fn cvt<I: IsZero>(i: I) -> crate::io::Result<I> {
+ if i.is_zero() { Err(crate::io::Error::last_os_error()) } else { Ok(i) }
+}
+
+pub fn dur2timeout(dur: Duration) -> c::DWORD {
+ // Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the
+ // timeouts in windows APIs are typically u32 milliseconds. To translate, we
+ // have two pieces to take care of:
+ //
+ // * Nanosecond precision is rounded up
+ // * Greater than u32::MAX milliseconds (50 days) is rounded up to INFINITE
+ // (never time out).
+ dur.as_secs()
+ .checked_mul(1000)
+ .and_then(|ms| ms.checked_add((dur.subsec_nanos() as u64) / 1_000_000))
+ .and_then(|ms| ms.checked_add(if dur.subsec_nanos() % 1_000_000 > 0 { 1 } else { 0 }))
+ .map(|ms| if ms > <c::DWORD>::MAX as u64 { c::INFINITE } else { ms as c::DWORD })
+ .unwrap_or(c::INFINITE)
+}
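+
+// Editor's note: an illustrative sketch, not part of the original change.
+// The rounding rules above in concrete numbers: sub-millisecond remainders
+// round up, and anything that overflows a u32 millisecond count saturates
+// to INFINITE.
+#[cfg(test)]
+#[test]
+fn dur2timeout_sketch() {
+ assert_eq!(dur2timeout(Duration::new(0, 1_500_000)), 2); // 1.5ms -> 2ms
+ assert_eq!(dur2timeout(Duration::from_secs(1)), 1_000);
+ assert_eq!(dur2timeout(Duration::from_secs(u64::MAX)), c::INFINITE);
+}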
+
+/// Use `__fastfail` to abort the process
+///
+/// This is the same implementation as in libpanic_abort's `__rust_start_panic`. See
+/// that function for more information on `__fastfail`
+#[allow(unreachable_code)]
+pub fn abort_internal() -> ! {
+ #[allow(unused)]
+ const FAST_FAIL_FATAL_APP_EXIT: usize = 7;
+ #[cfg(not(miri))] // inline assembly does not work in Miri
+ unsafe {
+ cfg_if::cfg_if! {
+ if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
+ core::arch::asm!("int $$0x29", in("ecx") FAST_FAIL_FATAL_APP_EXIT);
+ crate::intrinsics::unreachable();
+ } else if #[cfg(all(target_arch = "arm", target_feature = "thumb-mode"))] {
+ core::arch::asm!(".inst 0xDEFB", in("r0") FAST_FAIL_FATAL_APP_EXIT);
+ crate::intrinsics::unreachable();
+ } else if #[cfg(target_arch = "aarch64")] {
+ core::arch::asm!("brk 0xF003", in("x0") FAST_FAIL_FATAL_APP_EXIT);
+ crate::intrinsics::unreachable();
+ }
+ }
+ }
+ crate::intrinsics::abort();
+}
diff --git a/library/std/src/sys/windows/net.rs b/library/std/src/sys/windows/net.rs
new file mode 100644
index 000000000..e0701a498
--- /dev/null
+++ b/library/std/src/sys/windows/net.rs
@@ -0,0 +1,476 @@
+#![unstable(issue = "none", feature = "windows_net")]
+
+use crate::cmp;
+use crate::io::{self, IoSlice, IoSliceMut, Read};
+use crate::mem;
+use crate::net::{Shutdown, SocketAddr};
+use crate::os::windows::io::{
+ AsRawSocket, AsSocket, BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket,
+};
+use crate::ptr;
+use crate::sync::OnceLock;
+use crate::sys;
+use crate::sys::c;
+use crate::sys_common::net;
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+use crate::time::Duration;
+
+use libc::{c_int, c_long, c_ulong, c_ushort};
+
+pub type wrlen_t = i32;
+
+pub mod netc {
+ pub use crate::sys::c::ADDRESS_FAMILY as sa_family_t;
+ pub use crate::sys::c::ADDRINFOA as addrinfo;
+ pub use crate::sys::c::SOCKADDR as sockaddr;
+ pub use crate::sys::c::SOCKADDR_STORAGE_LH as sockaddr_storage;
+ pub use crate::sys::c::*;
+}
+
+pub struct Socket(OwnedSocket);
+
+static WSA_CLEANUP: OnceLock<unsafe extern "system" fn() -> i32> = OnceLock::new();
+
+/// Checks whether the Windows socket interface has been started already, and
+/// if not, starts it.
+pub fn init() {
+ let _ = WSA_CLEANUP.get_or_init(|| unsafe {
+ let mut data: c::WSADATA = mem::zeroed();
+ let ret = c::WSAStartup(
+ 0x202, // version 2.2
+ &mut data,
+ );
+ assert_eq!(ret, 0);
+
+ // Only register `WSACleanup` if `WSAStartup` is actually ever called.
+ // Workaround to prevent linking to `WS2_32.dll` when no network functionality is used.
+ // See issue #85441.
+ c::WSACleanup
+ });
+}
+
+pub fn cleanup() {
+ // only perform cleanup if network functionality was actually initialized
+ if let Some(cleanup) = WSA_CLEANUP.get() {
+ unsafe {
+ cleanup();
+ }
+ }
+}
+
+/// Returns the last error from the Windows socket interface.
+fn last_error() -> io::Error {
+ io::Error::from_raw_os_error(unsafe { c::WSAGetLastError() })
+}
+
+#[doc(hidden)]
+pub trait IsMinusOne {
+ fn is_minus_one(&self) -> bool;
+}
+
+macro_rules! impl_is_minus_one {
+ ($($t:ident)*) => ($(impl IsMinusOne for $t {
+ fn is_minus_one(&self) -> bool {
+ *self == -1
+ }
+ })*)
+}
+
+impl_is_minus_one! { i8 i16 i32 i64 isize }
+
+/// Checks if the signed integer is the Windows constant `SOCKET_ERROR` (-1)
+/// and if so, returns the last error from the Windows socket interface. This
+/// function must be called before another call to the socket API is made.
+pub fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> {
+ if t.is_minus_one() { Err(last_error()) } else { Ok(t) }
+}
+
+/// A variant of `cvt` for `getaddrinfo` which returns 0 on success.
+pub fn cvt_gai(err: c_int) -> io::Result<()> {
+ if err == 0 { Ok(()) } else { Err(last_error()) }
+}
+
+/// Just to provide the same interface as sys/unix/net.rs
+pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
+where
+ T: IsMinusOne,
+ F: FnMut() -> T,
+{
+ cvt(f())
+}
+
+impl Socket {
+ pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
+ let family = match *addr {
+ SocketAddr::V4(..) => c::AF_INET,
+ SocketAddr::V6(..) => c::AF_INET6,
+ };
+ let socket = unsafe {
+ c::WSASocketW(
+ family,
+ ty,
+ 0,
+ ptr::null_mut(),
+ 0,
+ c::WSA_FLAG_OVERLAPPED | c::WSA_FLAG_NO_HANDLE_INHERIT,
+ )
+ };
+
+ if socket != c::INVALID_SOCKET {
+ unsafe { Ok(Self::from_raw_socket(socket)) }
+ } else {
+ let error = unsafe { c::WSAGetLastError() };
+
+ if error != c::WSAEPROTOTYPE && error != c::WSAEINVAL {
+ return Err(io::Error::from_raw_os_error(error));
+ }
+
+ let socket =
+ unsafe { c::WSASocketW(family, ty, 0, ptr::null_mut(), 0, c::WSA_FLAG_OVERLAPPED) };
+
+ if socket == c::INVALID_SOCKET {
+ return Err(last_error());
+ }
+
+ unsafe {
+ let socket = Self::from_raw_socket(socket);
+ socket.0.set_no_inherit()?;
+ Ok(socket)
+ }
+ }
+ }
+
+ pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
+ self.set_nonblocking(true)?;
+ let result = {
+ let (addr, len) = addr.into_inner();
+ let result = unsafe { c::connect(self.as_raw_socket(), addr.as_ptr(), len) };
+ cvt(result).map(drop)
+ };
+ self.set_nonblocking(false)?;
+
+ match result {
+ Err(ref error) if error.kind() == io::ErrorKind::WouldBlock => {
+ if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+
+ let mut timeout = c::timeval {
+ tv_sec: timeout.as_secs() as c_long,
+ tv_usec: (timeout.subsec_nanos() / 1000) as c_long,
+ };
+
+ if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
+ timeout.tv_usec = 1;
+ }
+
+ let fds = {
+ let mut fds = unsafe { mem::zeroed::<c::fd_set>() };
+ fds.fd_count = 1;
+ fds.fd_array[0] = self.as_raw_socket();
+ fds
+ };
+
+ let mut writefds = fds;
+ let mut errorfds = fds;
+
+ let count = {
+ let result = unsafe {
+ c::select(1, ptr::null_mut(), &mut writefds, &mut errorfds, &timeout)
+ };
+ cvt(result)?
+ };
+
+ match count {
+ 0 => Err(io::const_io_error!(io::ErrorKind::TimedOut, "connection timed out")),
+ _ => {
+ if writefds.fd_count != 1 {
+ if let Some(e) = self.take_error()? {
+ return Err(e);
+ }
+ }
+
+ Ok(())
+ }
+ }
+ }
+ _ => result,
+ }
+ }
+
+ pub fn accept(&self, storage: *mut c::SOCKADDR, len: *mut c_int) -> io::Result<Socket> {
+ let socket = unsafe { c::accept(self.as_raw_socket(), storage, len) };
+
+ match socket {
+ c::INVALID_SOCKET => Err(last_error()),
+ _ => unsafe { Ok(Self::from_raw_socket(socket)) },
+ }
+ }
+
+ pub fn duplicate(&self) -> io::Result<Socket> {
+ Ok(Self(self.0.try_clone()?))
+ }
+
+ fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
+ // On unix when a socket is shut down all further reads return 0, so we
+ // do the same on windows to map a shut down socket to returning EOF.
+ let length = cmp::min(buf.len(), i32::MAX as usize) as i32;
+ let result =
+ unsafe { c::recv(self.as_raw_socket(), buf.as_mut_ptr() as *mut _, length, flags) };
+
+ match result {
+ c::SOCKET_ERROR => {
+ let error = unsafe { c::WSAGetLastError() };
+
+ if error == c::WSAESHUTDOWN {
+ Ok(0)
+ } else {
+ Err(io::Error::from_raw_os_error(error))
+ }
+ }
+ _ => Ok(result as usize),
+ }
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, 0)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ // On unix when a socket is shut down all further reads return 0, so we
+ // do the same on windows to map a shut down socket to returning EOF.
+ let length = cmp::min(bufs.len(), c::DWORD::MAX as usize) as c::DWORD;
+ let mut nread = 0;
+ let mut flags = 0;
+ let result = unsafe {
+ c::WSARecv(
+ self.as_raw_socket(),
+ bufs.as_mut_ptr() as *mut c::WSABUF,
+ length,
+ &mut nread,
+ &mut flags,
+ ptr::null_mut(),
+ ptr::null_mut(),
+ )
+ };
+
+ match result {
+ 0 => Ok(nread as usize),
+ _ => {
+ let error = unsafe { c::WSAGetLastError() };
+
+ if error == c::WSAESHUTDOWN {
+ Ok(0)
+ } else {
+ Err(io::Error::from_raw_os_error(error))
+ }
+ }
+ }
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, c::MSG_PEEK)
+ }
+
+ fn recv_from_with_flags(
+ &self,
+ buf: &mut [u8],
+ flags: c_int,
+ ) -> io::Result<(usize, SocketAddr)> {
+ let mut storage = unsafe { mem::zeroed::<c::SOCKADDR_STORAGE_LH>() };
+ let mut addrlen = mem::size_of_val(&storage) as c::socklen_t;
+ let length = cmp::min(buf.len(), <wrlen_t>::MAX as usize) as wrlen_t;
+
+ // On unix when a socket is shut down all further reads return 0, so we
+ // do the same on windows to map a shut down socket to returning EOF.
+ let result = unsafe {
+ c::recvfrom(
+ self.as_raw_socket(),
+ buf.as_mut_ptr() as *mut _,
+ length,
+ flags,
+ &mut storage as *mut _ as *mut _,
+ &mut addrlen,
+ )
+ };
+
+ match result {
+ c::SOCKET_ERROR => {
+ let error = unsafe { c::WSAGetLastError() };
+
+ if error == c::WSAESHUTDOWN {
+ Ok((0, net::sockaddr_to_addr(&storage, addrlen as usize)?))
+ } else {
+ Err(io::Error::from_raw_os_error(error))
+ }
+ }
+ _ => Ok((result as usize, net::sockaddr_to_addr(&storage, addrlen as usize)?)),
+ }
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, 0)
+ }
+
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, c::MSG_PEEK)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let length = cmp::min(bufs.len(), c::DWORD::MAX as usize) as c::DWORD;
+ let mut nwritten = 0;
+ let result = unsafe {
+ c::WSASend(
+ self.as_raw_socket(),
+ bufs.as_ptr() as *const c::WSABUF as *mut _,
+ length,
+ &mut nwritten,
+ 0,
+ ptr::null_mut(),
+ ptr::null_mut(),
+ )
+ };
+ cvt(result).map(|_| nwritten as usize)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ pub fn set_timeout(&self, dur: Option<Duration>, kind: c_int) -> io::Result<()> {
+ let timeout = match dur {
+ Some(dur) => {
+ let timeout = sys::dur2timeout(dur);
+ if timeout == 0 {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "cannot set a 0 duration timeout",
+ ));
+ }
+ timeout
+ }
+ None => 0,
+ };
+ net::setsockopt(self, c::SOL_SOCKET, kind, timeout)
+ }
+
+ pub fn timeout(&self, kind: c_int) -> io::Result<Option<Duration>> {
+ let raw: c::DWORD = net::getsockopt(self, c::SOL_SOCKET, kind)?;
+ if raw == 0 {
+ Ok(None)
+ } else {
+ let secs = raw / 1000;
+ let nsec = (raw % 1000) * 1000000;
+ Ok(Some(Duration::new(secs as u64, nsec as u32)))
+ }
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let how = match how {
+ Shutdown::Write => c::SD_SEND,
+ Shutdown::Read => c::SD_RECEIVE,
+ Shutdown::Both => c::SD_BOTH,
+ };
+ let result = unsafe { c::shutdown(self.as_raw_socket(), how) };
+ cvt(result).map(drop)
+ }
+
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ let mut nonblocking = nonblocking as c_ulong;
+ let result =
+ unsafe { c::ioctlsocket(self.as_raw_socket(), c::FIONBIO as c_int, &mut nonblocking) };
+ cvt(result).map(drop)
+ }
+
+ pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
+ let linger = c::linger {
+ l_onoff: linger.is_some() as c_ushort,
+ l_linger: linger.unwrap_or_default().as_secs() as c_ushort,
+ };
+
+ net::setsockopt(self, c::SOL_SOCKET, c::SO_LINGER, linger)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ let val: c::linger = net::getsockopt(self, c::SOL_SOCKET, c::SO_LINGER)?;
+
+ Ok((val.l_onoff != 0).then(|| Duration::from_secs(val.l_linger as u64)))
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ net::setsockopt(self, c::IPPROTO_TCP, c::TCP_NODELAY, nodelay as c::BOOL)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ let raw: c::BOOL = net::getsockopt(self, c::IPPROTO_TCP, c::TCP_NODELAY)?;
+ Ok(raw != 0)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ let raw: c_int = net::getsockopt(self, c::SOL_SOCKET, c::SO_ERROR)?;
+ if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
+ }
+
+ // This is used by sys_common code to abstract over Windows and Unix.
+ pub fn as_raw(&self) -> RawSocket {
+ self.as_inner().as_raw_socket()
+ }
+}
+
+#[unstable(reason = "not public", issue = "none", feature = "fd_read")]
+impl<'a> Read for &'a Socket {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (**self).read(buf)
+ }
+}
+
+impl AsInner<OwnedSocket> for Socket {
+ fn as_inner(&self) -> &OwnedSocket {
+ &self.0
+ }
+}
+
+impl FromInner<OwnedSocket> for Socket {
+ fn from_inner(sock: OwnedSocket) -> Socket {
+ Socket(sock)
+ }
+}
+
+impl IntoInner<OwnedSocket> for Socket {
+ fn into_inner(self) -> OwnedSocket {
+ self.0
+ }
+}
+
+impl AsSocket for Socket {
+ fn as_socket(&self) -> BorrowedSocket<'_> {
+ self.0.as_socket()
+ }
+}
+
+impl AsRawSocket for Socket {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.0.as_raw_socket()
+ }
+}
+
+impl IntoRawSocket for Socket {
+ fn into_raw_socket(self) -> RawSocket {
+ self.0.into_raw_socket()
+ }
+}
+
+impl FromRawSocket for Socket {
+ unsafe fn from_raw_socket(raw_socket: RawSocket) -> Self {
+ Self(FromRawSocket::from_raw_socket(raw_socket))
+ }
+}
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
new file mode 100644
index 000000000..bcac996c0
--- /dev/null
+++ b/library/std/src/sys/windows/os.rs
@@ -0,0 +1,328 @@
+//! Implementation of `std::os` functionality for Windows.
+
+#![allow(nonstandard_style)]
+
+#[cfg(test)]
+mod tests;
+
+use crate::os::windows::prelude::*;
+
+use crate::error::Error as StdError;
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io;
+use crate::os::windows::ffi::EncodeWide;
+use crate::path::{self, PathBuf};
+use crate::ptr;
+use crate::slice;
+use crate::sys::{c, cvt};
+
+use super::to_u16s;
+
+pub fn errno() -> i32 {
+ unsafe { c::GetLastError() as i32 }
+}
+
+/// Gets a detailed string description for the given error number.
+pub fn error_string(mut errnum: i32) -> String {
+ // This value is calculated from the macro
+ // MAKELANGID(LANG_SYSTEM_DEFAULT, SUBLANG_SYS_DEFAULT)
+ let langId = 0x0800 as c::DWORD;
+
+ let mut buf = [0 as c::WCHAR; 2048];
+
+ unsafe {
+ let mut module = ptr::null_mut();
+ let mut flags = 0;
+
+ // NTSTATUS errors may be encoded as HRESULT, which may be returned from
+ // GetLastError. For more information about Windows error codes, see
+ // `[MS-ERREF]`: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/0642cb2f-2075-4469-918c-4441e69c548a
+ if (errnum & c::FACILITY_NT_BIT as i32) != 0 {
+ // format according to https://support.microsoft.com/en-us/help/259693
+ const NTDLL_DLL: &[u16] = &[
+ 'N' as _, 'T' as _, 'D' as _, 'L' as _, 'L' as _, '.' as _, 'D' as _, 'L' as _,
+ 'L' as _, 0,
+ ];
+ module = c::GetModuleHandleW(NTDLL_DLL.as_ptr());
+
+ if !module.is_null() {
+ errnum ^= c::FACILITY_NT_BIT as i32;
+ flags = c::FORMAT_MESSAGE_FROM_HMODULE;
+ }
+ }
+
+ let res = c::FormatMessageW(
+ flags | c::FORMAT_MESSAGE_FROM_SYSTEM | c::FORMAT_MESSAGE_IGNORE_INSERTS,
+ module,
+ errnum as c::DWORD,
+ langId,
+ buf.as_mut_ptr(),
+ buf.len() as c::DWORD,
+ ptr::null(),
+ ) as usize;
+ if res == 0 {
+ // Sometimes FormatMessageW can fail, e.g. if the system doesn't like the langId.
+ let fm_err = errno();
+ return format!("OS Error {errnum} (FormatMessageW() returned error {fm_err})");
+ }
+
+ match String::from_utf16(&buf[..res]) {
+ Ok(mut msg) => {
+ // Trim trailing CRLF inserted by FormatMessageW
+ let len = msg.trim_end().len();
+ msg.truncate(len);
+ msg
+ }
+ Err(..) => format!(
+ "OS Error {} (FormatMessageW() returned \
+ invalid UTF-16)",
+ errnum
+ ),
+ }
+ }
+}
+
+pub struct Env {
+ base: c::LPWCH,
+ cur: c::LPWCH,
+}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ loop {
+ unsafe {
+ if *self.cur == 0 {
+ return None;
+ }
+ let p = self.cur as *const u16;
+ let mut len = 0;
+ while *p.offset(len) != 0 {
+ len += 1;
+ }
+ let s = slice::from_raw_parts(p, len as usize);
+ self.cur = self.cur.offset(len + 1);
+
+ // Windows allows environment variables to start with an equals
+ // symbol (in any other position, this is the separator between
+ // variable name and value). Since `s` has at least length 1 at
+ // this point (because the empty string terminates the array of
+ // environment variables), we can safely slice.
+ let pos = match s[1..].iter().position(|&u| u == b'=' as u16).map(|p| p + 1) {
+ Some(p) => p,
+ None => continue,
+ };
+ return Some((
+ OsStringExt::from_wide(&s[..pos]),
+ OsStringExt::from_wide(&s[pos + 1..]),
+ ));
+ }
+ }
+ }
+}
+
+impl Drop for Env {
+ fn drop(&mut self) {
+ unsafe {
+ c::FreeEnvironmentStringsW(self.base);
+ }
+ }
+}
+
+pub fn env() -> Env {
+ unsafe {
+ let ch = c::GetEnvironmentStringsW();
+ if ch.is_null() {
+ panic!("failure getting env string from OS: {}", io::Error::last_os_error());
+ }
+ Env { base: ch, cur: ch }
+ }
+}
+
+pub struct SplitPaths<'a> {
+ data: EncodeWide<'a>,
+ must_yield: bool,
+}
+
+pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> {
+ SplitPaths { data: unparsed.encode_wide(), must_yield: true }
+}
+
+impl<'a> Iterator for SplitPaths<'a> {
+ type Item = PathBuf;
+ fn next(&mut self) -> Option<PathBuf> {
+ // On Windows, the PATH environment variable is semicolon separated.
+ // Double quotes are used as a way of introducing literal semicolons
+ // (since c:\some;dir is a valid Windows path). Double quotes are not
+ // themselves permitted in path names, so there is no way to escape a
+ // double quote. Quoted regions can appear in arbitrary locations, so
+ //
+ // c:\foo;c:\som"e;di"r;c:\bar
+ //
+ // Should parse as [c:\foo, c:\some;dir, c:\bar].
+ //
+ // (The above is based on testing; there is no clear reference available
+ // for the grammar.)
+
+ let must_yield = self.must_yield;
+ self.must_yield = false;
+
+ let mut in_progress = Vec::new();
+ let mut in_quote = false;
+ for b in self.data.by_ref() {
+ if b == '"' as u16 {
+ in_quote = !in_quote;
+ } else if b == ';' as u16 && !in_quote {
+ self.must_yield = true;
+ break;
+ } else {
+ in_progress.push(b)
+ }
+ }
+
+ if !must_yield && in_progress.is_empty() {
+ None
+ } else {
+ Some(super::os2path(&in_progress))
+ }
+ }
+}
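+
+// Editor's note: an illustrative sketch, not part of the original change.
+// The quoting rules described above in action (this parser is what the
+// public `std::env::split_paths` ends up calling):
+#[cfg(test)]
+#[test]
+fn split_paths_sketch() {
+ let parsed: Vec<PathBuf> =
+ split_paths(OsStr::new(r#"c:\foo;c:\som"e;di"r;c:\bar"#)).collect();
+ assert_eq!(parsed.len(), 3);
+ assert_eq!(parsed[0], PathBuf::from(r"c:\foo"));
+ assert_eq!(parsed[1], PathBuf::from(r"c:\some;dir"));
+ assert_eq!(parsed[2], PathBuf::from(r"c:\bar"));
+}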
+
+#[derive(Debug)]
+pub struct JoinPathsError;
+
+pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError>
+where
+ I: Iterator<Item = T>,
+ T: AsRef<OsStr>,
+{
+ let mut joined = Vec::new();
+ let sep = b';' as u16;
+
+ for (i, path) in paths.enumerate() {
+ let path = path.as_ref();
+ if i > 0 {
+ joined.push(sep)
+ }
+ let v = path.encode_wide().collect::<Vec<u16>>();
+ if v.contains(&(b'"' as u16)) {
+ return Err(JoinPathsError);
+ } else if v.contains(&sep) {
+ joined.push(b'"' as u16);
+ joined.extend_from_slice(&v[..]);
+ joined.push(b'"' as u16);
+ } else {
+ joined.extend_from_slice(&v[..]);
+ }
+ }
+
+ Ok(OsStringExt::from_wide(&joined[..]))
+}
+
+impl fmt::Display for JoinPathsError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "path segment contains `\"`".fmt(f)
+ }
+}
+
+impl StdError for JoinPathsError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "failed to join paths"
+ }
+}
+
+pub fn current_exe() -> io::Result<PathBuf> {
+ super::fill_utf16_buf(
+ |buf, sz| unsafe { c::GetModuleFileNameW(ptr::null_mut(), buf, sz) },
+ super::os2path,
+ )
+}
+
+pub fn getcwd() -> io::Result<PathBuf> {
+ super::fill_utf16_buf(|buf, sz| unsafe { c::GetCurrentDirectoryW(sz, buf) }, super::os2path)
+}
+
+pub fn chdir(p: &path::Path) -> io::Result<()> {
+ let p: &OsStr = p.as_ref();
+ let mut p = p.encode_wide().collect::<Vec<_>>();
+ p.push(0);
+
+ cvt(unsafe { c::SetCurrentDirectoryW(p.as_ptr()) }).map(drop)
+}
+
+pub fn getenv(k: &OsStr) -> Option<OsString> {
+ let k = to_u16s(k).ok()?;
+ super::fill_utf16_buf(
+ |buf, sz| unsafe { c::GetEnvironmentVariableW(k.as_ptr(), buf, sz) },
+ |buf| OsStringExt::from_wide(buf),
+ )
+ .ok()
+}
+
+pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
+ let k = to_u16s(k)?;
+ let v = to_u16s(v)?;
+
+ cvt(unsafe { c::SetEnvironmentVariableW(k.as_ptr(), v.as_ptr()) }).map(drop)
+}
+
+pub fn unsetenv(n: &OsStr) -> io::Result<()> {
+ let v = to_u16s(n)?;
+ cvt(unsafe { c::SetEnvironmentVariableW(v.as_ptr(), ptr::null()) }).map(drop)
+}
+
+pub fn temp_dir() -> PathBuf {
+ super::fill_utf16_buf(|buf, sz| unsafe { c::GetTempPath2W(sz, buf) }, super::os2path).unwrap()
+}
+
+#[cfg(not(target_vendor = "uwp"))]
+fn home_dir_crt() -> Option<PathBuf> {
+ unsafe {
+ // The magic constant -4 can be used as the token passed to GetUserProfileDirectoryW below
+ // instead of us having to go through these multiple steps to get a token. However this is
+ // not implemented on Windows 7, only Windows 8 and up. When we drop support for Windows 7
+ // we can simplify this code. See #90144 for details.
+ use crate::sys::handle::Handle;
+
+ let me = c::GetCurrentProcess();
+ let mut token = ptr::null_mut();
+ if c::OpenProcessToken(me, c::TOKEN_READ, &mut token) == 0 {
+ return None;
+ }
+ let _handle = Handle::from_raw_handle(token);
+ super::fill_utf16_buf(
+ |buf, mut sz| {
+ match c::GetUserProfileDirectoryW(token, buf, &mut sz) {
+ 0 if c::GetLastError() != c::ERROR_INSUFFICIENT_BUFFER => 0,
+ 0 => sz,
+ _ => sz - 1, // sz includes the null terminator
+ }
+ },
+ super::os2path,
+ )
+ .ok()
+ }
+}
+
+#[cfg(target_vendor = "uwp")]
+fn home_dir_crt() -> Option<PathBuf> {
+ None
+}
+
+pub fn home_dir() -> Option<PathBuf> {
+ crate::env::var_os("HOME")
+ .or_else(|| crate::env::var_os("USERPROFILE"))
+ .map(PathBuf::from)
+ .or_else(|| home_dir_crt())
+}
+
+pub fn exit(code: i32) -> ! {
+ unsafe { c::ExitProcess(code as c::UINT) }
+}
+
+pub fn getpid() -> u32 {
+ unsafe { c::GetCurrentProcessId() as u32 }
+}
diff --git a/library/std/src/sys/windows/os/tests.rs b/library/std/src/sys/windows/os/tests.rs
new file mode 100644
index 000000000..458d6e11c
--- /dev/null
+++ b/library/std/src/sys/windows/os/tests.rs
@@ -0,0 +1,13 @@
+use crate::io::Error;
+use crate::sys::c;
+
+// tests `error_string` above
+#[test]
+fn ntstatus_error() {
+ const STATUS_UNSUCCESSFUL: u32 = 0xc000_0001;
+ assert!(
+ !Error::from_raw_os_error((STATUS_UNSUCCESSFUL | c::FACILITY_NT_BIT) as _)
+ .to_string()
+ .contains("FormatMessageW() returned error")
+ );
+}
diff --git a/library/std/src/sys/windows/os_str.rs b/library/std/src/sys/windows/os_str.rs
new file mode 100644
index 000000000..11883f150
--- /dev/null
+++ b/library/std/src/sys/windows/os_str.rs
@@ -0,0 +1,226 @@
+/// The underlying OsString/OsStr implementation on Windows is a
+/// wrapper around the "WTF-8" encoding; see the `wtf8` module for more.
+use crate::borrow::Cow;
+use crate::collections::TryReserveError;
+use crate::fmt;
+use crate::mem;
+use crate::rc::Rc;
+use crate::sync::Arc;
+use crate::sys_common::wtf8::{Wtf8, Wtf8Buf};
+use crate::sys_common::{AsInner, FromInner, IntoInner};
+
+#[derive(Clone, Hash)]
+pub struct Buf {
+ pub inner: Wtf8Buf,
+}
+
+impl IntoInner<Wtf8Buf> for Buf {
+ fn into_inner(self) -> Wtf8Buf {
+ self.inner
+ }
+}
+
+impl FromInner<Wtf8Buf> for Buf {
+ fn from_inner(inner: Wtf8Buf) -> Self {
+ Buf { inner }
+ }
+}
+
+impl AsInner<Wtf8> for Buf {
+ fn as_inner(&self) -> &Wtf8 {
+ &self.inner
+ }
+}
+
+impl fmt::Debug for Buf {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.as_slice(), formatter)
+ }
+}
+
+impl fmt::Display for Buf {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.as_slice(), formatter)
+ }
+}
+
+#[repr(transparent)]
+pub struct Slice {
+ pub inner: Wtf8,
+}
+
+impl fmt::Debug for Slice {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, formatter)
+ }
+}
+
+impl fmt::Display for Slice {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.inner, formatter)
+ }
+}
+
+impl Buf {
+ pub fn with_capacity(capacity: usize) -> Buf {
+ Buf { inner: Wtf8Buf::with_capacity(capacity) }
+ }
+
+ pub fn clear(&mut self) {
+ self.inner.clear()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.inner.capacity()
+ }
+
+ pub fn from_string(s: String) -> Buf {
+ Buf { inner: Wtf8Buf::from_string(s) }
+ }
+
+ pub fn as_slice(&self) -> &Slice {
+ // SAFETY: Slice is just a wrapper for Wtf8,
+ // and self.inner.as_slice() returns &Wtf8.
+ // Therefore, transmuting &Wtf8 to &Slice is safe.
+ unsafe { mem::transmute(self.inner.as_slice()) }
+ }
+
+ pub fn as_mut_slice(&mut self) -> &mut Slice {
+ // SAFETY: Slice is just a wrapper for Wtf8,
+ // and self.inner.as_mut_slice() returns &mut Wtf8.
+ // Therefore, transmuting &mut Wtf8 to &mut Slice is safe.
+ // Additionally, care should be taken to ensure the slice
+ // is always valid Wtf8.
+ unsafe { mem::transmute(self.inner.as_mut_slice()) }
+ }
+
+ pub fn into_string(self) -> Result<String, Buf> {
+ self.inner.into_string().map_err(|buf| Buf { inner: buf })
+ }
+
+ pub fn push_slice(&mut self, s: &Slice) {
+ self.inner.push_wtf8(&s.inner)
+ }
+
+ pub fn reserve(&mut self, additional: usize) {
+ self.inner.reserve(additional)
+ }
+
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve(additional)
+ }
+
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.inner.reserve_exact(additional)
+ }
+
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.inner.try_reserve_exact(additional)
+ }
+
+ pub fn shrink_to_fit(&mut self) {
+ self.inner.shrink_to_fit()
+ }
+
+ #[inline]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.inner.shrink_to(min_capacity)
+ }
+
+ #[inline]
+ pub fn into_box(self) -> Box<Slice> {
+ unsafe { mem::transmute(self.inner.into_box()) }
+ }
+
+ #[inline]
+ pub fn from_box(boxed: Box<Slice>) -> Buf {
+ let inner: Box<Wtf8> = unsafe { mem::transmute(boxed) };
+ Buf { inner: Wtf8Buf::from_box(inner) }
+ }
+
+ #[inline]
+ pub fn into_arc(&self) -> Arc<Slice> {
+ self.as_slice().into_arc()
+ }
+
+ #[inline]
+ pub fn into_rc(&self) -> Rc<Slice> {
+ self.as_slice().into_rc()
+ }
+}
+
+impl Slice {
+ #[inline]
+ pub fn from_str(s: &str) -> &Slice {
+ unsafe { mem::transmute(Wtf8::from_str(s)) }
+ }
+
+ pub fn to_str(&self) -> Option<&str> {
+ self.inner.as_str()
+ }
+
+ pub fn to_string_lossy(&self) -> Cow<'_, str> {
+ self.inner.to_string_lossy()
+ }
+
+ pub fn to_owned(&self) -> Buf {
+ let mut buf = Wtf8Buf::with_capacity(self.inner.len());
+ buf.push_wtf8(&self.inner);
+ Buf { inner: buf }
+ }
+
+ pub fn clone_into(&self, buf: &mut Buf) {
+ self.inner.clone_into(&mut buf.inner)
+ }
+
+ #[inline]
+ pub fn into_box(&self) -> Box<Slice> {
+ unsafe { mem::transmute(self.inner.into_box()) }
+ }
+
+ pub fn empty_box() -> Box<Slice> {
+ unsafe { mem::transmute(Wtf8::empty_box()) }
+ }
+
+ #[inline]
+ pub fn into_arc(&self) -> Arc<Slice> {
+ let arc = self.inner.into_arc();
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const Slice) }
+ }
+
+ #[inline]
+ pub fn into_rc(&self) -> Rc<Slice> {
+ let rc = self.inner.into_rc();
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const Slice) }
+ }
+
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ self.inner.make_ascii_lowercase()
+ }
+
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ self.inner.make_ascii_uppercase()
+ }
+
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> Buf {
+ Buf { inner: self.inner.to_ascii_lowercase() }
+ }
+
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> Buf {
+ Buf { inner: self.inner.to_ascii_uppercase() }
+ }
+
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+ self.inner.is_ascii()
+ }
+
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
+ self.inner.eq_ignore_ascii_case(&other.inner)
+ }
+}
diff --git a/library/std/src/sys/windows/path.rs b/library/std/src/sys/windows/path.rs
new file mode 100644
index 000000000..beeca1917
--- /dev/null
+++ b/library/std/src/sys/windows/path.rs
@@ -0,0 +1,333 @@
+use super::{c, fill_utf16_buf, to_u16s};
+use crate::ffi::{OsStr, OsString};
+use crate::io;
+use crate::mem;
+use crate::path::{Path, PathBuf, Prefix};
+use crate::ptr;
+
+#[cfg(test)]
+mod tests;
+
+pub const MAIN_SEP_STR: &str = "\\";
+pub const MAIN_SEP: char = '\\';
+
+/// # Safety
+///
+/// `bytes` must be a valid wtf8 encoded slice
+#[inline]
+unsafe fn bytes_as_os_str(bytes: &[u8]) -> &OsStr {
+ // &OsStr is layout compatible with &Slice, which is compatible with &Wtf8,
+ // which is compatible with &[u8].
+ mem::transmute(bytes)
+}
+
+#[inline]
+pub fn is_sep_byte(b: u8) -> bool {
+ b == b'/' || b == b'\\'
+}
+
+#[inline]
+pub fn is_verbatim_sep(b: u8) -> bool {
+ b == b'\\'
+}
+
+/// Returns true if `path` looks like a lone filename.
+pub(crate) fn is_file_name(path: &OsStr) -> bool {
+ !path.bytes().iter().copied().any(is_sep_byte)
+}
+pub(crate) fn has_trailing_slash(path: &OsStr) -> bool {
+ let is_verbatim = path.bytes().starts_with(br"\\?\");
+ let is_separator = if is_verbatim { is_verbatim_sep } else { is_sep_byte };
+ if let Some(&c) = path.bytes().last() { is_separator(c) } else { false }
+}
+
+/// Appends a suffix to a path.
+///
+/// Can be used to append an extension without removing an existing extension.
+pub(crate) fn append_suffix(path: PathBuf, suffix: &OsStr) -> PathBuf {
+ let mut path = OsString::from(path);
+ path.push(suffix);
+ path.into()
+}
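+
+// Editor's note: an illustrative sketch, not part of the original change.
+// Unlike `Path::with_extension`, this keeps whatever extension the path
+// already has:
+#[cfg(test)]
+#[test]
+fn append_suffix_sketch() {
+ let tmp = append_suffix(PathBuf::from(r"C:\log.txt"), OsStr::new(".tmp"));
+ assert_eq!(tmp, PathBuf::from(r"C:\log.txt.tmp"));
+}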
+
+struct PrefixParser<'a, const LEN: usize> {
+ path: &'a OsStr,
+ prefix: [u8; LEN],
+}
+
+impl<'a, const LEN: usize> PrefixParser<'a, LEN> {
+ #[inline]
+ fn get_prefix(path: &OsStr) -> [u8; LEN] {
+ let mut prefix = [0; LEN];
+ // SAFETY: Only ASCII characters are modified.
+ for (i, &ch) in path.bytes().iter().take(LEN).enumerate() {
+ prefix[i] = if ch == b'/' { b'\\' } else { ch };
+ }
+ prefix
+ }
+
+ fn new(path: &'a OsStr) -> Self {
+ Self { path, prefix: Self::get_prefix(path) }
+ }
+
+ fn as_slice(&self) -> PrefixParserSlice<'a, '_> {
+ PrefixParserSlice {
+ path: self.path,
+ prefix: &self.prefix[..LEN.min(self.path.len())],
+ index: 0,
+ }
+ }
+}
+
+struct PrefixParserSlice<'a, 'b> {
+ path: &'a OsStr,
+ prefix: &'b [u8],
+ index: usize,
+}
+
+impl<'a> PrefixParserSlice<'a, '_> {
+ fn strip_prefix(&self, prefix: &str) -> Option<Self> {
+ self.prefix[self.index..]
+ .starts_with(prefix.as_bytes())
+ .then(|| Self { index: self.index + prefix.len(), ..*self })
+ }
+
+ fn prefix_bytes(&self) -> &'a [u8] {
+ &self.path.bytes()[..self.index]
+ }
+
+ fn finish(self) -> &'a OsStr {
+ // SAFETY: The unsafety here stems from converting between &OsStr and
+ // &[u8] and back. This is safe to do because (1) we only look at ASCII
+ // contents of the encoding and (2) new &OsStr values are produced only
+ // from ASCII-bounded slices of existing &OsStr values.
+ unsafe { bytes_as_os_str(&self.path.bytes()[self.index..]) }
+ }
+}
+
+pub fn parse_prefix(path: &OsStr) -> Option<Prefix<'_>> {
+ use Prefix::{DeviceNS, Disk, Verbatim, VerbatimDisk, VerbatimUNC, UNC};
+
+ let parser = PrefixParser::<8>::new(path);
+ let parser = parser.as_slice();
+ if let Some(parser) = parser.strip_prefix(r"\\") {
+ // \\
+
+ // The meaning of verbatim paths can change when they use a different
+ // separator.
+ if let Some(parser) = parser.strip_prefix(r"?\") && !parser.prefix_bytes().iter().any(|&x| x == b'/') {
+ // \\?\
+ if let Some(parser) = parser.strip_prefix(r"UNC\") {
+ // \\?\UNC\server\share
+
+ let path = parser.finish();
+ let (server, path) = parse_next_component(path, true);
+ let (share, _) = parse_next_component(path, true);
+
+ Some(VerbatimUNC(server, share))
+ } else {
+ let path = parser.finish();
+
+ // in verbatim paths only recognize an exact drive prefix
+ if let Some(drive) = parse_drive_exact(path) {
+ // \\?\C:
+ Some(VerbatimDisk(drive))
+ } else {
+ // \\?\prefix
+ let (prefix, _) = parse_next_component(path, true);
+ Some(Verbatim(prefix))
+ }
+ }
+ } else if let Some(parser) = parser.strip_prefix(r".\") {
+ // \\.\COM42
+ let path = parser.finish();
+ let (prefix, _) = parse_next_component(path, false);
+ Some(DeviceNS(prefix))
+ } else {
+ let path = parser.finish();
+ let (server, path) = parse_next_component(path, false);
+ let (share, _) = parse_next_component(path, false);
+
+ if !server.is_empty() && !share.is_empty() {
+ // \\server\share
+ Some(UNC(server, share))
+ } else {
+ // no valid prefix beginning with "\\" recognized
+ None
+ }
+ }
+ } else if let Some(drive) = parse_drive(path) {
+ // C:
+ Some(Disk(drive))
+ } else {
+ // no prefix
+ None
+ }
+}
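+
+// Editor's note: an illustrative sketch, not part of the original change.
+// A few concrete inputs and the prefixes the parser above recognizes:
+#[cfg(test)]
+#[test]
+fn parse_prefix_sketch() {
+ use Prefix::*;
+ assert_eq!(parse_prefix(OsStr::new(r"C:\Windows")), Some(Disk(b'C')));
+ assert_eq!(
+ parse_prefix(OsStr::new(r"\\server\share\x")),
+ Some(UNC(OsStr::new("server"), OsStr::new("share")))
+ );
+ assert_eq!(parse_prefix(OsStr::new(r"\\?\C:\x")), Some(VerbatimDisk(b'C')));
+ assert_eq!(parse_prefix(OsStr::new(r"relative\path")), None);
+}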
+
+// Parses a drive prefix, e.g. "C:" and "C:\whatever"
+fn parse_drive(path: &OsStr) -> Option<u8> {
+ // In most DOS systems, it is not possible to have more than 26 drive letters.
+ // See <https://en.wikipedia.org/wiki/Drive_letter_assignment#Common_assignments>.
+ fn is_valid_drive_letter(drive: &u8) -> bool {
+ drive.is_ascii_alphabetic()
+ }
+
+ match path.bytes() {
+ [drive, b':', ..] if is_valid_drive_letter(drive) => Some(drive.to_ascii_uppercase()),
+ _ => None,
+ }
+}
+
+// Parses a drive prefix exactly, e.g. "C:"
+fn parse_drive_exact(path: &OsStr) -> Option<u8> {
+ // only parse two bytes: the drive letter and the drive separator
+ if path.bytes().get(2).map(|&x| is_sep_byte(x)).unwrap_or(true) {
+ parse_drive(path)
+ } else {
+ None
+ }
+}
+
+// Parse the next path component.
+//
+// Returns the next component and the rest of the path excluding the component and separator.
+// Does not recognize `/` as a separator character if `verbatim` is true.
+fn parse_next_component(path: &OsStr, verbatim: bool) -> (&OsStr, &OsStr) {
+ let separator = if verbatim { is_verbatim_sep } else { is_sep_byte };
+
+ match path.bytes().iter().position(|&x| separator(x)) {
+ Some(separator_start) => {
+ let separator_end = separator_start + 1;
+
+ let component = &path.bytes()[..separator_start];
+
+ // Panic safe
+ // The max `separator_end` is `bytes.len()` and `bytes[bytes.len()..]` is a valid index.
+ let path = &path.bytes()[separator_end..];
+
+ // SAFETY: `path` is a valid wtf8 encoded slice and each of the separators ('/', '\')
+ // is encoded in a single byte, therefore `bytes[separator_start]` and
+ // `bytes[separator_end]` must be code point boundaries and thus
+ // `bytes[..separator_start]` and `bytes[separator_end..]` are valid wtf8 slices.
+ unsafe { (bytes_as_os_str(component), bytes_as_os_str(path)) }
+ }
+ None => (path, OsStr::new("")),
+ }
+}
+
+/// Returns a UTF-16 encoded path capable of bypassing the legacy `MAX_PATH` limits.
+///
+/// This path may or may not have a verbatim prefix.
+pub(crate) fn maybe_verbatim(path: &Path) -> io::Result<Vec<u16>> {
+ // Normally the MAX_PATH is 260 UTF-16 code units (including the NULL).
+ // However, for APIs such as CreateDirectory[1], the limit is 248.
+ //
+ // [1]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createdirectorya#parameters
+ const LEGACY_MAX_PATH: usize = 248;
+ // UTF-16 encoded code points, used in parsing and building UTF-16 paths.
+ // All of these are in the ASCII range so they can be cast directly to `u16`.
+ const SEP: u16 = b'\\' as _;
+ const ALT_SEP: u16 = b'/' as _;
+ const QUERY: u16 = b'?' as _;
+ const COLON: u16 = b':' as _;
+ const DOT: u16 = b'.' as _;
+ const U: u16 = b'U' as _;
+ const N: u16 = b'N' as _;
+ const C: u16 = b'C' as _;
+
+ // \\?\
+ const VERBATIM_PREFIX: &[u16] = &[SEP, SEP, QUERY, SEP];
+ // \??\
+ const NT_PREFIX: &[u16] = &[SEP, QUERY, QUERY, SEP];
+ // \\?\UNC\
+ const UNC_PREFIX: &[u16] = &[SEP, SEP, QUERY, SEP, U, N, C, SEP];
+
+ let mut path = to_u16s(path)?;
+ if path.starts_with(VERBATIM_PREFIX) || path.starts_with(NT_PREFIX) || path == &[0] {
+ // Early return for paths that are already verbatim or empty.
+ return Ok(path);
+ } else if path.len() < LEGACY_MAX_PATH {
+ // Early return if an absolute path is shorter than `LEGACY_MAX_PATH` UTF-16 code units.
+ // This is an optimization to avoid calling `GetFullPathNameW` unnecessarily.
+ match path.as_slice() {
+ // Starts with `D:`, `D:\`, `D:/`, etc.
+ // Does not match if the path starts with a `\` or `/`.
+ [drive, COLON, 0] | [drive, COLON, SEP | ALT_SEP, ..]
+ if *drive != SEP && *drive != ALT_SEP =>
+ {
+ return Ok(path);
+ }
+ // Starts with `\\`, `//`, etc
+ [SEP | ALT_SEP, SEP | ALT_SEP, ..] => return Ok(path),
+ _ => {}
+ }
+ }
+
+ // Firstly, get the absolute path using `GetFullPathNameW`.
+ // https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfullpathnamew
+ let lpfilename = path.as_ptr();
+ fill_utf16_buf(
+ // SAFETY: `fill_utf16_buf` ensures the `buffer` and `size` are valid.
+ // `lpfilename` is a pointer to a null terminated string that is not
+ // invalidated until after `GetFullPathNameW` returns successfully.
+ |buffer, size| unsafe { c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut()) },
+ |mut absolute| {
+ path.clear();
+
+ // Secondly, add the verbatim prefix. This is easier here because we know the
+ // path is now absolute and fully normalized (e.g. `/` has been changed to `\`).
+ let prefix = match absolute {
+ // C:\ => \\?\C:\
+ [_, COLON, SEP, ..] => VERBATIM_PREFIX,
+ // \\.\ => \\?\
+ [SEP, SEP, DOT, SEP, ..] => {
+ absolute = &absolute[4..];
+ VERBATIM_PREFIX
+ }
+ // Leave \\?\ and \??\ as-is.
+ [SEP, SEP, QUERY, SEP, ..] | [SEP, QUERY, QUERY, SEP, ..] => &[],
+ // \\ => \\?\UNC\
+ [SEP, SEP, ..] => {
+ absolute = &absolute[2..];
+ UNC_PREFIX
+ }
+ // Anything else we leave alone.
+ _ => &[],
+ };
+
+ path.reserve_exact(prefix.len() + absolute.len() + 1);
+ path.extend_from_slice(prefix);
+ path.extend_from_slice(absolute);
+ path.push(0);
+ },
+ )?;
+ Ok(path)
+}
+
+/// Make a Windows path absolute.
+pub(crate) fn absolute(path: &Path) -> io::Result<PathBuf> {
+ let path = path.as_os_str();
+ let prefix = parse_prefix(path);
+ // Verbatim paths should not be modified.
+ if prefix.map(|x| x.is_verbatim()).unwrap_or(false) {
+ // NULs in verbatim paths are rejected for consistency.
+ if path.bytes().contains(&0) {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "strings passed to WinAPI cannot contain NULs",
+ ));
+ }
+ return Ok(path.to_owned().into());
+ }
+
+ let path = to_u16s(path)?;
+ let lpfilename = path.as_ptr();
+ fill_utf16_buf(
+ // SAFETY: `fill_utf16_buf` ensures the `buffer` and `size` are valid.
+ // `lpfilename` is a pointer to a null terminated string that is not
+ // invalidated until after `GetFullPathNameW` returns successfully.
+ |buffer, size| unsafe { c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut()) },
+ super::os2path,
+ )
+}
diff --git a/library/std/src/sys/windows/path/tests.rs b/library/std/src/sys/windows/path/tests.rs
new file mode 100644
index 000000000..6eab38cab
--- /dev/null
+++ b/library/std/src/sys/windows/path/tests.rs
@@ -0,0 +1,137 @@
+use super::*;
+
+#[test]
+fn test_parse_next_component() {
+ assert_eq!(
+ parse_next_component(OsStr::new(r"server\share"), true),
+ (OsStr::new(r"server"), OsStr::new(r"share"))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"server/share"), true),
+ (OsStr::new(r"server/share"), OsStr::new(r""))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"server/share"), false),
+ (OsStr::new(r"server"), OsStr::new(r"share"))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"server\"), false),
+ (OsStr::new(r"server"), OsStr::new(r""))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"\server\"), false),
+ (OsStr::new(r""), OsStr::new(r"server\"))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"servershare"), false),
+ (OsStr::new(r"servershare"), OsStr::new(""))
+ );
+}
+
+#[test]
+fn verbatim() {
+ use crate::path::Path;
+ fn check(path: &str, expected: &str) {
+ let verbatim = maybe_verbatim(Path::new(path)).unwrap();
+ let verbatim = String::from_utf16_lossy(verbatim.strip_suffix(&[0]).unwrap());
+ assert_eq!(&verbatim, expected, "{}", path);
+ }
+
+ // Ensure long paths are correctly prefixed.
+ check(
+ r"C:\Program Files\Rust\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\C:\Program Files\Rust\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ check(
+ r"\\server\share\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\UNC\server\share\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ check(
+ r"\\.\PIPE\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\PIPE\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ // `\\?\` prefixed paths are left unchanged...
+ check(
+ r"\\?\verbatim.\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\verbatim.\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ // But `//?/` is not a verbatim prefix so it will be normalized.
+ check(
+ r"//?/E:/verbatim.\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\E:\verbatim\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+
+ // For performance, short absolute paths are left unchanged.
+ check(r"C:\Program Files\Rust", r"C:\Program Files\Rust");
+ check(r"\\server\share", r"\\server\share");
+ check(r"\\.\COM1", r"\\.\COM1");
+
+ // Check that paths of length 247 are converted to verbatim.
+ // This is necessary for `CreateDirectory`.
+ check(
+ r"C:\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ r"\\?\C:\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ );
+
+ // Make sure opening a drive will work.
+ check("Z:", "Z:");
+
+ // A path that contains null is not a valid path.
+ assert!(maybe_verbatim(Path::new("\0")).is_err());
+}
+
+fn parse_prefix(path: &str) -> Option<Prefix<'_>> {
+ super::parse_prefix(OsStr::new(path))
+}
+
+#[test]
+fn test_parse_prefix_verbatim() {
+ let prefix = Some(Prefix::VerbatimDisk(b'C'));
+ assert_eq!(prefix, parse_prefix(r"\\?\C:/windows/system32/notepad.exe"));
+ assert_eq!(prefix, parse_prefix(r"\\?\C:\windows\system32\notepad.exe"));
+}
+
+#[test]
+fn test_parse_prefix_verbatim_device() {
+ let prefix = Some(Prefix::UNC(OsStr::new("?"), OsStr::new("C:")));
+ assert_eq!(prefix, parse_prefix(r"//?/C:/windows/system32/notepad.exe"));
+ assert_eq!(prefix, parse_prefix(r"//?/C:\windows\system32\notepad.exe"));
+ assert_eq!(prefix, parse_prefix(r"/\?\C:\windows\system32\notepad.exe"));
+ assert_eq!(prefix, parse_prefix(r"\\?/C:\windows\system32\notepad.exe"));
+}
+
+// See #93586 for more information.
+#[test]
+fn test_windows_prefix_components() {
+ use crate::path::Path;
+
+ let path = Path::new("C:");
+ let mut components = path.components();
+ let drive = components.next().expect("drive is expected here");
+ assert_eq!(drive.as_os_str(), OsStr::new("C:"));
+ assert_eq!(components.as_path(), Path::new(""));
+}
+
+/// See #101358.
+///
+/// Note that the exact behaviour here may change in the future,
+/// in which case this test will need to be adjusted.
+#[test]
+fn broken_unc_path() {
+ use crate::path::Component;
+
+ let mut components = Path::new(r"\\foo\\bar\\").components();
+ assert_eq!(components.next(), Some(Component::RootDir));
+ assert_eq!(components.next(), Some(Component::Normal("foo".as_ref())));
+ assert_eq!(components.next(), Some(Component::Normal("bar".as_ref())));
+
+ let mut components = Path::new("//foo//bar//").components();
+ assert_eq!(components.next(), Some(Component::RootDir));
+ assert_eq!(components.next(), Some(Component::Normal("foo".as_ref())));
+ assert_eq!(components.next(), Some(Component::Normal("bar".as_ref())));
+}
diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs
new file mode 100644
index 000000000..013c776c4
--- /dev/null
+++ b/library/std/src/sys/windows/pipe.rs
@@ -0,0 +1,538 @@
+use crate::os::windows::prelude::*;
+
+use crate::ffi::OsStr;
+use crate::io::{self, IoSlice, IoSliceMut};
+use crate::mem;
+use crate::path::Path;
+use crate::ptr;
+use crate::slice;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::sys::c;
+use crate::sys::fs::{File, OpenOptions};
+use crate::sys::handle::Handle;
+use crate::sys::hashmap_random_keys;
+use crate::sys_common::IntoInner;
+
+////////////////////////////////////////////////////////////////////////////////
+// Anonymous pipes
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct AnonPipe {
+ inner: Handle,
+}
+
+impl IntoInner<Handle> for AnonPipe {
+ fn into_inner(self) -> Handle {
+ self.inner
+ }
+}
+
+pub struct Pipes {
+ pub ours: AnonPipe,
+ pub theirs: AnonPipe,
+}
+
+/// Although this looks similar to `anon_pipe` in the Unix module, it's
+/// actually subtly different. Here we'll return two pipes in the `Pipes`
+/// return value, but one is intended for "us" whereas the other is intended
+/// for "someone else".
+///
+/// Currently the only use case for this function is pipes for stdio on
+/// processes in the standard library, so "ours" is the one that'll stay in our
+/// process whereas "theirs" will be inherited to a child.
+///
+/// The ours/theirs pipes are *not* specifically readable or writable. Each
+/// one only supports a read or a write, but which is which depends on the
+/// boolean flag given. If `ours_readable` is `true`, then `ours` is readable and
+/// `theirs` is writable. Conversely, if `ours_readable` is `false`, then `ours`
+/// is writable and `theirs` is readable.
+///
+/// Also note that the `ours` pipe is always a handle opened up in overlapped
+/// mode. This means that technically speaking it should only ever be used
+/// with `OVERLAPPED` instances, but also works out ok if it's only ever used
+/// once at a time (which we do indeed guarantee).
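+///
+/// For illustration: a caller that wants to read a child's stdout uses
+/// `anon_pipe(true, true)`, so `ours` is the overlapped read end kept in this
+/// process and `theirs` is the inheritable write end handed to the child.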
+pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Result<Pipes> {
+ // A 64 KiB pipe capacity is the same as a typical Linux default.
+ const PIPE_BUFFER_CAPACITY: u32 = 64 * 1024;
+
+ // Note that we specifically do *not* use `CreatePipe` here because
+ // unfortunately the anonymous pipes returned do not support overlapped
+ // operations. Instead, we create a "hopefully unique" name and create a
+ // named pipe which has overlapped operations enabled.
+ //
+ // Once we do this, we connect to it as usual via `CreateFileW`, and then
+ // we return those reader/writer halves. Note that the `ours` pipe return
+ // value is always the named pipe, whereas `theirs` is just the normal file.
+ // This should hopefully shield us from child processes which assume their
+ // stdout is a named pipe, which would indeed be odd!
+ unsafe {
+ let ours;
+ let mut name;
+ let mut tries = 0;
+ let mut reject_remote_clients_flag = c::PIPE_REJECT_REMOTE_CLIENTS;
+ loop {
+ tries += 1;
+ name = format!(
+ r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}",
+ c::GetCurrentProcessId(),
+ random_number()
+ );
+ let wide_name = OsStr::new(&name).encode_wide().chain(Some(0)).collect::<Vec<_>>();
+ let mut flags = c::FILE_FLAG_FIRST_PIPE_INSTANCE | c::FILE_FLAG_OVERLAPPED;
+ if ours_readable {
+ flags |= c::PIPE_ACCESS_INBOUND;
+ } else {
+ flags |= c::PIPE_ACCESS_OUTBOUND;
+ }
+
+ let handle = c::CreateNamedPipeW(
+ wide_name.as_ptr(),
+ flags,
+ c::PIPE_TYPE_BYTE
+ | c::PIPE_READMODE_BYTE
+ | c::PIPE_WAIT
+ | reject_remote_clients_flag,
+ 1,
+ PIPE_BUFFER_CAPACITY,
+ PIPE_BUFFER_CAPACITY,
+ 0,
+ ptr::null_mut(),
+ );
+
+ // We pass the `FILE_FLAG_FIRST_PIPE_INSTANCE` flag above, and we're
+ // also just doing a best effort at selecting a unique name. If
+ // `ERROR_ACCESS_DENIED` is returned then it could mean that we
+ // accidentally conflicted with an already existing pipe, so we try
+ // again.
+ //
+ // Don't try again too much though as this could also perhaps be a
+ // legit error.
+ // If `ERROR_INVALID_PARAMETER` is returned, this probably means we're
+ // running on a pre-Vista version where `PIPE_REJECT_REMOTE_CLIENTS` is
+ // not supported, so we continue retrying without it. This implies
+ // reduced security on Windows versions older than Vista by allowing
+ // connections to this pipe from remote machines.
+ // A proper fix would increase the number of FFI imports and introduce
+ // a significant amount of Windows XP-specific code with no clean
+ // testing strategy.
+ // For more info, see https://github.com/rust-lang/rust/pull/37677.
+ if handle == c::INVALID_HANDLE_VALUE {
+ let err = io::Error::last_os_error();
+ let raw_os_err = err.raw_os_error();
+ if tries < 10 {
+ if raw_os_err == Some(c::ERROR_ACCESS_DENIED as i32) {
+ continue;
+ } else if reject_remote_clients_flag != 0
+ && raw_os_err == Some(c::ERROR_INVALID_PARAMETER as i32)
+ {
+ reject_remote_clients_flag = 0;
+ tries -= 1;
+ continue;
+ }
+ }
+ return Err(err);
+ }
+ ours = Handle::from_raw_handle(handle);
+ break;
+ }
+
+ // Connect to the named pipe we just created. This handle is going to be
+ // returned in `theirs`, so if `ours` is readable we want this to be
+ // writable, otherwise if `ours` is writable we want this to be
+ // readable.
+ //
+ // Additionally we don't enable overlapped mode on this because most
+ // client processes aren't enabled to work with that.
+ let mut opts = OpenOptions::new();
+ opts.write(ours_readable);
+ opts.read(!ours_readable);
+ opts.share_mode(0);
+ let size = mem::size_of::<c::SECURITY_ATTRIBUTES>();
+ let mut sa = c::SECURITY_ATTRIBUTES {
+ nLength: size as c::DWORD,
+ lpSecurityDescriptor: ptr::null_mut(),
+ bInheritHandle: their_handle_inheritable as i32,
+ };
+ opts.security_attributes(&mut sa);
+ let theirs = File::open(Path::new(&name), &opts)?;
+ let theirs = AnonPipe { inner: theirs.into_inner() };
+
+ Ok(Pipes {
+ ours: AnonPipe { inner: ours },
+ theirs: AnonPipe { inner: theirs.into_inner() },
+ })
+ }
+}
+
+/// Takes an asynchronous source pipe and returns a synchronous pipe suitable
+/// for sending to a child process.
+///
+/// This is achieved by creating a new set of pipes and spawning a thread that
+/// relays messages between the source and the synchronous pipe.
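+///
+/// For illustration: the relay thread copies data between the two pipes in
+/// 4 KiB chunks and exits as soon as either side reports EOF or an error.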
+pub fn spawn_pipe_relay(
+ source: &AnonPipe,
+ ours_readable: bool,
+ their_handle_inheritable: bool,
+) -> io::Result<AnonPipe> {
+ // We need this handle to live for the lifetime of the thread spawned below.
+ let source = source.duplicate()?;
+
+ // create a new pair of anon pipes.
+ let Pipes { theirs, ours } = anon_pipe(ours_readable, their_handle_inheritable)?;
+
+ // Spawn a thread that passes messages from one pipe to the other.
+ // Any errors will simply cause the thread to exit.
+ let (reader, writer) = if ours_readable { (ours, source) } else { (source, ours) };
+ crate::thread::spawn(move || {
+ let mut buf = [0_u8; 4096];
+ 'reader: while let Ok(len) = reader.read(&mut buf) {
+ if len == 0 {
+ break;
+ }
+ let mut start = 0;
+ while let Ok(written) = writer.write(&buf[start..len]) {
+ start += written;
+ if start == len {
+ continue 'reader;
+ }
+ }
+ break;
+ }
+ });
+
+ // Return the pipe that should be sent to the child process.
+ Ok(theirs)
+}
+
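+// Returns a process-wide counter value: the first call seeds the counter from
+// the OS RNG and every later call increments it. This is only used to make
+// the pipe name generated above unlikely to collide.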
+fn random_number() -> usize {
+ static N: AtomicUsize = AtomicUsize::new(0);
+ loop {
+ if N.load(SeqCst) != 0 {
+ return N.fetch_add(1, SeqCst);
+ }
+
+ N.store(hashmap_random_keys().0 as usize, SeqCst);
+ }
+}
+
+// Abstracts over `ReadFileEx` and `WriteFileEx`
+type AlertableIoFn = unsafe extern "system" fn(
+ BorrowedHandle<'_>,
+ c::LPVOID,
+ c::DWORD,
+ c::LPOVERLAPPED,
+ c::LPOVERLAPPED_COMPLETION_ROUTINE,
+) -> c::BOOL;
+
+impl AnonPipe {
+ pub fn handle(&self) -> &Handle {
+ &self.inner
+ }
+ pub fn into_handle(self) -> Handle {
+ self.inner
+ }
+ fn duplicate(&self) -> io::Result<Self> {
+ self.inner.duplicate(0, false, c::DUPLICATE_SAME_ACCESS).map(|inner| AnonPipe { inner })
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let result = unsafe {
+ let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
+ self.alertable_io_internal(c::ReadFileEx, buf.as_mut_ptr() as _, len)
+ };
+
+ match result {
+ // The special treatment of BrokenPipe is to deal with Windows
+ // pipe semantics, which yields this error when *reading* from
+ // a pipe after the other end has closed; we interpret that as
+ // EOF on the pipe.
+ Err(ref e) if e.kind() == io::ErrorKind::BrokenPipe => Ok(0),
+ _ => result,
+ }
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ unsafe {
+ let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
+ self.alertable_io_internal(c::WriteFileEx, buf.as_ptr() as _, len)
+ }
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+
+ /// Synchronizes asynchronous reads or writes using our anonymous pipe.
+ ///
+ /// This is a wrapper around [`ReadFileEx`] or [`WriteFileEx`] that uses
+ /// [Asynchronous Procedure Call] (APC) to synchronize reads or writes.
+ ///
+ /// Note: This should not be used for handles we don't create.
+ ///
+ /// # Safety
+ ///
+ /// `buf` must be a pointer to a buffer that's valid for reads or writes
+ /// up to `len` bytes. The `AlertableIoFn` must be either `ReadFileEx` or `WriteFileEx`
+ ///
+ /// [`ReadFileEx`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfileex
+ /// [`WriteFileEx`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-writefileex
+ /// [Asynchronous Procedure Call]: https://docs.microsoft.com/en-us/windows/win32/sync/asynchronous-procedure-calls
+ unsafe fn alertable_io_internal(
+ &self,
+ io: AlertableIoFn,
+ buf: c::LPVOID,
+ len: c::DWORD,
+ ) -> io::Result<usize> {
+ // Use "alertable I/O" to synchronize the pipe I/O.
+ // This has four steps.
+ //
+ // STEP 1: Start the asynchronous I/O operation.
+ // This simply calls either `ReadFileEx` or `WriteFileEx`,
+ // giving it a pointer to the buffer and callback function.
+ //
+ // STEP 2: Enter an alertable state.
+ // The callback set in step 1 will not be called until the thread
+ // enters an "alertable" state. This can be done using `SleepEx`.
+ //
+ // STEP 3: The callback
+ // Once the I/O is complete and the thread is in an alertable state,
+ // the callback will be run on the same thread as the call to
+ // `ReadFileEx` or `WriteFileEx` done in step 1.
+ // In the callback we simply set the result of the async operation.
+ //
+ // STEP 4: Return the result.
+ // At this point we'll have a result from the callback function
+ // and can simply return it. Note that we must not return earlier,
+ // while the I/O is still in progress.
+
+ // The result that will be set from the asynchronous callback.
+ let mut async_result: Option<AsyncResult> = None;
+ struct AsyncResult {
+ error: u32,
+ transfered: u32,
+ }
+
+ // STEP 3: The callback.
+ unsafe extern "system" fn callback(
+ dwErrorCode: u32,
+ dwNumberOfBytesTransfered: u32,
+ lpOverlapped: *mut c::OVERLAPPED,
+ ) {
+ // Set `async_result` using a pointer smuggled through `hEvent`.
+ let result = AsyncResult { error: dwErrorCode, transfered: dwNumberOfBytesTransfered };
+ *(*lpOverlapped).hEvent.cast::<Option<AsyncResult>>() = Some(result);
+ }
+
+ // STEP 1: Start the I/O operation.
+ let mut overlapped: c::OVERLAPPED = crate::mem::zeroed();
+ // `hEvent` is unused by `ReadFileEx` and `WriteFileEx`.
+ // Therefore the documentation suggests using it to smuggle a pointer to the callback.
+ overlapped.hEvent = &mut async_result as *mut _ as *mut _;
+
+ // Asynchronous read of the pipe.
+ // If successful, `callback` will be called once it completes.
+ let result = io(self.inner.as_handle(), buf, len, &mut overlapped, callback);
+ if result == c::FALSE {
+ // We can return here because the call failed.
+ // After this we must not return until the I/O completes.
+ return Err(io::Error::last_os_error());
+ }
+
+ // Wait indefinitely for the result.
+ let result = loop {
+ // STEP 2: Enter an alertable state.
+ // The second parameter of `SleepEx` is used to make this sleep alertable.
+ c::SleepEx(c::INFINITE, c::TRUE);
+ if let Some(result) = async_result {
+ break result;
+ }
+ };
+ // STEP 4: Return the result.
+ // `async_result` is always `Some` at this point.
+ match result.error {
+ c::ERROR_SUCCESS => Ok(result.transfered as usize),
+ error => Err(io::Error::from_raw_os_error(error as _)),
+ }
+ }
+}
+
+pub fn read2(p1: AnonPipe, v1: &mut Vec<u8>, p2: AnonPipe, v2: &mut Vec<u8>) -> io::Result<()> {
+ let p1 = p1.into_handle();
+ let p2 = p2.into_handle();
+
+ let mut p1 = AsyncPipe::new(p1, v1)?;
+ let mut p2 = AsyncPipe::new(p2, v2)?;
+ let objs = [p1.event.as_raw_handle(), p2.event.as_raw_handle()];
+
+ // In a loop we wait for either pipe's scheduled read operation to complete.
+ // If the operation completes with 0 bytes, that means EOF was reached, in
+ // which case we just finish out the other pipe entirely.
+ //
+ // Note that overlapped I/O is in general super unsafe because we have to
+ // be careful to ensure that all pointers in play are valid for the entire
+ // duration of the I/O operation (where tons of operations can also fail).
+ // The destructor for `AsyncPipe` ends up taking care of most of this.
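+ // `WAIT_OBJECT_0` corresponds to `objs[0]` (p1) and `WAIT_OBJECT_0 + 1`
+ // corresponds to `objs[1]` (p2).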
+ loop {
+ let res = unsafe { c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE) };
+ if res == c::WAIT_OBJECT_0 {
+ if !p1.result()? || !p1.schedule_read()? {
+ return p2.finish();
+ }
+ } else if res == c::WAIT_OBJECT_0 + 1 {
+ if !p2.result()? || !p2.schedule_read()? {
+ return p1.finish();
+ }
+ } else {
+ return Err(io::Error::last_os_error());
+ }
+ }
+}
+
+struct AsyncPipe<'a> {
+ pipe: Handle,
+ event: Handle,
+ overlapped: Box<c::OVERLAPPED>, // needs a stable address
+ dst: &'a mut Vec<u8>,
+ state: State,
+}
+
+#[derive(PartialEq, Debug)]
+enum State {
+ NotReading,
+ Reading,
+ Read(usize),
+}
+
+impl<'a> AsyncPipe<'a> {
+ fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> {
+ // Create an event which we'll use to coordinate our overlapped
+ // operations; this event will be used in WaitForMultipleObjects
+ // and passed as part of the OVERLAPPED structure.
+ //
+ // Note that we do a somewhat clever thing here by flagging the
+ // event as being manually reset and setting it initially to the
+ // signaled state. This means that we'll naturally fall through the
+ // WaitForMultipleObjects call above for pipes created initially,
+ // and the only time an event will go back to "unset" will be once an
+ // I/O operation is successfully scheduled (what we want).
+ let event = Handle::new_event(true, true)?;
+ let mut overlapped: Box<c::OVERLAPPED> = unsafe { Box::new(mem::zeroed()) };
+ overlapped.hEvent = event.as_raw_handle();
+ Ok(AsyncPipe { pipe, overlapped, event, dst, state: State::NotReading })
+ }
+
+ /// Executes an overlapped read operation.
+ ///
+ /// Must not currently be reading, and returns whether the pipe is currently
+ /// at EOF or not. If the pipe is not at EOF then `result()` must be called
+ /// to complete the read later on (may block), but if the pipe is at EOF
+ /// then `result()` should not be called as it will just block forever.
+ fn schedule_read(&mut self) -> io::Result<bool> {
+ assert_eq!(self.state, State::NotReading);
+ let amt = unsafe {
+ let slice = slice_to_end(self.dst);
+ self.pipe.read_overlapped(slice, &mut *self.overlapped)?
+ };
+
+ // If this read finished immediately then our overlapped event will
+ // remain signaled (it was signaled coming in here) and we'll progress
+ // down to the method below.
+ //
+ // Otherwise the I/O operation is scheduled and the system set our event
+ // to not signaled, so we flag ourselves into the reading state and move
+ // on.
+ self.state = match amt {
+ Some(0) => return Ok(false),
+ Some(amt) => State::Read(amt),
+ None => State::Reading,
+ };
+ Ok(true)
+ }
+
+ /// Wait for the result of the overlapped operation previously executed.
+ ///
+ /// If a read is currently in progress, this blocks until it completes and
+ /// records the bytes read into the destination buffer.
+ ///
+ /// Return values:
+ ///
+ /// * `true` - finished any pending read and the pipe is not at EOF (keep
+ /// going)
+ /// * `false` - finished any pending read and pipe is at EOF (stop issuing
+ /// reads)
+ fn result(&mut self) -> io::Result<bool> {
+ let amt = match self.state {
+ State::NotReading => return Ok(true),
+ State::Reading => self.pipe.overlapped_result(&mut *self.overlapped, true)?,
+ State::Read(amt) => amt,
+ };
+ self.state = State::NotReading;
+ unsafe {
+ let len = self.dst.len();
+ self.dst.set_len(len + amt);
+ }
+ Ok(amt != 0)
+ }
+
+ /// Finishes out reading this pipe entirely.
+ ///
+ /// Waits for any pending or scheduled read and keeps scheduling new reads
+ /// until the pipe reaches EOF.
+ fn finish(&mut self) -> io::Result<()> {
+ while self.result()? && self.schedule_read()? {
+ // ...
+ }
+ Ok(())
+ }
+}
+
+impl<'a> Drop for AsyncPipe<'a> {
+ fn drop(&mut self) {
+ match self.state {
+ State::Reading => {}
+ _ => return,
+ }
+
+ // If we have a pending read operation, then we have to make sure that
+ // it's *done* before we actually drop this type. The kernel requires
+ // that the `OVERLAPPED` and buffer pointers are valid for the entire
+ // I/O operation.
+ //
+ // To do that, we call `CancelIo` to cancel any pending operation, and
+ // if that succeeds we wait for the overlapped result.
+ //
+ // If anything here fails, there's not really much we can do, so we leak
+ // the buffer/OVERLAPPED pointers to ensure we're at least memory safe.
+ if self.pipe.cancel_io().is_err() || self.result().is_err() {
+ let buf = mem::take(self.dst);
+ let overlapped = Box::new(unsafe { mem::zeroed() });
+ let overlapped = mem::replace(&mut self.overlapped, overlapped);
+ mem::forget((buf, overlapped));
+ }
+ }
+}
+
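+// Returns the spare capacity of `v` as a mutable byte slice, growing the
+// vector if it has no spare capacity. The caller must later call `set_len`
+// with however many of these bytes were actually initialized.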
+unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
+ if v.capacity() == 0 {
+ v.reserve(16);
+ }
+ if v.capacity() == v.len() {
+ v.reserve(1);
+ }
+ slice::from_raw_parts_mut(v.as_mut_ptr().add(v.len()), v.capacity() - v.len())
+}
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
new file mode 100644
index 000000000..02d5af471
--- /dev/null
+++ b/library/std/src/sys/windows/process.rs
@@ -0,0 +1,847 @@
+#![unstable(feature = "process_internals", issue = "none")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::cmp;
+use crate::collections::BTreeMap;
+use crate::env;
+use crate::env::consts::{EXE_EXTENSION, EXE_SUFFIX};
+use crate::ffi::{OsStr, OsString};
+use crate::fmt;
+use crate::io::{self, Error, ErrorKind};
+use crate::mem;
+use crate::num::NonZeroI32;
+use crate::os::windows::ffi::{OsStrExt, OsStringExt};
+use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle};
+use crate::path::{Path, PathBuf};
+use crate::ptr;
+use crate::sys::args::{self, Arg};
+use crate::sys::c;
+use crate::sys::c::NonZeroDWORD;
+use crate::sys::cvt;
+use crate::sys::fs::{File, OpenOptions};
+use crate::sys::handle::Handle;
+use crate::sys::path;
+use crate::sys::pipe::{self, AnonPipe};
+use crate::sys::stdio;
+use crate::sys_common::mutex::StaticMutex;
+use crate::sys_common::process::{CommandEnv, CommandEnvs};
+use crate::sys_common::IntoInner;
+
+use libc::{c_void, EXIT_FAILURE, EXIT_SUCCESS};
+
+////////////////////////////////////////////////////////////////////////////////
+// Command
+////////////////////////////////////////////////////////////////////////////////
+
+#[derive(Clone, Debug, Eq)]
+#[doc(hidden)]
+pub struct EnvKey {
+ os_string: OsString,
+ // This stores a UTF-16 encoded string to work around the mismatch between
+ // Rust's OsString (WTF-8) and the Windows API string type (UTF-16).
+ // Normally converting on every API call is acceptable but here
+ // `c::CompareStringOrdinal` will be called for every use of `==`.
+ utf16: Vec<u16>,
+}
+
+impl EnvKey {
+ fn new<T: Into<OsString>>(key: T) -> Self {
+ EnvKey::from(key.into())
+ }
+}
+
+// Comparing Windows environment variable keys[1] is behaviourally the
+// composition of two operations[2]:
+//
+// 1. Case-fold both strings. This is done using a language-independent
+// uppercase mapping that's unique to Windows (albeit based on data from an
+// older Unicode spec). It only operates on individual UTF-16 code units so
+// surrogates are left unchanged. This uppercase mapping can potentially change
+// between Windows versions.
+//
+// 2. Perform an ordinal comparison of the strings. A comparison using ordinal
+// is just a comparison based on the numerical value of each UTF-16 code unit[3].
+//
+// Because the case-folding mapping is unique to Windows and not guaranteed to
+// be stable, we ask the OS to compare the strings for us. This is done by
+// calling `CompareStringOrdinal`[4] with `bIgnoreCase` set to `TRUE`.
+//
+// [1] https://docs.microsoft.com/en-us/dotnet/standard/base-types/best-practices-strings#choosing-a-stringcomparison-member-for-your-method-call
+// [2] https://docs.microsoft.com/en-us/dotnet/standard/base-types/best-practices-strings#stringtoupper-and-stringtolower
+// [3] https://docs.microsoft.com/en-us/dotnet/api/system.stringcomparison?view=net-5.0#System_StringComparison_Ordinal
+// [4] https://docs.microsoft.com/en-us/windows/win32/api/stringapiset/nf-stringapiset-comparestringordinal
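+//
+// For example, the keys "Path", "PATH" and "path" all compare equal here,
+// while each `EnvKey` still preserves its original spelling in `os_string`.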
+impl Ord for EnvKey {
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ unsafe {
+ let result = c::CompareStringOrdinal(
+ self.utf16.as_ptr(),
+ self.utf16.len() as _,
+ other.utf16.as_ptr(),
+ other.utf16.len() as _,
+ c::TRUE,
+ );
+ match result {
+ c::CSTR_LESS_THAN => cmp::Ordering::Less,
+ c::CSTR_EQUAL => cmp::Ordering::Equal,
+ c::CSTR_GREATER_THAN => cmp::Ordering::Greater,
+ // `CompareStringOrdinal` should never fail so long as the parameters are correct.
+ _ => panic!("comparing environment keys failed: {}", Error::last_os_error()),
+ }
+ }
+ }
+}
+impl PartialOrd for EnvKey {
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+impl PartialEq for EnvKey {
+ fn eq(&self, other: &Self) -> bool {
+ if self.utf16.len() != other.utf16.len() {
+ false
+ } else {
+ self.cmp(other) == cmp::Ordering::Equal
+ }
+ }
+}
+impl PartialOrd<str> for EnvKey {
+ fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+ Some(self.cmp(&EnvKey::new(other)))
+ }
+}
+impl PartialEq<str> for EnvKey {
+ fn eq(&self, other: &str) -> bool {
+ if self.os_string.len() != other.len() {
+ false
+ } else {
+ self.cmp(&EnvKey::new(other)) == cmp::Ordering::Equal
+ }
+ }
+}
+
+// Environment variable keys should preserve their original case even though
+// they are compared using a caseless string mapping.
+impl From<OsString> for EnvKey {
+ fn from(k: OsString) -> Self {
+ EnvKey { utf16: k.encode_wide().collect(), os_string: k }
+ }
+}
+
+impl From<EnvKey> for OsString {
+ fn from(k: EnvKey) -> Self {
+ k.os_string
+ }
+}
+
+impl From<&OsStr> for EnvKey {
+ fn from(k: &OsStr) -> Self {
+ Self::from(k.to_os_string())
+ }
+}
+
+impl AsRef<OsStr> for EnvKey {
+ fn as_ref(&self) -> &OsStr {
+ &self.os_string
+ }
+}
+
+pub(crate) fn ensure_no_nuls<T: AsRef<OsStr>>(str: T) -> io::Result<T> {
+ if str.as_ref().encode_wide().any(|b| b == 0) {
+ Err(io::const_io_error!(ErrorKind::InvalidInput, "nul byte found in provided data"))
+ } else {
+ Ok(str)
+ }
+}
+
+pub struct Command {
+ program: OsString,
+ args: Vec<Arg>,
+ env: CommandEnv,
+ cwd: Option<OsString>,
+ flags: u32,
+ detach: bool, // not currently exposed in std::process
+ stdin: Option<Stdio>,
+ stdout: Option<Stdio>,
+ stderr: Option<Stdio>,
+ force_quotes_enabled: bool,
+}
+
+pub enum Stdio {
+ Inherit,
+ Null,
+ MakePipe,
+ Pipe(AnonPipe),
+ Handle(Handle),
+}
+
+pub struct StdioPipes {
+ pub stdin: Option<AnonPipe>,
+ pub stdout: Option<AnonPipe>,
+ pub stderr: Option<AnonPipe>,
+}
+
+impl Command {
+ pub fn new(program: &OsStr) -> Command {
+ Command {
+ program: program.to_os_string(),
+ args: Vec::new(),
+ env: Default::default(),
+ cwd: None,
+ flags: 0,
+ detach: false,
+ stdin: None,
+ stdout: None,
+ stderr: None,
+ force_quotes_enabled: false,
+ }
+ }
+
+ pub fn arg(&mut self, arg: &OsStr) {
+ self.args.push(Arg::Regular(arg.to_os_string()))
+ }
+ pub fn env_mut(&mut self) -> &mut CommandEnv {
+ &mut self.env
+ }
+ pub fn cwd(&mut self, dir: &OsStr) {
+ self.cwd = Some(dir.to_os_string())
+ }
+ pub fn stdin(&mut self, stdin: Stdio) {
+ self.stdin = Some(stdin);
+ }
+ pub fn stdout(&mut self, stdout: Stdio) {
+ self.stdout = Some(stdout);
+ }
+ pub fn stderr(&mut self, stderr: Stdio) {
+ self.stderr = Some(stderr);
+ }
+ pub fn creation_flags(&mut self, flags: u32) {
+ self.flags = flags;
+ }
+
+ pub fn force_quotes(&mut self, enabled: bool) {
+ self.force_quotes_enabled = enabled;
+ }
+
+ pub fn raw_arg(&mut self, command_str_to_append: &OsStr) {
+ self.args.push(Arg::Raw(command_str_to_append.to_os_string()))
+ }
+
+ pub fn get_program(&self) -> &OsStr {
+ &self.program
+ }
+
+ pub fn get_args(&self) -> CommandArgs<'_> {
+ let iter = self.args.iter();
+ CommandArgs { iter }
+ }
+
+ pub fn get_envs(&self) -> CommandEnvs<'_> {
+ self.env.iter()
+ }
+
+ pub fn get_current_dir(&self) -> Option<&Path> {
+ self.cwd.as_ref().map(|cwd| Path::new(cwd))
+ }
+
+ pub fn spawn(
+ &mut self,
+ default: Stdio,
+ needs_stdin: bool,
+ ) -> io::Result<(Process, StdioPipes)> {
+ let maybe_env = self.env.capture_if_changed();
+
+ let mut si = zeroed_startupinfo();
+ si.cb = mem::size_of::<c::STARTUPINFO>() as c::DWORD;
+ si.dwFlags = c::STARTF_USESTDHANDLES;
+
+ let child_paths = if let Some(env) = maybe_env.as_ref() {
+ env.get(&EnvKey::new("PATH")).map(|s| s.as_os_str())
+ } else {
+ None
+ };
+ let program = resolve_exe(&self.program, || env::var_os("PATH"), child_paths)?;
+ // Case insensitive "ends_with" of UTF-16 encoded ".bat" or ".cmd"
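+ // The UTF-16 code units matched below are '.', 'b'/'B', 'a'/'A', 't'/'T'
+ // for ".bat" and '.', 'c'/'C', 'm'/'M', 'd'/'D' for ".cmd", each pattern
+ // ending with the trailing null terminator.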
+ let is_batch_file = matches!(
+ program.len().checked_sub(5).and_then(|i| program.get(i..)),
+ Some([46, 98 | 66, 97 | 65, 116 | 84, 0] | [46, 99 | 67, 109 | 77, 100 | 68, 0])
+ );
+ let (program, mut cmd_str) = if is_batch_file {
+ (
+ command_prompt()?,
+ args::make_bat_command_line(
+ &args::to_user_path(program)?,
+ &self.args,
+ self.force_quotes_enabled,
+ )?,
+ )
+ } else {
+ let cmd_str = make_command_line(&self.program, &self.args, self.force_quotes_enabled)?;
+ (program, cmd_str)
+ };
+ cmd_str.push(0); // add null terminator
+
+ // stolen from the libuv code.
+ let mut flags = self.flags | c::CREATE_UNICODE_ENVIRONMENT;
+ if self.detach {
+ flags |= c::DETACHED_PROCESS | c::CREATE_NEW_PROCESS_GROUP;
+ }
+
+ let (envp, _data) = make_envp(maybe_env)?;
+ let (dirp, _data) = make_dirp(self.cwd.as_ref())?;
+ let mut pi = zeroed_process_information();
+
+ // Prepare all stdio handles to be inherited by the child. This
+ // currently involves duplicating any existing ones with the ability to
+ // be inherited by child processes. Note, however, that once an
+ // inheritable handle is created, *any* spawned child will inherit that
+ // handle. We only want our own child to inherit this handle, so we wrap
+ // the remaining portion of this spawn in a mutex.
+ //
+ // For more information, msdn also has an article about this race:
+ // https://support.microsoft.com/kb/315939
+ static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new();
+
+ let _guard = unsafe { CREATE_PROCESS_LOCK.lock() };
+
+ let mut pipes = StdioPipes { stdin: None, stdout: None, stderr: None };
+ let null = Stdio::Null;
+ let default_stdin = if needs_stdin { &default } else { &null };
+ let stdin = self.stdin.as_ref().unwrap_or(default_stdin);
+ let stdout = self.stdout.as_ref().unwrap_or(&default);
+ let stderr = self.stderr.as_ref().unwrap_or(&default);
+ let stdin = stdin.to_handle(c::STD_INPUT_HANDLE, &mut pipes.stdin)?;
+ let stdout = stdout.to_handle(c::STD_OUTPUT_HANDLE, &mut pipes.stdout)?;
+ let stderr = stderr.to_handle(c::STD_ERROR_HANDLE, &mut pipes.stderr)?;
+ si.hStdInput = stdin.as_raw_handle();
+ si.hStdOutput = stdout.as_raw_handle();
+ si.hStdError = stderr.as_raw_handle();
+
+ unsafe {
+ cvt(c::CreateProcessW(
+ program.as_ptr(),
+ cmd_str.as_mut_ptr(),
+ ptr::null_mut(),
+ ptr::null_mut(),
+ c::TRUE,
+ flags,
+ envp,
+ dirp,
+ &mut si,
+ &mut pi,
+ ))
+ }?;
+
+ unsafe {
+ Ok((
+ Process {
+ handle: Handle::from_raw_handle(pi.hProcess),
+ main_thread_handle: Handle::from_raw_handle(pi.hThread),
+ },
+ pipes,
+ ))
+ }
+ }
+}
+
+impl fmt::Debug for Command {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.program.fmt(f)?;
+ for arg in &self.args {
+ f.write_str(" ")?;
+ match arg {
+ Arg::Regular(s) => s.fmt(f),
+ Arg::Raw(s) => f.write_str(&s.to_string_lossy()),
+ }?;
+ }
+ Ok(())
+ }
+}
+
+// Resolve `exe_path` to the executable name.
+//
+// * If the path is simply a file name then use the paths given by `search_paths` to find the executable.
+// * Otherwise use the `exe_path` as given.
+//
+// This function may also append `.exe` to the name. The rationale for doing so is as follows:
+//
+// It is a very strong convention that Windows executables have the `exe` extension.
+// In Rust, it is common to omit this extension.
+// Therefore this function first assumes `.exe` was intended.
+// It falls back to the plain file name if a full path is given and the extension is omitted
+// or if only a file name is given and it already contains an extension.
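+//
+// For example, resolving "foo" searches the usual directories for "foo.exe",
+// while resolving a sub-path such as "bin\foo" tries "bin\foo.exe" first and
+// falls back to "bin\foo" if that file does not exist.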
+fn resolve_exe<'a>(
+ exe_path: &'a OsStr,
+ parent_paths: impl FnOnce() -> Option<OsString>,
+ child_paths: Option<&OsStr>,
+) -> io::Result<Vec<u16>> {
+ // Early return if there is no filename.
+ if exe_path.is_empty() || path::has_trailing_slash(exe_path) {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "program path has no file name",
+ ));
+ }
+ // Test if the file name has the `exe` extension.
+ // This does a case-insensitive `ends_with`.
+ let has_exe_suffix = if exe_path.len() >= EXE_SUFFIX.len() {
+ exe_path.bytes()[exe_path.len() - EXE_SUFFIX.len()..]
+ .eq_ignore_ascii_case(EXE_SUFFIX.as_bytes())
+ } else {
+ false
+ };
+
+ // If `exe_path` is an absolute path or a sub-path then don't search `PATH` for it.
+ if !path::is_file_name(exe_path) {
+ if has_exe_suffix {
+ // The application name is a path to a `.exe` file.
+ // Let `CreateProcessW` figure out if it exists or not.
+ return path::maybe_verbatim(Path::new(exe_path));
+ }
+ let mut path = PathBuf::from(exe_path);
+
+ // Append `.exe` if not already there.
+ path = path::append_suffix(path, EXE_SUFFIX.as_ref());
+ if let Some(path) = program_exists(&path) {
+ return Ok(path);
+ } else {
+ // It's ok to use `set_extension` here because the intent is to
+ // remove the extension that was just added.
+ path.set_extension("");
+ return path::maybe_verbatim(&path);
+ }
+ } else {
+ ensure_no_nuls(exe_path)?;
+ // From the `CreateProcessW` docs:
+ // > If the file name does not contain an extension, .exe is appended.
+ // Note that this rule only applies when searching paths.
+ let has_extension = exe_path.bytes().contains(&b'.');
+
+ // Search the directories given by `search_paths`.
+ let result = search_paths(parent_paths, child_paths, |mut path| {
+ path.push(&exe_path);
+ if !has_extension {
+ path.set_extension(EXE_EXTENSION);
+ }
+ program_exists(&path)
+ });
+ if let Some(path) = result {
+ return Ok(path);
+ }
+ }
+ // If we get here then the executable cannot be found.
+ Err(io::const_io_error!(io::ErrorKind::NotFound, "program not found"))
+}
+
+// Calls `f` for every path that should be used to find an executable.
+// Returns once `f` returns the path to an executable or all paths have been searched.
+fn search_paths<Paths, Exists>(
+ parent_paths: Paths,
+ child_paths: Option<&OsStr>,
+ mut exists: Exists,
+) -> Option<Vec<u16>>
+where
+ Paths: FnOnce() -> Option<OsString>,
+ Exists: FnMut(PathBuf) -> Option<Vec<u16>>,
+{
+ // 1. Child paths
+ // This is for consistency with Rust's historic behaviour.
+ if let Some(paths) = child_paths {
+ for path in env::split_paths(paths).filter(|p| !p.as_os_str().is_empty()) {
+ if let Some(path) = exists(path) {
+ return Some(path);
+ }
+ }
+ }
+
+ // 2. Application path
+ if let Ok(mut app_path) = env::current_exe() {
+ app_path.pop();
+ if let Some(path) = exists(app_path) {
+ return Some(path);
+ }
+ }
+
+ // 3 & 4. System paths
+ // SAFETY: This uses `fill_utf16_buf` to safely call the OS functions.
+ unsafe {
+ if let Ok(Some(path)) = super::fill_utf16_buf(
+ |buf, size| c::GetSystemDirectoryW(buf, size),
+ |buf| exists(PathBuf::from(OsString::from_wide(buf))),
+ ) {
+ return Some(path);
+ }
+ #[cfg(not(target_vendor = "uwp"))]
+ {
+ if let Ok(Some(path)) = super::fill_utf16_buf(
+ |buf, size| c::GetWindowsDirectoryW(buf, size),
+ |buf| exists(PathBuf::from(OsString::from_wide(buf))),
+ ) {
+ return Some(path);
+ }
+ }
+ }
+
+ // 5. Parent paths
+ if let Some(parent_paths) = parent_paths() {
+ for path in env::split_paths(&parent_paths).filter(|p| !p.as_os_str().is_empty()) {
+ if let Some(path) = exists(path) {
+ return Some(path);
+ }
+ }
+ }
+ None
+}
+
+/// Check if a file exists without following symlinks.
+fn program_exists(path: &Path) -> Option<Vec<u16>> {
+ unsafe {
+ let path = path::maybe_verbatim(path).ok()?;
+ // Getting attributes using `GetFileAttributesW` does not follow symlinks
+ // and it will almost always be successful if the link exists.
+ // There are some exceptions for special system files (e.g. the pagefile)
+ // but these are not executable.
+ if c::GetFileAttributesW(path.as_ptr()) == c::INVALID_FILE_ATTRIBUTES {
+ None
+ } else {
+ Some(path)
+ }
+ }
+}
+
+impl Stdio {
+ fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option<AnonPipe>) -> io::Result<Handle> {
+ match *self {
+ // If no stdio handle is available, then `Inherit` means the child's
+ // handle should also be unavailable, so we propagate
+ // INVALID_HANDLE_VALUE.
+ Stdio::Inherit => match stdio::get_handle(stdio_id) {
+ Ok(io) => unsafe {
+ let io = Handle::from_raw_handle(io);
+ let ret = io.duplicate(0, true, c::DUPLICATE_SAME_ACCESS);
+ io.into_raw_handle();
+ ret
+ },
+ Err(..) => unsafe { Ok(Handle::from_raw_handle(c::INVALID_HANDLE_VALUE)) },
+ },
+
+ Stdio::MakePipe => {
+ let ours_readable = stdio_id != c::STD_INPUT_HANDLE;
+ let pipes = pipe::anon_pipe(ours_readable, true)?;
+ *pipe = Some(pipes.ours);
+ Ok(pipes.theirs.into_handle())
+ }
+
+ Stdio::Pipe(ref source) => {
+ let ours_readable = stdio_id != c::STD_INPUT_HANDLE;
+ pipe::spawn_pipe_relay(source, ours_readable, true).map(AnonPipe::into_handle)
+ }
+
+ Stdio::Handle(ref handle) => handle.duplicate(0, true, c::DUPLICATE_SAME_ACCESS),
+
+ // Open up a reference to NUL with appropriate read/write
+ // permissions as well as the ability to be inherited by child
+ // processes (as this is about to be inherited).
+ Stdio::Null => {
+ let size = mem::size_of::<c::SECURITY_ATTRIBUTES>();
+ let mut sa = c::SECURITY_ATTRIBUTES {
+ nLength: size as c::DWORD,
+ lpSecurityDescriptor: ptr::null_mut(),
+ bInheritHandle: 1,
+ };
+ let mut opts = OpenOptions::new();
+ opts.read(stdio_id == c::STD_INPUT_HANDLE);
+ opts.write(stdio_id != c::STD_INPUT_HANDLE);
+ opts.security_attributes(&mut sa);
+ File::open(Path::new("NUL"), &opts).map(|file| file.into_inner())
+ }
+ }
+ }
+}
+
+impl From<AnonPipe> for Stdio {
+ fn from(pipe: AnonPipe) -> Stdio {
+ Stdio::Pipe(pipe)
+ }
+}
+
+impl From<File> for Stdio {
+ fn from(file: File) -> Stdio {
+ Stdio::Handle(file.into_inner())
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Processes
+////////////////////////////////////////////////////////////////////////////////
+
+/// A value representing a child process.
+///
+/// The lifetime of this value is linked to the lifetime of the actual
+/// process - the Process destructor calls self.finish() which waits
+/// for the process to terminate.
+pub struct Process {
+ handle: Handle,
+ main_thread_handle: Handle,
+}
+
+impl Process {
+ pub fn kill(&mut self) -> io::Result<()> {
+ cvt(unsafe { c::TerminateProcess(self.handle.as_raw_handle(), 1) })?;
+ Ok(())
+ }
+
+ pub fn id(&self) -> u32 {
+ unsafe { c::GetProcessId(self.handle.as_raw_handle()) as u32 }
+ }
+
+ pub fn main_thread_handle(&self) -> BorrowedHandle<'_> {
+ self.main_thread_handle.as_handle()
+ }
+
+ pub fn wait(&mut self) -> io::Result<ExitStatus> {
+ unsafe {
+ let res = c::WaitForSingleObject(self.handle.as_raw_handle(), c::INFINITE);
+ if res != c::WAIT_OBJECT_0 {
+ return Err(Error::last_os_error());
+ }
+ let mut status = 0;
+ cvt(c::GetExitCodeProcess(self.handle.as_raw_handle(), &mut status))?;
+ Ok(ExitStatus(status))
+ }
+ }
+
+ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ unsafe {
+ match c::WaitForSingleObject(self.handle.as_raw_handle(), 0) {
+ c::WAIT_OBJECT_0 => {}
+ c::WAIT_TIMEOUT => {
+ return Ok(None);
+ }
+ _ => return Err(io::Error::last_os_error()),
+ }
+ let mut status = 0;
+ cvt(c::GetExitCodeProcess(self.handle.as_raw_handle(), &mut status))?;
+ Ok(Some(ExitStatus(status)))
+ }
+ }
+
+ pub fn handle(&self) -> &Handle {
+ &self.handle
+ }
+
+ pub fn into_handle(self) -> Handle {
+ self.handle
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatus(c::DWORD);
+
+impl ExitStatus {
+ pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
+ match NonZeroDWORD::try_from(self.0) {
+ /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)),
+ /* was zero, couldn't convert */ Err(_) => Ok(()),
+ }
+ }
+ pub fn code(&self) -> Option<i32> {
+ Some(self.0 as i32)
+ }
+}
+
+/// Converts a raw `c::DWORD` to a type-safe `ExitStatus` by wrapping it without copying.
+impl From<c::DWORD> for ExitStatus {
+ fn from(u: c::DWORD) -> ExitStatus {
+ ExitStatus(u)
+ }
+}
+
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Windows exit codes with the high bit set typically mean some form of
+ // unhandled exception or warning. In this scenario printing the exit
+ // code in decimal doesn't always make sense because it's a very large
+ // and somewhat gibberish number. The hex code is a bit more
+ // recognizable and easier to search for, so print that.
+ if self.0 & 0x80000000 != 0 {
+ write!(f, "exit code: {:#x}", self.0)
+ } else {
+ write!(f, "exit code: {}", self.0)
+ }
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitStatusError(c::NonZeroDWORD);
+
+impl Into<ExitStatus> for ExitStatusError {
+ fn into(self) -> ExitStatus {
+ ExitStatus(self.0.into())
+ }
+}
+
+impl ExitStatusError {
+ pub fn code(self) -> Option<NonZeroI32> {
+ Some((u32::from(self.0) as i32).try_into().unwrap())
+ }
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+pub struct ExitCode(c::DWORD);
+
+impl ExitCode {
+ pub const SUCCESS: ExitCode = ExitCode(EXIT_SUCCESS as _);
+ pub const FAILURE: ExitCode = ExitCode(EXIT_FAILURE as _);
+
+ #[inline]
+ pub fn as_i32(&self) -> i32 {
+ self.0 as i32
+ }
+}
+
+impl From<u8> for ExitCode {
+ fn from(code: u8) -> Self {
+ ExitCode(c::DWORD::from(code))
+ }
+}
+
+impl From<u32> for ExitCode {
+ fn from(code: u32) -> Self {
+ ExitCode(c::DWORD::from(code))
+ }
+}
+
+fn zeroed_startupinfo() -> c::STARTUPINFO {
+ c::STARTUPINFO {
+ cb: 0,
+ lpReserved: ptr::null_mut(),
+ lpDesktop: ptr::null_mut(),
+ lpTitle: ptr::null_mut(),
+ dwX: 0,
+ dwY: 0,
+ dwXSize: 0,
+ dwYSize: 0,
+ dwXCountChars: 0,
+ dwYCountCharts: 0,
+ dwFillAttribute: 0,
+ dwFlags: 0,
+ wShowWindow: 0,
+ cbReserved2: 0,
+ lpReserved2: ptr::null_mut(),
+ hStdInput: c::INVALID_HANDLE_VALUE,
+ hStdOutput: c::INVALID_HANDLE_VALUE,
+ hStdError: c::INVALID_HANDLE_VALUE,
+ }
+}
+
+fn zeroed_process_information() -> c::PROCESS_INFORMATION {
+ c::PROCESS_INFORMATION {
+ hProcess: ptr::null_mut(),
+ hThread: ptr::null_mut(),
+ dwProcessId: 0,
+ dwThreadId: 0,
+ }
+}
+
+// Produces a wide string *without a terminating null*; returns an error if
+// `argv0` or any of the `args` contain a nul.
+fn make_command_line(argv0: &OsStr, args: &[Arg], force_quotes: bool) -> io::Result<Vec<u16>> {
+ // Encode the command and arguments in a command line string such
+ // that the spawned process may recover them using CommandLineToArgvW.
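+ // For example, the program `child.exe` with regular arguments ["a b", "c"]
+ // is encoded as `"child.exe" "a b" c`.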
+ let mut cmd: Vec<u16> = Vec::new();
+
+ // Always quote the program name to avoid ambiguity when the child
+ // process parses its arguments.
+ // Note that quotes aren't escaped here because they can't be used in arg0.
+ // But that's ok because file paths can't contain quotes.
+ cmd.push(b'"' as u16);
+ cmd.extend(argv0.encode_wide());
+ cmd.push(b'"' as u16);
+
+ for arg in args {
+ cmd.push(' ' as u16);
+ args::append_arg(&mut cmd, arg, force_quotes)?;
+ }
+ Ok(cmd)
+}
+
+// Get `cmd.exe` for use with bat scripts, encoded as a UTF-16 string.
+fn command_prompt() -> io::Result<Vec<u16>> {
+ let mut system: Vec<u16> = super::fill_utf16_buf(
+ |buf, size| unsafe { c::GetSystemDirectoryW(buf, size) },
+ |buf| buf.into(),
+ )?;
+ system.extend("\\cmd.exe".encode_utf16().chain([0]));
+ Ok(system)
+}
+
+fn make_envp(maybe_env: Option<BTreeMap<EnvKey, OsString>>) -> io::Result<(*mut c_void, Vec<u16>)> {
+ // On Windows we pass an "environment block" which is not a char**, but
+ // rather a concatenation of null-terminated k=v\0 sequences, with a final
+ // \0 to terminate.
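+ // For example, the variables TEMP=C:\Temp and X=1 are passed as the UTF-16
+ // encoding of "TEMP=C:\Temp\0X=1\0\0".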
+ if let Some(env) = maybe_env {
+ let mut blk = Vec::new();
+
+ // If there are no environment variables to set then signal this by
+ // pushing a null.
+ if env.is_empty() {
+ blk.push(0);
+ }
+
+ for (k, v) in env {
+ ensure_no_nuls(k.os_string)?;
+ blk.extend(k.utf16);
+ blk.push('=' as u16);
+ blk.extend(ensure_no_nuls(v)?.encode_wide());
+ blk.push(0);
+ }
+ blk.push(0);
+ Ok((blk.as_mut_ptr() as *mut c_void, blk))
+ } else {
+ Ok((ptr::null_mut(), Vec::new()))
+ }
+}
+
+fn make_dirp(d: Option<&OsString>) -> io::Result<(*const u16, Vec<u16>)> {
+ match d {
+ Some(dir) => {
+ let mut dir_str: Vec<u16> = ensure_no_nuls(dir)?.encode_wide().collect();
+ dir_str.push(0);
+ Ok((dir_str.as_ptr(), dir_str))
+ }
+ None => Ok((ptr::null(), Vec::new())),
+ }
+}
+
+pub struct CommandArgs<'a> {
+ iter: crate::slice::Iter<'a, Arg>,
+}
+
+impl<'a> Iterator for CommandArgs<'a> {
+ type Item = &'a OsStr;
+ fn next(&mut self) -> Option<&'a OsStr> {
+ self.iter.next().map(|arg| match arg {
+ Arg::Regular(s) | Arg::Raw(s) => s.as_ref(),
+ })
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl<'a> ExactSizeIterator for CommandArgs<'a> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+impl<'a> fmt::Debug for CommandArgs<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter.clone()).finish()
+ }
+}
diff --git a/library/std/src/sys/windows/process/tests.rs b/library/std/src/sys/windows/process/tests.rs
new file mode 100644
index 000000000..3fc0c7524
--- /dev/null
+++ b/library/std/src/sys/windows/process/tests.rs
@@ -0,0 +1,222 @@
+use super::make_command_line;
+use super::Arg;
+use crate::env;
+use crate::ffi::{OsStr, OsString};
+use crate::process::Command;
+
+#[test]
+fn test_raw_args() {
+ let command_line = &make_command_line(
+ OsStr::new("quoted exe"),
+ &[
+ Arg::Regular(OsString::from("quote me")),
+ Arg::Raw(OsString::from("quote me *not*")),
+ Arg::Raw(OsString::from("\t\\")),
+ Arg::Raw(OsString::from("internal \\\"backslash-\"quote")),
+ Arg::Regular(OsString::from("optional-quotes")),
+ ],
+ false,
+ )
+ .unwrap();
+ assert_eq!(
+ String::from_utf16(command_line).unwrap(),
+ "\"quoted exe\" \"quote me\" quote me *not* \t\\ internal \\\"backslash-\"quote optional-quotes"
+ );
+}
+
+#[test]
+fn test_thread_handle() {
+ use crate::os::windows::io::BorrowedHandle;
+ use crate::os::windows::process::{ChildExt, CommandExt};
+ const CREATE_SUSPENDED: u32 = 0x00000004;
+
+ let p = Command::new("cmd").args(&["/C", "exit 0"]).creation_flags(CREATE_SUSPENDED).spawn();
+ assert!(p.is_ok());
+ let mut p = p.unwrap();
+
+ extern "system" {
+ fn ResumeThread(_: BorrowedHandle<'_>) -> u32;
+ }
+ unsafe {
+ ResumeThread(p.main_thread_handle());
+ }
+
+ crate::thread::sleep(crate::time::Duration::from_millis(100));
+
+ let res = p.try_wait();
+ assert!(res.is_ok());
+ assert!(res.unwrap().is_some());
+ assert!(p.try_wait().unwrap().unwrap().success());
+}
+
+#[test]
+fn test_make_command_line() {
+ fn test_wrapper(prog: &str, args: &[&str], force_quotes: bool) -> String {
+ let command_line = &make_command_line(
+ OsStr::new(prog),
+ &args.iter().map(|a| Arg::Regular(OsString::from(a))).collect::<Vec<_>>(),
+ force_quotes,
+ )
+ .unwrap();
+ String::from_utf16(command_line).unwrap()
+ }
+
+ assert_eq!(test_wrapper("prog", &["aaa", "bbb", "ccc"], false), "\"prog\" aaa bbb ccc");
+
+ assert_eq!(test_wrapper("prog", &[r"C:\"], false), r#""prog" C:\"#);
+ assert_eq!(test_wrapper("prog", &[r"2slashes\\"], false), r#""prog" 2slashes\\"#);
+ assert_eq!(test_wrapper("prog", &[r" C:\"], false), r#""prog" " C:\\""#);
+ assert_eq!(test_wrapper("prog", &[r" 2slashes\\"], false), r#""prog" " 2slashes\\\\""#);
+
+ assert_eq!(
+ test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa"], false),
+ "\"C:\\Program Files\\blah\\blah.exe\" aaa"
+ );
+ assert_eq!(
+ test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], false),
+ "\"C:\\Program Files\\blah\\blah.exe\" aaa v*"
+ );
+ assert_eq!(
+ test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], true),
+ "\"C:\\Program Files\\blah\\blah.exe\" \"aaa\" \"v*\""
+ );
+ assert_eq!(
+ test_wrapper("C:\\Program Files\\test", &["aa\"bb"], false),
+ "\"C:\\Program Files\\test\" aa\\\"bb"
+ );
+ assert_eq!(test_wrapper("echo", &["a b c"], false), "\"echo\" \"a b c\"");
+ assert_eq!(
+ test_wrapper("echo", &["\" \\\" \\", "\\"], false),
+ "\"echo\" \"\\\" \\\\\\\" \\\\\" \\"
+ );
+ assert_eq!(
+ test_wrapper("\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}", &[], false),
+ "\"\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}\""
+ );
+}
+
+// On Windows, environment variable names are case-preserving but comparisons are case-insensitive.
+// See: #85242
+#[test]
+fn windows_env_unicode_case() {
+ let test_cases = [
+ ("ä", "Ä"),
+ ("ß", "SS"),
+ ("Ä", "Ö"),
+ ("Ä", "Ö"),
+ ("I", "İ"),
+ ("I", "i"),
+ ("I", "ı"),
+ ("i", "I"),
+ ("i", "İ"),
+ ("i", "ı"),
+ ("İ", "I"),
+ ("İ", "i"),
+ ("İ", "ı"),
+ ("ı", "I"),
+ ("ı", "i"),
+ ("ı", "İ"),
+ ("ä", "Ä"),
+ ("ß", "SS"),
+ ("Ä", "Ö"),
+ ("Ä", "Ö"),
+ ("I", "İ"),
+ ("I", "i"),
+ ("I", "ı"),
+ ("i", "I"),
+ ("i", "İ"),
+ ("i", "ı"),
+ ("İ", "I"),
+ ("İ", "i"),
+ ("İ", "ı"),
+ ("ı", "I"),
+ ("ı", "i"),
+ ("ı", "İ"),
+ ];
+ // Test that `cmd.env` matches `env::set_var` when setting two strings that
+ // may (or may not) be case-folded when compared.
+ for (a, b) in test_cases.iter() {
+ let mut cmd = Command::new("cmd");
+ cmd.env(a, "1");
+ cmd.env(b, "2");
+ env::set_var(a, "1");
+ env::set_var(b, "2");
+
+ for (key, value) in cmd.get_envs() {
+ assert_eq!(
+ env::var(key).ok(),
+ value.map(|s| s.to_string_lossy().into_owned()),
+ "command environment mismatch: {a} {b}",
+ );
+ }
+ }
+}
+
+// UWP applications run in a restricted environment which means this test may not work.
+#[cfg(not(target_vendor = "uwp"))]
+#[test]
+fn windows_exe_resolver() {
+ use super::resolve_exe;
+ use crate::io;
+ use crate::sys::fs::symlink;
+ use crate::sys_common::io::test::tmpdir;
+
+ let env_paths = || env::var_os("PATH");
+
+ // Test a full path, with and without the `exe` extension.
+ let mut current_exe = env::current_exe().unwrap();
+ assert!(resolve_exe(current_exe.as_ref(), env_paths, None).is_ok());
+ current_exe.set_extension("");
+ assert!(resolve_exe(current_exe.as_ref(), env_paths, None).is_ok());
+
+ // Test lone file names.
+ assert!(resolve_exe(OsStr::new("cmd"), env_paths, None).is_ok());
+ assert!(resolve_exe(OsStr::new("cmd.exe"), env_paths, None).is_ok());
+ assert!(resolve_exe(OsStr::new("cmd.EXE"), env_paths, None).is_ok());
+ assert!(resolve_exe(OsStr::new("fc"), env_paths, None).is_ok());
+
+ // Invalid file names should return InvalidInput.
+ assert_eq!(
+ resolve_exe(OsStr::new(""), env_paths, None).unwrap_err().kind(),
+ io::ErrorKind::InvalidInput
+ );
+ assert_eq!(
+ resolve_exe(OsStr::new("\0"), env_paths, None).unwrap_err().kind(),
+ io::ErrorKind::InvalidInput
+ );
+ // Trailing slash, therefore there's no file name component.
+ assert_eq!(
+ resolve_exe(OsStr::new(r"C:\Path\to\"), env_paths, None).unwrap_err().kind(),
+ io::ErrorKind::InvalidInput
+ );
+
+ /*
+ Some of the following tests may need to be changed if you are deliberately
+ changing the behaviour of `resolve_exe`.
+ */
+
+ let empty_paths = || None;
+
+ // The resolver looks in system directories even when `PATH` is empty.
+ assert!(resolve_exe(OsStr::new("cmd.exe"), empty_paths, None).is_ok());
+
+ // The application's directory is also searched.
+ let current_exe = env::current_exe().unwrap();
+ assert!(resolve_exe(current_exe.file_name().unwrap().as_ref(), empty_paths, None).is_ok());
+
+ // Create a temporary path and add a broken symlink.
+ let temp = tmpdir();
+ let mut exe_path = temp.path().to_owned();
+ exe_path.push("exists.exe");
+
+ // A broken symlink should still be resolved.
+ // Skip this check if not in CI and creating symlinks isn't possible.
+ let is_ci = env::var("CI").is_ok();
+ let result = symlink("<DOES NOT EXIST>".as_ref(), &exe_path);
+ if is_ci || result.is_ok() {
+ result.unwrap();
+ assert!(
+ resolve_exe(OsStr::new("exists.exe"), empty_paths, Some(temp.path().as_ref())).is_ok()
+ );
+ }
+}
diff --git a/library/std/src/sys/windows/rand.rs b/library/std/src/sys/windows/rand.rs
new file mode 100644
index 000000000..f8fd93a73
--- /dev/null
+++ b/library/std/src/sys/windows/rand.rs
@@ -0,0 +1,35 @@
+use crate::io;
+use crate::mem;
+use crate::ptr;
+use crate::sys::c;
+
+pub fn hashmap_random_keys() -> (u64, u64) {
+ let mut v = (0, 0);
+ let ret = unsafe {
+ c::BCryptGenRandom(
+ ptr::null_mut(),
+ &mut v as *mut _ as *mut u8,
+ mem::size_of_val(&v) as c::ULONG,
+ c::BCRYPT_USE_SYSTEM_PREFERRED_RNG,
+ )
+ };
+ if ret != 0 { fallback_rng() } else { v }
+}
+
+/// Generate random numbers using the fallback RNG function (RtlGenRandom)
+#[cfg(not(target_vendor = "uwp"))]
+#[inline(never)]
+fn fallback_rng() -> (u64, u64) {
+ let mut v = (0, 0);
+ let ret =
+ unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) };
+
+ if ret != 0 { v } else { panic!("fallback RNG broken: {}", io::Error::last_os_error()) }
+}
+
+/// We can't use RtlGenRandom with UWP, so there is no fallback
+#[cfg(target_vendor = "uwp")]
+#[inline(never)]
+fn fallback_rng() -> (u64, u64) {
+ panic!("fallback RNG broken: RtlGenRandom() not supported on UWP");
+}
diff --git a/library/std/src/sys/windows/stack_overflow.rs b/library/std/src/sys/windows/stack_overflow.rs
new file mode 100644
index 000000000..18a2a36ad
--- /dev/null
+++ b/library/std/src/sys/windows/stack_overflow.rs
@@ -0,0 +1,42 @@
+#![cfg_attr(test, allow(dead_code))]
+
+use crate::sys::c;
+use crate::thread;
+
+pub struct Handler;
+
+impl Handler {
+ pub unsafe fn new() -> Handler {
+ // This API isn't available on XP, so don't panic in that case and just
+ // pray it works out ok.
+ if c::SetThreadStackGuarantee(&mut 0x5000) == 0
+ && c::GetLastError() as u32 != c::ERROR_CALL_NOT_IMPLEMENTED as u32
+ {
+ panic!("failed to reserve stack space for exception handling");
+ }
+ Handler
+ }
+}
+
+extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> c::LONG {
+ unsafe {
+ let rec = &(*(*ExceptionInfo).ExceptionRecord);
+ let code = rec.ExceptionCode;
+
+ if code == c::EXCEPTION_STACK_OVERFLOW {
+ rtprintpanic!(
+ "\nthread '{}' has overflowed its stack\n",
+ thread::current().name().unwrap_or("<unknown>")
+ );
+ }
+ c::EXCEPTION_CONTINUE_SEARCH
+ }
+}
+
+pub unsafe fn init() {
+ if c::AddVectoredExceptionHandler(0, vectored_handler).is_null() {
+ panic!("failed to install exception handler");
+ }
+ // Set the thread stack guarantee for the main thread.
+ let _h = Handler::new();
+}
diff --git a/library/std/src/sys/windows/stack_overflow_uwp.rs b/library/std/src/sys/windows/stack_overflow_uwp.rs
new file mode 100644
index 000000000..afdf7f566
--- /dev/null
+++ b/library/std/src/sys/windows/stack_overflow_uwp.rs
@@ -0,0 +1,11 @@
+#![cfg_attr(test, allow(dead_code))]
+
+pub struct Handler;
+
+impl Handler {
+ pub fn new() -> Handler {
+ Handler
+ }
+}
+
+pub unsafe fn init() {}
diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs
new file mode 100644
index 000000000..a001d6b98
--- /dev/null
+++ b/library/std/src/sys/windows/stdio.rs
@@ -0,0 +1,422 @@
+#![unstable(issue = "none", feature = "windows_stdio")]
+
+use crate::char::decode_utf16;
+use crate::cmp;
+use crate::io;
+use crate::os::windows::io::{FromRawHandle, IntoRawHandle};
+use crate::ptr;
+use crate::str;
+use crate::sys::c;
+use crate::sys::cvt;
+use crate::sys::handle::Handle;
+use core::str::utf8_char_width;
+
+// Don't cache handles but get them fresh for every read/write. This allows us to track changes to
+// the value over time (such as if a process calls `SetStdHandle` while it's running). See #40490.
+pub struct Stdin {
+ surrogate: u16,
+ incomplete_utf8: IncompleteUtf8,
+}
+
+pub struct Stdout {
+ incomplete_utf8: IncompleteUtf8,
+}
+
+pub struct Stderr {
+ incomplete_utf8: IncompleteUtf8,
+}
+
+struct IncompleteUtf8 {
+ bytes: [u8; 4],
+ len: u8,
+}
+
+impl IncompleteUtf8 {
+ // Implemented for use in Stdin::read.
+ fn read(&mut self, buf: &mut [u8]) -> usize {
+ // Write to buffer until the buffer is full or we run out of bytes.
+ let to_write = cmp::min(buf.len(), self.len as usize);
+ buf[..to_write].copy_from_slice(&self.bytes[..to_write]);
+
+ // If the buffer was too small to hold everything, move the unwritten remainder to the front.
+ if usize::from(self.len) > buf.len() {
+ self.bytes.copy_within(to_write.., 0);
+ self.len -= to_write as u8;
+ } else {
+ self.len = 0;
+ }
+
+ to_write
+ }
+}
+
+// Apparently Windows doesn't handle large reads on stdin or writes to stdout/stderr well (see
+// #13304 for details).
+//
+// From MSDN (2011): "The storage for this buffer is allocated from a shared heap for the
+// process that is 64 KB in size. The maximum size of the buffer will depend on heap usage."
+//
+// We choose the cap at 8 KiB because libuv does the same, and it seems to be acceptable so far.
+const MAX_BUFFER_SIZE: usize = 8192;
+
+// The standard buffer size of BufReader for Stdin should be able to hold 3x more bytes than there
+// are `u16`'s in MAX_BUFFER_SIZE. This ensures the read data can always be completely decoded from
+// UTF-16 to UTF-8.
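+// (With MAX_BUFFER_SIZE = 8192 bytes we read at most 4096 u16s per call, which decode to at
+// most 4096 * 3 = 12288 bytes of UTF-8 -- exactly the STDIN_BUF_SIZE below.)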
+pub const STDIN_BUF_SIZE: usize = MAX_BUFFER_SIZE / 2 * 3;
+
+pub fn get_handle(handle_id: c::DWORD) -> io::Result<c::HANDLE> {
+ let handle = unsafe { c::GetStdHandle(handle_id) };
+ if handle == c::INVALID_HANDLE_VALUE {
+ Err(io::Error::last_os_error())
+ } else if handle.is_null() {
+ Err(io::Error::from_raw_os_error(c::ERROR_INVALID_HANDLE as i32))
+ } else {
+ Ok(handle)
+ }
+}
+
+fn is_console(handle: c::HANDLE) -> bool {
+ // `GetConsoleMode` will return false (0) if this is a pipe (we don't care about the reported
+ // mode). This will only detect Windows Console, not other terminals connected to a pipe like
+ // MSYS, which is exactly what we need, as only Windows Console needs a conversion to UTF-16.
+ let mut mode = 0;
+ unsafe { c::GetConsoleMode(handle, &mut mode) != 0 }
+}
+
+fn write(
+ handle_id: c::DWORD,
+ data: &[u8],
+ incomplete_utf8: &mut IncompleteUtf8,
+) -> io::Result<usize> {
+ if data.is_empty() {
+ return Ok(0);
+ }
+
+ let handle = get_handle(handle_id)?;
+ if !is_console(handle) {
+ unsafe {
+ let handle = Handle::from_raw_handle(handle);
+ let ret = handle.write(data);
+ handle.into_raw_handle(); // Don't close the handle
+ return ret;
+ }
+ }
+
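+ // Example of how split writes are handled when the console is attached: writing "é"
+ // ([0xC3, 0xA9]) one byte per call buffers 0xC3 below and returns Ok(1); the next call sees
+ // 0xA9 as a continuation byte, completes the code point, writes the whole character to the
+ // console, and returns Ok(1) again.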
+ if incomplete_utf8.len > 0 {
+ assert!(
+ incomplete_utf8.len < 4,
+ "Unexpected number of bytes for incomplete UTF-8 codepoint."
+ );
+ if data[0] >> 6 != 0b10 {
+ // not a continuation byte - reject
+ incomplete_utf8.len = 0;
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidData,
+ "Windows stdio in console mode does not support writing non-UTF-8 byte sequences",
+ ));
+ }
+ incomplete_utf8.bytes[incomplete_utf8.len as usize] = data[0];
+ incomplete_utf8.len += 1;
+ let char_width = utf8_char_width(incomplete_utf8.bytes[0]);
+ if (incomplete_utf8.len as usize) < char_width {
+ // more bytes needed
+ return Ok(1);
+ }
+ let s = str::from_utf8(&incomplete_utf8.bytes[0..incomplete_utf8.len as usize]);
+ incomplete_utf8.len = 0;
+ match s {
+ Ok(s) => {
+ assert_eq!(char_width, s.len());
+ let written = write_valid_utf8_to_console(handle, s)?;
+ assert_eq!(written, s.len()); // guaranteed by write_valid_utf8_to_console() for single codepoint writes
+ return Ok(1);
+ }
+ Err(_) => {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidData,
+ "Windows stdio in console mode does not support writing non-UTF-8 byte sequences",
+ ));
+ }
+ }
+ }
+
+ // As the console is meant for presenting text, we assume bytes of `data` are encoded as UTF-8,
+ // which needs to be encoded as UTF-16.
+ //
+ // If the data is not valid UTF-8 we write out as many bytes as are valid.
+ // If the first byte is invalid, it is either the start of a multi-byte sequence that was cut
+ // short by the provided slice, or the start of a genuinely invalid multi-byte sequence.
+ let len = cmp::min(data.len(), MAX_BUFFER_SIZE / 2);
+ let utf8 = match str::from_utf8(&data[..len]) {
+ Ok(s) => s,
+ Err(ref e) if e.valid_up_to() == 0 => {
+ let first_byte_char_width = utf8_char_width(data[0]);
+ if first_byte_char_width > 1 && data.len() < first_byte_char_width {
+ incomplete_utf8.bytes[0] = data[0];
+ incomplete_utf8.len = 1;
+ return Ok(1);
+ } else {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidData,
+ "Windows stdio in console mode does not support writing non-UTF-8 byte sequences",
+ ));
+ }
+ }
+ Err(e) => str::from_utf8(&data[..e.valid_up_to()]).unwrap(),
+ };
+
+ write_valid_utf8_to_console(handle, utf8)
+}
+
+fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usize> {
+ let mut utf16 = [0u16; MAX_BUFFER_SIZE / 2];
+ let mut len_utf16 = 0;
+ for (chr, dest) in utf8.encode_utf16().zip(utf16.iter_mut()) {
+ *dest = chr;
+ len_utf16 += 1;
+ }
+ let utf16 = &utf16[..len_utf16];
+
+ let mut written = write_u16s(handle, &utf16)?;
+
+ // Figure out how many bytes of UTF-8 were written away as UTF-16.
+ if written == utf16.len() {
+ Ok(utf8.len())
+ } else {
+ // Make sure we didn't end up writing only half of a surrogate pair (even though the chance
+ // is tiny). Because it is not possible for user code to re-slice `data` in such a way that
+ // a missing surrogate can be produced (and also because of the UTF-8 validation above),
+ // write the missing surrogate out now.
+ // Buffering it would mean we have to lie about the number of bytes written.
+ let first_char_remaining = utf16[written];
+ if first_char_remaining >= 0xDC00 && first_char_remaining <= 0xDFFF {
+ // low surrogate
+ // We just hope this works, and give up otherwise
+ let _ = write_u16s(handle, &utf16[written..written + 1]);
+ written += 1;
+ }
+ // Calculate the number of bytes of `utf8` that were actually written.
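+ // (e.g. a u16 in 0x0080..=0x07FF came from a 2-byte UTF-8 sequence, and a surrogate pair is
+ // counted as 3 + 1 = 4 bytes, matching the UTF-8 length of the code point it encodes.)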
+ let mut count = 0;
+ for ch in utf16[..written].iter() {
+ count += match ch {
+ 0x0000..=0x007F => 1,
+ 0x0080..=0x07FF => 2,
+ 0xDC00..=0xDFFF => 1, // Low surrogate. We already counted 3 bytes for the high surrogate.
+ _ => 3,
+ };
+ }
+ debug_assert!(String::from_utf16(&utf16[..written]).unwrap() == utf8[..count]);
+ Ok(count)
+ }
+}
+
+fn write_u16s(handle: c::HANDLE, data: &[u16]) -> io::Result<usize> {
+ let mut written = 0;
+ cvt(unsafe {
+ c::WriteConsoleW(
+ handle,
+ data.as_ptr() as c::LPCVOID,
+ data.len() as u32,
+ &mut written,
+ ptr::null_mut(),
+ )
+ })?;
+ Ok(written as usize)
+}
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin { surrogate: 0, incomplete_utf8: IncompleteUtf8::new() }
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let handle = get_handle(c::STD_INPUT_HANDLE)?;
+ if !is_console(handle) {
+ unsafe {
+ let handle = Handle::from_raw_handle(handle);
+ let ret = handle.read(buf);
+ handle.into_raw_handle(); // Don't close the handle
+ return ret;
+ }
+ }
+
+ // If there are bytes in the incomplete utf-8, start with those.
+ // (No-op if there is nothing in the buffer.)
+ let mut bytes_copied = self.incomplete_utf8.read(buf);
+
+ if bytes_copied == buf.len() {
+ return Ok(bytes_copied);
+ } else if buf.len() - bytes_copied < 4 {
+ // Not enough space left for a whole UTF-8 code point (up to 4 bytes), so go through
+ // self.incomplete_utf8 instead.
+ let mut utf16_buf = [0u16; 1];
+ // Read one u16 character.
+ let read = read_u16s_fixup_surrogates(handle, &mut utf16_buf, 1, &mut self.surrogate)?;
+ // Read bytes, using the (now-empty) self.incomplete_utf8 as extra space.
+ let read_bytes = utf16_to_utf8(&utf16_buf[..read], &mut self.incomplete_utf8.bytes)?;
+
+ // Read in the bytes from incomplete_utf8 until the buffer is full.
+ self.incomplete_utf8.len = read_bytes as u8;
+ // No-op if no bytes.
+ bytes_copied += self.incomplete_utf8.read(&mut buf[bytes_copied..]);
+ Ok(bytes_copied)
+ } else {
+ let mut utf16_buf = [0u16; MAX_BUFFER_SIZE / 2];
+ // In the worst case, a single `u16` of UTF-16 decodes to 3 bytes of UTF-8. So we can
+ // read at most a third of `buf.len()` code units and still uphold the guarantee that no
+ // data gets lost.
+ let amount = cmp::min(buf.len() / 3, utf16_buf.len());
+ let read =
+ read_u16s_fixup_surrogates(handle, &mut utf16_buf, amount, &mut self.surrogate)?;
+
+ match utf16_to_utf8(&utf16_buf[..read], buf) {
+ Ok(value) => return Ok(bytes_copied + value),
+ Err(e) => return Err(e),
+ }
+ }
+ }
+}
+
+// We assume that if the last `u16` is an unpaired (high) surrogate, the pair got sliced apart
+// by our buffer size, and we keep it around for the next read, hoping to reunite the two halves.
+// This is a best effort, and might not work if we are not the only reader on Stdin.
+fn read_u16s_fixup_surrogates(
+ handle: c::HANDLE,
+ buf: &mut [u16],
+ mut amount: usize,
+ surrogate: &mut u16,
+) -> io::Result<usize> {
+ // Insert possibly remaining unpaired surrogate from last read.
+ let mut start = 0;
+ if *surrogate != 0 {
+ buf[0] = *surrogate;
+ *surrogate = 0;
+ start = 1;
+ if amount == 1 {
+ // Special case: `Stdin::read` guarantees we can always read at least one new `u16`
+ // and combine it with an unpaired surrogate, because the UTF-8 buffer is at least
+ // 4 bytes.
+ amount = 2;
+ }
+ }
+ let mut amount = read_u16s(handle, &mut buf[start..amount])? + start;
+
+ if amount > 0 {
+ let last_char = buf[amount - 1];
+ if last_char >= 0xD800 && last_char <= 0xDBFF {
+ // high surrogate
+ *surrogate = last_char;
+ amount -= 1;
+ }
+ }
+ Ok(amount)
+}
+
+fn read_u16s(handle: c::HANDLE, buf: &mut [u16]) -> io::Result<usize> {
+ // Configure the `pInputControl` parameter to not only return on `\r\n` but also Ctrl-Z, the
+ // traditional DOS method to indicate end of character stream / user input (SUB).
+ // See #38274 and https://stackoverflow.com/questions/43836040/win-api-readconsole.
+ const CTRL_Z: u16 = 0x1A;
+ const CTRL_Z_MASK: c::ULONG = 1 << CTRL_Z;
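+ // (dwCtrlWakeupMask is a bitmask over control-character codes: setting bit 0x1A makes
+ // ReadConsoleW return as soon as a Ctrl-Z is read, in addition to the usual `\r\n`.)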
+ let mut input_control = c::CONSOLE_READCONSOLE_CONTROL {
+ nLength: crate::mem::size_of::<c::CONSOLE_READCONSOLE_CONTROL>() as c::ULONG,
+ nInitialChars: 0,
+ dwCtrlWakeupMask: CTRL_Z_MASK,
+ dwControlKeyState: 0,
+ };
+
+ let mut amount = 0;
+ loop {
+ cvt(unsafe {
+ c::SetLastError(0);
+ c::ReadConsoleW(
+ handle,
+ buf.as_mut_ptr() as c::LPVOID,
+ buf.len() as u32,
+ &mut amount,
+ &mut input_control as c::PCONSOLE_READCONSOLE_CONTROL,
+ )
+ })?;
+
+ // ReadConsoleW returns success with ERROR_OPERATION_ABORTED for Ctrl-C or Ctrl-Break.
+ // Explicitly check for that case here and try again.
+ if amount == 0 && unsafe { c::GetLastError() } == c::ERROR_OPERATION_ABORTED {
+ continue;
+ }
+ break;
+ }
+
+ if amount > 0 && buf[amount as usize - 1] == CTRL_Z {
+ amount -= 1;
+ }
+ Ok(amount as usize)
+}
+
+#[allow(unused)]
+fn utf16_to_utf8(utf16: &[u16], utf8: &mut [u8]) -> io::Result<usize> {
+ let mut written = 0;
+ for chr in decode_utf16(utf16.iter().cloned()) {
+ match chr {
+ Ok(chr) => {
+ chr.encode_utf8(&mut utf8[written..]);
+ written += chr.len_utf8();
+ }
+ Err(_) => {
+ // We can't really do any better than forget all data and return an error.
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidData,
+ "Windows stdin in console mode does not support non-UTF-16 input; \
+ encountered unpaired surrogate",
+ ));
+ }
+ }
+ }
+ Ok(written)
+}
+
+impl IncompleteUtf8 {
+ pub const fn new() -> IncompleteUtf8 {
+ IncompleteUtf8 { bytes: [0; 4], len: 0 }
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout { incomplete_utf8: IncompleteUtf8::new() }
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ write(c::STD_OUTPUT_HANDLE, buf, &mut self.incomplete_utf8)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr { incomplete_utf8: IncompleteUtf8::new() }
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ write(c::STD_ERROR_HANDLE, buf, &mut self.incomplete_utf8)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub fn is_ebadf(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(c::ERROR_INVALID_HANDLE as i32)
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ Some(Stderr::new())
+}
diff --git a/library/std/src/sys/windows/stdio_uwp.rs b/library/std/src/sys/windows/stdio_uwp.rs
new file mode 100644
index 000000000..32550f796
--- /dev/null
+++ b/library/std/src/sys/windows/stdio_uwp.rs
@@ -0,0 +1,87 @@
+#![unstable(issue = "none", feature = "windows_stdio")]
+
+use crate::io;
+use crate::mem::ManuallyDrop;
+use crate::os::windows::io::FromRawHandle;
+use crate::sys::c;
+use crate::sys::handle::Handle;
+
+pub struct Stdin {}
+pub struct Stdout;
+pub struct Stderr;
+
+const MAX_BUFFER_SIZE: usize = 8192;
+pub const STDIN_BUF_SIZE: usize = MAX_BUFFER_SIZE / 2 * 3;
+
+pub fn get_handle(handle_id: c::DWORD) -> io::Result<c::HANDLE> {
+ let handle = unsafe { c::GetStdHandle(handle_id) };
+ if handle == c::INVALID_HANDLE_VALUE {
+ Err(io::Error::last_os_error())
+ } else if handle.is_null() {
+ Err(io::Error::from_raw_os_error(c::ERROR_INVALID_HANDLE as i32))
+ } else {
+ Ok(handle)
+ }
+}
+
+fn write(handle_id: c::DWORD, data: &[u8]) -> io::Result<usize> {
+ let handle = get_handle(handle_id)?;
+ // SAFETY: The handle returned from `get_handle` must be valid and non-null.
+ let handle = unsafe { Handle::from_raw_handle(handle) };
+ ManuallyDrop::new(handle).write(data)
+}
+
+impl Stdin {
+ pub const fn new() -> Stdin {
+ Stdin {}
+ }
+}
+
+impl io::Read for Stdin {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let handle = get_handle(c::STD_INPUT_HANDLE)?;
+ // SAFETY: The handle returned from `get_handle` must be valid and non-null.
+ let handle = unsafe { Handle::from_raw_handle(handle) };
+ ManuallyDrop::new(handle).read(buf)
+ }
+}
+
+impl Stdout {
+ pub const fn new() -> Stdout {
+ Stdout
+ }
+}
+
+impl io::Write for Stdout {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ write(c::STD_OUTPUT_HANDLE, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Stderr {
+ pub const fn new() -> Stderr {
+ Stderr
+ }
+}
+
+impl io::Write for Stderr {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ write(c::STD_ERROR_HANDLE, buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+pub fn is_ebadf(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(c::ERROR_INVALID_HANDLE as i32)
+}
+
+pub fn panic_output() -> Option<impl io::Write> {
+ Some(Stderr::new())
+}
diff --git a/library/std/src/sys/windows/thread.rs b/library/std/src/sys/windows/thread.rs
new file mode 100644
index 000000000..c5c9e97e6
--- /dev/null
+++ b/library/std/src/sys/windows/thread.rs
@@ -0,0 +1,125 @@
+use crate::ffi::CStr;
+use crate::io;
+use crate::num::NonZeroUsize;
+use crate::os::windows::io::AsRawHandle;
+use crate::ptr;
+use crate::sys::c;
+use crate::sys::handle::Handle;
+use crate::sys::stack_overflow;
+use crate::sys_common::FromInner;
+use crate::time::Duration;
+
+use libc::c_void;
+
+use super::to_u16s;
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
+
+pub struct Thread {
+ handle: Handle,
+}
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ let p = Box::into_raw(box p);
+
+ // FIXME On UNIX, we guard against stack sizes that are too small but
+ // that's because pthreads enforces that stacks are at least
+ // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
+ // just that below a certain threshold you can't do anything useful.
+ // That threshold is application and architecture-specific, however.
+ let ret = c::CreateThread(
+ ptr::null_mut(),
+ stack,
+ thread_start,
+ p as *mut _,
+ c::STACK_SIZE_PARAM_IS_A_RESERVATION,
+ ptr::null_mut(),
+ );
+
+ return if let Ok(handle) = ret.try_into() {
+ Ok(Thread { handle: Handle::from_inner(handle) })
+ } else {
+ // The thread failed to start and as a result p was not consumed. Therefore, it is
+ // safe to reconstruct the box so that it gets deallocated.
+ drop(Box::from_raw(p));
+ Err(io::Error::last_os_error())
+ };
+
+ extern "system" fn thread_start(main: *mut c_void) -> c::DWORD {
+ unsafe {
+ // Next, set up our stack overflow handler which may get triggered if we run
+ // out of stack.
+ let _handler = stack_overflow::Handler::new();
+ // Finally, let's run some code.
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+ }
+ 0
+ }
+ }
+
+ pub fn set_name(name: &CStr) {
+ if let Ok(utf8) = name.to_str() {
+ if let Ok(utf16) = to_u16s(utf8) {
+ unsafe {
+ c::SetThreadDescription(c::GetCurrentThread(), utf16.as_ptr());
+ };
+ };
+ };
+ }
+
+ pub fn join(self) {
+ let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle(), c::INFINITE) };
+ if rc == c::WAIT_FAILED {
+ panic!("failed to join on thread: {}", io::Error::last_os_error());
+ }
+ }
+
+ pub fn yield_now() {
+ // This function will return 0 if there are no other threads to execute,
+ // but this also means that the yield was useless so this isn't really a
+ // case that needs to be worried about.
+ unsafe {
+ c::SwitchToThread();
+ }
+ }
+
+ pub fn sleep(dur: Duration) {
+ unsafe { c::Sleep(super::dur2timeout(dur)) }
+ }
+
+ pub fn handle(&self) -> &Handle {
+ &self.handle
+ }
+
+ pub fn into_handle(self) -> Handle {
+ self.handle
+ }
+}
+
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ let res = unsafe {
+ let mut sysinfo: c::SYSTEM_INFO = crate::mem::zeroed();
+ c::GetSystemInfo(&mut sysinfo);
+ sysinfo.dwNumberOfProcessors as usize
+ };
+ match res {
+ 0 => Err(io::const_io_error!(
+ io::ErrorKind::NotFound,
+ "The number of hardware threads is not known for the target platform",
+ )),
+ cpus => Ok(unsafe { NonZeroUsize::new_unchecked(cpus) }),
+ }
+}
+
+#[cfg_attr(test, allow(dead_code))]
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
diff --git a/library/std/src/sys/windows/thread_local_dtor.rs b/library/std/src/sys/windows/thread_local_dtor.rs
new file mode 100644
index 000000000..25d1c6e8e
--- /dev/null
+++ b/library/std/src/sys/windows/thread_local_dtor.rs
@@ -0,0 +1,28 @@
+//! Implements thread-local destructors that are not associated with any
+//! particular data.
+
+#![unstable(feature = "thread_local_internals", issue = "none")]
+#![cfg(target_thread_local)]
+
+// Using a per-thread list avoids the problems in synchronizing global state.
+#[thread_local]
+static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+
+pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ DESTRUCTORS.push((t, dtor));
+}
+
+/// Runs destructors. This should not be called until thread exit.
+pub unsafe fn run_keyless_dtors() {
+ // Drop all the destructors.
+ //
+ // Note: While this is potentially an infinite loop, it *should* be
+ // the case that this loop always terminates because we provide the
+ // guarantee that a TLS key cannot be set after it is flagged for
+ // destruction.
+ while let Some((ptr, dtor)) = DESTRUCTORS.pop() {
+ (dtor)(ptr);
+ }
+ // We're done so free the memory.
+ DESTRUCTORS = Vec::new();
+}
diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs
new file mode 100644
index 000000000..ec670238e
--- /dev/null
+++ b/library/std/src/sys/windows/thread_local_key.rs
@@ -0,0 +1,238 @@
+use crate::mem::ManuallyDrop;
+use crate::ptr;
+use crate::sync::atomic::AtomicPtr;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::sys::c;
+
+pub type Key = c::DWORD;
+pub type Dtor = unsafe extern "C" fn(*mut u8);
+
+// Turns out, like pretty much everything, Windows is pretty close to the
+// functionality that Unix provides, but slightly different! In the case of
+// TLS, Windows does not provide an API to provide a destructor for a TLS
+// variable. This ends up being pretty crucial to this implementation, so we
+// need a way around this.
+//
+// The solution here ended up being a little obscure, but fear not, the
+// internet has informed me [1][2] that this solution is not unique (no way
+// I could have thought of it as well!). The key idea is to insert some hook
+// somewhere to run arbitrary code on thread termination. With this in place
+// we'll be able to run anything we like, including all TLS destructors!
+//
+// To accomplish this feat, we perform a number of tasks, all contained
+// within this module:
+//
+// * All TLS destructors are tracked by *us*, not the windows runtime. This
+// means that we have a global list of destructors for each TLS key that
+// we know about.
+// * When a thread exits, we run over the entire list and run dtors for all
+// non-null keys. This attempts to match Unix semantics in this regard.
+//
+// This ends up having the overhead of using a global list, having some
+// locks here and there, and in general just adding some more code bloat. We
+// attempt to optimize runtime by forgetting keys that don't have
+// destructors, but this only gets us so far.
+//
+// For more details and nitty-gritty, see the code sections below!
+//
+// [1]: https://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way
+// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base
+// /threading/thread_local_storage_win.cc#L42
+
+// -------------------------------------------------------------------------
+// Native bindings
+//
+// This section is just raw bindings to the native functions that Windows
+// provides, plus a few extra calls to deal with destructors.
+
+#[inline]
+pub unsafe fn create(dtor: Option<Dtor>) -> Key {
+ let key = c::TlsAlloc();
+ assert!(key != c::TLS_OUT_OF_INDEXES);
+ if let Some(f) = dtor {
+ register_dtor(key, f);
+ }
+ key
+}
+
+#[inline]
+pub unsafe fn set(key: Key, value: *mut u8) {
+ let r = c::TlsSetValue(key, value as c::LPVOID);
+ debug_assert!(r != 0);
+}
+
+#[inline]
+pub unsafe fn get(key: Key) -> *mut u8 {
+ c::TlsGetValue(key) as *mut u8
+}
+
+#[inline]
+pub unsafe fn destroy(_key: Key) {
+ rtabort!("can't destroy tls keys on windows")
+}
+
+#[inline]
+pub fn requires_synchronized_create() -> bool {
+ true
+}
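+
+// A minimal usage sketch of the API above (hypothetical caller; in libstd the real consumer
+// is the cross-platform `StaticKey` wrapper in `sys_common`):
+//
+//     unsafe extern "C" fn dtor(ptr: *mut u8) {
+//         drop(Box::from_raw(ptr as *mut u32));
+//     }
+//     let key = unsafe { create(Some(dtor)) };
+//     unsafe { set(key, Box::into_raw(Box::new(42u32)) as *mut u8) };
+//     let value = unsafe { *(get(key) as *const u32) };
+//
+// The destructor then runs on thread exit via the callback machinery below.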
+
+// -------------------------------------------------------------------------
+// Dtor registration
+//
+// Windows has no native support for running destructors so we manage our own
+// list of destructors to keep track of how to destroy keys. We then install a
+// callback later to get invoked whenever a thread exits, running all
+// appropriate destructors.
+//
+// Currently unregistration from this list is not supported. A destructor can be
+// registered but cannot be unregistered. There are various simplifying reasons
+// for doing this, the big ones being:
+//
+// 1. Currently we don't even support deallocating TLS keys, so normal operation
+// doesn't need to deallocate a destructor.
+// 2. There is no point in time where we know we can unregister a destructor
+// because it could always be getting run by some remote thread.
+//
+// Typically processes have a statically known set of TLS keys which is pretty
+// small, and we'd want to keep this memory alive for the whole process anyway
+// really.
+//
+// Perhaps one day we can fold the `Box` here into a static allocation,
+// expanding the `StaticKey` structure to contain not only a slot for the TLS
+// key but also a slot for the destructor queue on windows. An optimization for
+// another day!
+
+static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
+
+struct Node {
+ dtor: Dtor,
+ key: Key,
+ next: *mut Node,
+}
+
+unsafe fn register_dtor(key: Key, dtor: Dtor) {
+ let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
+
+ let mut head = DTORS.load(SeqCst);
+ loop {
+ node.next = head;
+ match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
+ Ok(_) => return, // nothing to drop, we successfully added the node to the list
+ Err(cur) => head = cur,
+ }
+ }
+}
+
+// -------------------------------------------------------------------------
+// Where the Magic (TM) Happens
+//
+// If you're looking at this code, and wondering "what is this doing?",
+// you're not alone! I'll try to break this down step by step:
+//
+// # What's up with CRT$XLB?
+//
+// For anything about TLS destructors to work on Windows, we have to be able
+// to run *something* when a thread exits. To do so, we place a very special
+// static in a very special location. If this is encoded in just the right
+// way, the kernel's loader is apparently nice enough to run some function
+// of ours whenever a thread exits! How nice of the kernel!
+//
+// Lots of detailed information can be found in source [1] above, but the
+// gist of it is that this is leveraging a feature of Microsoft's PE format
+// (executable format) which is not actually used by any compilers today.
+// This apparently translates to any callbacks in the ".CRT$XLB" section
+// being run on certain events.
+//
+// So after all that, we use the compiler's #[link_section] feature to place
+// a callback pointer into the magic section so it ends up being called.
+//
+// # What's up with this callback?
+//
+// The callback specified receives a number of parameters from... someone!
+// (the kernel? the runtime? I'm not quite sure!) There are a few events that
+// this gets invoked for, but we're currently only interested in when a
+// thread or a process "detaches" (exits). The process part happens for the
+// last thread and the thread part happens for any normal thread.
+//
+// # Ok, what's up with running all these destructors?
+//
+// This will likely need to be improved over time, but this function
+// attempts a "poor man's" destructor callback system. Once we've got a list
+// of what to run, we iterate over all keys, check their values, and then run
+// destructors if the values turn out to be non null (setting them to null just
+// beforehand). We do this a few times in a loop to basically match Unix
+// semantics. If we don't reach a fixed point after a short while then we give
+// up and most likely leak something.
+//
+// # The article mentions weird stuff about "/INCLUDE"?
+//
+// It sure does! Specifically we're talking about this quote:
+//
+// The Microsoft run-time library facilitates this process by defining a
+// memory image of the TLS Directory and giving it the special name
+// “__tls_used” (Intel x86 platforms) or “_tls_used” (other platforms). The
+// linker looks for this memory image and uses the data there to create the
+// TLS Directory. Other compilers that support TLS and work with the
+// Microsoft linker must use this same technique.
+//
+// Basically what this means is that if we want support for our TLS
+// destructors/our hook being called then we need to make sure the linker does
+// not omit this symbol. Otherwise it will omit it and our callback won't be
+// wired up.
+//
+// We don't actually use the `/INCLUDE` linker flag here like the article
+// mentions because the Rust compiler doesn't propagate linker flags, but
+// instead we use a shim function which performs a volatile 1-byte load from
+// the address of the symbol to ensure it sticks around.
+
+#[link_section = ".CRT$XLB"]
+#[allow(dead_code, unused_variables)]
+#[used] // we don't want LLVM eliminating this symbol for any reason, and
+// when the symbol makes it to the linker the linker will take over
+pub static p_thread_callback: unsafe extern "system" fn(c::LPVOID, c::DWORD, c::LPVOID) =
+ on_tls_callback;
+
+#[allow(dead_code, unused_variables)]
+unsafe extern "system" fn on_tls_callback(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID) {
+ if dwReason == c::DLL_THREAD_DETACH || dwReason == c::DLL_PROCESS_DETACH {
+ run_dtors();
+ #[cfg(target_thread_local)]
+ super::thread_local_dtor::run_keyless_dtors();
+ }
+
+ // See comments above for what this is doing. Note that we don't need this
+ // trickery on GNU windows, just on MSVC.
+ reference_tls_used();
+ #[cfg(target_env = "msvc")]
+ unsafe fn reference_tls_used() {
+ extern "C" {
+ static _tls_used: u8;
+ }
+ crate::intrinsics::volatile_load(&_tls_used);
+ }
+ #[cfg(not(target_env = "msvc"))]
+ unsafe fn reference_tls_used() {}
+}
+
+#[allow(dead_code)] // actually called above
+unsafe fn run_dtors() {
+ let mut any_run = true;
+ for _ in 0..5 {
+ if !any_run {
+ break;
+ }
+ any_run = false;
+ let mut cur = DTORS.load(SeqCst);
+ while !cur.is_null() {
+ let ptr = c::TlsGetValue((*cur).key);
+
+ if !ptr.is_null() {
+ c::TlsSetValue((*cur).key, ptr::null_mut());
+ ((*cur).dtor)(ptr as *mut _);
+ any_run = true;
+ }
+
+ cur = (*cur).next;
+ }
+ }
+}
diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs
new file mode 100644
index 000000000..d876e0f6f
--- /dev/null
+++ b/library/std/src/sys/windows/thread_parker.rs
@@ -0,0 +1,255 @@
+// Thread parker implementation for Windows.
+//
+// This uses WaitOnAddress and WakeByAddressSingle if available (Windows 8+).
+// This modern API is exactly the same as the futex syscalls the Linux thread
+// parker uses. When these APIs are available, the implementation of this
+// thread parker matches the Linux thread parker exactly.
+//
+// However, when the modern API is not available, this implementation falls
+// back to NT Keyed Events, which are similar, but have some important
+// differences. These are available since Windows XP.
+//
+// WaitOnAddress first checks the state of the thread parker to make sure no
+// WakeByAddressSingle calls can be missed between updating the parker state
+// and calling the function.
+//
+// NtWaitForKeyedEvent does not have this option, and unconditionally blocks
+// without checking the parker state first. Instead, NtReleaseKeyedEvent
+// (unlike WakeByAddressSingle) *blocks* until it woke up a thread waiting for
+// it by NtWaitForKeyedEvent. This way, we can be sure no events are missed,
+// but we need to be careful not to block unpark() if park_timeout() was woken
+// up by a timeout instead of unpark().
+//
+// Unlike WaitOnAddress, NtWaitForKeyedEvent/NtReleaseKeyedEvent operate on a
+// HANDLE (created with NtCreateKeyedEvent). This means that we can be sure
+// a successfully awoken park() was awoken by unpark() and not a
+// NtReleaseKeyedEvent call from some other code, as these events are not only
+// matched by the key (address of the parker (state)), but also by this HANDLE.
+// We lazily allocate this handle the first time it is needed.
+//
+// The fast path (calling park() after unpark() was already called) and the
+// possible states are the same for both implementations. This is used here to
+// make sure the fast path does not even check which API to use, but can return
+// right away, independent of the used API. Only the slow paths (which will
+// actually block/wake a thread) check which API is available and have
+// different implementations.
+//
+// Unfortunately, NT Keyed Events are an undocumented Windows API. However:
+// - This API is relatively simple with obvious behaviour, and there are
+// several (unofficial) articles documenting the details. [1]
+// - `parking_lot` has been using this API for years (on Windows versions
+// before Windows 8). [2] Many big projects extensively use parking_lot,
+// such as servo and the Rust compiler itself.
+// - It is the underlying API used by Windows SRW locks and Windows critical
+// sections. [3] [4]
+// - The source code of the implementations of Wine, ReactOs, and Windows XP
+// are available and match the expected behaviour.
+// - The main risk with an undocumented API is that it might change in the
+// future. But since we only use it for older versions of Windows, that's not
+// a problem.
+// - Even if these functions do not block or wake as we expect (which is
+// unlikely, see all previous points), this implementation would still be
+// memory safe. The NT Keyed Events API is only used to sleep/block in the
+// right place.
+//
+// [1]: http://www.locklessinc.com/articles/keyed_events/
+// [2]: https://github.com/Amanieu/parking_lot/commit/43abbc964e
+// [3]: https://docs.microsoft.com/en-us/archive/msdn-magazine/2012/november/windows-with-c-the-evolution-of-synchronization-in-windows-and-c
+// [4]: Windows Internals, Part 1, ISBN 9780735671300
+
+use crate::pin::Pin;
+use crate::ptr;
+use crate::sync::atomic::{
+ AtomicI8, AtomicPtr,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::{c, dur2timeout};
+use crate::time::Duration;
+
+pub struct Parker {
+ state: AtomicI8,
+}
+
+const PARKED: i8 = -1;
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
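+
+// State transitions (as implemented by park(), park_timeout() and unpark() below):
+//   park():    NOTIFIED -> EMPTY  (fast path, return immediately)
+//              EMPTY    -> PARKED (block until woken)
+//   unpark():  PARKED   -> NOTIFIED (wake the parked thread)
+//              EMPTY    -> NOTIFIED (store a token for the next park())
+//              NOTIFIED -> NOTIFIED (token already stored)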
+
+// Notes about memory ordering:
+//
+// Memory ordering is only relevant for the relative ordering of operations
+// between different variables. Even Ordering::Relaxed guarantees a
+// monotonic/consistent order when looking at just a single atomic variable.
+//
+// So, since this parker is just a single atomic variable, we only need to look
+// at the ordering guarantees we need to provide to the 'outside world'.
+//
+// The only memory ordering guarantee that parking and unparking provide, is
+// that things which happened before unpark() are visible on the thread
+// returning from park() afterwards. Otherwise, it was effectively unparked
+// before unpark() was called while still consuming the 'token'.
+//
+// In other words, unpark() needs to synchronize with the part of park() that
+// consumes the token and returns.
+//
+// This is done with a release-acquire synchronization, by using
+// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using
+// Ordering::Acquire when reading this state in park() after waking up.
+impl Parker {
+ /// Construct the Windows parker. The UNIX parker implementation
+ /// requires this to happen in-place.
+ pub unsafe fn new(parker: *mut Parker) {
+ parker.write(Self { state: AtomicI8::new(EMPTY) });
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`. This implementation doesn't require `Pin`,
+ // but other implementations do.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ if let Some(wait_on_address) = c::WaitOnAddress::option() {
+ loop {
+ // Wait for something to happen, assuming it's still set to PARKED.
+ wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, c::INFINITE);
+ // Change NOTIFIED=>EMPTY but leave PARKED alone.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() {
+ // Actually woken up by unpark().
+ return;
+ } else {
+ // Spurious wake up. We loop to try again.
+ }
+ }
+ } else {
+ // Wait for unpark() to produce this event.
+ c::NtWaitForKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ // Note that we don't just write EMPTY, but use swap() to also
+ // include an acquire-ordered read to synchronize with unpark()'s
+ // release-ordered write.
+ self.state.swap(EMPTY, Acquire);
+ }
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`. This implementation doesn't require `Pin`,
+ // but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ if let Some(wait_on_address) = c::WaitOnAddress::option() {
+ // Wait for something to happen, assuming it's still set to PARKED.
+ wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, dur2timeout(timeout));
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ // Note that we don't just write EMPTY, but use swap() to also
+ // include an acquire-ordered read to synchronize with unpark()'s
+ // release-ordered write.
+ if self.state.swap(EMPTY, Acquire) == NOTIFIED {
+ // Actually woken up by unpark().
+ } else {
+ // Timeout or spurious wake up.
+ // We return either way, because we can't easily tell if it was the
+ // timeout or not.
+ }
+ } else {
+ // Need to wait for unpark() using NtWaitForKeyedEvent.
+ let handle = keyed_event_handle();
+
+ // NtWaitForKeyedEvent uses a unit of 100ns, and uses negative
+ // values to indicate a relative time on the monotonic clock.
+ // This is documented here for the underlying KeWaitForSingleObject function:
+ // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject
+ let mut timeout = match i64::try_from((timeout.as_nanos() + 99) / 100) {
+ Ok(t) => -t,
+ Err(_) => i64::MIN,
+ };
+
+ // Wait for unpark() to produce this event.
+ let unparked =
+ c::NtWaitForKeyedEvent(handle, self.ptr(), 0, &mut timeout) == c::STATUS_SUCCESS;
+
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ let prev_state = self.state.swap(EMPTY, Acquire);
+
+ if !unparked && prev_state == NOTIFIED {
+ // We were awoken by a timeout, not by unpark(), but the state
+ // was set to NOTIFIED, which means we *just* missed an
+ // unpark(), which is now blocked on us to wait for it.
+ // Wait for it to consume the event and unblock that thread.
+ c::NtWaitForKeyedEvent(handle, self.ptr(), 0, ptr::null_mut());
+ }
+ }
+ }
+
+ // This implementation doesn't require `Pin`, but other implementations do.
+ pub fn unpark(self: Pin<&Self>) {
+ // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and
+ // wake the thread in the first case.
+ //
+ // Note that even NOTIFIED=>NOTIFIED results in a write. This is on
+ // purpose, to make sure every unpark() has a release-acquire ordering
+ // with park().
+ if self.state.swap(NOTIFIED, Release) == PARKED {
+ if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
+ unsafe {
+ wake_by_address_single(self.ptr());
+ }
+ } else {
+ // If we run NtReleaseKeyedEvent before the waiting thread runs
+ // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
+ // If the waiting thread wakes up before we run NtReleaseKeyedEvent
+ // (e.g. due to a timeout), this blocks until we do wake up a thread.
+ // To prevent this thread from blocking indefinitely in that case,
+ // park_timeout() will, after seeing the state set to NOTIFIED after
+ // waking up, call NtWaitForKeyedEvent again to unblock us.
+ unsafe {
+ c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+ }
+ }
+ }
+ }
+
+ fn ptr(&self) -> c::LPVOID {
+ &self.state as *const _ as c::LPVOID
+ }
+}
+
+fn keyed_event_handle() -> c::HANDLE {
+ const INVALID: c::HANDLE = ptr::invalid_mut(!0);
+ static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID);
+ match HANDLE.load(Relaxed) {
+ INVALID => {
+ let mut handle = c::INVALID_HANDLE_VALUE;
+ unsafe {
+ match c::NtCreateKeyedEvent(
+ &mut handle,
+ c::GENERIC_READ | c::GENERIC_WRITE,
+ ptr::null_mut(),
+ 0,
+ ) {
+ c::STATUS_SUCCESS => {}
+ r => panic!("Unable to create keyed event handle: error {r}"),
+ }
+ }
+ match HANDLE.compare_exchange(INVALID, handle, Relaxed, Relaxed) {
+ Ok(_) => handle,
+ Err(h) => {
+ // Lost the race to another thread initializing HANDLE before we did.
+ // Closing our handle and using theirs instead.
+ unsafe {
+ c::CloseHandle(handle);
+ }
+ h
+ }
+ }
+ }
+ handle => handle,
+ }
+}
diff --git a/library/std/src/sys/windows/time.rs b/library/std/src/sys/windows/time.rs
new file mode 100644
index 000000000..b8209a854
--- /dev/null
+++ b/library/std/src/sys/windows/time.rs
@@ -0,0 +1,224 @@
+use crate::cmp::Ordering;
+use crate::fmt;
+use crate::mem;
+use crate::sys::c;
+use crate::sys_common::IntoInner;
+use crate::time::Duration;
+
+use core::hash::{Hash, Hasher};
+
+const NANOS_PER_SEC: u64 = 1_000_000_000;
+const INTERVALS_PER_SEC: u64 = NANOS_PER_SEC / 100;
+
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
+pub struct Instant {
+ // This duration is relative to an arbitrary microsecond epoch
+ // from the winapi QueryPerformanceCounter function.
+ t: Duration,
+}
+
+#[derive(Copy, Clone)]
+pub struct SystemTime {
+ t: c::FILETIME,
+}
+
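+// Number of 100 ns intervals between the Windows FILETIME epoch (1601-01-01) and the Unix
+// epoch (1970-01-01): 11_644_473_600 seconds.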
+const INTERVALS_TO_UNIX_EPOCH: u64 = 11_644_473_600 * INTERVALS_PER_SEC;
+
+pub const UNIX_EPOCH: SystemTime = SystemTime {
+ t: c::FILETIME {
+ dwLowDateTime: INTERVALS_TO_UNIX_EPOCH as u32,
+ dwHighDateTime: (INTERVALS_TO_UNIX_EPOCH >> 32) as u32,
+ },
+};
+
+impl Instant {
+ pub fn now() -> Instant {
+ // High precision timing on windows operates in "Performance Counter"
+ // units, as returned by the WINAPI QueryPerformanceCounter function.
+ // These relate to seconds by a factor of QueryPerformanceFrequency.
+ // In order to keep unit conversions out of normal interval math, we
+ // measure in QPC units and immediately convert to nanoseconds.
+ perf_counter::PerformanceCounterInstant::now().into()
+ }
+
+ pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
+ // On windows there's a threshold below which we consider two timestamps
+ // equivalent due to measurement error. For more details + doc link,
+ // check the docs on epsilon.
+ let epsilon = perf_counter::PerformanceCounterInstant::epsilon();
+ if other.t > self.t && other.t - self.t <= epsilon {
+ Some(Duration::new(0, 0))
+ } else {
+ self.t.checked_sub(other.t)
+ }
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_add(*other)? })
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
+ Some(Instant { t: self.t.checked_sub(*other)? })
+ }
+}
+
+impl SystemTime {
+ pub fn now() -> SystemTime {
+ unsafe {
+ let mut t: SystemTime = mem::zeroed();
+ c::GetSystemTimePreciseAsFileTime(&mut t.t);
+ t
+ }
+ }
+
+ fn from_intervals(intervals: i64) -> SystemTime {
+ SystemTime {
+ t: c::FILETIME {
+ dwLowDateTime: intervals as c::DWORD,
+ dwHighDateTime: (intervals >> 32) as c::DWORD,
+ },
+ }
+ }
+
+ fn intervals(&self) -> i64 {
+ (self.t.dwLowDateTime as i64) | ((self.t.dwHighDateTime as i64) << 32)
+ }
+
+ pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
+ let me = self.intervals();
+ let other = other.intervals();
+ if me >= other {
+ Ok(intervals2dur((me - other) as u64))
+ } else {
+ Err(intervals2dur((other - me) as u64))
+ }
+ }
+
+ pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> {
+ let intervals = self.intervals().checked_add(checked_dur2intervals(other)?)?;
+ Some(SystemTime::from_intervals(intervals))
+ }
+
+ pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> {
+ let intervals = self.intervals().checked_sub(checked_dur2intervals(other)?)?;
+ Some(SystemTime::from_intervals(intervals))
+ }
+}
+
+impl PartialEq for SystemTime {
+ fn eq(&self, other: &SystemTime) -> bool {
+ self.intervals() == other.intervals()
+ }
+}
+
+impl Eq for SystemTime {}
+
+impl PartialOrd for SystemTime {
+ fn partial_cmp(&self, other: &SystemTime) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for SystemTime {
+ fn cmp(&self, other: &SystemTime) -> Ordering {
+ self.intervals().cmp(&other.intervals())
+ }
+}
+
+impl fmt::Debug for SystemTime {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SystemTime").field("intervals", &self.intervals()).finish()
+ }
+}
+
+impl From<c::FILETIME> for SystemTime {
+ fn from(t: c::FILETIME) -> SystemTime {
+ SystemTime { t }
+ }
+}
+
+impl IntoInner<c::FILETIME> for SystemTime {
+ fn into_inner(self) -> c::FILETIME {
+ self.t
+ }
+}
+
+impl Hash for SystemTime {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.intervals().hash(state)
+ }
+}
+
+fn checked_dur2intervals(dur: &Duration) -> Option<i64> {
+ dur.as_secs()
+ .checked_mul(INTERVALS_PER_SEC)?
+ .checked_add(dur.subsec_nanos() as u64 / 100)?
+ .try_into()
+ .ok()
+}
+
+fn intervals2dur(intervals: u64) -> Duration {
+ Duration::new(intervals / INTERVALS_PER_SEC, ((intervals % INTERVALS_PER_SEC) * 100) as u32)
+}
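+
+// e.g. Duration::new(1, 250) maps to 1 * 10_000_000 + 250 / 100 = 10_000_002 intervals, and
+// intervals2dur(10_000_002) gives back Duration::new(1, 200): sub-100ns precision is
+// truncated by the conversion.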
+
+mod perf_counter {
+ use super::NANOS_PER_SEC;
+ use crate::sync::atomic::{AtomicU64, Ordering};
+ use crate::sys::c;
+ use crate::sys::cvt;
+ use crate::sys_common::mul_div_u64;
+ use crate::time::Duration;
+
+ pub struct PerformanceCounterInstant {
+ ts: c::LARGE_INTEGER,
+ }
+ impl PerformanceCounterInstant {
+ pub fn now() -> Self {
+ Self { ts: query() }
+ }
+
+ // Per microsoft docs, the margin of error for cross-thread time comparisons
+ // using QueryPerformanceCounter is 1 "tick" -- defined as 1/frequency().
+ // Reference: https://docs.microsoft.com/en-us/windows/desktop/SysInfo
+ // /acquiring-high-resolution-time-stamps
+ pub fn epsilon() -> Duration {
+ let epsilon = NANOS_PER_SEC / (frequency() as u64);
+ Duration::from_nanos(epsilon)
+ }
+ }
+ impl From<PerformanceCounterInstant> for super::Instant {
+ fn from(other: PerformanceCounterInstant) -> Self {
+ let freq = frequency() as u64;
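+ // e.g. with a 10 MHz counter (freq == 10_000_000) each tick is 100 ns, so a raw
+ // reading of 5 converts to 500 ns here.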
+ let instant_nsec = mul_div_u64(other.ts as u64, NANOS_PER_SEC, freq);
+ Self { t: Duration::from_nanos(instant_nsec) }
+ }
+ }
+
+ fn frequency() -> c::LARGE_INTEGER {
+ // Either the cached result of `QueryPerformanceFrequency` or `0` for
+ // uninitialized. Storing this as a single `AtomicU64` allows us to use
+ // `Relaxed` operations, as we are only interested in the effects on a
+ // single memory location.
+ static FREQUENCY: AtomicU64 = AtomicU64::new(0);
+
+ let cached = FREQUENCY.load(Ordering::Relaxed);
+ // If a previous thread has filled in this global state, use that.
+ if cached != 0 {
+ return cached as c::LARGE_INTEGER;
+ }
+ // ... otherwise learn for ourselves ...
+ let mut frequency = 0;
+ unsafe {
+ cvt(c::QueryPerformanceFrequency(&mut frequency)).unwrap();
+ }
+
+ FREQUENCY.store(frequency as u64, Ordering::Relaxed);
+ frequency
+ }
+
+ fn query() -> c::LARGE_INTEGER {
+ let mut qpc_value: c::LARGE_INTEGER = 0;
+ cvt(unsafe { c::QueryPerformanceCounter(&mut qpc_value) }).unwrap();
+ qpc_value
+ }
+}
diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs
new file mode 100644
index 000000000..31164afdc
--- /dev/null
+++ b/library/std/src/sys_common/backtrace.rs
@@ -0,0 +1,183 @@
+use crate::backtrace_rs::{self, BacktraceFmt, BytesOrWideString, PrintFmt};
+use crate::borrow::Cow;
+/// Common code for printing the backtrace in the same way across the different
+/// supported platforms.
+use crate::env;
+use crate::fmt;
+use crate::io;
+use crate::io::prelude::*;
+use crate::path::{self, Path, PathBuf};
+use crate::sys_common::mutex::StaticMutex;
+
+/// Max number of frames to print.
+const MAX_NB_FRAMES: usize = 100;
+
+// SAFETY: Don't attempt to lock this reentrantly.
+pub unsafe fn lock() -> impl Drop {
+ static LOCK: StaticMutex = StaticMutex::new();
+ LOCK.lock()
+}
+
+/// Prints the current backtrace.
+pub fn print(w: &mut dyn Write, format: PrintFmt) -> io::Result<()> {
+ // There are issues currently linking libbacktrace into tests, and in
+ // general during libstd's own unit tests we're not testing this path. In
+ // test mode immediately return here to optimize away any references to the
+ // libbacktrace symbols
+ if cfg!(test) {
+ return Ok(());
+ }
+
+ // Use a lock to prevent mixed output in multithreading context.
+ // Some platforms also require it, like `SymFromAddr` on Windows.
+ unsafe {
+ let _lock = lock();
+ _print(w, format)
+ }
+}
+
+unsafe fn _print(w: &mut dyn Write, format: PrintFmt) -> io::Result<()> {
+ struct DisplayBacktrace {
+ format: PrintFmt,
+ }
+ impl fmt::Display for DisplayBacktrace {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ unsafe { _print_fmt(fmt, self.format) }
+ }
+ }
+ write!(w, "{}", DisplayBacktrace { format })
+}
+
+unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::Result {
+ // Always 'fail' to get the cwd when running under Miri -
+ // this allows Miri to display backtraces in isolation mode
+ let cwd = if !cfg!(miri) { env::current_dir().ok() } else { None };
+
+ let mut print_path = move |fmt: &mut fmt::Formatter<'_>, bows: BytesOrWideString<'_>| {
+ output_filename(fmt, bows, print_fmt, cwd.as_ref())
+ };
+ writeln!(fmt, "stack backtrace:")?;
+ let mut bt_fmt = BacktraceFmt::new(fmt, print_fmt, &mut print_path);
+ bt_fmt.add_context()?;
+ let mut idx = 0;
+ let mut res = Ok(());
+ // Start immediately if we're not using a short backtrace.
+ let mut start = print_fmt != PrintFmt::Short;
+ backtrace_rs::trace_unsynchronized(|frame| {
+ if print_fmt == PrintFmt::Short && idx > MAX_NB_FRAMES {
+ return false;
+ }
+
+ let mut hit = false;
+ let mut stop = false;
+ backtrace_rs::resolve_frame_unsynchronized(frame, |symbol| {
+ hit = true;
+ if print_fmt == PrintFmt::Short {
+ if let Some(sym) = symbol.name().and_then(|s| s.as_str()) {
+ if start && sym.contains("__rust_begin_short_backtrace") {
+ stop = true;
+ return;
+ }
+ if sym.contains("__rust_end_short_backtrace") {
+ start = true;
+ return;
+ }
+ }
+ }
+
+ if start {
+ res = bt_fmt.frame().symbol(frame, symbol);
+ }
+ });
+ if stop {
+ return false;
+ }
+ if !hit && start {
+ res = bt_fmt.frame().print_raw(frame.ip(), None, None, None);
+ }
+
+ idx += 1;
+ res.is_ok()
+ });
+ res?;
+ bt_fmt.finish()?;
+ if print_fmt == PrintFmt::Short {
+ writeln!(
+ fmt,
+ "note: Some details are omitted, \
+ run with `RUST_BACKTRACE=full` for a verbose backtrace."
+ )?;
+ }
+ Ok(())
+}
+
+/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
+/// this is only inline(never) when backtraces in libstd are enabled, otherwise
+/// it's fine to optimize away.
+#[cfg_attr(feature = "backtrace", inline(never))]
+pub fn __rust_begin_short_backtrace<F, T>(f: F) -> T
+where
+ F: FnOnce() -> T,
+{
+ let result = f();
+
+ // prevent this frame from being tail-call optimised away
+ crate::hint::black_box(());
+
+ result
+}
+
+/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
+/// this is only inline(never) when backtraces in libstd are enabled, otherwise
+/// it's fine to optimize away.
+#[cfg_attr(feature = "backtrace", inline(never))]
+pub fn __rust_end_short_backtrace<F, T>(f: F) -> T
+where
+ F: FnOnce() -> T,
+{
+ let result = f();
+
+ // prevent this frame from being tail-call optimised away
+ crate::hint::black_box(());
+
+ result
+}
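+
+// Roughly speaking: user code runs inside `__rust_begin_short_backtrace` (an outer frame),
+// while the backtrace-capturing machinery runs inside `__rust_end_short_backtrace` (an inner
+// frame). `_print_fmt` above walks frames from innermost to outermost, so in short mode it
+// starts printing after the `end` marker and stops at the `begin` marker, trimming both ends.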
+
+/// Prints the filename of the backtrace frame.
+///
+/// See also `output`.
+pub fn output_filename(
+ fmt: &mut fmt::Formatter<'_>,
+ bows: BytesOrWideString<'_>,
+ print_fmt: PrintFmt,
+ cwd: Option<&PathBuf>,
+) -> fmt::Result {
+ let file: Cow<'_, Path> = match bows {
+ #[cfg(unix)]
+ BytesOrWideString::Bytes(bytes) => {
+ use crate::os::unix::prelude::*;
+ Path::new(crate::ffi::OsStr::from_bytes(bytes)).into()
+ }
+ #[cfg(not(unix))]
+ BytesOrWideString::Bytes(bytes) => {
+ Path::new(crate::str::from_utf8(bytes).unwrap_or("<unknown>")).into()
+ }
+ #[cfg(windows)]
+ BytesOrWideString::Wide(wide) => {
+ use crate::os::windows::prelude::*;
+ Cow::Owned(crate::ffi::OsString::from_wide(wide).into())
+ }
+ #[cfg(not(windows))]
+ BytesOrWideString::Wide(_wide) => Path::new("<unknown>").into(),
+ };
+ if print_fmt == PrintFmt::Short && file.is_absolute() {
+ if let Some(cwd) = cwd {
+ if let Ok(stripped) = file.strip_prefix(&cwd) {
+ if let Some(s) = stripped.to_str() {
+ return write!(fmt, ".{}{s}", path::MAIN_SEPARATOR);
+ }
+ }
+ }
+ }
+ fmt::Display::fmt(&file.display(), fmt)
+}
diff --git a/library/std/src/sys_common/condvar.rs b/library/std/src/sys_common/condvar.rs
new file mode 100644
index 000000000..f3ac1061b
--- /dev/null
+++ b/library/std/src/sys_common/condvar.rs
@@ -0,0 +1,56 @@
+use crate::sys::locks as imp;
+use crate::sys_common::mutex::MovableMutex;
+use crate::time::Duration;
+
+mod check;
+
+type CondvarCheck = <imp::MovableMutex as check::CondvarCheck>::Check;
+
+/// An OS-based condition variable.
+pub struct Condvar {
+ inner: imp::MovableCondvar,
+ check: CondvarCheck,
+}
+
+impl Condvar {
+ /// Creates a new condition variable for use.
+ #[inline]
+ pub const fn new() -> Self {
+ Self { inner: imp::MovableCondvar::new(), check: CondvarCheck::new() }
+ }
+
+ /// Signals one waiter on this condition variable to wake up.
+ #[inline]
+ pub fn notify_one(&self) {
+ unsafe { self.inner.notify_one() };
+ }
+
+ /// Awakens all current waiters on this condition variable.
+ #[inline]
+ pub fn notify_all(&self) {
+ unsafe { self.inner.notify_all() };
+ }
+
+ /// Waits for a signal on the specified mutex.
+ ///
+ /// Behavior is undefined if the mutex is not locked by the current thread.
+ ///
+ /// May panic if used with more than one mutex.
+ #[inline]
+ pub unsafe fn wait(&self, mutex: &MovableMutex) {
+ self.check.verify(mutex);
+ self.inner.wait(mutex.raw())
+ }
+
+ /// Waits for a signal on the specified mutex with a timeout duration
+ /// specified by `dur` (a relative time into the future).
+ ///
+ /// Behavior is undefined if the mutex is not locked by the current thread.
+ ///
+ /// May panic if used with more than one mutex.
+ #[inline]
+ pub unsafe fn wait_timeout(&self, mutex: &MovableMutex, dur: Duration) -> bool {
+ self.check.verify(mutex);
+ self.inner.wait_timeout(mutex.raw(), dur)
+ }
+}
diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs
new file mode 100644
index 000000000..ce8f36704
--- /dev/null
+++ b/library/std/src/sys_common/condvar/check.rs
@@ -0,0 +1,57 @@
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering};
+use crate::sys::locks as imp;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+use crate::sys_common::mutex::MovableMutex;
+
+pub trait CondvarCheck {
+ type Check;
+}
+
+/// For boxed mutexes, a `Condvar` will check that it's only ever used with the
+/// same mutex, based on its (stable) address.
+impl<T: LazyInit> CondvarCheck for LazyBox<T> {
+ type Check = SameMutexCheck;
+}
+
+pub struct SameMutexCheck {
+ addr: AtomicPtr<()>,
+}
+
+#[allow(dead_code)]
+impl SameMutexCheck {
+ pub const fn new() -> Self {
+ Self { addr: AtomicPtr::new(ptr::null_mut()) }
+ }
+ pub fn verify(&self, mutex: &MovableMutex) {
+ let addr = mutex.raw() as *const imp::Mutex as *const () as *mut _;
+ // Relaxed is okay here because we never read through `self.addr`, and only use it to
+ // compare addresses.
+ match self.addr.compare_exchange(
+ ptr::null_mut(),
+ addr,
+ Ordering::Relaxed,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => {} // Stored the address
+ Err(n) if n == addr => {} // Lost a race to store the same address
+ _ => panic!("attempted to use a condition variable with two mutexes"),
+ }
+ }
+}
+
+/// Unboxed mutexes may move, so `Condvar` cannot require its address to stay
+/// constant.
+impl CondvarCheck for imp::Mutex {
+ type Check = NoCheck;
+}
+
+pub struct NoCheck;
+
+#[allow(dead_code)]
+impl NoCheck {
+ pub const fn new() -> Self {
+ Self
+ }
+ pub fn verify(&self, _: &MovableMutex) {}
+}
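
A small illustrative sketch of what `SameMutexCheck` enforces on platforms where the mutex is boxed (and therefore has a stable address); the demo function is hypothetical and not part of this diff:

```rust
// Illustrative only: the check records the first mutex address it sees and
// panics on any mismatch, turning misuse into a clear error.
fn demo_same_mutex_check() {
    let check = SameMutexCheck::new();
    let a = MovableMutex::new();
    let _b = MovableMutex::new();

    check.verify(&a); // stores the address of `a`'s inner mutex
    check.verify(&a); // fine: same mutex again
    // check.verify(&_b); // would panic:
    //     "attempted to use a condition variable with two mutexes"
}
```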
diff --git a/library/std/src/sys_common/fs.rs b/library/std/src/sys_common/fs.rs
new file mode 100644
index 000000000..617ac52e5
--- /dev/null
+++ b/library/std/src/sys_common/fs.rs
@@ -0,0 +1,51 @@
+#![allow(dead_code)] // not used on all platforms
+
+use crate::fs;
+use crate::io::{self, Error, ErrorKind};
+use crate::path::Path;
+
+pub(crate) const NOT_FILE_ERROR: Error = io::const_io_error!(
+ ErrorKind::InvalidInput,
+ "the source path is neither a regular file nor a symlink to a regular file",
+);
+
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ let mut reader = fs::File::open(from)?;
+ let metadata = reader.metadata()?;
+
+ if !metadata.is_file() {
+ return Err(NOT_FILE_ERROR);
+ }
+
+ let mut writer = fs::File::create(to)?;
+ let perm = metadata.permissions();
+
+ let ret = io::copy(&mut reader, &mut writer)?;
+ writer.set_permissions(perm)?;
+ Ok(ret)
+}
+
+pub fn remove_dir_all(path: &Path) -> io::Result<()> {
+ let filetype = fs::symlink_metadata(path)?.file_type();
+ if filetype.is_symlink() { fs::remove_file(path) } else { remove_dir_all_recursive(path) }
+}
+
+fn remove_dir_all_recursive(path: &Path) -> io::Result<()> {
+ for child in fs::read_dir(path)? {
+ let child = child?;
+ if child.file_type()?.is_dir() {
+ remove_dir_all_recursive(&child.path())?;
+ } else {
+ fs::remove_file(&child.path())?;
+ }
+ }
+ fs::remove_dir(path)
+}
+
+pub fn try_exists(path: &Path) -> io::Result<bool> {
+ match fs::metadata(path) {
+ Ok(_) => Ok(true),
+ Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(false),
+ Err(error) => Err(error),
+ }
+}
diff --git a/library/std/src/sys_common/io.rs b/library/std/src/sys_common/io.rs
new file mode 100644
index 000000000..d1e9fed41
--- /dev/null
+++ b/library/std/src/sys_common/io.rs
@@ -0,0 +1,49 @@
+// Bare metal platforms usually have very small amounts of RAM
+// (in the order of hundreds of KB)
+pub const DEFAULT_BUF_SIZE: usize = if cfg!(target_os = "espidf") { 512 } else { 8 * 1024 };
+
+#[cfg(test)]
+#[allow(dead_code)] // not used on emscripten
+pub mod test {
+ use crate::env;
+ use crate::fs;
+ use crate::path::{Path, PathBuf};
+ use crate::thread;
+ use rand::RngCore;
+
+ pub struct TempDir(PathBuf);
+
+ impl TempDir {
+ pub fn join(&self, path: &str) -> PathBuf {
+ let TempDir(ref p) = *self;
+ p.join(path)
+ }
+
+ pub fn path(&self) -> &Path {
+ let TempDir(ref p) = *self;
+ p
+ }
+ }
+
+ impl Drop for TempDir {
+ fn drop(&mut self) {
+ // Gee, seeing how we're testing the fs module I sure hope that we
+ // at least implement this correctly!
+ let TempDir(ref p) = *self;
+ let result = fs::remove_dir_all(p);
+ // Avoid panicking while panicking as this causes the process to
+ // immediately abort, without displaying test results.
+ if !thread::panicking() {
+ result.unwrap();
+ }
+ }
+ }
+
+ pub fn tmpdir() -> TempDir {
+ let p = env::temp_dir();
+ let mut r = rand::thread_rng();
+ let ret = p.join(&format!("rust-{}", r.next_u32()));
+ fs::create_dir(&ret).unwrap();
+ TempDir(ret)
+ }
+}
diff --git a/library/std/src/sys_common/lazy_box.rs b/library/std/src/sys_common/lazy_box.rs
new file mode 100644
index 000000000..63c3316bd
--- /dev/null
+++ b/library/std/src/sys_common/lazy_box.rs
@@ -0,0 +1,90 @@
+#![allow(dead_code)] // Only used on some platforms.
+
+// This is used to wrap pthread {Mutex, Condvar, RwLock} in.
+
+use crate::marker::PhantomData;
+use crate::ops::{Deref, DerefMut};
+use crate::ptr::null_mut;
+use crate::sync::atomic::{
+ AtomicPtr,
+ Ordering::{AcqRel, Acquire},
+};
+
+pub(crate) struct LazyBox<T: LazyInit> {
+ ptr: AtomicPtr<T>,
+ _phantom: PhantomData<T>,
+}
+
+pub(crate) trait LazyInit {
+ /// This is called before the box is allocated, to provide the value to
+ /// move into the new box.
+ ///
+ /// It might be called more than once per LazyBox, as multiple threads
+ /// might race to initialize it concurrently, each constructing and initializing
+ /// their own box. All but one of them will be passed to `cancel_init` right after.
+ fn init() -> Box<Self>;
+
+ /// Any surplus boxes from `init()` that lost the initialization race
+ /// are passed to this function for disposal.
+ ///
+    /// The default implementation calls `destroy()`.
+ fn cancel_init(x: Box<Self>) {
+ Self::destroy(x);
+ }
+
+ /// This is called to destroy a used box.
+ ///
+ /// The default implementation just drops it.
+ fn destroy(_: Box<Self>) {}
+}
+
+impl<T: LazyInit> LazyBox<T> {
+ #[inline]
+ pub const fn new() -> Self {
+ Self { ptr: AtomicPtr::new(null_mut()), _phantom: PhantomData }
+ }
+
+ #[inline]
+ fn get_pointer(&self) -> *mut T {
+ let ptr = self.ptr.load(Acquire);
+ if ptr.is_null() { self.initialize() } else { ptr }
+ }
+
+ #[cold]
+ fn initialize(&self) -> *mut T {
+ let new_ptr = Box::into_raw(T::init());
+ match self.ptr.compare_exchange(null_mut(), new_ptr, AcqRel, Acquire) {
+ Ok(_) => new_ptr,
+ Err(ptr) => {
+ // Lost the race to another thread.
+ // Drop the box we created, and use the one from the other thread instead.
+ T::cancel_init(unsafe { Box::from_raw(new_ptr) });
+ ptr
+ }
+ }
+ }
+}
+
+impl<T: LazyInit> Deref for LazyBox<T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ unsafe { &*self.get_pointer() }
+ }
+}
+
+impl<T: LazyInit> DerefMut for LazyBox<T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.get_pointer() }
+ }
+}
+
+impl<T: LazyInit> Drop for LazyBox<T> {
+ fn drop(&mut self) {
+ let ptr = *self.ptr.get_mut();
+ if !ptr.is_null() {
+ T::destroy(unsafe { Box::from_raw(ptr) });
+ }
+ }
+}
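
A hedged sketch of how a platform lock might opt into `LazyBox`; the `RawOsMutex` type here is a made-up placeholder, not an API from this diff:

```rust
// Hypothetical example: a lock whose OS handle must live at a stable address.
struct RawOsMutex(u64); // stand-in for an OS handle that must not move

impl LazyInit for RawOsMutex {
    fn init() -> Box<Self> {
        // Allocate on the heap so the OS only ever sees a stable address.
        Box::new(RawOsMutex(0))
    }
    // `cancel_init` and `destroy` keep their defaults: a surplus box from a
    // thread that lost the initialization race is simply dropped.
}

// First access allocates (racing via compare_exchange); later accesses are a
// single Acquire load followed by a deref.
static EXAMPLE_LOCK: LazyBox<RawOsMutex> = LazyBox::new();
```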
diff --git a/library/std/src/sys_common/memchr.rs b/library/std/src/sys_common/memchr.rs
new file mode 100644
index 000000000..b219e8789
--- /dev/null
+++ b/library/std/src/sys_common/memchr.rs
@@ -0,0 +1,51 @@
+// Original implementation taken from rust-memchr.
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+use crate::sys::memchr as sys;
+
+#[cfg(test)]
+mod tests;
+
+/// A safe interface to `memchr`.
+///
+/// Returns the index corresponding to the first occurrence of `needle` in
+/// `haystack`, or `None` if one is not found.
+///
+/// `memchr` compiles down to heavily optimized machine code that is around an
+/// order of magnitude faster than `haystack.iter().position(|&b| b == needle)`.
+/// (See benchmarks.)
+///
+/// # Examples
+///
+/// This shows how to find the first position of a byte in a byte string.
+///
+/// ```ignore (cannot-doctest-private-modules)
+/// use memchr::memchr;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memchr(b'k', haystack), Some(8));
+/// ```
+#[inline]
+pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ sys::memchr(needle, haystack)
+}
+
+/// A safe interface to `memrchr`.
+///
+/// Returns the index corresponding to the last occurrence of `needle` in
+/// `haystack`, or `None` if one is not found.
+///
+/// # Examples
+///
+/// This shows how to find the last position of a byte in a byte string.
+///
+/// ```ignore (cannot-doctest-private-modules)
+/// use memchr::memrchr;
+///
+/// let haystack = b"the quick brown fox";
+/// assert_eq!(memrchr(b'o', haystack), Some(17));
+/// ```
+#[inline]
+pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ sys::memrchr(needle, haystack)
+}
diff --git a/library/std/src/sys_common/memchr/tests.rs b/library/std/src/sys_common/memchr/tests.rs
new file mode 100644
index 000000000..557d749c7
--- /dev/null
+++ b/library/std/src/sys_common/memchr/tests.rs
@@ -0,0 +1,86 @@
+// Original implementation taken from rust-memchr.
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+// test the implementations for the current platform
+use super::{memchr, memrchr};
+
+#[test]
+fn matches_one() {
+ assert_eq!(Some(0), memchr(b'a', b"a"));
+}
+
+#[test]
+fn matches_begin() {
+ assert_eq!(Some(0), memchr(b'a', b"aaaa"));
+}
+
+#[test]
+fn matches_end() {
+ assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
+}
+
+#[test]
+fn matches_nul() {
+ assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
+}
+
+#[test]
+fn matches_past_nul() {
+ assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
+}
+
+#[test]
+fn no_match_empty() {
+ assert_eq!(None, memchr(b'a', b""));
+}
+
+#[test]
+fn no_match() {
+ assert_eq!(None, memchr(b'a', b"xyz"));
+}
+
+#[test]
+fn matches_one_reversed() {
+ assert_eq!(Some(0), memrchr(b'a', b"a"));
+}
+
+#[test]
+fn matches_begin_reversed() {
+ assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
+}
+
+#[test]
+fn matches_end_reversed() {
+ assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
+}
+
+#[test]
+fn matches_nul_reversed() {
+ assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
+}
+
+#[test]
+fn matches_past_nul_reversed() {
+ assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
+}
+
+#[test]
+fn no_match_empty_reversed() {
+ assert_eq!(None, memrchr(b'a', b""));
+}
+
+#[test]
+fn no_match_reversed() {
+ assert_eq!(None, memrchr(b'a', b"xyz"));
+}
+
+#[test]
+fn each_alignment() {
+ let mut data = [1u8; 64];
+ let needle = 2;
+ let pos = 40;
+ data[pos] = needle;
+ for start in 0..16 {
+ assert_eq!(Some(pos - start), memchr(needle, &data[start..]));
+ }
+}
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
new file mode 100644
index 000000000..80f56bf75
--- /dev/null
+++ b/library/std/src/sys_common/mod.rs
@@ -0,0 +1,89 @@
+//! Platform-independent platform abstraction
+//!
+//! This is the platform-independent portion of the standard library's
+//! platform abstraction layer, whereas `std::sys` is the
+//! platform-specific portion.
+//!
+//! The relationship between `std::sys_common`, `std::sys` and the
+//! rest of `std` is complex, with dependencies going in all
+//! directions: `std` depending on `sys_common`, `sys_common`
+//! depending on `sys`, and `sys` depending on `sys_common` and `std`.
+//! This is because `sys_common` not only contains platform-independent code,
+//! but also code that is shared between the different platforms in `sys`.
+//! Ideally all that shared code should be moved to `sys::common`,
+//! and the dependencies between `std`, `sys_common` and `sys` would then all form a DAG.
+//! Progress on this is tracked in #84187.
+
+#![allow(missing_docs)]
+#![allow(missing_debug_implementations)]
+
+#[cfg(test)]
+mod tests;
+
+pub mod backtrace;
+pub mod condvar;
+pub mod fs;
+pub mod io;
+pub mod lazy_box;
+pub mod memchr;
+pub mod mutex;
+pub mod process;
+pub mod remutex;
+pub mod rwlock;
+pub mod thread;
+pub mod thread_info;
+pub mod thread_local_dtor;
+pub mod thread_local_key;
+pub mod thread_parker;
+pub mod wtf8;
+
+cfg_if::cfg_if! {
+ if #[cfg(any(target_os = "l4re",
+ target_os = "hermit",
+ feature = "restricted-std",
+ all(target_family = "wasm", not(target_os = "emscripten")),
+ all(target_vendor = "fortanix", target_env = "sgx")))] {
+ pub use crate::sys::net;
+ } else {
+ pub mod net;
+ }
+}
+
+// common error constructors
+
+/// A trait for viewing representations from std types
+#[doc(hidden)]
+pub trait AsInner<Inner: ?Sized> {
+ fn as_inner(&self) -> &Inner;
+}
+
+/// A trait for viewing representations from std types
+#[doc(hidden)]
+pub trait AsInnerMut<Inner: ?Sized> {
+ fn as_inner_mut(&mut self) -> &mut Inner;
+}
+
+/// A trait for extracting representations from std types
+#[doc(hidden)]
+pub trait IntoInner<Inner> {
+ fn into_inner(self) -> Inner;
+}
+
+/// A trait for creating std types from internal representations
+#[doc(hidden)]
+pub trait FromInner<Inner> {
+ fn from_inner(inner: Inner) -> Self;
+}
+
+// Computes (value*numer)/denom without overflow, as long as both
+// (numer*denom) and the overall result fit into i64 (which is the case
+// for our time conversions).
+#[allow(dead_code)] // not used on all platforms
+pub fn mul_div_u64(value: u64, numer: u64, denom: u64) -> u64 {
+ let q = value / denom;
+ let r = value % denom;
+ // Decompose value as (value/denom*denom + value%denom),
+ // substitute into (value*numer)/denom and simplify.
+ // r < denom, so (denom*numer) is the upper bound of (r*numer)
+ q * numer + r * numer / denom
+}
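
Working the decomposition through with the same numbers used in `sys_common/tests.rs` below (illustrative only):

```rust
// value = 1_000_000_000_001, numer = 1_000_000_000, denom = 1_000_000:
// the naive product value * numer (~1e21) overflows u64, but
//   q = value / denom = 1_000_000, r = value % denom = 1, and
//   q * numer + r * numer / denom
//     = 1_000_000_000_000_000 + 1_000 = 1_000_000_000_001_000.
#[test]
fn mul_div_u64_worked_example() {
    assert_eq!(mul_div_u64(1_000_000_000_001, 1_000_000_000, 1_000_000), 1_000_000_000_001_000);
}
```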
diff --git a/library/std/src/sys_common/mutex.rs b/library/std/src/sys_common/mutex.rs
new file mode 100644
index 000000000..48479f5bd
--- /dev/null
+++ b/library/std/src/sys_common/mutex.rs
@@ -0,0 +1,93 @@
+use crate::sys::locks as imp;
+
+/// An OS-based mutual exclusion lock, meant for use in static variables.
+///
+/// This mutex has a const constructor ([`StaticMutex::new`]), does not
+/// implement `Drop` to clean up resources, and causes UB when used reentrantly.
+///
+/// This mutex does not implement poisoning.
+///
+/// This is a wrapper around `imp::Mutex` that does *not* call `init()` and
+/// `destroy()`.
+pub struct StaticMutex(imp::Mutex);
+
+unsafe impl Sync for StaticMutex {}
+
+impl StaticMutex {
+ /// Creates a new mutex for use.
+ #[inline]
+ pub const fn new() -> Self {
+ Self(imp::Mutex::new())
+ }
+
+ /// Calls raw_lock() and then returns an RAII guard to guarantee the mutex
+ /// will be unlocked.
+ ///
+ /// It is undefined behaviour to call this function while locked by the
+ /// same thread.
+ #[inline]
+ pub unsafe fn lock(&'static self) -> StaticMutexGuard {
+ self.0.lock();
+ StaticMutexGuard(&self.0)
+ }
+}
+
+#[must_use]
+pub struct StaticMutexGuard(&'static imp::Mutex);
+
+impl Drop for StaticMutexGuard {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ self.0.unlock();
+ }
+ }
+}
+
+/// An OS-based mutual exclusion lock.
+///
+/// This mutex cleans up its resources in its `Drop` implementation, may safely
+/// be moved (when not borrowed), and does not cause UB when used reentrantly.
+///
+/// This mutex does not implement poisoning.
+///
+/// This is either a wrapper around `LazyBox<imp::Mutex>` or `imp::Mutex`,
+/// depending on the platform. It is boxed on platforms where `imp::Mutex` may
+/// not be moved.
+pub struct MovableMutex(imp::MovableMutex);
+
+unsafe impl Sync for MovableMutex {}
+
+impl MovableMutex {
+ /// Creates a new mutex.
+ #[inline]
+ pub const fn new() -> Self {
+ Self(imp::MovableMutex::new())
+ }
+
+ pub(super) fn raw(&self) -> &imp::Mutex {
+ &self.0
+ }
+
+ /// Locks the mutex blocking the current thread until it is available.
+ #[inline]
+ pub fn raw_lock(&self) {
+ unsafe { self.0.lock() }
+ }
+
+ /// Attempts to lock the mutex without blocking, returning whether it was
+ /// successfully acquired or not.
+ #[inline]
+ pub fn try_lock(&self) -> bool {
+ unsafe { self.0.try_lock() }
+ }
+
+ /// Unlocks the mutex.
+ ///
+ /// Behavior is undefined if the current thread does not actually hold the
+ /// mutex.
+ #[inline]
+ pub unsafe fn raw_unlock(&self) {
+ self.0.unlock()
+ }
+}
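
A hedged sketch of how std-internal code typically uses `StaticMutex`; the surrounding function is hypothetical:

```rust
// Illustrative only. The caller must uphold the documented contract:
// never lock reentrantly from the same thread.
static LOCK: StaticMutex = StaticMutex::new();

fn with_global_state(f: impl FnOnce()) {
    // Safety: `f` never calls back into `with_global_state` on this thread.
    let _guard = unsafe { LOCK.lock() };
    f();
} // `_guard` is dropped here, unlocking the mutex.
```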
diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs
new file mode 100644
index 000000000..33d336c43
--- /dev/null
+++ b/library/std/src/sys_common/net.rs
@@ -0,0 +1,737 @@
+#[cfg(test)]
+mod tests;
+
+use crate::cmp;
+use crate::ffi::CString;
+use crate::fmt;
+use crate::io::{self, ErrorKind, IoSlice, IoSliceMut};
+use crate::mem;
+use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
+use crate::ptr;
+use crate::sys::net::netc as c;
+use crate::sys::net::{cvt, cvt_gai, cvt_r, init, wrlen_t, Socket};
+use crate::sys_common::{FromInner, IntoInner};
+use crate::time::Duration;
+
+use libc::{c_int, c_void};
+
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "dragonfly", target_os = "freebsd",
+ target_os = "ios", target_os = "macos", target_os = "watchos",
+ target_os = "openbsd", target_os = "netbsd", target_os = "illumos",
+ target_os = "solaris", target_os = "haiku", target_os = "l4re"))] {
+ use crate::sys::net::netc::IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP;
+ use crate::sys::net::netc::IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP;
+ } else {
+ use crate::sys::net::netc::IPV6_ADD_MEMBERSHIP;
+ use crate::sys::net::netc::IPV6_DROP_MEMBERSHIP;
+ }
+}
+
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "linux", target_os = "android",
+ target_os = "dragonfly", target_os = "freebsd",
+ target_os = "openbsd", target_os = "netbsd",
+ target_os = "haiku"))] {
+ use libc::MSG_NOSIGNAL;
+ } else {
+ const MSG_NOSIGNAL: c_int = 0x0;
+ }
+}
+
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "dragonfly", target_os = "freebsd",
+ target_os = "openbsd", target_os = "netbsd",
+ target_os = "solaris", target_os = "illumos"))] {
+ use libc::c_uchar;
+ type IpV4MultiCastType = c_uchar;
+ } else {
+ type IpV4MultiCastType = c_int;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// sockaddr and misc bindings
+////////////////////////////////////////////////////////////////////////////////
+
+pub fn setsockopt<T>(
+ sock: &Socket,
+ level: c_int,
+ option_name: c_int,
+ option_value: T,
+) -> io::Result<()> {
+ unsafe {
+ cvt(c::setsockopt(
+ sock.as_raw(),
+ level,
+ option_name,
+ &option_value as *const T as *const _,
+ mem::size_of::<T>() as c::socklen_t,
+ ))?;
+ Ok(())
+ }
+}
+
+pub fn getsockopt<T: Copy>(sock: &Socket, level: c_int, option_name: c_int) -> io::Result<T> {
+ unsafe {
+ let mut option_value: T = mem::zeroed();
+ let mut option_len = mem::size_of::<T>() as c::socklen_t;
+ cvt(c::getsockopt(
+ sock.as_raw(),
+ level,
+ option_name,
+ &mut option_value as *mut T as *mut _,
+ &mut option_len,
+ ))?;
+ Ok(option_value)
+ }
+}
+
+fn sockname<F>(f: F) -> io::Result<SocketAddr>
+where
+ F: FnOnce(*mut c::sockaddr, *mut c::socklen_t) -> c_int,
+{
+ unsafe {
+ let mut storage: c::sockaddr_storage = mem::zeroed();
+ let mut len = mem::size_of_val(&storage) as c::socklen_t;
+ cvt(f(&mut storage as *mut _ as *mut _, &mut len))?;
+ sockaddr_to_addr(&storage, len as usize)
+ }
+}
+
+pub fn sockaddr_to_addr(storage: &c::sockaddr_storage, len: usize) -> io::Result<SocketAddr> {
+ match storage.ss_family as c_int {
+ c::AF_INET => {
+ assert!(len as usize >= mem::size_of::<c::sockaddr_in>());
+ Ok(SocketAddr::V4(FromInner::from_inner(unsafe {
+ *(storage as *const _ as *const c::sockaddr_in)
+ })))
+ }
+ c::AF_INET6 => {
+ assert!(len as usize >= mem::size_of::<c::sockaddr_in6>());
+ Ok(SocketAddr::V6(FromInner::from_inner(unsafe {
+ *(storage as *const _ as *const c::sockaddr_in6)
+ })))
+ }
+ _ => Err(io::const_io_error!(ErrorKind::InvalidInput, "invalid argument")),
+ }
+}
+
+#[cfg(target_os = "android")]
+fn to_ipv6mr_interface(value: u32) -> c_int {
+ value as c_int
+}
+
+#[cfg(not(target_os = "android"))]
+fn to_ipv6mr_interface(value: u32) -> libc::c_uint {
+ value as libc::c_uint
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// get_host_addresses
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct LookupHost {
+ original: *mut c::addrinfo,
+ cur: *mut c::addrinfo,
+ port: u16,
+}
+
+impl LookupHost {
+ pub fn port(&self) -> u16 {
+ self.port
+ }
+}
+
+impl Iterator for LookupHost {
+ type Item = SocketAddr;
+ fn next(&mut self) -> Option<SocketAddr> {
+ loop {
+ unsafe {
+ let cur = self.cur.as_ref()?;
+ self.cur = cur.ai_next;
+ match sockaddr_to_addr(mem::transmute(cur.ai_addr), cur.ai_addrlen as usize) {
+ Ok(addr) => return Some(addr),
+ Err(_) => continue,
+ }
+ }
+ }
+ }
+}
+
+unsafe impl Sync for LookupHost {}
+unsafe impl Send for LookupHost {}
+
+impl Drop for LookupHost {
+ fn drop(&mut self) {
+ unsafe { c::freeaddrinfo(self.original) }
+ }
+}
+
+impl TryFrom<&str> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from(s: &str) -> io::Result<LookupHost> {
+ macro_rules! try_opt {
+ ($e:expr, $msg:expr) => {
+ match $e {
+ Some(r) => r,
+ None => return Err(io::const_io_error!(io::ErrorKind::InvalidInput, $msg)),
+ }
+ };
+ }
+
+ // split the string by ':' and convert the second part to u16
+ let (host, port_str) = try_opt!(s.rsplit_once(':'), "invalid socket address");
+ let port: u16 = try_opt!(port_str.parse().ok(), "invalid port value");
+ (host, port).try_into()
+ }
+}
+
+impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
+ type Error = io::Error;
+
+ fn try_from((host, port): (&'a str, u16)) -> io::Result<LookupHost> {
+ init();
+
+ let c_host = CString::new(host)?;
+ let mut hints: c::addrinfo = unsafe { mem::zeroed() };
+ hints.ai_socktype = c::SOCK_STREAM;
+ let mut res = ptr::null_mut();
+ unsafe {
+ cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res))
+ .map(|_| LookupHost { original: res, cur: res, port })
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// TCP streams
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct TcpStream {
+ inner: Socket,
+}
+
+impl TcpStream {
+ pub fn connect(addr: io::Result<&SocketAddr>) -> io::Result<TcpStream> {
+ let addr = addr?;
+
+ init();
+
+ let sock = Socket::new(addr, c::SOCK_STREAM)?;
+
+ let (addr, len) = addr.into_inner();
+ cvt_r(|| unsafe { c::connect(sock.as_raw(), addr.as_ptr(), len) })?;
+ Ok(TcpStream { inner: sock })
+ }
+
+ pub fn connect_timeout(addr: &SocketAddr, timeout: Duration) -> io::Result<TcpStream> {
+ init();
+
+ let sock = Socket::new(addr, c::SOCK_STREAM)?;
+ sock.connect_timeout(addr, timeout)?;
+ Ok(TcpStream { inner: sock })
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+
+ pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.inner.set_timeout(dur, c::SO_RCVTIMEO)
+ }
+
+ pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.inner.set_timeout(dur, c::SO_SNDTIMEO)
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.inner.timeout(c::SO_RCVTIMEO)
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.inner.timeout(c::SO_SNDTIMEO)
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(buf)
+ }
+
+ pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.inner.read_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_read_vectored(&self) -> bool {
+ self.inner.is_read_vectored()
+ }
+
+ pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
+ let len = cmp::min(buf.len(), <wrlen_t>::MAX as usize) as wrlen_t;
+ let ret = cvt(unsafe {
+ c::send(self.inner.as_raw(), buf.as_ptr() as *const c_void, len, MSG_NOSIGNAL)
+ })?;
+ Ok(ret as usize)
+ }
+
+ pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.inner.write_vectored(bufs)
+ }
+
+ #[inline]
+ pub fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ sockname(|buf, len| unsafe { c::getpeername(self.inner.as_raw(), buf, len) })
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ sockname(|buf, len| unsafe { c::getsockname(self.inner.as_raw(), buf, len) })
+ }
+
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.inner.shutdown(how)
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpStream> {
+ self.inner.duplicate().map(|s| TcpStream { inner: s })
+ }
+
+ pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
+ self.inner.set_linger(linger)
+ }
+
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.inner.linger()
+ }
+
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.inner.set_nodelay(nodelay)
+ }
+
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.inner.nodelay()
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
+ Ok(raw as u32)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.inner.set_nonblocking(nonblocking)
+ }
+}
+
+impl FromInner<Socket> for TcpStream {
+ fn from_inner(socket: Socket) -> TcpStream {
+ TcpStream { inner: socket }
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut res = f.debug_struct("TcpStream");
+
+ if let Ok(addr) = self.socket_addr() {
+ res.field("addr", &addr);
+ }
+
+ if let Ok(peer) = self.peer_addr() {
+ res.field("peer", &peer);
+ }
+
+ let name = if cfg!(windows) { "socket" } else { "fd" };
+ res.field(name, &self.inner.as_raw()).finish()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// TCP listeners
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct TcpListener {
+ inner: Socket,
+}
+
+impl TcpListener {
+ pub fn bind(addr: io::Result<&SocketAddr>) -> io::Result<TcpListener> {
+ let addr = addr?;
+
+ init();
+
+ let sock = Socket::new(addr, c::SOCK_STREAM)?;
+
+        // On platforms with Berkeley-derived sockets, this allows the socket
+        // to be rebound quickly, without needing to wait for the OS to clean
+        // up the previous one.
+ //
+ // On Windows, this allows rebinding sockets which are actively in use,
+ // which allows “socket hijacking”, so we explicitly don't set it here.
+ // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
+ #[cfg(not(windows))]
+ setsockopt(&sock, c::SOL_SOCKET, c::SO_REUSEADDR, 1 as c_int)?;
+
+ // Bind our new socket
+ let (addr, len) = addr.into_inner();
+ cvt(unsafe { c::bind(sock.as_raw(), addr.as_ptr(), len as _) })?;
+
+ cfg_if::cfg_if! {
+ if #[cfg(target_os = "horizon")] {
+ // The 3DS doesn't support a big connection backlog. Sometimes
+ // it allows up to about 37, but other times it doesn't even
+ // accept 32. There may be a global limitation causing this.
+ let backlog = 20;
+ } else {
+ // The default for all other platforms
+ let backlog = 128;
+ }
+ }
+
+ // Start listening
+ cvt(unsafe { c::listen(sock.as_raw(), backlog) })?;
+ Ok(TcpListener { inner: sock })
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ sockname(|buf, len| unsafe { c::getsockname(self.inner.as_raw(), buf, len) })
+ }
+
+ pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
+ let mut storage: c::sockaddr_storage = unsafe { mem::zeroed() };
+ let mut len = mem::size_of_val(&storage) as c::socklen_t;
+ let sock = self.inner.accept(&mut storage as *mut _ as *mut _, &mut len)?;
+ let addr = sockaddr_to_addr(&storage, len as usize)?;
+ Ok((TcpStream { inner: sock }, addr))
+ }
+
+ pub fn duplicate(&self) -> io::Result<TcpListener> {
+ self.inner.duplicate().map(|s| TcpListener { inner: s })
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
+ Ok(raw as u32)
+ }
+
+ pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+ setsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_V6ONLY, only_v6 as c_int)
+ }
+
+ pub fn only_v6(&self) -> io::Result<bool> {
+ let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_V6ONLY)?;
+ Ok(raw != 0)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.inner.set_nonblocking(nonblocking)
+ }
+}
+
+impl FromInner<Socket> for TcpListener {
+ fn from_inner(socket: Socket) -> TcpListener {
+ TcpListener { inner: socket }
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut res = f.debug_struct("TcpListener");
+
+ if let Ok(addr) = self.socket_addr() {
+ res.field("addr", &addr);
+ }
+
+ let name = if cfg!(windows) { "socket" } else { "fd" };
+ res.field(name, &self.inner.as_raw()).finish()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// UDP
+////////////////////////////////////////////////////////////////////////////////
+
+pub struct UdpSocket {
+ inner: Socket,
+}
+
+impl UdpSocket {
+ pub fn bind(addr: io::Result<&SocketAddr>) -> io::Result<UdpSocket> {
+ let addr = addr?;
+
+ init();
+
+ let sock = Socket::new(addr, c::SOCK_DGRAM)?;
+ let (addr, len) = addr.into_inner();
+ cvt(unsafe { c::bind(sock.as_raw(), addr.as_ptr(), len as _) })?;
+ Ok(UdpSocket { inner: sock })
+ }
+
+ pub fn socket(&self) -> &Socket {
+ &self.inner
+ }
+
+ pub fn into_socket(self) -> Socket {
+ self.inner
+ }
+
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ sockname(|buf, len| unsafe { c::getpeername(self.inner.as_raw(), buf, len) })
+ }
+
+ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
+ sockname(|buf, len| unsafe { c::getsockname(self.inner.as_raw(), buf, len) })
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.inner.recv_from(buf)
+ }
+
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.inner.peek_from(buf)
+ }
+
+ pub fn send_to(&self, buf: &[u8], dst: &SocketAddr) -> io::Result<usize> {
+ let len = cmp::min(buf.len(), <wrlen_t>::MAX as usize) as wrlen_t;
+ let (dst, dstlen) = dst.into_inner();
+ let ret = cvt(unsafe {
+ c::sendto(
+ self.inner.as_raw(),
+ buf.as_ptr() as *const c_void,
+ len,
+ MSG_NOSIGNAL,
+ dst.as_ptr(),
+ dstlen,
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ pub fn duplicate(&self) -> io::Result<UdpSocket> {
+ self.inner.duplicate().map(|s| UdpSocket { inner: s })
+ }
+
+ pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.inner.set_timeout(dur, c::SO_RCVTIMEO)
+ }
+
+ pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.inner.set_timeout(dur, c::SO_SNDTIMEO)
+ }
+
+ pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+ self.inner.timeout(c::SO_RCVTIMEO)
+ }
+
+ pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+ self.inner.timeout(c::SO_SNDTIMEO)
+ }
+
+ pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> {
+ setsockopt(&self.inner, c::SOL_SOCKET, c::SO_BROADCAST, broadcast as c_int)
+ }
+
+ pub fn broadcast(&self) -> io::Result<bool> {
+ let raw: c_int = getsockopt(&self.inner, c::SOL_SOCKET, c::SO_BROADCAST)?;
+ Ok(raw != 0)
+ }
+
+ pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> {
+ setsockopt(
+ &self.inner,
+ c::IPPROTO_IP,
+ c::IP_MULTICAST_LOOP,
+ multicast_loop_v4 as IpV4MultiCastType,
+ )
+ }
+
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ let raw: IpV4MultiCastType = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_LOOP)?;
+ Ok(raw != 0)
+ }
+
+ pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> {
+ setsockopt(
+ &self.inner,
+ c::IPPROTO_IP,
+ c::IP_MULTICAST_TTL,
+ multicast_ttl_v4 as IpV4MultiCastType,
+ )
+ }
+
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ let raw: IpV4MultiCastType = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_MULTICAST_TTL)?;
+ Ok(raw as u32)
+ }
+
+ pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> {
+ setsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_MULTICAST_LOOP, multicast_loop_v6 as c_int)
+ }
+
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IPV6, c::IPV6_MULTICAST_LOOP)?;
+ Ok(raw != 0)
+ }
+
+ pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ let mreq = c::ip_mreq {
+ imr_multiaddr: multiaddr.into_inner(),
+ imr_interface: interface.into_inner(),
+ };
+ setsockopt(&self.inner, c::IPPROTO_IP, c::IP_ADD_MEMBERSHIP, mreq)
+ }
+
+ pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ let mreq = c::ipv6_mreq {
+ ipv6mr_multiaddr: multiaddr.into_inner(),
+ ipv6mr_interface: to_ipv6mr_interface(interface),
+ };
+ setsockopt(&self.inner, c::IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, mreq)
+ }
+
+ pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
+ let mreq = c::ip_mreq {
+ imr_multiaddr: multiaddr.into_inner(),
+ imr_interface: interface.into_inner(),
+ };
+ setsockopt(&self.inner, c::IPPROTO_IP, c::IP_DROP_MEMBERSHIP, mreq)
+ }
+
+ pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ let mreq = c::ipv6_mreq {
+ ipv6mr_multiaddr: multiaddr.into_inner(),
+ ipv6mr_interface: to_ipv6mr_interface(interface),
+ };
+ setsockopt(&self.inner, c::IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP, mreq)
+ }
+
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ setsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL, ttl as c_int)
+ }
+
+ pub fn ttl(&self) -> io::Result<u32> {
+ let raw: c_int = getsockopt(&self.inner, c::IPPROTO_IP, c::IP_TTL)?;
+ Ok(raw as u32)
+ }
+
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.inner.take_error()
+ }
+
+ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+ self.inner.set_nonblocking(nonblocking)
+ }
+
+ pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(buf)
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+
+ pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+ let len = cmp::min(buf.len(), <wrlen_t>::MAX as usize) as wrlen_t;
+ let ret = cvt(unsafe {
+ c::send(self.inner.as_raw(), buf.as_ptr() as *const c_void, len, MSG_NOSIGNAL)
+ })?;
+ Ok(ret as usize)
+ }
+
+ pub fn connect(&self, addr: io::Result<&SocketAddr>) -> io::Result<()> {
+ let (addr, len) = addr?.into_inner();
+ cvt_r(|| unsafe { c::connect(self.inner.as_raw(), addr.as_ptr(), len) }).map(drop)
+ }
+}
+
+impl FromInner<Socket> for UdpSocket {
+ fn from_inner(socket: Socket) -> UdpSocket {
+ UdpSocket { inner: socket }
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut res = f.debug_struct("UdpSocket");
+
+ if let Ok(addr) = self.socket_addr() {
+ res.field("addr", &addr);
+ }
+
+ let name = if cfg!(windows) { "socket" } else { "fd" };
+ res.field(name, &self.inner.as_raw()).finish()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Converting SocketAddr to libc representation
+////////////////////////////////////////////////////////////////////////////////
+
+/// A type with the same memory layout as `c::sockaddr`. Used when converting Rust-level
+/// `SocketAddr*` types into their system representation. The benefit of this specific
+/// type over `c::sockaddr_storage` is that it is only as large as it needs to be, and
+/// it can be initialized more cleanly from Rust.
+#[repr(C)]
+pub(crate) union SocketAddrCRepr {
+ v4: c::sockaddr_in,
+ v6: c::sockaddr_in6,
+}
+
+impl SocketAddrCRepr {
+ pub fn as_ptr(&self) -> *const c::sockaddr {
+ self as *const _ as *const c::sockaddr
+ }
+}
+
+impl<'a> IntoInner<(SocketAddrCRepr, c::socklen_t)> for &'a SocketAddr {
+ fn into_inner(self) -> (SocketAddrCRepr, c::socklen_t) {
+ match *self {
+ SocketAddr::V4(ref a) => {
+ let sockaddr = SocketAddrCRepr { v4: a.into_inner() };
+ (sockaddr, mem::size_of::<c::sockaddr_in>() as c::socklen_t)
+ }
+ SocketAddr::V6(ref a) => {
+ let sockaddr = SocketAddrCRepr { v6: a.into_inner() };
+ (sockaddr, mem::size_of::<c::sockaddr_in6>() as c::socklen_t)
+ }
+ }
+ }
+}
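
A hedged sketch of how the resolver types above fit together; the `resolve` function is illustrative, not part of this diff:

```rust
// Illustrative only: both `TryFrom` impls funnel into getaddrinfo(3).
fn resolve(spec: &str) -> io::Result<()> {
    let hosts = LookupHost::try_from(spec)?; // e.g. "localhost:80"
    let port = hosts.port();                 // the parsed port is kept separately
    for addr in hosts {
        // getaddrinfo is called with a null service, so callers pair each
        // returned address with `port` themselves.
        let _sa = SocketAddr::new(addr.ip(), port);
    }
    Ok(())
}
```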
diff --git a/library/std/src/sys_common/net/tests.rs b/library/std/src/sys_common/net/tests.rs
new file mode 100644
index 000000000..ac75d9ebf
--- /dev/null
+++ b/library/std/src/sys_common/net/tests.rs
@@ -0,0 +1,19 @@
+use super::*;
+use crate::collections::HashMap;
+
+#[test]
+fn no_lookup_host_duplicates() {
+ let mut addrs = HashMap::new();
+ let lh = match LookupHost::try_from(("localhost", 0)) {
+ Ok(lh) => lh,
+ Err(e) => panic!("couldn't resolve `localhost': {e}"),
+ };
+ for sa in lh {
+ *addrs.entry(sa).or_insert(0) += 1;
+ }
+ assert_eq!(
+ addrs.iter().filter(|&(_, &v)| v > 1).collect::<Vec<_>>(),
+ vec![],
+ "There should be no duplicate localhost entries"
+ );
+}
diff --git a/library/std/src/sys_common/process.rs b/library/std/src/sys_common/process.rs
new file mode 100644
index 000000000..9f978789a
--- /dev/null
+++ b/library/std/src/sys_common/process.rs
@@ -0,0 +1,119 @@
+#![allow(dead_code)]
+#![unstable(feature = "process_internals", issue = "none")]
+
+use crate::collections::BTreeMap;
+use crate::env;
+use crate::ffi::{OsStr, OsString};
+use crate::sys::process::EnvKey;
+
+// Stores a set of changes to an environment
+#[derive(Clone, Debug)]
+pub struct CommandEnv {
+ clear: bool,
+ saw_path: bool,
+ vars: BTreeMap<EnvKey, Option<OsString>>,
+}
+
+impl Default for CommandEnv {
+ fn default() -> Self {
+ CommandEnv { clear: false, saw_path: false, vars: Default::default() }
+ }
+}
+
+impl CommandEnv {
+ // Capture the current environment with these changes applied
+ pub fn capture(&self) -> BTreeMap<EnvKey, OsString> {
+ let mut result = BTreeMap::<EnvKey, OsString>::new();
+ if !self.clear {
+ for (k, v) in env::vars_os() {
+ result.insert(k.into(), v);
+ }
+ }
+ for (k, maybe_v) in &self.vars {
+ if let &Some(ref v) = maybe_v {
+ result.insert(k.clone(), v.clone());
+ } else {
+ result.remove(k);
+ }
+ }
+ result
+ }
+
+ pub fn is_unchanged(&self) -> bool {
+ !self.clear && self.vars.is_empty()
+ }
+
+ pub fn capture_if_changed(&self) -> Option<BTreeMap<EnvKey, OsString>> {
+ if self.is_unchanged() { None } else { Some(self.capture()) }
+ }
+
+ // The following functions build up changes
+ pub fn set(&mut self, key: &OsStr, value: &OsStr) {
+ let key = EnvKey::from(key);
+ self.maybe_saw_path(&key);
+ self.vars.insert(key, Some(value.to_owned()));
+ }
+
+ pub fn remove(&mut self, key: &OsStr) {
+ let key = EnvKey::from(key);
+ self.maybe_saw_path(&key);
+ if self.clear {
+ self.vars.remove(&key);
+ } else {
+ self.vars.insert(key, None);
+ }
+ }
+
+ pub fn clear(&mut self) {
+ self.clear = true;
+ self.vars.clear();
+ }
+
+ pub fn have_changed_path(&self) -> bool {
+ self.saw_path || self.clear
+ }
+
+ fn maybe_saw_path(&mut self, key: &EnvKey) {
+ if !self.saw_path && key == "PATH" {
+ self.saw_path = true;
+ }
+ }
+
+ pub fn iter(&self) -> CommandEnvs<'_> {
+ let iter = self.vars.iter();
+ CommandEnvs { iter }
+ }
+}
+
+/// An iterator over the command environment variables.
+///
+/// This struct is created by
+/// [`Command::get_envs`][crate::process::Command::get_envs]. See its
+/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "command_access", since = "1.57.0")]
+#[derive(Debug)]
+pub struct CommandEnvs<'a> {
+ iter: crate::collections::btree_map::Iter<'a, EnvKey, Option<OsString>>,
+}
+
+#[stable(feature = "command_access", since = "1.57.0")]
+impl<'a> Iterator for CommandEnvs<'a> {
+ type Item = (&'a OsStr, Option<&'a OsStr>);
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next().map(|(key, value)| (key.as_ref(), value.as_deref()))
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "command_access", since = "1.57.0")]
+impl<'a> ExactSizeIterator for CommandEnvs<'a> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
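
A hedged usage sketch of `CommandEnv` as the process-spawning code might drive it; the function and values are made up for illustration:

```rust
// Illustrative only; `OsStr` is already imported by this module.
fn build_env() {
    let mut env = CommandEnv::default();
    env.set(OsStr::new("RUST_LOG"), OsStr::new("debug"));
    env.remove(OsStr::new("PATH"));

    assert!(!env.is_unchanged());
    assert!(env.have_changed_path()); // touching PATH is tracked explicitly

    // `None` would mean the child can simply inherit the parent's environment;
    // here we get a full snapshot with the edits applied.
    let _snapshot = env.capture_if_changed().unwrap();
}
```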
diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs
new file mode 100644
index 000000000..8921af311
--- /dev/null
+++ b/library/std/src/sys_common/remutex.rs
@@ -0,0 +1,200 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cell::UnsafeCell;
+use crate::marker::PhantomPinned;
+use crate::ops::Deref;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::pin::Pin;
+use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use crate::sys::locks as sys;
+
+/// A re-entrant mutual exclusion lock.
+///
+/// This mutex will block *other* threads waiting for the lock to become
+/// available. The thread which has already locked the mutex can lock it
+/// multiple times without blocking, preventing a common source of deadlocks.
+///
+/// This is used by stdout().lock() and friends.
+///
+/// ## Implementation details
+///
+/// The 'owner' field tracks which thread has locked the mutex.
+///
+/// We use current_thread_unique_ptr() as the thread identifier,
+/// which is just the address of a thread local variable.
+///
+/// If `owner` is set to the identifier of the current thread,
+/// we assume the mutex is already locked and instead of locking it again,
+/// we increment `lock_count`.
+///
+/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
+/// it reaches zero.
+///
+/// `lock_count` is protected by the mutex and only accessed by the thread that has
+/// locked the mutex, so needs no synchronization.
+///
+/// `owner` can be checked by other threads that want to see if they already
+/// hold the lock, so needs to be atomic. If it compares equal, we're on the
+/// same thread that holds the mutex and memory access can use relaxed ordering
+/// since we're not dealing with multiple threads. If it compares unequal,
+/// synchronization is left to the mutex, making relaxed memory ordering for
+/// the `owner` field fine in all cases.
+pub struct ReentrantMutex<T> {
+ mutex: sys::Mutex,
+ owner: AtomicUsize,
+ lock_count: UnsafeCell<u32>,
+ data: T,
+ _pinned: PhantomPinned,
+}
+
+unsafe impl<T: Send> Send for ReentrantMutex<T> {}
+unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
+
+impl<T> UnwindSafe for ReentrantMutex<T> {}
+impl<T> RefUnwindSafe for ReentrantMutex<T> {}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// Deref implementation.
+///
+/// # Mutability
+///
+/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
+/// because an implementation of the trait would violate Rust’s reference aliasing
+/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
+/// guarded data.
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
+pub struct ReentrantMutexGuard<'a, T: 'a> {
+ lock: Pin<&'a ReentrantMutex<T>>,
+}
+
+impl<T> !Send for ReentrantMutexGuard<'_, T> {}
+
+impl<T> ReentrantMutex<T> {
+ /// Creates a new reentrant mutex in an unlocked state.
+ ///
+    /// # Safety
+ ///
+ /// This function is unsafe because it is required that `init` is called
+ /// once this mutex is in its final resting place, and only then are the
+ /// lock/unlock methods safe.
+ pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
+ ReentrantMutex {
+ mutex: sys::Mutex::new(),
+ owner: AtomicUsize::new(0),
+ lock_count: UnsafeCell::new(0),
+ data: t,
+ _pinned: PhantomPinned,
+ }
+ }
+
+ /// Initializes this mutex so it's ready for use.
+ ///
+    /// # Safety
+ ///
+    /// Unsafe to call more than once, and it must only be called once this
+    /// mutex will no longer move in memory.
+ pub unsafe fn init(self: Pin<&mut Self>) {
+ self.get_unchecked_mut().mutex.init()
+ }
+
+ /// Acquires a mutex, blocking the current thread until it is able to do so.
+ ///
+    /// This function will block the caller until it is able to acquire the mutex.
+ /// Upon returning, the thread is the only thread with the mutex held. When the thread
+ /// calling this method already holds the lock, the call shall succeed without
+ /// blocking.
+ ///
+    /// # Panics
+    ///
+    /// Panics if the lock count for the current thread overflows. This mutex
+    /// does not implement poisoning, so the call otherwise always succeeds.
+ pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
+ let this_thread = current_thread_unique_ptr();
+ // Safety: We only touch lock_count when we own the lock,
+ // and since self is pinned we can safely call the lock() on the mutex.
+ unsafe {
+ if self.owner.load(Relaxed) == this_thread {
+ self.increment_lock_count();
+ } else {
+ self.mutex.lock();
+ self.owner.store(this_thread, Relaxed);
+ debug_assert_eq!(*self.lock_count.get(), 0);
+ *self.lock_count.get() = 1;
+ }
+ }
+ ReentrantMutexGuard { lock: self }
+ }
+
+ /// Attempts to acquire this lock.
+ ///
+ /// If the lock could not be acquired at this time, then `Err` is returned.
+ /// Otherwise, an RAII guard is returned.
+ ///
+ /// This function does not block.
+ ///
+    /// # Errors
+    ///
+    /// Returns `None` if the mutex is currently locked by another thread.
+    /// This mutex does not implement poisoning.
+ pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
+ let this_thread = current_thread_unique_ptr();
+ // Safety: We only touch lock_count when we own the lock,
+ // and since self is pinned we can safely call the try_lock on the mutex.
+ unsafe {
+ if self.owner.load(Relaxed) == this_thread {
+ self.increment_lock_count();
+ Some(ReentrantMutexGuard { lock: self })
+ } else if self.mutex.try_lock() {
+ self.owner.store(this_thread, Relaxed);
+ debug_assert_eq!(*self.lock_count.get(), 0);
+ *self.lock_count.get() = 1;
+ Some(ReentrantMutexGuard { lock: self })
+ } else {
+ None
+ }
+ }
+ }
+
+ unsafe fn increment_lock_count(&self) {
+ *self.lock_count.get() = (*self.lock_count.get())
+ .checked_add(1)
+ .expect("lock count overflow in reentrant mutex");
+ }
+}
+
+impl<T> Deref for ReentrantMutexGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.lock.data
+ }
+}
+
+impl<T> Drop for ReentrantMutexGuard<'_, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // Safety: We own the lock, and the lock is pinned.
+ unsafe {
+ *self.lock.lock_count.get() -= 1;
+ if *self.lock.lock_count.get() == 0 {
+ self.lock.owner.store(0, Relaxed);
+ self.lock.mutex.unlock();
+ }
+ }
+ }
+}
+
+/// Get an address that is unique per running thread.
+///
+/// This can be used as a non-null usize-sized ID.
+pub fn current_thread_unique_ptr() -> usize {
+ // Use a non-drop type to make sure it's still available during thread destruction.
+ thread_local! { static X: u8 = const { 0 } }
+ X.with(|x| <*const _>::addr(x))
+}
diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs
new file mode 100644
index 000000000..64873b850
--- /dev/null
+++ b/library/std/src/sys_common/remutex/tests.rs
@@ -0,0 +1,77 @@
+use crate::boxed::Box;
+use crate::cell::RefCell;
+use crate::pin::Pin;
+use crate::sync::Arc;
+use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
+use crate::thread;
+
+#[test]
+fn smoke() {
+ let m = unsafe {
+ let mut m = Box::pin(ReentrantMutex::new(()));
+ m.as_mut().init();
+ m
+ };
+ let m = m.as_ref();
+ {
+ let a = m.lock();
+ {
+ let b = m.lock();
+ {
+ let c = m.lock();
+ assert_eq!(*c, ());
+ }
+ assert_eq!(*b, ());
+ }
+ assert_eq!(*a, ());
+ }
+}
+
+#[test]
+fn is_mutex() {
+ let m = unsafe {
+ // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
+ let mut m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+ Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
+ Pin::new_unchecked(m)
+ };
+ let m2 = m.clone();
+ let lock = m.as_ref().lock();
+ let child = thread::spawn(move || {
+ let lock = m2.as_ref().lock();
+ assert_eq!(*lock.borrow(), 4950);
+ });
+ for i in 0..100 {
+ let lock = m.as_ref().lock();
+ *lock.borrow_mut() += i;
+ }
+ drop(lock);
+ child.join().unwrap();
+}
+
+#[test]
+fn trylock_works() {
+ let m = unsafe {
+ // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
+ let mut m = Arc::new(ReentrantMutex::new(()));
+ Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
+ Pin::new_unchecked(m)
+ };
+ let m2 = m.clone();
+ let _lock = m.as_ref().try_lock();
+ let _lock2 = m.as_ref().try_lock();
+ thread::spawn(move || {
+ let lock = m2.as_ref().try_lock();
+ assert!(lock.is_none());
+ })
+ .join()
+ .unwrap();
+ let _lock3 = m.as_ref().try_lock();
+}
+
+pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
+impl Drop for Answer<'_> {
+ fn drop(&mut self) {
+ *self.0.borrow_mut() = 42;
+ }
+}
diff --git a/library/std/src/sys_common/rwlock.rs b/library/std/src/sys_common/rwlock.rs
new file mode 100644
index 000000000..ba56f3a8f
--- /dev/null
+++ b/library/std/src/sys_common/rwlock.rs
@@ -0,0 +1,130 @@
+use crate::sys::locks as imp;
+
+/// An OS-based reader-writer lock, meant for use in static variables.
+///
+/// This rwlock does not implement poisoning.
+///
+/// This rwlock has a const constructor ([`StaticRwLock::new`]) and does not
+/// implement `Drop` to clean up resources.
+pub struct StaticRwLock(imp::RwLock);
+
+impl StaticRwLock {
+ /// Creates a new rwlock for use.
+ #[inline]
+ pub const fn new() -> Self {
+ Self(imp::RwLock::new())
+ }
+
+ /// Acquires shared access to the underlying lock, blocking the current
+ /// thread to do so.
+ ///
+ /// The lock is automatically unlocked when the returned guard is dropped.
+ #[inline]
+ pub fn read(&'static self) -> StaticRwLockReadGuard {
+ unsafe { self.0.read() };
+ StaticRwLockReadGuard(&self.0)
+ }
+
+ /// Acquires write access to the underlying lock, blocking the current thread
+ /// to do so.
+ ///
+ /// The lock is automatically unlocked when the returned guard is dropped.
+ #[inline]
+ pub fn write(&'static self) -> StaticRwLockWriteGuard {
+ unsafe { self.0.write() };
+ StaticRwLockWriteGuard(&self.0)
+ }
+}
+
+#[must_use]
+pub struct StaticRwLockReadGuard(&'static imp::RwLock);
+
+impl Drop for StaticRwLockReadGuard {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ self.0.read_unlock();
+ }
+ }
+}
+
+#[must_use]
+pub struct StaticRwLockWriteGuard(&'static imp::RwLock);
+
+impl Drop for StaticRwLockWriteGuard {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ self.0.write_unlock();
+ }
+ }
+}
+
+/// An OS-based reader-writer lock.
+///
+/// This rwlock cleans up its resources in its `Drop` implementation and may
+/// safely be moved (when not borrowed).
+///
+/// This rwlock does not implement poisoning.
+///
+/// This is either a wrapper around `LazyBox<imp::RwLock>` or `imp::RwLock`,
+/// depending on the platform. It is boxed on platforms where `imp::RwLock` may
+/// not be moved.
+pub struct MovableRwLock(imp::MovableRwLock);
+
+impl MovableRwLock {
+ /// Creates a new reader-writer lock for use.
+ #[inline]
+ pub const fn new() -> Self {
+ Self(imp::MovableRwLock::new())
+ }
+
+ /// Acquires shared access to the underlying lock, blocking the current
+ /// thread to do so.
+ #[inline]
+ pub fn read(&self) {
+ unsafe { self.0.read() }
+ }
+
+ /// Attempts to acquire shared access to this lock, returning whether it
+ /// succeeded or not.
+ ///
+ /// This function does not block the current thread.
+ #[inline]
+ pub fn try_read(&self) -> bool {
+ unsafe { self.0.try_read() }
+ }
+
+ /// Acquires write access to the underlying lock, blocking the current thread
+ /// to do so.
+ #[inline]
+ pub fn write(&self) {
+ unsafe { self.0.write() }
+ }
+
+ /// Attempts to acquire exclusive access to this lock, returning whether it
+ /// succeeded or not.
+ ///
+ /// This function does not block the current thread.
+ #[inline]
+ pub fn try_write(&self) -> bool {
+ unsafe { self.0.try_write() }
+ }
+
+ /// Unlocks previously acquired shared access to this lock.
+ ///
+ /// Behavior is undefined if the current thread does not have shared access.
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ self.0.read_unlock()
+ }
+
+ /// Unlocks previously acquired exclusive access to this lock.
+ ///
+ /// Behavior is undefined if the current thread does not currently have
+ /// exclusive access.
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ self.0.write_unlock()
+ }
+}
diff --git a/library/std/src/sys_common/tests.rs b/library/std/src/sys_common/tests.rs
new file mode 100644
index 000000000..1b6446db5
--- /dev/null
+++ b/library/std/src/sys_common/tests.rs
@@ -0,0 +1,6 @@
+use super::mul_div_u64;
+
+#[test]
+fn test_muldiv() {
+ assert_eq!(mul_div_u64(1_000_000_000_001, 1_000_000_000, 1_000_000), 1_000_000_000_001_000);
+}
diff --git a/library/std/src/sys_common/thread.rs b/library/std/src/sys_common/thread.rs
new file mode 100644
index 000000000..76466b2b3
--- /dev/null
+++ b/library/std/src/sys_common/thread.rs
@@ -0,0 +1,18 @@
+use crate::env;
+use crate::sync::atomic::{self, Ordering};
+use crate::sys::thread as imp;
+
+pub fn min_stack() -> usize {
+ static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+ match MIN.load(Ordering::Relaxed) {
+ 0 => {}
+ n => return n - 1,
+ }
+ let amt = env::var("RUST_MIN_STACK").ok().and_then(|s| s.parse().ok());
+ let amt = amt.unwrap_or(imp::DEFAULT_MIN_STACK_SIZE);
+
+ // 0 is our sentinel value, so ensure that we'll never see 0 after
+ // initialization has run
+ MIN.store(amt + 1, Ordering::Relaxed);
+ amt
+}
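
The `+1`/`-1` shift above just reserves `0` as the "not computed yet" state; here is the same caching pattern in isolation, as a standalone sketch that is not part of this diff:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

static CACHED: AtomicUsize = AtomicUsize::new(0); // 0 = not computed yet

fn cached(compute: impl FnOnce() -> usize) -> usize {
    match CACHED.load(Ordering::Relaxed) {
        0 => {
            let v = compute();
            // Store v + 1 so that a computed value of 0 is still
            // distinguishable from "not computed yet".
            CACHED.store(v + 1, Ordering::Relaxed);
            v
        }
        n => n - 1,
    }
}
```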
diff --git a/library/std/src/sys_common/thread_info.rs b/library/std/src/sys_common/thread_info.rs
new file mode 100644
index 000000000..38c9e5000
--- /dev/null
+++ b/library/std/src/sys_common/thread_info.rs
@@ -0,0 +1,47 @@
+#![allow(dead_code)] // stack_guard isn't used right now on all platforms
+#![allow(unused_unsafe)] // thread_local with `const {}` triggers this lint
+
+use crate::cell::RefCell;
+use crate::sys::thread::guard::Guard;
+use crate::thread::Thread;
+
+struct ThreadInfo {
+ stack_guard: Option<Guard>,
+ thread: Thread,
+}
+
+thread_local! { static THREAD_INFO: RefCell<Option<ThreadInfo>> = const { RefCell::new(None) } }
+
+impl ThreadInfo {
+ fn with<R, F>(f: F) -> Option<R>
+ where
+ F: FnOnce(&mut ThreadInfo) -> R,
+ {
+ THREAD_INFO
+ .try_with(move |thread_info| {
+ let mut thread_info = thread_info.borrow_mut();
+ let thread_info = thread_info.get_or_insert_with(|| ThreadInfo {
+ stack_guard: None,
+ thread: Thread::new(None),
+ });
+ f(thread_info)
+ })
+ .ok()
+ }
+}
+
+pub fn current_thread() -> Option<Thread> {
+ ThreadInfo::with(|info| info.thread.clone())
+}
+
+pub fn stack_guard() -> Option<Guard> {
+ ThreadInfo::with(|info| info.stack_guard.clone()).and_then(|o| o)
+}
+
+pub fn set(stack_guard: Option<Guard>, thread: Thread) {
+ THREAD_INFO.with(move |thread_info| {
+ let mut thread_info = thread_info.borrow_mut();
+ rtassert!(thread_info.is_none());
+ *thread_info = Some(ThreadInfo { stack_guard, thread });
+ });
+}
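
Presumably the runtime start-up glue calls `set` once per thread it creates, while everything else goes through the lazily-initializing accessors; the sketch below spells that usage out (the wrapper function is invented for illustration):

// Illustrative only: how start-up glue would be expected to use the
// functions above.
fn init_thread_info_sketch(guard: Option<Guard>, handle: Thread) {
    // Must run before this thread queries its own info, and at most once
    // per thread: the rtassert! in `set` enforces the "at most once" part.
    set(guard, handle);

    // Later lookups return what was registered above. On a thread where
    // `set` was never called, they instead lazily install a default
    // ThreadInfo with no stack guard and an unnamed Thread.
    assert!(current_thread().is_some());
    let _guard = stack_guard();
}
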
diff --git a/library/std/src/sys_common/thread_local_dtor.rs b/library/std/src/sys_common/thread_local_dtor.rs
new file mode 100644
index 000000000..1d13a7171
--- /dev/null
+++ b/library/std/src/sys_common/thread_local_dtor.rs
@@ -0,0 +1,49 @@
+//! Thread-local destructor
+//!
+//! Besides thread-local "keys" (pointer-sized non-addressable thread-local store
+//! with an associated destructor), many platforms also provide thread-local
+//! destructors that are not associated with any particular data. These are
+//! often more efficient.
+//!
+//! This module provides a fallback implementation for that interface, based
+//! on the less efficient thread-local "keys". Each platform provides
+//! a `thread_local_dtor` module which will either re-export the fallback,
+//! or implement something more efficient.
+
+#![unstable(feature = "thread_local_internals", issue = "none")]
+#![allow(dead_code)]
+
+use crate::ptr;
+use crate::sys_common::thread_local_key::StaticKey;
+
+pub unsafe fn register_dtor_fallback(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ // The fallback implementation uses a vanilla OS-based TLS key to track
+ // the list of destructors that need to be run for this thread. The key
+ // then has its own destructor which runs all the other destructors.
+ //
+ // The destructor for DTORS is a little special in that it has a `while`
+ // loop to continuously drain the list of registered destructors. It
+ // *should* be the case that this loop always terminates because we
+ // provide the guarantee that a TLS key cannot be set after it is
+ // flagged for destruction.
+
+ static DTORS: StaticKey = StaticKey::new(Some(run_dtors));
+ type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>;
+ if DTORS.get().is_null() {
+ let v: Box<List> = box Vec::new();
+ DTORS.set(Box::into_raw(v) as *mut u8);
+ }
+ let list: &mut List = &mut *(DTORS.get() as *mut List);
+ list.push((t, dtor));
+
+ unsafe extern "C" fn run_dtors(mut ptr: *mut u8) {
+ while !ptr.is_null() {
+ let list: Box<List> = Box::from_raw(ptr as *mut List);
+ for (ptr, dtor) in list.into_iter() {
+ dtor(ptr);
+ }
+ ptr = DTORS.get();
+ DTORS.set(ptr::null_mut());
+ }
+ }
+}
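
A hedged sketch of how this fallback is meant to be consumed: a platform without native registration can re-export `register_dtor_fallback` as its `register_dtor`, and a caller hands it a raw pointer plus an `extern "C"` destructor that reconstitutes and drops the value. The names below are illustrative:

// In a hypothetical platform's `thread_local_dtor` module:
// pub use crate::sys_common::thread_local_dtor::register_dtor_fallback as register_dtor;

// Registering a destructor for a heap value owned by the current thread.
unsafe fn register_boxed<T: 'static>(value: Box<T>) {
    unsafe extern "C" fn drop_box<T>(ptr: *mut u8) {
        // Reconstitute the Box and let it drop, freeing the value.
        drop(Box::from_raw(ptr as *mut T));
    }
    register_dtor_fallback(Box::into_raw(value) as *mut u8, drop_box::<T>);
}
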
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
new file mode 100644
index 000000000..70beebe86
--- /dev/null
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -0,0 +1,237 @@
+//! OS-based thread local storage
+//!
+//! This module provides an implementation of OS-based thread local storage,
+//! using the native OS-provided facilities (think `TlsAlloc` or
+//! `pthread_setspecific`). The interface of this differs from the other types
+//! of thread-local-storage provided in this crate in that OS-based TLS can only
+//! get/set pointer-sized data, possibly with an associated destructor.
+//!
+//! This module also provides two flavors of TLS. One is intended for static
+//! initialization, and does not contain a `Drop` implementation to deallocate
+//! the OS-TLS key. The other is a type which does implement `Drop` and hence
+//! has a safe interface.
+//!
+//! # Usage
+//!
+//! This module should likely not be used directly unless other primitives are
+//! being built on. Types such as `thread_local::spawn::Key` are likely much
+//! more useful in practice than this OS-based version, which generally requires
+//! unsafe code to interoperate with.
+//!
+//! # Examples
+//!
+//! Using a dynamically allocated TLS key. Note that this key can be shared
+//! among many threads via an `Arc`.
+//!
+//! ```ignore (cannot-doctest-private-modules)
+//! let key = Key::new(None);
+//! assert!(key.get().is_null());
+//! key.set(1 as *mut u8);
+//! assert!(!key.get().is_null());
+//!
+//! drop(key); // deallocate this TLS slot.
+//! ```
+//!
+//! Sometimes a statically allocated key is either required or easier to work
+//! with, however.
+//!
+//! ```ignore (cannot-doctest-private-modules)
+//! static KEY: StaticKey = INIT;
+//!
+//! unsafe {
+//! assert!(KEY.get().is_null());
+//! KEY.set(1 as *mut u8);
+//! }
+//! ```
+
+#![allow(non_camel_case_types)]
+#![unstable(feature = "thread_local_internals", issue = "none")]
+#![allow(dead_code)]
+
+#[cfg(test)]
+mod tests;
+
+use crate::sync::atomic::{self, AtomicUsize, Ordering};
+use crate::sys::thread_local_key as imp;
+use crate::sys_common::mutex::StaticMutex;
+
+/// A type for TLS keys that are statically allocated.
+///
+/// This type is entirely `unsafe` to use as it does not protect against
+/// use-after-deallocation or use-during-deallocation.
+///
+/// The actual OS-TLS key is lazily allocated when this is used for the first
+/// time. The key is also deallocated when the Rust runtime exits or `destroy`
+/// is called, whichever comes first.
+///
+/// # Examples
+///
+/// ```ignore (cannot-doctest-private-modules)
+/// use tls::os::{StaticKey, INIT};
+///
+/// static KEY: StaticKey = INIT;
+///
+/// unsafe {
+/// assert!(KEY.get().is_null());
+/// KEY.set(1 as *mut u8);
+/// }
+/// ```
+pub struct StaticKey {
+ /// Inner static TLS key (internals).
+ key: AtomicUsize,
+ /// Destructor for the TLS value.
+ ///
+ /// See `Key::new` for information about when the destructor runs and how
+ /// it runs.
+ dtor: Option<unsafe extern "C" fn(*mut u8)>,
+}
+
+/// A type for a safely managed OS-based TLS slot.
+///
+/// This type allocates an OS TLS key when it is initialized and will deallocate
+/// the key when it falls out of scope. When compared with `StaticKey`, this
+/// type is entirely safe to use.
+///
+/// Implementations will likely, however, contain unsafe code as this type only
+/// operates on `*mut u8`, a raw pointer.
+///
+/// # Examples
+///
+/// ```ignore (cannot-doctest-private-modules)
+/// use tls::os::Key;
+///
+/// let key = Key::new(None);
+/// assert!(key.get().is_null());
+/// key.set(1 as *mut u8);
+/// assert!(!key.get().is_null());
+///
+/// drop(key); // deallocate this TLS slot.
+/// ```
+pub struct Key {
+ key: imp::Key,
+}
+
+/// Constant initialization value for static TLS keys.
+///
+/// This value specifies no destructor by default.
+pub const INIT: StaticKey = StaticKey::new(None);
+
+impl StaticKey {
+ #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")]
+ pub const fn new(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> StaticKey {
+ StaticKey { key: atomic::AtomicUsize::new(0), dtor }
+ }
+
+ /// Gets the value associated with this TLS key
+ ///
+ /// This will lazily allocate a TLS key from the OS if one has not already
+ /// been allocated.
+ #[inline]
+ pub unsafe fn get(&self) -> *mut u8 {
+ imp::get(self.key())
+ }
+
+ /// Sets this TLS key to a new value.
+ ///
+ /// This will lazily allocate a TLS key from the OS if one has not already
+ /// been allocated.
+ #[inline]
+ pub unsafe fn set(&self, val: *mut u8) {
+ imp::set(self.key(), val)
+ }
+
+ #[inline]
+ unsafe fn key(&self) -> imp::Key {
+ match self.key.load(Ordering::Relaxed) {
+ 0 => self.lazy_init() as imp::Key,
+ n => n as imp::Key,
+ }
+ }
+
+ unsafe fn lazy_init(&self) -> usize {
+ // Currently the Windows implementation of TLS is pretty hairy, and
+ // it greatly simplifies creation if we just synchronize everything.
+ //
+ // Additionally a 0-index of a tls key hasn't been seen on windows, so
+ // we just simplify the whole branch.
+ if imp::requires_synchronized_create() {
+ // We never call `INIT_LOCK.init()`, so it is UB to attempt to
+ // acquire this mutex reentrantly!
+ static INIT_LOCK: StaticMutex = StaticMutex::new();
+ let _guard = INIT_LOCK.lock();
+ let mut key = self.key.load(Ordering::SeqCst);
+ if key == 0 {
+ key = imp::create(self.dtor) as usize;
+ self.key.store(key, Ordering::SeqCst);
+ }
+ rtassert!(key != 0);
+ return key;
+ }
+
+ // POSIX allows the key created here to be 0, but the compare_exchange
+ // below relies on using 0 as a sentinel value to check who won the
+ // race to set the shared TLS key. As far as I know, there is no
+ // guaranteed value that cannot be returned as a pthread_key_create key,
+ // so there is no value we can initialize the inner key with to
+ // prove that it has not yet been set. As such, we'll continue using a
+ // value of 0, but with some gyrations to make sure we have a non-0
+ // value returned from the creation routine.
+ // FIXME: this is clearly a hack, and should be cleaned up.
+ let key1 = imp::create(self.dtor);
+ let key = if key1 != 0 {
+ key1
+ } else {
+ let key2 = imp::create(self.dtor);
+ imp::destroy(key1);
+ key2
+ };
+ rtassert!(key != 0);
+ match self.key.compare_exchange(0, key as usize, Ordering::SeqCst, Ordering::SeqCst) {
+ // The CAS succeeded, so we've created the actual key
+ Ok(_) => key as usize,
+ // If someone beat us to the punch, use their key instead
+ Err(n) => {
+ imp::destroy(key);
+ n
+ }
+ }
+ }
+}
+
+impl Key {
+ /// Creates a new managed OS TLS key.
+ ///
+ /// This key will be deallocated when the key falls out of scope.
+ ///
+ /// The argument provided is an optionally-specified destructor for the
+ /// value of this TLS key. When a thread exits and the value for this key
+ /// is non-null the destructor will be invoked. The TLS value will be reset
+ /// to null before the destructor is invoked.
+ ///
+ /// Note that the destructor will not be run when the `Key` goes out of
+ /// scope.
+ #[inline]
+ pub fn new(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
+ Key { key: unsafe { imp::create(dtor) } }
+ }
+
+ /// See StaticKey::get
+ #[inline]
+ pub fn get(&self) -> *mut u8 {
+ unsafe { imp::get(self.key) }
+ }
+
+ /// See StaticKey::set
+ #[inline]
+ pub fn set(&self, val: *mut u8) {
+ unsafe { imp::set(self.key, val) }
+ }
+}
+
+impl Drop for Key {
+ fn drop(&mut self) {
+ // Right now Windows doesn't support TLS key destruction, but this also
+ // isn't used anywhere other than tests, so just leak the TLS key.
+ // unsafe { imp::destroy(self.key) }
+ }
+}
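
Putting the pieces of this file together: a common pattern is a `StaticKey` whose destructor frees a per-thread heap allocation at thread exit. The sketch below is illustrative, uses only the API defined above, and (like the module itself) only makes sense inside std:

// One pointer-sized slot per thread, lazily populated, freed at thread
// exit by the destructor passed to `StaticKey::new`.
static COUNTER_SLOT: StaticKey = StaticKey::new(Some(free_counter));

unsafe extern "C" fn free_counter(ptr: *mut u8) {
    // The value is only ever a Box<u64> leaked into the slot below.
    drop(Box::from_raw(ptr as *mut u64));
}

unsafe fn bump_thread_counter() -> u64 {
    if COUNTER_SLOT.get().is_null() {
        COUNTER_SLOT.set(Box::into_raw(Box::new(0u64)) as *mut u8);
    }
    let counter = &mut *(COUNTER_SLOT.get() as *mut u64);
    *counter += 1;
    *counter
}
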
diff --git a/library/std/src/sys_common/thread_local_key/tests.rs b/library/std/src/sys_common/thread_local_key/tests.rs
new file mode 100644
index 000000000..968738a41
--- /dev/null
+++ b/library/std/src/sys_common/thread_local_key/tests.rs
@@ -0,0 +1,34 @@
+use super::{Key, StaticKey};
+
+fn assert_sync<T: Sync>() {}
+fn assert_send<T: Send>() {}
+
+#[test]
+fn smoke() {
+ assert_sync::<Key>();
+ assert_send::<Key>();
+
+ let k1 = Key::new(None);
+ let k2 = Key::new(None);
+ assert!(k1.get().is_null());
+ assert!(k2.get().is_null());
+ k1.set(1 as *mut _);
+ k2.set(2 as *mut _);
+ assert_eq!(k1.get() as usize, 1);
+ assert_eq!(k2.get() as usize, 2);
+}
+
+#[test]
+fn statik() {
+ static K1: StaticKey = StaticKey::new(None);
+ static K2: StaticKey = StaticKey::new(None);
+
+ unsafe {
+ assert!(K1.get().is_null());
+ assert!(K2.get().is_null());
+ K1.set(1 as *mut _);
+ K2.set(2 as *mut _);
+ assert_eq!(K1.get() as usize, 1);
+ assert_eq!(K2.get() as usize, 2);
+ }
+}
diff --git a/library/std/src/sys_common/thread_parker/futex.rs b/library/std/src/sys_common/thread_parker/futex.rs
new file mode 100644
index 000000000..d9e2f39e3
--- /dev/null
+++ b/library/std/src/sys_common/thread_parker/futex.rs
@@ -0,0 +1,97 @@
+use crate::pin::Pin;
+use crate::sync::atomic::AtomicU32;
+use crate::sync::atomic::Ordering::{Acquire, Release};
+use crate::sys::futex::{futex_wait, futex_wake};
+use crate::time::Duration;
+
+const PARKED: u32 = u32::MAX;
+const EMPTY: u32 = 0;
+const NOTIFIED: u32 = 1;
+
+pub struct Parker {
+ state: AtomicU32,
+}
+
+// Notes about memory ordering:
+//
+// Memory ordering is only relevant for the relative ordering of operations
+// between different variables. Even Ordering::Relaxed guarantees a
+// monotonic/consistent order when looking at just a single atomic variable.
+//
+// So, since this parker is just a single atomic variable, we only need to look
+// at the ordering guarantees we need to provide to the 'outside world'.
+//
+// The only memory ordering guarantee that parking and unparking provide, is
+// that things which happened before unpark() are visible on the thread
+// returning from park() afterwards. Otherwise, it was effectively unparked
+// before unpark() was called while still consuming the 'token'.
+//
+// In other words, unpark() needs to synchronize with the part of park() that
+// consumes the token and returns.
+//
+// This is done with a release-acquire synchronization, by using
+// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using
+// Ordering::Acquire when checking for this state in park().
+impl Parker {
+ /// Construct the futex parker. The UNIX parker implementation
+ /// requires this to happen in-place.
+ pub unsafe fn new(parker: *mut Parker) {
+ parker.write(Self { state: AtomicU32::new(EMPTY) });
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+ loop {
+ // Wait for something to happen, assuming it's still set to PARKED.
+ futex_wait(&self.state, PARKED, None);
+ // Change NOTIFIED=>EMPTY and return in that case.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() {
+ return;
+ } else {
+ // Spurious wake up. We loop to try again.
+ }
+ }
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`. This implementation doesn't
+ // require `Pin`, but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+ // Wait for something to happen, assuming it's still set to PARKED.
+ futex_wait(&self.state, PARKED, Some(timeout));
+ // This is not just a store, because we need to establish a
+ // release-acquire ordering with unpark().
+ if self.state.swap(EMPTY, Acquire) == NOTIFIED {
+ // Woke up because of unpark().
+ } else {
+ // Timeout or spurious wake up.
+ // We return either way, because we can't easily tell if it was the
+ // timeout or not.
+ }
+ }
+
+ // This implementation doesn't require `Pin`, but other implementations do.
+ #[inline]
+ pub fn unpark(self: Pin<&Self>) {
+ // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and
+ // wake the thread in the first case.
+ //
+ // Note that even NOTIFIED=>NOTIFIED results in a write. This is on
+ // purpose, to make sure every unpark() has a release-acquire ordering
+ // with park().
+ if self.state.swap(NOTIFIED, Release) == PARKED {
+ futex_wake(&self.state);
+ }
+ }
+}
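
The choice of constants is what lets a single `fetch_sub(1)` perform both transitions at the top of `park`: NOTIFIED (1) becomes EMPTY (0), and EMPTY (0) wraps around to PARKED (`u32::MAX`). A self-contained check of just that arithmetic (illustrative, not part of this change):

use std::sync::atomic::{AtomicU32, Ordering::Acquire};

const EMPTY: u32 = 0;
const NOTIFIED: u32 = 1;
const PARKED: u32 = u32::MAX;

#[test]
fn fetch_sub_covers_both_park_transitions() {
    // Token present: consume it and end up EMPTY.
    let state = AtomicU32::new(NOTIFIED);
    assert_eq!(state.fetch_sub(1, Acquire), NOTIFIED);
    assert_eq!(state.load(Acquire), EMPTY);

    // No token: wrap below zero and end up PARKED.
    let state = AtomicU32::new(EMPTY);
    assert_eq!(state.fetch_sub(1, Acquire), EMPTY);
    assert_eq!(state.load(Acquire), PARKED);
}
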
diff --git a/library/std/src/sys_common/thread_parker/generic.rs b/library/std/src/sys_common/thread_parker/generic.rs
new file mode 100644
index 000000000..f3d8b34d3
--- /dev/null
+++ b/library/std/src/sys_common/thread_parker/generic.rs
@@ -0,0 +1,125 @@
+//! Parker implementation based on a Mutex and Condvar.
+
+use crate::pin::Pin;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::{Condvar, Mutex};
+use crate::time::Duration;
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
+
+pub struct Parker {
+ state: AtomicUsize,
+ lock: Mutex<()>,
+ cvar: Condvar,
+}
+
+impl Parker {
+ /// Construct the generic parker. The UNIX parker implementation
+ /// requires this to happen in-place.
+ pub unsafe fn new(parker: *mut Parker) {
+ parker.write(Parker {
+ state: AtomicUsize::new(EMPTY),
+ lock: Mutex::new(()),
+ cvar: Condvar::new(),
+ });
+ }
+
+ // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // If we were previously notified then we consume this notification and
+ // return quickly.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ // Otherwise we need to coordinate going to sleep
+ let mut m = self.lock.lock().unwrap();
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => panic!("inconsistent park state"),
+ }
+ loop {
+ m = self.cvar.wait(m).unwrap();
+ match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+ Ok(_) => return, // got a notification
+ Err(_) => {} // spurious wakeup, go back to sleep
+ }
+ }
+ }
+
+ // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ // Like `park` above, we have a fast path for an already-notified thread that
+ // lets us return quickly; afterwards we start coordinating for a sleep.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+ let m = self.lock.lock().unwrap();
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read again here, see `park`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => panic!("inconsistent park_timeout state"),
+ }
+
+ // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+ // from a notification we just want to unconditionally set the state back to
+ // empty, either consuming a notification or un-flagging ourselves as
+ // parked.
+ let (_m, _result) = self.cvar.wait_timeout(m, dur).unwrap();
+ match self.state.swap(EMPTY, SeqCst) {
+ NOTIFIED => {} // got a notification, hurray!
+ PARKED => {} // no notification, alas
+ n => panic!("inconsistent park_timeout state: {n}"),
+ }
+ }
+
+ // This implementation doesn't require `Pin`, but other implementations do.
+ pub fn unpark(self: Pin<&Self>) {
+ // To ensure the unparked thread will observe any writes we made
+ // before this call, we must perform a release operation that `park`
+ // can synchronize with. To do that we must write `NOTIFIED` even if
+ // `state` is already `NOTIFIED`. That is why this must be a swap
+ // rather than a compare-and-swap that returns if it reads `NOTIFIED`
+ // on failure.
+ match self.state.swap(NOTIFIED, SeqCst) {
+ EMPTY => return, // no one was waiting
+ NOTIFIED => return, // already unparked
+ PARKED => {} // gotta go wake someone up
+ _ => panic!("inconsistent state in unpark"),
+ }
+
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ drop(self.lock.lock().unwrap());
+ self.cvar.notify_one()
+ }
+}
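
Usage sketch for the parker interface as a whole: construction is in-place and the methods take `Pin<&Self>`, so an owner reserves storage, initializes it once, and then only ever hands out pinned shared references. The `demo` helper and its `MaybeUninit` storage are illustrative; the real `Parker` is presumably owned by the thread internals:

use crate::mem::MaybeUninit;
use crate::pin::Pin;

// Illustrative: reserve storage, construct the Parker in place, pin it, use it.
unsafe fn demo(slot: &mut MaybeUninit<Parker>) {
    Parker::new(slot.as_mut_ptr());
    // The caller promises the slot is never moved while the Parker is live.
    let parker: Pin<&Parker> = Pin::new_unchecked(slot.assume_init_ref());

    parker.unpark(); // store the token; any thread may do this, any number of times
    parker.park(); // the token is present, so this consumes it and returns at once
}
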
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs
new file mode 100644
index 000000000..cbd7832eb
--- /dev/null
+++ b/library/std/src/sys_common/thread_parker/mod.rs
@@ -0,0 +1,22 @@
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_arch = "wasm32", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+ ))] {
+ mod futex;
+ pub use futex::Parker;
+ } else if #[cfg(target_os = "solid_asp3")] {
+ mod wait_flag;
+ pub use wait_flag::Parker;
+ } else if #[cfg(any(windows, target_family = "unix"))] {
+ pub use crate::sys::thread_parker::Parker;
+ } else {
+ mod generic;
+ pub use generic::Parker;
+ }
+}
diff --git a/library/std/src/sys_common/thread_parker/wait_flag.rs b/library/std/src/sys_common/thread_parker/wait_flag.rs
new file mode 100644
index 000000000..6561c1866
--- /dev/null
+++ b/library/std/src/sys_common/thread_parker/wait_flag.rs
@@ -0,0 +1,102 @@
+//! A wait-flag-based thread parker.
+//!
+//! Some operating systems provide low-level parking primitives like wait counts,
+//! event flags or semaphores which are not susceptible to race conditions (meaning
+//! the wakeup can occur before the wait operation). To implement the `std` thread
+//! parker on top of these primitives, we only have to ensure that parking is fast
+//! when the thread token is available, the atomic ordering guarantees are maintained
+//! and spurious wakeups are minimized.
+//!
+//! To achieve this, this parker uses an atomic variable with three states: `EMPTY`,
+//! `PARKED` and `NOTIFIED`:
+//! * `EMPTY` means the token has not been made available, but the thread is not
+//! currently waiting on it.
+//! * `PARKED` means the token is not available and the thread is parked.
+//! * `NOTIFIED` means the token is available.
+//!
+//! `park` and `park_timeout` change the state from `EMPTY` to `PARKED` and from
+//! `NOTIFIED` to `EMPTY`. If the state was `NOTIFIED`, the thread was unparked and
+//! execution can continue without calling into the OS. If the state was `EMPTY`,
+//! the token is not available and the thread waits on the primitive (here called
+//! "wait flag").
+//!
+//! `unpark` changes the state to `NOTIFIED`. If the state was `PARKED`, the thread
+//! is or will be sleeping on the wait flag, so we raise it.
+
+use crate::pin::Pin;
+use crate::sync::atomic::AtomicI8;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use crate::sys::wait_flag::WaitFlag;
+use crate::time::Duration;
+
+const EMPTY: i8 = 0;
+const PARKED: i8 = -1;
+const NOTIFIED: i8 = 1;
+
+pub struct Parker {
+ state: AtomicI8,
+ wait_flag: WaitFlag,
+}
+
+impl Parker {
+ /// Construct a parker for the current thread. The UNIX parker
+ /// implementation requires this to happen in-place.
+ pub unsafe fn new(parker: *mut Parker) {
+ parker.write(Parker { state: AtomicI8::new(EMPTY), wait_flag: WaitFlag::new() })
+ }
+
+ // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+ pub unsafe fn park(self: Pin<&Self>) {
+ match self.state.fetch_sub(1, Acquire) {
+ // NOTIFIED => EMPTY
+ NOTIFIED => return,
+ // EMPTY => PARKED
+ EMPTY => (),
+ _ => panic!("inconsistent park state"),
+ }
+
+ // Avoid waking up from spurious wakeups (these are quite likely, see below).
+ loop {
+ self.wait_flag.wait();
+
+ match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) {
+ Ok(_) => return,
+ Err(PARKED) => (),
+ Err(_) => panic!("inconsistent park state"),
+ }
+ }
+ }
+
+ // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ match self.state.fetch_sub(1, Acquire) {
+ NOTIFIED => return,
+ EMPTY => (),
+ _ => panic!("inconsistent park state"),
+ }
+
+ self.wait_flag.wait_timeout(dur);
+
+ // Either a wakeup or a timeout occurred. Wakeups may be spurious, as there can be
+ // a race condition when `unpark` is performed between receiving the timeout and
+ // resetting the state, resulting in the eventflag being set unnecessarily. `park`
+ // is protected against this by looping until the token is actually given, but
+ // here we cannot easily tell.
+
+ // Use `swap` to provide acquire ordering.
+ match self.state.swap(EMPTY, Acquire) {
+ NOTIFIED => (),
+ PARKED => (),
+ _ => panic!("inconsistent park state"),
+ }
+ }
+
+ // This implementation doesn't require `Pin`, but other implementations do.
+ pub fn unpark(self: Pin<&Self>) {
+ let state = self.state.swap(NOTIFIED, Release);
+
+ if state == PARKED {
+ self.wait_flag.raise();
+ }
+ }
+}
diff --git a/library/std/src/sys_common/wtf8.rs b/library/std/src/sys_common/wtf8.rs
new file mode 100644
index 000000000..57fa49893
--- /dev/null
+++ b/library/std/src/sys_common/wtf8.rs
@@ -0,0 +1,926 @@
+//! Implementation of [the WTF-8 encoding](https://simonsapin.github.io/wtf-8/).
+//!
+//! This library uses Rust’s type system to maintain
+//! [well-formedness](https://simonsapin.github.io/wtf-8/#well-formed),
+//! like the `String` and `&str` types do for UTF-8.
+//!
+//! Since [WTF-8 must not be used
+//! for interchange](https://simonsapin.github.io/wtf-8/#intended-audience),
+//! this library deliberately does not provide access to the underlying bytes
+//! of WTF-8 strings,
+//! nor can it decode WTF-8 from arbitrary bytes.
+//! WTF-8 strings can be obtained from UTF-8, UTF-16, or code points.
+
+// this module is imported from @SimonSapin's repo and has tons of dead code on
+// unix (it's mostly used on windows), so don't worry about dead code here.
+#![allow(dead_code)]
+
+#[cfg(test)]
+mod tests;
+
+use core::str::next_code_point;
+
+use crate::borrow::Cow;
+use crate::char;
+use crate::collections::TryReserveError;
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+use crate::iter::FusedIterator;
+use crate::mem;
+use crate::ops;
+use crate::rc::Rc;
+use crate::slice;
+use crate::str;
+use crate::sync::Arc;
+use crate::sys_common::AsInner;
+
+const UTF8_REPLACEMENT_CHARACTER: &str = "\u{FFFD}";
+
+/// A Unicode code point: from U+0000 to U+10FFFF.
+///
+/// Compare with the `char` type,
+/// which represents a Unicode scalar value:
+/// a code point that is not a surrogate (U+D800 to U+DFFF).
+#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy)]
+pub struct CodePoint {
+ value: u32,
+}
+
+/// Format the code point as `U+` followed by four to six hexadecimal digits.
+/// Example: `U+1F4A9`
+impl fmt::Debug for CodePoint {
+ #[inline]
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(formatter, "U+{:04X}", self.value)
+ }
+}
+
+impl CodePoint {
+ /// Unsafely creates a new `CodePoint` without checking the value.
+ ///
+ /// Only use when `value` is known to be less than or equal to 0x10FFFF.
+ #[inline]
+ pub unsafe fn from_u32_unchecked(value: u32) -> CodePoint {
+ CodePoint { value }
+ }
+
+ /// Creates a new `CodePoint` if the value is a valid code point.
+ ///
+ /// Returns `None` if `value` is above 0x10FFFF.
+ #[inline]
+ pub fn from_u32(value: u32) -> Option<CodePoint> {
+ match value {
+ 0..=0x10FFFF => Some(CodePoint { value }),
+ _ => None,
+ }
+ }
+
+ /// Creates a new `CodePoint` from a `char`.
+ ///
+ /// Since all Unicode scalar values are code points, this always succeeds.
+ #[inline]
+ pub fn from_char(value: char) -> CodePoint {
+ CodePoint { value: value as u32 }
+ }
+
+ /// Returns the numeric value of the code point.
+ #[inline]
+ pub fn to_u32(&self) -> u32 {
+ self.value
+ }
+
+ /// Optionally returns a Unicode scalar value for the code point.
+ ///
+ /// Returns `None` if the code point is a surrogate (from U+D800 to U+DFFF).
+ #[inline]
+ pub fn to_char(&self) -> Option<char> {
+ match self.value {
+ 0xD800..=0xDFFF => None,
+ _ => Some(unsafe { char::from_u32_unchecked(self.value) }),
+ }
+ }
+
+ /// Returns a Unicode scalar value for the code point.
+ ///
+ /// Returns `'\u{FFFD}'` (the replacement character “�”)
+ /// if the code point is a surrogate (from U+D800 to U+DFFF).
+ #[inline]
+ pub fn to_char_lossy(&self) -> char {
+ self.to_char().unwrap_or('\u{FFFD}')
+ }
+}
+
+/// An owned, growable string of well-formed WTF-8 data.
+///
+/// Similar to `String`, but can additionally contain surrogate code points
+/// if they’re not in a surrogate pair.
+#[derive(Eq, PartialEq, Ord, PartialOrd, Clone)]
+pub struct Wtf8Buf {
+ bytes: Vec<u8>,
+}
+
+impl ops::Deref for Wtf8Buf {
+ type Target = Wtf8;
+
+ fn deref(&self) -> &Wtf8 {
+ self.as_slice()
+ }
+}
+
+impl ops::DerefMut for Wtf8Buf {
+ fn deref_mut(&mut self) -> &mut Wtf8 {
+ self.as_mut_slice()
+ }
+}
+
+/// Format the string with double quotes,
+/// and surrogates as `\u` followed by four hexadecimal digits.
+/// Example: `"a\u{D800}"` for a string with code points [U+0061, U+D800]
+impl fmt::Debug for Wtf8Buf {
+ #[inline]
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, formatter)
+ }
+}
+
+impl Wtf8Buf {
+ /// Creates a new, empty WTF-8 string.
+ #[inline]
+ pub fn new() -> Wtf8Buf {
+ Wtf8Buf { bytes: Vec::new() }
+ }
+
+ /// Creates a new, empty WTF-8 string with pre-allocated capacity for `capacity` bytes.
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Wtf8Buf {
+ Wtf8Buf { bytes: Vec::with_capacity(capacity) }
+ }
+
+ /// Creates a WTF-8 string from a UTF-8 `String`.
+ ///
+ /// This takes ownership of the `String` and does not copy.
+ ///
+ /// Since WTF-8 is a superset of UTF-8, this always succeeds.
+ #[inline]
+ pub fn from_string(string: String) -> Wtf8Buf {
+ Wtf8Buf { bytes: string.into_bytes() }
+ }
+
+ /// Creates a WTF-8 string from a UTF-8 `&str` slice.
+ ///
+ /// This copies the content of the slice.
+ ///
+ /// Since WTF-8 is a superset of UTF-8, this always succeeds.
+ #[inline]
+ pub fn from_str(str: &str) -> Wtf8Buf {
+ Wtf8Buf { bytes: <[_]>::to_vec(str.as_bytes()) }
+ }
+
+ pub fn clear(&mut self) {
+ self.bytes.clear()
+ }
+
+ /// Creates a WTF-8 string from a potentially ill-formed UTF-16 slice of 16-bit code units.
+ ///
+ /// This is lossless: calling `.encode_wide()` on the resulting string
+ /// will always return the original code units.
+ pub fn from_wide(v: &[u16]) -> Wtf8Buf {
+ let mut string = Wtf8Buf::with_capacity(v.len());
+ for item in char::decode_utf16(v.iter().cloned()) {
+ match item {
+ Ok(ch) => string.push_char(ch),
+ Err(surrogate) => {
+ let surrogate = surrogate.unpaired_surrogate();
+ // Surrogates are known to be in the code point range.
+ let code_point = unsafe { CodePoint::from_u32_unchecked(surrogate as u32) };
+ // Skip the WTF-8 concatenation check,
+ // surrogate pairs are already decoded by decode_utf16
+ string.push_code_point_unchecked(code_point)
+ }
+ }
+ }
+ string
+ }
+
+ /// Copied from String::push
+ /// This does **not** include the WTF-8 concatenation check.
+ fn push_code_point_unchecked(&mut self, code_point: CodePoint) {
+ let mut bytes = [0; 4];
+ let bytes = char::encode_utf8_raw(code_point.value, &mut bytes);
+ self.bytes.extend_from_slice(bytes)
+ }
+
+ #[inline]
+ pub fn as_slice(&self) -> &Wtf8 {
+ unsafe { Wtf8::from_bytes_unchecked(&self.bytes) }
+ }
+
+ #[inline]
+ pub fn as_mut_slice(&mut self) -> &mut Wtf8 {
+ unsafe { Wtf8::from_mut_bytes_unchecked(&mut self.bytes) }
+ }
+
+ /// Reserves capacity for at least `additional` more bytes to be inserted
+ /// in the given `Wtf8Buf`.
+ /// The collection may reserve more space to avoid frequent reallocations.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ self.bytes.reserve(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more length units
+ /// in the given `Wtf8Buf`. The `Wtf8Buf` may reserve more space to avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional`. Does nothing if
+ /// capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ #[inline]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.bytes.try_reserve(additional)
+ }
+
+ #[inline]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.bytes.reserve_exact(additional)
+ }
+
+ /// Tries to reserve the minimum capacity for exactly `additional`
+ /// length units in the given `Wtf8Buf`. After calling
+ /// `try_reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the `Wtf8Buf` more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: Wtf8Buf::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ #[inline]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.bytes.try_reserve_exact(additional)
+ }
+
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.bytes.shrink_to_fit()
+ }
+
+ #[inline]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.bytes.shrink_to(min_capacity)
+ }
+
+ /// Returns the number of bytes that this string buffer can hold without reallocating.
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.bytes.capacity()
+ }
+
+ /// Append a UTF-8 slice at the end of the string.
+ #[inline]
+ pub fn push_str(&mut self, other: &str) {
+ self.bytes.extend_from_slice(other.as_bytes())
+ }
+
+ /// Append a WTF-8 slice at the end of the string.
+ ///
+ /// This replaces newly paired surrogates at the boundary
+ /// with a supplementary code point,
+ /// like concatenating ill-formed UTF-16 strings effectively would.
+ #[inline]
+ pub fn push_wtf8(&mut self, other: &Wtf8) {
+ match ((&*self).final_lead_surrogate(), other.initial_trail_surrogate()) {
+ // Replace newly paired surrogates by a supplementary code point.
+ (Some(lead), Some(trail)) => {
+ let len_without_lead_surrogate = self.len() - 3;
+ self.bytes.truncate(len_without_lead_surrogate);
+ let other_without_trail_surrogate = &other.bytes[3..];
+ // 4 bytes for the supplementary code point
+ self.bytes.reserve(4 + other_without_trail_surrogate.len());
+ self.push_char(decode_surrogate_pair(lead, trail));
+ self.bytes.extend_from_slice(other_without_trail_surrogate);
+ }
+ _ => self.bytes.extend_from_slice(&other.bytes),
+ }
+ }
+
+ /// Append a Unicode scalar value at the end of the string.
+ #[inline]
+ pub fn push_char(&mut self, c: char) {
+ self.push_code_point_unchecked(CodePoint::from_char(c))
+ }
+
+ /// Append a code point at the end of the string.
+ ///
+ /// This replaces newly paired surrogates at the boundary
+ /// with a supplementary code point,
+ /// like concatenating ill-formed UTF-16 strings effectively would.
+ #[inline]
+ pub fn push(&mut self, code_point: CodePoint) {
+ if let trail @ 0xDC00..=0xDFFF = code_point.to_u32() {
+ if let Some(lead) = (&*self).final_lead_surrogate() {
+ let len_without_lead_surrogate = self.len() - 3;
+ self.bytes.truncate(len_without_lead_surrogate);
+ self.push_char(decode_surrogate_pair(lead, trail as u16));
+ return;
+ }
+ }
+
+ // No newly paired surrogates at the boundary.
+ self.push_code_point_unchecked(code_point)
+ }
+
+ /// Shortens a string to the specified length.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `new_len` > current length,
+ /// or if `new_len` is not a code point boundary.
+ #[inline]
+ pub fn truncate(&mut self, new_len: usize) {
+ assert!(is_code_point_boundary(self, new_len));
+ self.bytes.truncate(new_len)
+ }
+
+ /// Consumes the WTF-8 string and tries to convert it to UTF-8.
+ ///
+ /// This does not copy the data.
+ ///
+ /// If the contents are not well-formed UTF-8
+ /// (that is, if the string contains surrogates),
+ /// the original WTF-8 string is returned instead.
+ pub fn into_string(self) -> Result<String, Wtf8Buf> {
+ match self.next_surrogate(0) {
+ None => Ok(unsafe { String::from_utf8_unchecked(self.bytes) }),
+ Some(_) => Err(self),
+ }
+ }
+
+ /// Consumes the WTF-8 string and converts it lossily to UTF-8.
+ ///
+ /// This does not copy the data (but may overwrite parts of it in place).
+ ///
+ /// Surrogates are replaced with `"\u{FFFD}"` (the replacement character “�”)
+ pub fn into_string_lossy(mut self) -> String {
+ let mut pos = 0;
+ loop {
+ match self.next_surrogate(pos) {
+ Some((surrogate_pos, _)) => {
+ pos = surrogate_pos + 3;
+ self.bytes[surrogate_pos..pos]
+ .copy_from_slice(UTF8_REPLACEMENT_CHARACTER.as_bytes());
+ }
+ None => return unsafe { String::from_utf8_unchecked(self.bytes) },
+ }
+ }
+ }
+
+ /// Converts this `Wtf8Buf` into a boxed `Wtf8`.
+ #[inline]
+ pub fn into_box(self) -> Box<Wtf8> {
+ unsafe { mem::transmute(self.bytes.into_boxed_slice()) }
+ }
+
+ /// Converts a `Box<Wtf8>` into a `Wtf8Buf`.
+ pub fn from_box(boxed: Box<Wtf8>) -> Wtf8Buf {
+ let bytes: Box<[u8]> = unsafe { mem::transmute(boxed) };
+ Wtf8Buf { bytes: bytes.into_vec() }
+ }
+}
+
+/// Creates a new WTF-8 string from an iterator of code points.
+///
+/// This replaces surrogate code point pairs with supplementary code points,
+/// like concatenating ill-formed UTF-16 strings effectively would.
+impl FromIterator<CodePoint> for Wtf8Buf {
+ fn from_iter<T: IntoIterator<Item = CodePoint>>(iter: T) -> Wtf8Buf {
+ let mut string = Wtf8Buf::new();
+ string.extend(iter);
+ string
+ }
+}
+
+/// Append code points from an iterator to the string.
+///
+/// This replaces surrogate code point pairs with supplementary code points,
+/// like concatenating ill-formed UTF-16 strings effectively would.
+impl Extend<CodePoint> for Wtf8Buf {
+ fn extend<T: IntoIterator<Item = CodePoint>>(&mut self, iter: T) {
+ let iterator = iter.into_iter();
+ let (low, _high) = iterator.size_hint();
+ // Lower bound of one byte per code point (ASCII only)
+ self.bytes.reserve(low);
+ iterator.for_each(move |code_point| self.push(code_point));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, code_point: CodePoint) {
+ self.push(code_point);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ // Lower bound of one byte per code point (ASCII only)
+ self.bytes.reserve(additional);
+ }
+}
+
+/// A borrowed slice of well-formed WTF-8 data.
+///
+/// Similar to `&str`, but can additionally contain surrogate code points
+/// if they’re not in a surrogate pair.
+#[derive(Eq, Ord, PartialEq, PartialOrd)]
+pub struct Wtf8 {
+ bytes: [u8],
+}
+
+impl AsInner<[u8]> for Wtf8 {
+ fn as_inner(&self) -> &[u8] {
+ &self.bytes
+ }
+}
+
+/// Format the slice with double quotes,
+/// and surrogates as `\u` followed by four hexadecimal digits.
+/// Example: `"a\u{D800}"` for a slice with code points [U+0061, U+D800]
+impl fmt::Debug for Wtf8 {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fn write_str_escaped(f: &mut fmt::Formatter<'_>, s: &str) -> fmt::Result {
+ use crate::fmt::Write;
+ for c in s.chars().flat_map(|c| c.escape_debug()) {
+ f.write_char(c)?
+ }
+ Ok(())
+ }
+
+ formatter.write_str("\"")?;
+ let mut pos = 0;
+ while let Some((surrogate_pos, surrogate)) = self.next_surrogate(pos) {
+ write_str_escaped(formatter, unsafe {
+ str::from_utf8_unchecked(&self.bytes[pos..surrogate_pos])
+ })?;
+ write!(formatter, "\\u{{{:x}}}", surrogate)?;
+ pos = surrogate_pos + 3;
+ }
+ write_str_escaped(formatter, unsafe { str::from_utf8_unchecked(&self.bytes[pos..]) })?;
+ formatter.write_str("\"")
+ }
+}
+
+impl fmt::Display for Wtf8 {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let wtf8_bytes = &self.bytes;
+ let mut pos = 0;
+ loop {
+ match self.next_surrogate(pos) {
+ Some((surrogate_pos, _)) => {
+ formatter.write_str(unsafe {
+ str::from_utf8_unchecked(&wtf8_bytes[pos..surrogate_pos])
+ })?;
+ formatter.write_str(UTF8_REPLACEMENT_CHARACTER)?;
+ pos = surrogate_pos + 3;
+ }
+ None => {
+ let s = unsafe { str::from_utf8_unchecked(&wtf8_bytes[pos..]) };
+ if pos == 0 { return s.fmt(formatter) } else { return formatter.write_str(s) }
+ }
+ }
+ }
+ }
+}
+
+impl Wtf8 {
+ /// Creates a WTF-8 slice from a UTF-8 `&str` slice.
+ ///
+ /// Since WTF-8 is a superset of UTF-8, this always succeeds.
+ #[inline]
+ pub fn from_str(value: &str) -> &Wtf8 {
+ unsafe { Wtf8::from_bytes_unchecked(value.as_bytes()) }
+ }
+
+ /// Creates a WTF-8 slice from a WTF-8 byte slice.
+ ///
+ /// Since the byte slice is not checked for valid WTF-8, this function is
+ /// marked unsafe.
+ #[inline]
+ unsafe fn from_bytes_unchecked(value: &[u8]) -> &Wtf8 {
+ mem::transmute(value)
+ }
+
+ /// Creates a mutable WTF-8 slice from a mutable WTF-8 byte slice.
+ ///
+ /// Since the byte slice is not checked for valid WTF-8, this function is
+ /// marked unsafe.
+ #[inline]
+ unsafe fn from_mut_bytes_unchecked(value: &mut [u8]) -> &mut Wtf8 {
+ mem::transmute(value)
+ }
+
+ /// Returns the length, in WTF-8 bytes.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.bytes.len()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.bytes.is_empty()
+ }
+
+ /// Returns the code point at `position` if it is in the ASCII range,
+ /// or `b'\xFF'` otherwise.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `position` is beyond the end of the string.
+ #[inline]
+ pub fn ascii_byte_at(&self, position: usize) -> u8 {
+ match self.bytes[position] {
+ ascii_byte @ 0x00..=0x7F => ascii_byte,
+ _ => 0xFF,
+ }
+ }
+
+ /// Returns an iterator for the string’s code points.
+ #[inline]
+ pub fn code_points(&self) -> Wtf8CodePoints<'_> {
+ Wtf8CodePoints { bytes: self.bytes.iter() }
+ }
+
+ /// Tries to convert the string to UTF-8 and return a `&str` slice.
+ ///
+ /// Returns `None` if the string contains surrogates.
+ ///
+ /// This does not copy the data.
+ #[inline]
+ pub fn as_str(&self) -> Option<&str> {
+ // Well-formed WTF-8 is also well-formed UTF-8
+ // if and only if it contains no surrogate.
+ match self.next_surrogate(0) {
+ None => Some(unsafe { str::from_utf8_unchecked(&self.bytes) }),
+ Some(_) => None,
+ }
+ }
+
+ /// Lossily converts the string to UTF-8.
+ /// Returns a UTF-8 `&str` slice if the contents are well-formed in UTF-8.
+ ///
+ /// Surrogates are replaced with `"\u{FFFD}"` (the replacement character “�”).
+ ///
+ /// This only copies the data if necessary (if it contains any surrogate).
+ pub fn to_string_lossy(&self) -> Cow<'_, str> {
+ let surrogate_pos = match self.next_surrogate(0) {
+ None => return Cow::Borrowed(unsafe { str::from_utf8_unchecked(&self.bytes) }),
+ Some((pos, _)) => pos,
+ };
+ let wtf8_bytes = &self.bytes;
+ let mut utf8_bytes = Vec::with_capacity(self.len());
+ utf8_bytes.extend_from_slice(&wtf8_bytes[..surrogate_pos]);
+ utf8_bytes.extend_from_slice(UTF8_REPLACEMENT_CHARACTER.as_bytes());
+ let mut pos = surrogate_pos + 3;
+ loop {
+ match self.next_surrogate(pos) {
+ Some((surrogate_pos, _)) => {
+ utf8_bytes.extend_from_slice(&wtf8_bytes[pos..surrogate_pos]);
+ utf8_bytes.extend_from_slice(UTF8_REPLACEMENT_CHARACTER.as_bytes());
+ pos = surrogate_pos + 3;
+ }
+ None => {
+ utf8_bytes.extend_from_slice(&wtf8_bytes[pos..]);
+ return Cow::Owned(unsafe { String::from_utf8_unchecked(utf8_bytes) });
+ }
+ }
+ }
+ }
+
+ /// Converts the WTF-8 string to potentially ill-formed UTF-16
+ /// and returns an iterator of 16-bit code units.
+ ///
+ /// This is lossless:
+ /// calling `Wtf8Buf::from_wide` on the resulting code units
+ /// would always return the original WTF-8 string.
+ #[inline]
+ pub fn encode_wide(&self) -> EncodeWide<'_> {
+ EncodeWide { code_points: self.code_points(), extra: 0 }
+ }
+
+ #[inline]
+ fn next_surrogate(&self, mut pos: usize) -> Option<(usize, u16)> {
+ let mut iter = self.bytes[pos..].iter();
+ loop {
+ let b = *iter.next()?;
+ if b < 0x80 {
+ pos += 1;
+ } else if b < 0xE0 {
+ iter.next();
+ pos += 2;
+ } else if b == 0xED {
+ match (iter.next(), iter.next()) {
+ (Some(&b2), Some(&b3)) if b2 >= 0xA0 => {
+ return Some((pos, decode_surrogate(b2, b3)));
+ }
+ _ => pos += 3,
+ }
+ } else if b < 0xF0 {
+ iter.next();
+ iter.next();
+ pos += 3;
+ } else {
+ iter.next();
+ iter.next();
+ iter.next();
+ pos += 4;
+ }
+ }
+ }
+
+ #[inline]
+ fn final_lead_surrogate(&self) -> Option<u16> {
+ match self.bytes {
+ [.., 0xED, b2 @ 0xA0..=0xAF, b3] => Some(decode_surrogate(b2, b3)),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ fn initial_trail_surrogate(&self) -> Option<u16> {
+ match self.bytes {
+ [0xED, b2 @ 0xB0..=0xBF, b3, ..] => Some(decode_surrogate(b2, b3)),
+ _ => None,
+ }
+ }
+
+ pub fn clone_into(&self, buf: &mut Wtf8Buf) {
+ self.bytes.clone_into(&mut buf.bytes)
+ }
+
+ /// Boxes this `Wtf8`.
+ #[inline]
+ pub fn into_box(&self) -> Box<Wtf8> {
+ let boxed: Box<[u8]> = self.bytes.into();
+ unsafe { mem::transmute(boxed) }
+ }
+
+ /// Creates a boxed, empty `Wtf8`.
+ pub fn empty_box() -> Box<Wtf8> {
+ let boxed: Box<[u8]> = Default::default();
+ unsafe { mem::transmute(boxed) }
+ }
+
+ #[inline]
+ pub fn into_arc(&self) -> Arc<Wtf8> {
+ let arc: Arc<[u8]> = Arc::from(&self.bytes);
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const Wtf8) }
+ }
+
+ #[inline]
+ pub fn into_rc(&self) -> Rc<Wtf8> {
+ let rc: Rc<[u8]> = Rc::from(&self.bytes);
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const Wtf8) }
+ }
+
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ self.bytes.make_ascii_lowercase()
+ }
+
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ self.bytes.make_ascii_uppercase()
+ }
+
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> Wtf8Buf {
+ Wtf8Buf { bytes: self.bytes.to_ascii_lowercase() }
+ }
+
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> Wtf8Buf {
+ Wtf8Buf { bytes: self.bytes.to_ascii_uppercase() }
+ }
+
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+ self.bytes.is_ascii()
+ }
+
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
+ self.bytes.eq_ignore_ascii_case(&other.bytes)
+ }
+}
+
+/// Returns a slice of the given string for the byte range \[`begin`..`end`).
+///
+/// # Panics
+///
+/// Panics when `begin` and `end` do not point to code point boundaries,
+/// or point beyond the end of the string.
+impl ops::Index<ops::Range<usize>> for Wtf8 {
+ type Output = Wtf8;
+
+ #[inline]
+ fn index(&self, range: ops::Range<usize>) -> &Wtf8 {
+ // is_code_point_boundary checks that the index is in [0, .len()]
+ if range.start <= range.end
+ && is_code_point_boundary(self, range.start)
+ && is_code_point_boundary(self, range.end)
+ {
+ unsafe { slice_unchecked(self, range.start, range.end) }
+ } else {
+ slice_error_fail(self, range.start, range.end)
+ }
+ }
+}
+
+/// Returns a slice of the given string from byte `begin` to its end.
+///
+/// # Panics
+///
+/// Panics when `begin` is not at a code point boundary,
+/// or is beyond the end of the string.
+impl ops::Index<ops::RangeFrom<usize>> for Wtf8 {
+ type Output = Wtf8;
+
+ #[inline]
+ fn index(&self, range: ops::RangeFrom<usize>) -> &Wtf8 {
+ // is_code_point_boundary checks that the index is in [0, .len()]
+ if is_code_point_boundary(self, range.start) {
+ unsafe { slice_unchecked(self, range.start, self.len()) }
+ } else {
+ slice_error_fail(self, range.start, self.len())
+ }
+ }
+}
+
+/// Returns a slice of the given string from its beginning to byte `end`.
+///
+/// # Panics
+///
+/// Panics when `end` is not at a code point boundary,
+/// or is beyond the end of the string.
+impl ops::Index<ops::RangeTo<usize>> for Wtf8 {
+ type Output = Wtf8;
+
+ #[inline]
+ fn index(&self, range: ops::RangeTo<usize>) -> &Wtf8 {
+ // is_code_point_boundary checks that the index is in [0, .len()]
+ if is_code_point_boundary(self, range.end) {
+ unsafe { slice_unchecked(self, 0, range.end) }
+ } else {
+ slice_error_fail(self, 0, range.end)
+ }
+ }
+}
+
+impl ops::Index<ops::RangeFull> for Wtf8 {
+ type Output = Wtf8;
+
+ #[inline]
+ fn index(&self, _range: ops::RangeFull) -> &Wtf8 {
+ self
+ }
+}
+
+#[inline]
+fn decode_surrogate(second_byte: u8, third_byte: u8) -> u16 {
+ // The first byte is assumed to be 0xED
+ 0xD800 | (second_byte as u16 & 0x3F) << 6 | third_byte as u16 & 0x3F
+}
+
+#[inline]
+fn decode_surrogate_pair(lead: u16, trail: u16) -> char {
+ let code_point = 0x10000 + ((((lead - 0xD800) as u32) << 10) | (trail - 0xDC00) as u32);
+ unsafe { char::from_u32_unchecked(code_point) }
+}
+
+/// Copied from core::str::StrPrelude::is_char_boundary
+#[inline]
+pub fn is_code_point_boundary(slice: &Wtf8, index: usize) -> bool {
+ if index == slice.len() {
+ return true;
+ }
+ match slice.bytes.get(index) {
+ None => false,
+ Some(&b) => b < 128 || b >= 192,
+ }
+}
+
+/// Copied from core::str::raw::slice_unchecked
+#[inline]
+pub unsafe fn slice_unchecked(s: &Wtf8, begin: usize, end: usize) -> &Wtf8 {
+ // memory layout of a &[u8] and &Wtf8 are the same
+ Wtf8::from_bytes_unchecked(slice::from_raw_parts(s.bytes.as_ptr().add(begin), end - begin))
+}
+
+/// Copied from core::str::raw::slice_error_fail
+#[inline(never)]
+pub fn slice_error_fail(s: &Wtf8, begin: usize, end: usize) -> ! {
+ assert!(begin <= end);
+ panic!("index {begin} and/or {end} in `{s:?}` do not lie on character boundary");
+}
+
+/// Iterator for the code points of a WTF-8 string.
+///
+/// Created with the method `.code_points()`.
+#[derive(Clone)]
+pub struct Wtf8CodePoints<'a> {
+ bytes: slice::Iter<'a, u8>,
+}
+
+impl<'a> Iterator for Wtf8CodePoints<'a> {
+ type Item = CodePoint;
+
+ #[inline]
+ fn next(&mut self) -> Option<CodePoint> {
+ // SAFETY: `self.bytes` has been created from a WTF-8 string
+ unsafe { next_code_point(&mut self.bytes).map(|c| CodePoint { value: c }) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.bytes.len();
+ (len.saturating_add(3) / 4, Some(len))
+ }
+}
+
+/// Generates a wide character sequence for potentially ill-formed UTF-16.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct EncodeWide<'a> {
+ code_points: Wtf8CodePoints<'a>,
+ extra: u16,
+}
+
+// Copied from libunicode/u_str.rs
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for EncodeWide<'a> {
+ type Item = u16;
+
+ #[inline]
+ fn next(&mut self) -> Option<u16> {
+ if self.extra != 0 {
+ let tmp = self.extra;
+ self.extra = 0;
+ return Some(tmp);
+ }
+
+ let mut buf = [0; 2];
+ self.code_points.next().map(|code_point| {
+ let n = char::encode_utf16_raw(code_point.value, &mut buf).len();
+ if n == 2 {
+ self.extra = buf[1];
+ }
+ buf[0]
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (low, high) = self.code_points.size_hint();
+ let ext = (self.extra != 0) as usize;
+ // every code point gets either one or two u16s,
+ // so this iterator is between 1 and 2 times as
+ // long as the underlying iterator.
+ (low + ext, high.and_then(|n| n.checked_mul(2)).and_then(|n| n.checked_add(ext)))
+ }
+}
+
+#[stable(feature = "encode_wide_fused_iterator", since = "1.62.0")]
+impl FusedIterator for EncodeWide<'_> {}
+
+impl Hash for CodePoint {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.value.hash(state)
+ }
+}
+
+impl Hash for Wtf8Buf {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write(&self.bytes);
+ 0xfeu8.hash(state)
+ }
+}
+
+impl Hash for Wtf8 {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write(&self.bytes);
+ 0xfeu8.hash(state)
+ }
+}
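
To make the surrogate bit-twiddling concrete: a lead surrogate is encoded as `ED A0..AF xx`, a trail surrogate as `ED B0..BF xx`, and `decode_surrogate`/`decode_surrogate_pair` above undo that encoding. A small self-contained walkthrough with the same arithmetic (duplicated here for illustration rather than calling the private items):

#[test]
fn surrogate_bit_math_walkthrough() {
    // b"\xED\xA0\xBD" is the WTF-8 form of the lead surrogate U+D83D:
    // 0xD800 | ((0xA0 & 0x3F) << 6) | (0xBD & 0x3F)
    let lead = 0xD800u16 | ((0xA0u16 & 0x3F) << 6) | (0xBD & 0x3F);
    assert_eq!(lead, 0xD83D);

    // b"\xED\xB2\xA9" likewise decodes to the trail surrogate U+DCA9.
    let trail = 0xD800u16 | ((0xB2u16 & 0x3F) << 6) | (0xA9 & 0x3F);
    assert_eq!(trail, 0xDCA9);

    // Recombining the pair yields U+1F4A9, matching the "Magic!" cases in
    // the tests that follow.
    let code_point = 0x10000u32 + ((((lead - 0xD800) as u32) << 10) | (trail - 0xDC00) as u32);
    assert_eq!(code_point, 0x1F4A9);
    assert_eq!(char::from_u32(code_point), Some('💩'));
}
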
diff --git a/library/std/src/sys_common/wtf8/tests.rs b/library/std/src/sys_common/wtf8/tests.rs
new file mode 100644
index 000000000..931996791
--- /dev/null
+++ b/library/std/src/sys_common/wtf8/tests.rs
@@ -0,0 +1,409 @@
+use super::*;
+use crate::borrow::Cow;
+
+#[test]
+fn code_point_from_u32() {
+ assert!(CodePoint::from_u32(0).is_some());
+ assert!(CodePoint::from_u32(0xD800).is_some());
+ assert!(CodePoint::from_u32(0x10FFFF).is_some());
+ assert!(CodePoint::from_u32(0x110000).is_none());
+}
+
+#[test]
+fn code_point_to_u32() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ assert_eq!(c(0).to_u32(), 0);
+ assert_eq!(c(0xD800).to_u32(), 0xD800);
+ assert_eq!(c(0x10FFFF).to_u32(), 0x10FFFF);
+}
+
+#[test]
+fn code_point_from_char() {
+ assert_eq!(CodePoint::from_char('a').to_u32(), 0x61);
+ assert_eq!(CodePoint::from_char('💩').to_u32(), 0x1F4A9);
+}
+
+#[test]
+fn code_point_to_string() {
+ assert_eq!(format!("{:?}", CodePoint::from_char('a')), "U+0061");
+ assert_eq!(format!("{:?}", CodePoint::from_char('💩')), "U+1F4A9");
+}
+
+#[test]
+fn code_point_to_char() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ assert_eq!(c(0x61).to_char(), Some('a'));
+ assert_eq!(c(0x1F4A9).to_char(), Some('💩'));
+ assert_eq!(c(0xD800).to_char(), None);
+}
+
+#[test]
+fn code_point_to_char_lossy() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ assert_eq!(c(0x61).to_char_lossy(), 'a');
+ assert_eq!(c(0x1F4A9).to_char_lossy(), '💩');
+ assert_eq!(c(0xD800).to_char_lossy(), '\u{FFFD}');
+}
+
+#[test]
+fn wtf8buf_new() {
+ assert_eq!(Wtf8Buf::new().bytes, b"");
+}
+
+#[test]
+fn wtf8buf_from_str() {
+ assert_eq!(Wtf8Buf::from_str("").bytes, b"");
+ assert_eq!(Wtf8Buf::from_str("aé 💩").bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+}
+
+#[test]
+fn wtf8buf_from_string() {
+ assert_eq!(Wtf8Buf::from_string(String::from("")).bytes, b"");
+ assert_eq!(Wtf8Buf::from_string(String::from("aé 💩")).bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+}
+
+#[test]
+fn wtf8buf_from_wide() {
+ assert_eq!(Wtf8Buf::from_wide(&[]).bytes, b"");
+ assert_eq!(
+ Wtf8Buf::from_wide(&[0x61, 0xE9, 0x20, 0xD83D, 0xD83D, 0xDCA9]).bytes,
+ b"a\xC3\xA9 \xED\xA0\xBD\xF0\x9F\x92\xA9"
+ );
+}
+
+#[test]
+fn wtf8buf_push_str() {
+ let mut string = Wtf8Buf::new();
+ assert_eq!(string.bytes, b"");
+ string.push_str("aé 💩");
+ assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+}
+
+#[test]
+fn wtf8buf_push_char() {
+ let mut string = Wtf8Buf::from_str("aé ");
+ assert_eq!(string.bytes, b"a\xC3\xA9 ");
+ string.push_char('💩');
+ assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+}
+
+#[test]
+fn wtf8buf_push() {
+ let mut string = Wtf8Buf::from_str("aé ");
+ assert_eq!(string.bytes, b"a\xC3\xA9 ");
+ string.push(CodePoint::from_char('💩'));
+ assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+
+ let mut string = Wtf8Buf::new();
+ string.push(c(0xD83D)); // lead
+ string.push(c(0xDCA9)); // trail
+ assert_eq!(string.bytes, b"\xF0\x9F\x92\xA9"); // Magic!
+
+ let mut string = Wtf8Buf::new();
+ string.push(c(0xD83D)); // lead
+ string.push(c(0x20)); // not surrogate
+ string.push(c(0xDCA9)); // trail
+ assert_eq!(string.bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
+
+ let mut string = Wtf8Buf::new();
+ string.push(c(0xD800)); // lead
+ string.push(c(0xDBFF)); // lead
+ assert_eq!(string.bytes, b"\xED\xA0\x80\xED\xAF\xBF");
+
+ let mut string = Wtf8Buf::new();
+ string.push(c(0xD800)); // lead
+ string.push(c(0xE000)); // not surrogate
+ assert_eq!(string.bytes, b"\xED\xA0\x80\xEE\x80\x80");
+
+ let mut string = Wtf8Buf::new();
+ string.push(c(0xD7FF)); // not surrogate
+ string.push(c(0xDC00)); // trail
+ assert_eq!(string.bytes, b"\xED\x9F\xBF\xED\xB0\x80");
+
+ let mut string = Wtf8Buf::new();
+ string.push(c(0x61)); // not surrogate, < 3 bytes
+ string.push(c(0xDC00)); // trail
+ assert_eq!(string.bytes, b"\x61\xED\xB0\x80");
+
+ let mut string = Wtf8Buf::new();
+ string.push(c(0xDC00)); // trail
+ assert_eq!(string.bytes, b"\xED\xB0\x80");
+}
+
+#[test]
+fn wtf8buf_push_wtf8() {
+ let mut string = Wtf8Buf::from_str("aé");
+ assert_eq!(string.bytes, b"a\xC3\xA9");
+ string.push_wtf8(Wtf8::from_str(" 💩"));
+ assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+
+ fn w(v: &[u8]) -> &Wtf8 {
+ unsafe { Wtf8::from_bytes_unchecked(v) }
+ }
+
+ let mut string = Wtf8Buf::new();
+ string.push_wtf8(w(b"\xED\xA0\xBD")); // lead
+ string.push_wtf8(w(b"\xED\xB2\xA9")); // trail
+ assert_eq!(string.bytes, b"\xF0\x9F\x92\xA9"); // Magic!
+
+ let mut string = Wtf8Buf::new();
+ string.push_wtf8(w(b"\xED\xA0\xBD")); // lead
+ string.push_wtf8(w(b" ")); // not surrogate
+ string.push_wtf8(w(b"\xED\xB2\xA9")); // trail
+ assert_eq!(string.bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
+
+ let mut string = Wtf8Buf::new();
+ string.push_wtf8(w(b"\xED\xA0\x80")); // lead
+ string.push_wtf8(w(b"\xED\xAF\xBF")); // lead
+ assert_eq!(string.bytes, b"\xED\xA0\x80\xED\xAF\xBF");
+
+ let mut string = Wtf8Buf::new();
+ string.push_wtf8(w(b"\xED\xA0\x80")); // lead
+ string.push_wtf8(w(b"\xEE\x80\x80")); // not surrogate
+ assert_eq!(string.bytes, b"\xED\xA0\x80\xEE\x80\x80");
+
+ let mut string = Wtf8Buf::new();
+ string.push_wtf8(w(b"\xED\x9F\xBF")); // not surrogate
+ string.push_wtf8(w(b"\xED\xB0\x80")); // trail
+ assert_eq!(string.bytes, b"\xED\x9F\xBF\xED\xB0\x80");
+
+ let mut string = Wtf8Buf::new();
+ string.push_wtf8(w(b"a")); // not surrogate, < 3 bytes
+ string.push_wtf8(w(b"\xED\xB0\x80")); // trail
+ assert_eq!(string.bytes, b"\x61\xED\xB0\x80");
+
+ let mut string = Wtf8Buf::new();
+ string.push_wtf8(w(b"\xED\xB0\x80")); // trail
+ assert_eq!(string.bytes, b"\xED\xB0\x80");
+}
+
+#[test]
+fn wtf8buf_truncate() {
+ let mut string = Wtf8Buf::from_str("aé");
+ string.truncate(1);
+ assert_eq!(string.bytes, b"a");
+}
+
+#[test]
+#[should_panic]
+fn wtf8buf_truncate_fail_code_point_boundary() {
+ let mut string = Wtf8Buf::from_str("aé");
+ string.truncate(2);
+}
+
+#[test]
+#[should_panic]
+fn wtf8buf_truncate_fail_longer() {
+ let mut string = Wtf8Buf::from_str("aé");
+ string.truncate(4);
+}
+
+#[test]
+fn wtf8buf_into_string() {
+ let mut string = Wtf8Buf::from_str("aé 💩");
+ assert_eq!(string.clone().into_string(), Ok(String::from("aé 💩")));
+ string.push(CodePoint::from_u32(0xD800).unwrap());
+ assert_eq!(string.clone().into_string(), Err(string));
+}
+
+#[test]
+fn wtf8buf_into_string_lossy() {
+ let mut string = Wtf8Buf::from_str("aé 💩");
+ assert_eq!(string.clone().into_string_lossy(), String::from("aé 💩"));
+ string.push(CodePoint::from_u32(0xD800).unwrap());
+ assert_eq!(string.clone().into_string_lossy(), String::from("aé 💩�"));
+}
+
+#[test]
+fn wtf8buf_from_iterator() {
+ fn f(values: &[u32]) -> Wtf8Buf {
+ values.iter().map(|&c| CodePoint::from_u32(c).unwrap()).collect::<Wtf8Buf>()
+ }
+ assert_eq!(f(&[0x61, 0xE9, 0x20, 0x1F4A9]).bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+
+ assert_eq!(f(&[0xD83D, 0xDCA9]).bytes, b"\xF0\x9F\x92\xA9"); // Magic!
+ assert_eq!(f(&[0xD83D, 0x20, 0xDCA9]).bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
+ assert_eq!(f(&[0xD800, 0xDBFF]).bytes, b"\xED\xA0\x80\xED\xAF\xBF");
+ assert_eq!(f(&[0xD800, 0xE000]).bytes, b"\xED\xA0\x80\xEE\x80\x80");
+ assert_eq!(f(&[0xD7FF, 0xDC00]).bytes, b"\xED\x9F\xBF\xED\xB0\x80");
+ assert_eq!(f(&[0x61, 0xDC00]).bytes, b"\x61\xED\xB0\x80");
+ assert_eq!(f(&[0xDC00]).bytes, b"\xED\xB0\x80");
+}
+
+#[test]
+fn wtf8buf_extend() {
+ fn e(initial: &[u32], extended: &[u32]) -> Wtf8Buf {
+ fn c(value: &u32) -> CodePoint {
+ CodePoint::from_u32(*value).unwrap()
+ }
+ let mut string = initial.iter().map(c).collect::<Wtf8Buf>();
+ string.extend(extended.iter().map(c));
+ string
+ }
+
+ assert_eq!(e(&[0x61, 0xE9], &[0x20, 0x1F4A9]).bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+
+ assert_eq!(e(&[0xD83D], &[0xDCA9]).bytes, b"\xF0\x9F\x92\xA9"); // Magic!
+ assert_eq!(e(&[0xD83D, 0x20], &[0xDCA9]).bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
+ assert_eq!(e(&[0xD800], &[0xDBFF]).bytes, b"\xED\xA0\x80\xED\xAF\xBF");
+ assert_eq!(e(&[0xD800], &[0xE000]).bytes, b"\xED\xA0\x80\xEE\x80\x80");
+ assert_eq!(e(&[0xD7FF], &[0xDC00]).bytes, b"\xED\x9F\xBF\xED\xB0\x80");
+ assert_eq!(e(&[0x61], &[0xDC00]).bytes, b"\x61\xED\xB0\x80");
+ assert_eq!(e(&[], &[0xDC00]).bytes, b"\xED\xB0\x80");
+}
+
+#[test]
+fn wtf8buf_show() {
+ let mut string = Wtf8Buf::from_str("a\té \u{7f}💩\r");
+ string.push(CodePoint::from_u32(0xD800).unwrap());
+ assert_eq!(format!("{string:?}"), "\"a\\té \\u{7f}\u{1f4a9}\\r\\u{d800}\"");
+}
+
+#[test]
+fn wtf8buf_as_slice() {
+ assert_eq!(Wtf8Buf::from_str("aé").as_slice(), Wtf8::from_str("aé"));
+}
+
+#[test]
+fn wtf8buf_show_str() {
+ let text = "a\té 💩\r";
+ let string = Wtf8Buf::from_str(text);
+ assert_eq!(format!("{text:?}"), format!("{string:?}"));
+}
+
+#[test]
+fn wtf8_from_str() {
+ assert_eq!(&Wtf8::from_str("").bytes, b"");
+ assert_eq!(&Wtf8::from_str("aé 💩").bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+}
+
+#[test]
+fn wtf8_len() {
+ assert_eq!(Wtf8::from_str("").len(), 0);
+ assert_eq!(Wtf8::from_str("aé 💩").len(), 8);
+}
+
+#[test]
+fn wtf8_slice() {
+ assert_eq!(&Wtf8::from_str("aé 💩")[1..4].bytes, b"\xC3\xA9 ");
+}
+
+#[test]
+#[should_panic]
+fn wtf8_slice_not_code_point_boundary() {
+ let _ = &Wtf8::from_str("aé 💩")[2..4];
+}
+
+#[test]
+fn wtf8_slice_from() {
+ assert_eq!(&Wtf8::from_str("aé 💩")[1..].bytes, b"\xC3\xA9 \xF0\x9F\x92\xA9");
+}
+
+#[test]
+#[should_panic]
+fn wtf8_slice_from_not_code_point_boundary() {
+ let _ = &Wtf8::from_str("aé 💩")[2..];
+}
+
+#[test]
+fn wtf8_slice_to() {
+ assert_eq!(&Wtf8::from_str("aé 💩")[..4].bytes, b"a\xC3\xA9 ");
+}
+
+#[test]
+#[should_panic]
+fn wtf8_slice_to_not_code_point_boundary() {
+ let _ = &Wtf8::from_str("aé 💩")[5..];
+}
+
+#[test]
+fn wtf8_ascii_byte_at() {
+ let slice = Wtf8::from_str("aé 💩");
+ assert_eq!(slice.ascii_byte_at(0), b'a');
+ assert_eq!(slice.ascii_byte_at(1), b'\xFF');
+ assert_eq!(slice.ascii_byte_at(2), b'\xFF');
+ assert_eq!(slice.ascii_byte_at(3), b' ');
+ assert_eq!(slice.ascii_byte_at(4), b'\xFF');
+}
+
+#[test]
+fn wtf8_code_points() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ fn cp(string: &Wtf8Buf) -> Vec<Option<char>> {
+ string.code_points().map(|c| c.to_char()).collect::<Vec<_>>()
+ }
+ let mut string = Wtf8Buf::from_str("é ");
+ assert_eq!(cp(&string), [Some('é'), Some(' ')]);
+ string.push(c(0xD83D));
+ assert_eq!(cp(&string), [Some('é'), Some(' '), None]);
+ string.push(c(0xDCA9));
+ assert_eq!(cp(&string), [Some('é'), Some(' '), Some('💩')]);
+}
+
+#[test]
+fn wtf8_as_str() {
+ assert_eq!(Wtf8::from_str("").as_str(), Some(""));
+ assert_eq!(Wtf8::from_str("aé 💩").as_str(), Some("aé 💩"));
+ let mut string = Wtf8Buf::new();
+ string.push(CodePoint::from_u32(0xD800).unwrap());
+ assert_eq!(string.as_str(), None);
+}
+
+#[test]
+fn wtf8_to_string_lossy() {
+ assert_eq!(Wtf8::from_str("").to_string_lossy(), Cow::Borrowed(""));
+ assert_eq!(Wtf8::from_str("aé 💩").to_string_lossy(), Cow::Borrowed("aé 💩"));
+ let mut string = Wtf8Buf::from_str("aé 💩");
+ string.push(CodePoint::from_u32(0xD800).unwrap());
+ let expected: Cow<'_, str> = Cow::Owned(String::from("aé 💩�"));
+ assert_eq!(string.to_string_lossy(), expected);
+}
+
+#[test]
+fn wtf8_display() {
+ fn d(b: &[u8]) -> String {
+ (&unsafe { Wtf8::from_bytes_unchecked(b) }).to_string()
+ }
+
+ assert_eq!("", d("".as_bytes()));
+ assert_eq!("aé 💩", d("aé 💩".as_bytes()));
+
+ let mut string = Wtf8Buf::from_str("aé 💩");
+ string.push(CodePoint::from_u32(0xD800).unwrap());
+ assert_eq!("aé 💩�", d(string.as_inner()));
+}
+
+#[test]
+fn wtf8_encode_wide() {
+ let mut string = Wtf8Buf::from_str("aé ");
+ string.push(CodePoint::from_u32(0xD83D).unwrap());
+ string.push_char('💩');
+ assert_eq!(
+ string.encode_wide().collect::<Vec<_>>(),
+ vec![0x61, 0xE9, 0x20, 0xD83D, 0xD83D, 0xDCA9]
+ );
+}
+
+#[test]
+fn wtf8_encode_wide_size_hint() {
+ let string = Wtf8Buf::from_str("\u{12345}");
+ let mut iter = string.encode_wide();
+ assert_eq!((1, Some(8)), iter.size_hint());
+ iter.next().unwrap();
+ assert_eq!((1, Some(1)), iter.size_hint());
+ iter.next().unwrap();
+ assert_eq!((0, Some(0)), iter.size_hint());
+ assert!(iter.next().is_none());
+}
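+
+// Added illustration (not part of the original suite): the "Magic!" cases
+// above rely on WTF-8's concatenation rule, where pushing a lead surrogate
+// immediately followed by a trail surrogate produces the UTF-8 encoding of
+// the combined supplementary code point. The same pairing can be checked
+// against the public UTF-16 decoding API.
+#[test]
+fn utf16_surrogate_pair_matches_wtf8_magic() {
+ // 0xD83D/0xDCA9 is the surrogate pair for U+1F4A9 ('💩'); its UTF-8
+ // bytes are exactly what the concatenation tests above expect.
+ let decoded: Vec<char> =
+ crate::char::decode_utf16([0xD83Du16, 0xDCA9]).map(|r| r.unwrap()).collect();
+ assert_eq!(decoded, ['💩']);
+ assert_eq!("💩".as_bytes(), b"\xF0\x9F\x92\xA9");
+}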
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
new file mode 100644
index 000000000..f4750cdf7
--- /dev/null
+++ b/library/std/src/thread/local.rs
@@ -0,0 +1,1141 @@
+//! Thread local storage
+
+#![unstable(feature = "thread_local_internals", issue = "none")]
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+#[cfg(test)]
+mod dynamic_tests;
+
+use crate::cell::{Cell, RefCell};
+use crate::error::Error;
+use crate::fmt;
+
+/// A thread local storage key which owns its contents.
+///
+/// This key uses the fastest possible implementation available to it for the
+/// target platform. It is instantiated with the [`thread_local!`] macro and the
+/// primary method is the [`with`] method.
+///
+/// The [`with`] method yields a reference to the contained value which cannot be
+/// sent across threads or escape the given closure.
+///
+/// [`thread_local!`]: crate::thread_local
+///
+/// # Initialization and Destruction
+///
+/// Initialization is dynamically performed on the first call to [`with`]
+/// within a thread, and values that implement [`Drop`] get destructed when a
+/// thread exits. Some caveats apply, which are explained below.
+///
+/// A `LocalKey`'s initializer cannot recursively depend on itself, and using
+/// a `LocalKey` in this way will cause the initializer to infinitely recurse
+/// on the first call to `with`.
+///
+/// # Examples
+///
+/// ```
+/// use std::cell::RefCell;
+/// use std::thread;
+///
+/// thread_local!(static FOO: RefCell<u32> = RefCell::new(1));
+///
+/// FOO.with(|f| {
+/// assert_eq!(*f.borrow(), 1);
+/// *f.borrow_mut() = 2;
+/// });
+///
+/// // each thread starts out with the initial value of 1
+/// let t = thread::spawn(move|| {
+/// FOO.with(|f| {
+/// assert_eq!(*f.borrow(), 1);
+/// *f.borrow_mut() = 3;
+/// });
+/// });
+///
+/// // wait for the thread to complete and bail out on panic
+/// t.join().unwrap();
+///
+/// // we retain our original value of 2 despite the child thread
+/// FOO.with(|f| {
+/// assert_eq!(*f.borrow(), 2);
+/// });
+/// ```
+///
+/// # Platform-specific behavior
+///
+/// Note that a "best effort" is made to ensure that destructors for types
+/// stored in thread local storage are run, but not all platforms can guarantee
+/// that destructors will be run for all types in thread local storage. For
+/// example, there are a number of known caveats where destructors are not run:
+///
+/// 1. On Unix systems when pthread-based TLS is being used, destructors will
+/// not be run for TLS values on the main thread when it exits. Note that the
+/// application will exit immediately after the main thread exits as well.
+/// 2. On all platforms it's possible for TLS to re-initialize other TLS slots
+/// during destruction. Some platforms ensure that this cannot happen
+/// infinitely by preventing re-initialization of any slot that has been
+/// destroyed, but not all platforms have this guard. Those platforms that do
+/// not guard typically have a synthetic limit after which point no more
+/// destructors are run.
+/// 3. When the process exits on Windows systems, TLS destructors may only be
+/// run on the thread that causes the process to exit. This is because the
+/// other threads may be forcibly terminated.
+///
+/// ## Synchronization in thread-local destructors
+///
+/// On Windows, synchronization operations (such as [`JoinHandle::join`]) in
+/// thread local destructors are prone to deadlocks and so should be avoided.
+/// This is because the [loader lock] is held while a destructor is run. The
+/// lock is acquired whenever a thread starts or exits or when a DLL is loaded
+/// or unloaded. Therefore these events are blocked for as long as a thread
+/// local destructor is running.
+///
+/// [loader lock]: https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-best-practices
+/// [`JoinHandle::join`]: crate::thread::JoinHandle::join
+/// [`with`]: LocalKey::with
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct LocalKey<T: 'static> {
+ // This outer `LocalKey<T>` type is what's going to be stored in statics,
+ // but actual data inside will sometimes be tagged with #[thread_local].
+ // It's not valid for a true static to reference a #[thread_local] static,
+ // so we get around that by exposing an accessor through a layer of function
+ // indirection (this thunk).
+ //
+ // Note that the thunk is itself unsafe because the returned lifetime of the
+ // slot where data lives, `'static`, is not actually valid. The lifetime
+ // here is actually slightly shorter than the currently running thread!
+ //
+ // Although this is an extra layer of indirection, it should in theory be
+ // trivially devirtualizable by LLVM because the value of `inner` never
+ // changes and the constant should be readonly within a crate. This mainly
+ // runs into problems when TLS statics are exported across crates.
+ inner: unsafe fn(Option<&mut Option<T>>) -> Option<&'static T>,
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: 'static> fmt::Debug for LocalKey<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("LocalKey").finish_non_exhaustive()
+ }
+}
+
+/// Declare a new thread local storage key of type [`std::thread::LocalKey`].
+///
+/// # Syntax
+///
+/// The macro wraps any number of static declarations and makes them thread local.
+/// Visibility and attributes for each static are allowed. Example:
+///
+/// ```
+/// use std::cell::RefCell;
+/// thread_local! {
+/// pub static FOO: RefCell<u32> = RefCell::new(1);
+///
+/// #[allow(unused)]
+/// static BAR: RefCell<f32> = RefCell::new(1.0);
+/// }
+/// # fn main() {}
+/// ```
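+///
+/// The initializer may also be a `const` block (a minimal sketch, assuming
+/// const-initialized thread locals are available in the toolchain in use),
+/// which lets the value be built at compile time instead of lazily on first
+/// use:
+///
+/// ```
+/// use std::cell::Cell;
+/// thread_local! {
+/// static COUNTER: Cell<u32> = const { Cell::new(0) };
+/// }
+/// # fn main() {}
+/// ```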
+///
+/// See [`LocalKey` documentation][`std::thread::LocalKey`] for more
+/// information.
+///
+/// [`std::thread::LocalKey`]: crate::thread::LocalKey
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "thread_local_macro")]
+#[allow_internal_unstable(thread_local_internals)]
+macro_rules! thread_local {
+ // empty (base case for the recursion)
+ () => {};
+
+ ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = const { $init:expr }; $($rest:tt)*) => (
+ $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, const $init);
+ $crate::thread_local!($($rest)*);
+ );
+
+ ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = const { $init:expr }) => (
+ $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, const $init);
+ );
+
+ // process multiple declarations
+ ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => (
+ $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
+ $crate::thread_local!($($rest)*);
+ );
+
+ // handle a single declaration
+ ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr) => (
+ $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
+ );
+}
+
+#[doc(hidden)]
+#[unstable(feature = "thread_local_internals", reason = "should not be necessary", issue = "none")]
+#[macro_export]
+#[allow_internal_unstable(thread_local_internals, cfg_target_thread_local, thread_local)]
+#[allow_internal_unsafe]
+macro_rules! __thread_local_inner {
+ // used to generate the `LocalKey` value for const-initialized thread locals
+ (@key $t:ty, const $init:expr) => {{
+ #[cfg_attr(not(windows), inline)] // see comments below
+ #[deny(unsafe_op_in_unsafe_fn)]
+ unsafe fn __getit(
+ _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ const INIT_EXPR: $t = $init;
+
+ // wasm without atomics maps directly to `static mut`, and dtors
+ // aren't implemented because thread dtors aren't really a thing
+ // on wasm right now
+ //
+ // FIXME(#84224) this should come after the `target_thread_local`
+ // block.
+ #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
+ {
+ static mut VAL: $t = INIT_EXPR;
+ unsafe { $crate::option::Option::Some(&VAL) }
+ }
+
+ // If the platform has support for `#[thread_local]`, use it.
+ #[cfg(all(
+ target_thread_local,
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+ ))]
+ {
+ #[thread_local]
+ static mut VAL: $t = INIT_EXPR;
+
+ // If a dtor isn't needed we can do something "very raw" and
+ // just get going.
+ if !$crate::mem::needs_drop::<$t>() {
+ unsafe {
+ return $crate::option::Option::Some(&VAL)
+ }
+ }
+
+ // 0 == dtor not registered
+ // 1 == dtor registered, dtor not run
+ // 2 == dtor registered and is running or has run
+ #[thread_local]
+ static mut STATE: $crate::primitive::u8 = 0;
+
+ unsafe extern "C" fn destroy(ptr: *mut $crate::primitive::u8) {
+ let ptr = ptr as *mut $t;
+
+ unsafe {
+ $crate::debug_assert_eq!(STATE, 1);
+ STATE = 2;
+ $crate::ptr::drop_in_place(ptr);
+ }
+ }
+
+ unsafe {
+ match STATE {
+ // 0 == we haven't registered a destructor, so do
+ // so now.
+ 0 => {
+ $crate::thread::__FastLocalKeyInner::<$t>::register_dtor(
+ $crate::ptr::addr_of_mut!(VAL) as *mut $crate::primitive::u8,
+ destroy,
+ );
+ STATE = 1;
+ $crate::option::Option::Some(&VAL)
+ }
+ // 1 == the destructor is registered and the value
+ // is valid, so return the pointer.
+ 1 => $crate::option::Option::Some(&VAL),
+ // otherwise the destructor has already run, so we
+ // can't give access.
+ _ => $crate::option::Option::None,
+ }
+ }
+ }
+
+ // On platforms without `#[thread_local]` we fall back to the
+ // same implementation as below for os thread locals.
+ #[cfg(all(
+ not(target_thread_local),
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+ ))]
+ {
+ #[inline]
+ const fn __init() -> $t { INIT_EXPR }
+ static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
+ $crate::thread::__OsLocalKeyInner::new();
+ #[allow(unused_unsafe)]
+ unsafe {
+ __KEY.get(move || {
+ if let $crate::option::Option::Some(init) = _init {
+ if let $crate::option::Option::Some(value) = init.take() {
+ return value;
+ } else if $crate::cfg!(debug_assertions) {
+ $crate::unreachable!("missing initial value");
+ }
+ }
+ __init()
+ })
+ }
+ }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }};
+
+ // used to generate the `LocalKey` value for `thread_local!`
+ (@key $t:ty, $init:expr) => {
+ {
+ #[inline]
+ fn __init() -> $t { $init }
+
+ // When reading this function you might ask "why is this inlined
+ // everywhere other than Windows?", and that's a very reasonable
+ // question to ask. The short story is that it segfaults rustc if
+ // this function is inlined. The longer story is that Windows does not
+ // appear to support `extern` references to thread locals across DLL
+ // boundaries; at the very least, this is not supported by the ABI
+ // that LLVM implements.
+ //
+ // Because of this we never inline on Windows, but we do inline on
+ // other platforms (where external references to thread locals
+ // across DLLs are supported). A better fix for this would be to
+ // inline this function on Windows, but only for "statically linked"
+ // components. For example if two separately compiled rlibs end up
+ // getting linked into a DLL then it's fine to inline this function
+ // across that boundary. It's only not fine to inline this function
+ // across a DLL boundary. Unfortunately rustc doesn't currently
+ // have this sort of logic available in an attribute, and it's not
+ // clear that rustc is even equipped to answer this (it's more of a
+ // Cargo question kinda). This means that, unfortunately, Windows
+ // gets the pessimistic path for now where it's never inlined.
+ //
+ // The issue of "should enable on Windows sometimes" is #84933
+ #[cfg_attr(not(windows), inline)]
+ unsafe fn __getit(
+ init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
+ static __KEY: $crate::thread::__StaticLocalKeyInner<$t> =
+ $crate::thread::__StaticLocalKeyInner::new();
+
+ #[thread_local]
+ #[cfg(all(
+ target_thread_local,
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+ ))]
+ static __KEY: $crate::thread::__FastLocalKeyInner<$t> =
+ $crate::thread::__FastLocalKeyInner::new();
+
+ #[cfg(all(
+ not(target_thread_local),
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+ ))]
+ static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
+ $crate::thread::__OsLocalKeyInner::new();
+
+ // FIXME: remove the #[allow(...)] marker when macros no longer
+ // raise warnings for missing/extraneous unsafe blocks.
+ // See https://github.com/rust-lang/rust/issues/74838.
+ #[allow(unused_unsafe)]
+ unsafe {
+ __KEY.get(move || {
+ if let $crate::option::Option::Some(init) = init {
+ if let $crate::option::Option::Some(value) = init.take() {
+ return value;
+ } else if $crate::cfg!(debug_assertions) {
+ $crate::unreachable!("missing default value");
+ }
+ }
+ __init()
+ })
+ }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }
+ };
+ ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $($init:tt)*) => {
+ $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
+ $crate::__thread_local_inner!(@key $t, $($init)*);
+ }
+}
+
+/// An error returned by [`LocalKey::try_with`](struct.LocalKey.html#method.try_with).
+#[stable(feature = "thread_local_try_with", since = "1.26.0")]
+#[non_exhaustive]
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub struct AccessError;
+
+#[stable(feature = "thread_local_try_with", since = "1.26.0")]
+impl fmt::Debug for AccessError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AccessError").finish()
+ }
+}
+
+#[stable(feature = "thread_local_try_with", since = "1.26.0")]
+impl fmt::Display for AccessError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt("already destroyed", f)
+ }
+}
+
+#[stable(feature = "thread_local_try_with", since = "1.26.0")]
+impl Error for AccessError {}
+
+impl<T: 'static> LocalKey<T> {
+ #[doc(hidden)]
+ #[unstable(
+ feature = "thread_local_internals",
+ reason = "recently added to create a key",
+ issue = "none"
+ )]
+ #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")]
+ pub const unsafe fn new(
+ inner: unsafe fn(Option<&mut Option<T>>) -> Option<&'static T>,
+ ) -> LocalKey<T> {
+ LocalKey { inner }
+ }
+
+ /// Acquires a reference to the value in this TLS key.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet.
+ ///
+ /// # Panics
+ ///
+ /// This function will `panic!()` if the key currently has its
+ /// destructor running, and it **may** panic if the destructor has
+ /// previously been run for this thread.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn with<F, R>(&'static self, f: F) -> R
+ where
+ F: FnOnce(&T) -> R,
+ {
+ self.try_with(f).expect(
+ "cannot access a Thread Local Storage value \
+ during or after destruction",
+ )
+ }
+
+ /// Acquires a reference to the value in this TLS key.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet. If the key has been destroyed (which may happen if this is called
+ /// in a destructor), this function will return an [`AccessError`].
+ ///
+ /// # Panics
+ ///
+ /// This function will still `panic!()` if the key is uninitialized and the
+ /// key's initializer panics.
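+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (the key and value here are arbitrary) of preferring
+ /// `try_with` in code that might also run from a TLS destructor, so that a
+ /// destroyed key degrades gracefully instead of panicking:
+ ///
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// thread_local!(static DEPTH: Cell<u32> = Cell::new(0));
+ ///
+ /// // On a live thread the closure runs and its result is returned in `Ok`.
+ /// assert_eq!(DEPTH.try_with(|d| d.get()), Ok(0));
+ /// ```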
+ #[stable(feature = "thread_local_try_with", since = "1.26.0")]
+ #[inline]
+ pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
+ where
+ F: FnOnce(&T) -> R,
+ {
+ unsafe {
+ let thread_local = (self.inner)(None).ok_or(AccessError)?;
+ Ok(f(thread_local))
+ }
+ }
+
+ /// Acquires a reference to the value in this TLS key, initializing it with
+ /// `init` if it wasn't already initialized on this thread.
+ ///
+ /// If `init` was used to initialize the thread local variable, `None` is
+ /// passed as the first argument to `f`. If it was already initialized,
+ /// `Some(init)` is passed to `f`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the key currently has its destructor
+ /// running, and it **may** panic if the destructor has previously been run
+ /// for this thread.
+ fn initialize_with<F, R>(&'static self, init: T, f: F) -> R
+ where
+ F: FnOnce(Option<T>, &T) -> R,
+ {
+ unsafe {
+ let mut init = Some(init);
+ let reference = (self.inner)(Some(&mut init)).expect(
+ "cannot access a Thread Local Storage value \
+ during or after destruction",
+ );
+ f(init, reference)
+ }
+ }
+}
+
+impl<T: 'static> LocalKey<Cell<T>> {
+ /// Sets or initializes the contained value.
+ ///
+ /// Unlike the other methods, this will *not* run the lazy initializer of
+ /// the thread local. Instead, it will be directly initialized with the
+ /// given value if it wasn't initialized yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::Cell;
+ ///
+ /// thread_local! {
+ /// static X: Cell<i32> = panic!("!");
+ /// }
+ ///
+ /// // Calling X.get() here would result in a panic.
+ ///
+ /// X.set(123); // But X.set() is fine, as it skips the initializer above.
+ ///
+ /// assert_eq!(X.get(), 123);
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn set(&'static self, value: T) {
+ self.initialize_with(Cell::new(value), |value, cell| {
+ if let Some(value) = value {
+ // The cell was already initialized, so `value` wasn't used to
+ // initialize it. So we overwrite the current value with the
+ // new one instead.
+ cell.set(value.into_inner());
+ }
+ });
+ }
+
+ /// Returns a copy of the contained value.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::Cell;
+ ///
+ /// thread_local! {
+ /// static X: Cell<i32> = Cell::new(1);
+ /// }
+ ///
+ /// assert_eq!(X.get(), 1);
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn get(&'static self) -> T
+ where
+ T: Copy,
+ {
+ self.with(|cell| cell.get())
+ }
+
+ /// Takes the contained value, leaving `Default::default()` in its place.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::Cell;
+ ///
+ /// thread_local! {
+ /// static X: Cell<Option<i32>> = Cell::new(Some(1));
+ /// }
+ ///
+ /// assert_eq!(X.take(), Some(1));
+ /// assert_eq!(X.take(), None);
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn take(&'static self) -> T
+ where
+ T: Default,
+ {
+ self.with(|cell| cell.take())
+ }
+
+ /// Replaces the contained value, returning the old value.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::Cell;
+ ///
+ /// thread_local! {
+ /// static X: Cell<i32> = Cell::new(1);
+ /// }
+ ///
+ /// assert_eq!(X.replace(2), 1);
+ /// assert_eq!(X.replace(3), 2);
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn replace(&'static self, value: T) -> T {
+ self.with(|cell| cell.replace(value))
+ }
+}
+
+impl<T: 'static> LocalKey<RefCell<T>> {
+ /// Acquires a reference to the contained value.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently mutably borrowed.
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::RefCell;
+ ///
+ /// thread_local! {
+ /// static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
+ /// }
+ ///
+ /// X.with_borrow(|v| assert!(v.is_empty()));
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn with_borrow<F, R>(&'static self, f: F) -> R
+ where
+ F: FnOnce(&T) -> R,
+ {
+ self.with(|cell| f(&cell.borrow()))
+ }
+
+ /// Acquires a mutable reference to the contained value.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::RefCell;
+ ///
+ /// thread_local! {
+ /// static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
+ /// }
+ ///
+ /// X.with_borrow_mut(|v| v.push(1));
+ ///
+ /// X.with_borrow(|v| assert_eq!(*v, vec![1]));
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn with_borrow_mut<F, R>(&'static self, f: F) -> R
+ where
+ F: FnOnce(&mut T) -> R,
+ {
+ self.with(|cell| f(&mut cell.borrow_mut()))
+ }
+
+ /// Sets or initializes the contained value.
+ ///
+ /// Unlike the other methods, this will *not* run the lazy initializer of
+ /// the thread local. Instead, it will be directly initialized with the
+ /// given value if it wasn't initialized yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::RefCell;
+ ///
+ /// thread_local! {
+ /// static X: RefCell<Vec<i32>> = panic!("!");
+ /// }
+ ///
+ /// // Calling X.with() here would result in a panic.
+ ///
+ /// X.set(vec![1, 2, 3]); // But X.set() is fine, as it skips the initializer above.
+ ///
+ /// X.with_borrow(|v| assert_eq!(*v, vec![1, 2, 3]));
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn set(&'static self, value: T) {
+ self.initialize_with(RefCell::new(value), |value, cell| {
+ if let Some(value) = value {
+ // The cell was already initialized, so `value` wasn't used to
+ // initialize it. So we overwrite the current value with the
+ // new one instead.
+ *cell.borrow_mut() = value.into_inner();
+ }
+ });
+ }
+
+ /// Takes the contained value, leaving `Default::default()` in its place.
+ ///
+ /// This will lazily initialize the value if this thread has not referenced
+ /// this key yet.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::RefCell;
+ ///
+ /// thread_local! {
+ /// static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
+ /// }
+ ///
+ /// X.with_borrow_mut(|v| v.push(1));
+ ///
+ /// let a = X.take();
+ ///
+ /// assert_eq!(a, vec![1]);
+ ///
+ /// X.with_borrow(|v| assert!(v.is_empty()));
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn take(&'static self) -> T
+ where
+ T: Default,
+ {
+ self.with(|cell| cell.take())
+ }
+
+ /// Replaces the contained value, returning the old value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// Panics if the key currently has its destructor running,
+ /// and it **may** panic if the destructor has previously been run for this thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(local_key_cell_methods)]
+ /// use std::cell::RefCell;
+ ///
+ /// thread_local! {
+ /// static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
+ /// }
+ ///
+ /// let prev = X.replace(vec![1, 2, 3]);
+ /// assert!(prev.is_empty());
+ ///
+ /// X.with_borrow(|v| assert_eq!(*v, vec![1, 2, 3]));
+ /// ```
+ #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ pub fn replace(&'static self, value: T) -> T {
+ self.with(|cell| cell.replace(value))
+ }
+}
+
+mod lazy {
+ use crate::cell::UnsafeCell;
+ use crate::hint;
+ use crate::mem;
+
+ pub struct LazyKeyInner<T> {
+ inner: UnsafeCell<Option<T>>,
+ }
+
+ impl<T> LazyKeyInner<T> {
+ pub const fn new() -> LazyKeyInner<T> {
+ LazyKeyInner { inner: UnsafeCell::new(None) }
+ }
+
+ pub unsafe fn get(&self) -> Option<&'static T> {
+ // SAFETY: The caller must ensure that no reference to the inner cell,
+ // nor any mutable reference to the Option<T> inside said cell, is ever
+ // handed out. This makes it safe to hand out a reference here, though
+ // the 'static lifetime is itself unsafe, which is what makes the get
+ // method unsafe.
+ unsafe { (*self.inner.get()).as_ref() }
+ }
+
+ /// The caller must ensure that no reference is active: this method
+ /// needs unique access.
+ pub unsafe fn initialize<F: FnOnce() -> T>(&self, init: F) -> &'static T {
+ // Execute the initialization up front, *then* move it into our slot,
+ // just in case initialization fails.
+ let value = init();
+ let ptr = self.inner.get();
+
+ // SAFETY:
+ //
+ // note that this could in theory just be `*ptr = Some(value)`, but the
+ // compiler will currently codegen that pattern with something like:
+ //
+ // ptr::drop_in_place(ptr)
+ // ptr::write(ptr, Some(value))
+ //
+ // Due to this pattern it's possible for the destructor of the value in
+ // `ptr` (e.g., if this is being recursively initialized) to re-access
+ // TLS, in which case there will be a `&` and `&mut` pointer to the same
+ // value (an aliasing violation). To avoid setting the "I'm running a
+ // destructor" flag we just use `mem::replace` which should sequence the
+ // operations a little differently and make this safe to call.
+ //
+ // The precondition also ensures that we are the only one accessing
+ // `self` at the moment so replacing is fine.
+ unsafe {
+ let _ = mem::replace(&mut *ptr, Some(value));
+ }
+
+ // SAFETY: With the call to `mem::replace` it is guaranteed there is
+ // a `Some` behind `ptr`, not a `None` so `unreachable_unchecked`
+ // will never be reached.
+ unsafe {
+ // After storing `Some` we want to get a reference to the contents of
+ // what we just stored. While we could use `unwrap` here and it should
+ // always work it empirically doesn't seem to always get optimized away,
+ // which means that using something like `try_with` can pull in
+ // panicking code and cause a large size bloat.
+ match *ptr {
+ Some(ref x) => x,
+ None => hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ /// The other methods hand out references while taking &self.
+ /// As such, callers of this method must ensure no `&` and `&mut` are
+ /// available and used at the same time.
+ #[allow(unused)]
+ pub unsafe fn take(&mut self) -> Option<T> {
+ // SAFETY: See doc comment for this method.
+ unsafe { (*self.inner.get()).take() }
+ }
+ }
+}
+
+/// On some targets like wasm there are no threads, so there is no need to
+/// generate thread locals and we can instead just use plain statics!
+#[doc(hidden)]
+#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
+pub mod statik {
+ use super::lazy::LazyKeyInner;
+ use crate::fmt;
+
+ pub struct Key<T> {
+ inner: LazyKeyInner<T>,
+ }
+
+ unsafe impl<T> Sync for Key<T> {}
+
+ impl<T> fmt::Debug for Key<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Key").finish_non_exhaustive()
+ }
+ }
+
+ impl<T> Key<T> {
+ pub const fn new() -> Key<T> {
+ Key { inner: LazyKeyInner::new() }
+ }
+
+ pub unsafe fn get(&self, init: impl FnOnce() -> T) -> Option<&'static T> {
+ // SAFETY: The caller must ensure that no reference to the inner cell,
+ // nor any mutable reference to the Option<T> inside said cell, is ever
+ // handed out. This makes it safe to hand out a reference here, though
+ // the 'static lifetime is itself unsafe, which is what makes the get
+ // method unsafe.
+ let value = unsafe {
+ match self.inner.get() {
+ Some(ref value) => value,
+ None => self.inner.initialize(init),
+ }
+ };
+
+ Some(value)
+ }
+ }
+}
+
+#[doc(hidden)]
+#[cfg(target_thread_local)]
+pub mod fast {
+ use super::lazy::LazyKeyInner;
+ use crate::cell::Cell;
+ use crate::fmt;
+ use crate::mem;
+ use crate::sys::thread_local_dtor::register_dtor;
+
+ #[derive(Copy, Clone)]
+ enum DtorState {
+ Unregistered,
+ Registered,
+ RunningOrHasRun,
+ }
+
+ // This data structure has been carefully constructed so that the fast path
+ // only contains one branch on x86. That optimization is necessary to avoid
+ // duplicated TLS lookups on macOS.
+ //
+ // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
+ pub struct Key<T> {
+ // If `LazyKeyInner::get` returns `None`, that indicates either:
+ // * The value has never been initialized
+ // * The value is being recursively initialized
+ // * The value has already been destroyed or is being destroyed
+ // To determine which kind of `None`, check `dtor_state`.
+ //
+ // This is very optimizer friendly for the fast path - initialized but
+ // not yet dropped.
+ inner: LazyKeyInner<T>,
+
+ // Metadata to keep track of the state of the destructor. Remember that
+ // this variable is thread-local, not global.
+ dtor_state: Cell<DtorState>,
+ }
+
+ impl<T> fmt::Debug for Key<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Key").finish_non_exhaustive()
+ }
+ }
+
+ impl<T> Key<T> {
+ pub const fn new() -> Key<T> {
+ Key { inner: LazyKeyInner::new(), dtor_state: Cell::new(DtorState::Unregistered) }
+ }
+
+ // note that this is a publicly-callable function only for the
+ // const-initialized form of thread locals; it is basically a way to call
+ // the free `register_dtor` function defined elsewhere in libstd.
+ pub unsafe fn register_dtor(a: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ unsafe {
+ register_dtor(a, dtor);
+ }
+ }
+
+ pub unsafe fn get<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
+ // SAFETY: See the definitions of `LazyKeyInner::get` and
+ // `try_initialize` for more information.
+ //
+ // The caller must ensure no mutable references are ever active to
+ // the inner cell or the inner T when this is called.
+ // `try_initialize` depends on the passed `init` function for this.
+ unsafe {
+ match self.inner.get() {
+ Some(val) => Some(val),
+ None => self.try_initialize(init),
+ }
+ }
+ }
+
+ // `try_initialize` is only called once per fast thread local variable,
+ // except in corner cases where thread_local dtors reference other
+ // thread locals, or it is being recursively initialized.
+ //
+ // Macos: Inlining this function can cause two `tlv_get_addr` calls to
+ // be performed for every call to `Key::get`.
+ // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
+ #[inline(never)]
+ unsafe fn try_initialize<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
+ // SAFETY: See comment above (this function doc).
+ if !mem::needs_drop::<T>() || unsafe { self.try_register_dtor() } {
+ // SAFETY: See comment above (this function doc).
+ Some(unsafe { self.inner.initialize(init) })
+ } else {
+ None
+ }
+ }
+
+ // `try_register_dtor` is only called once per fast thread local
+ // variable, except in corner cases where thread_local dtors reference
+ // other thread locals, or it is being recursively initialized.
+ unsafe fn try_register_dtor(&self) -> bool {
+ match self.dtor_state.get() {
+ DtorState::Unregistered => {
+ // SAFETY: dtor registration happens before initialization.
+ // Passing `self` as a pointer while using `destroy_value<T>`
+ // is safe because that function will rebuild a pointer to a
+ // Key<T>, which is the type of `self`, and therefore use the
+ // correct size.
+ unsafe { register_dtor(self as *const _ as *mut u8, destroy_value::<T>) };
+ self.dtor_state.set(DtorState::Registered);
+ true
+ }
+ DtorState::Registered => {
+ // recursively initialized
+ true
+ }
+ DtorState::RunningOrHasRun => false,
+ }
+ }
+ }
+
+ unsafe extern "C" fn destroy_value<T>(ptr: *mut u8) {
+ let ptr = ptr as *mut Key<T>;
+
+ // SAFETY:
+ //
+ // The pointer `ptr` has been built just above and comes from
+ // `try_register_dtor` where it is originally a Key<T> coming from `self`,
+ // making it non-null and of the correct type.
+ //
+ // Right before we run the user's destructor, we set the
+ // `Option<T>` to `None` and `dtor_state` to `RunningOrHasRun`. This
+ // causes future calls to `get` to run `try_initialize` again,
+ // which will now fail and return `None`.
+ unsafe {
+ let value = (*ptr).inner.take();
+ (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
+ drop(value);
+ }
+ }
+}
+
+#[doc(hidden)]
+pub mod os {
+ use super::lazy::LazyKeyInner;
+ use crate::cell::Cell;
+ use crate::fmt;
+ use crate::marker;
+ use crate::ptr;
+ use crate::sys_common::thread_local_key::StaticKey as OsStaticKey;
+
+ pub struct Key<T> {
+ // OS-TLS key that we'll use to key off.
+ os: OsStaticKey,
+ marker: marker::PhantomData<Cell<T>>,
+ }
+
+ impl<T> fmt::Debug for Key<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Key").finish_non_exhaustive()
+ }
+ }
+
+ unsafe impl<T> Sync for Key<T> {}
+
+ struct Value<T: 'static> {
+ inner: LazyKeyInner<T>,
+ key: &'static Key<T>,
+ }
+
+ impl<T: 'static> Key<T> {
+ #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")]
+ pub const fn new() -> Key<T> {
+ Key { os: OsStaticKey::new(Some(destroy_value::<T>)), marker: marker::PhantomData }
+ }
+
+ /// It is a requirement for the caller to ensure that no mutable
+ /// reference is active when this method is called.
+ pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
+ // SAFETY: See the documentation for this method.
+ let ptr = unsafe { self.os.get() as *mut Value<T> };
+ if ptr.addr() > 1 {
+ // SAFETY: the check ensured the pointer is safe (its destructor
+ // is not running) + it is coming from a trusted source (self).
+ if let Some(ref value) = unsafe { (*ptr).inner.get() } {
+ return Some(value);
+ }
+ }
+ // SAFETY: At this point we are sure we have no value and so
+ // initializing (or trying to) is safe.
+ unsafe { self.try_initialize(init) }
+ }
+
+ // `try_initialize` is only called once per os thread local variable,
+ // except in corner cases where thread_local dtors reference other
+ // thread locals, or it is being recursively initialized.
+ unsafe fn try_initialize(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
+ // SAFETY: No mutable references are ever handed out meaning getting
+ // the value is ok.
+ let ptr = unsafe { self.os.get() as *mut Value<T> };
+ if ptr.addr() == 1 {
+ // destructor is running
+ return None;
+ }
+
+ let ptr = if ptr.is_null() {
+ // If the lookup returned null, we haven't initialized our own
+ // local copy, so do that now.
+ let ptr: Box<Value<T>> = box Value { inner: LazyKeyInner::new(), key: self };
+ let ptr = Box::into_raw(ptr);
+ // SAFETY: At this point we are sure there is no value inside
+ // ptr so setting it will not affect anyone else.
+ unsafe {
+ self.os.set(ptr as *mut u8);
+ }
+ ptr
+ } else {
+ // recursive initialization
+ ptr
+ };
+
+ // SAFETY: ptr has been ensured to be non-null just above and so it can
+ // be dereferenced safely.
+ unsafe { Some((*ptr).inner.initialize(init)) }
+ }
+ }
+
+ unsafe extern "C" fn destroy_value<T: 'static>(ptr: *mut u8) {
+ // SAFETY:
+ //
+ // The OS TLS ensures that this key contains a null value when this
+ // destructor starts to run. We set it back to a sentinel value of 1 to
+ // ensure that any future calls to `get` for this thread will return
+ // `None`.
+ //
+ // Note that to prevent an infinite loop we reset it back to null right
+ // before we return from the destructor ourselves.
+ unsafe {
+ let ptr = Box::from_raw(ptr as *mut Value<T>);
+ let key = ptr.key;
+ key.os.set(ptr::invalid_mut(1));
+ drop(ptr);
+ key.os.set(ptr::null_mut());
+ }
+ }
+}
diff --git a/library/std/src/thread/local/dynamic_tests.rs b/library/std/src/thread/local/dynamic_tests.rs
new file mode 100644
index 000000000..dd1800416
--- /dev/null
+++ b/library/std/src/thread/local/dynamic_tests.rs
@@ -0,0 +1,40 @@
+use crate::cell::RefCell;
+use crate::collections::HashMap;
+use crate::thread_local;
+
+#[test]
+fn smoke() {
+ fn square(i: i32) -> i32 {
+ i * i
+ }
+ thread_local!(static FOO: i32 = square(3));
+
+ FOO.with(|f| {
+ assert_eq!(*f, 9);
+ });
+}
+
+#[test]
+fn hashmap() {
+ fn map() -> RefCell<HashMap<i32, i32>> {
+ let mut m = HashMap::new();
+ m.insert(1, 2);
+ RefCell::new(m)
+ }
+ thread_local!(static FOO: RefCell<HashMap<i32, i32>> = map());
+
+ FOO.with(|map| {
+ assert_eq!(map.borrow()[&1], 2);
+ });
+}
+
+#[test]
+fn refcell_vec() {
+ thread_local!(static FOO: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3]));
+
+ FOO.with(|vec| {
+ assert_eq!(vec.borrow().len(), 3);
+ vec.borrow_mut().push(4);
+ assert_eq!(vec.borrow()[3], 4);
+ });
+}
diff --git a/library/std/src/thread/local/tests.rs b/library/std/src/thread/local/tests.rs
new file mode 100644
index 000000000..1df1ca758
--- /dev/null
+++ b/library/std/src/thread/local/tests.rs
@@ -0,0 +1,317 @@
+use crate::cell::{Cell, UnsafeCell};
+use crate::sync::atomic::{AtomicU8, Ordering};
+use crate::sync::mpsc::{channel, Sender};
+use crate::thread::{self, LocalKey};
+use crate::thread_local;
+
+struct Foo(Sender<()>);
+
+impl Drop for Foo {
+ fn drop(&mut self) {
+ let Foo(ref s) = *self;
+ s.send(()).unwrap();
+ }
+}
+
+#[test]
+fn smoke_no_dtor() {
+ thread_local!(static FOO: Cell<i32> = Cell::new(1));
+ run(&FOO);
+ thread_local!(static FOO2: Cell<i32> = const { Cell::new(1) });
+ run(&FOO2);
+
+ fn run(key: &'static LocalKey<Cell<i32>>) {
+ key.with(|f| {
+ assert_eq!(f.get(), 1);
+ f.set(2);
+ });
+ let t = thread::spawn(move || {
+ key.with(|f| {
+ assert_eq!(f.get(), 1);
+ });
+ });
+ t.join().unwrap();
+
+ key.with(|f| {
+ assert_eq!(f.get(), 2);
+ });
+ }
+}
+
+#[test]
+fn states() {
+ struct Foo(&'static LocalKey<Foo>);
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ assert!(self.0.try_with(|_| ()).is_err());
+ }
+ }
+
+ thread_local!(static FOO: Foo = Foo(&FOO));
+ run(&FOO);
+ thread_local!(static FOO2: Foo = const { Foo(&FOO2) });
+ run(&FOO2);
+
+ fn run(foo: &'static LocalKey<Foo>) {
+ thread::spawn(move || {
+ assert!(foo.try_with(|_| ()).is_ok());
+ })
+ .join()
+ .unwrap();
+ }
+}
+
+#[test]
+fn smoke_dtor() {
+ thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
+ run(&FOO);
+ thread_local!(static FOO2: UnsafeCell<Option<Foo>> = const { UnsafeCell::new(None) });
+ run(&FOO2);
+
+ fn run(key: &'static LocalKey<UnsafeCell<Option<Foo>>>) {
+ let (tx, rx) = channel();
+ let t = thread::spawn(move || unsafe {
+ let mut tx = Some(tx);
+ key.with(|f| {
+ *f.get() = Some(Foo(tx.take().unwrap()));
+ });
+ });
+ rx.recv().unwrap();
+ t.join().unwrap();
+ }
+}
+
+#[test]
+fn circular() {
+ struct S1(&'static LocalKey<UnsafeCell<Option<S1>>>, &'static LocalKey<UnsafeCell<Option<S2>>>);
+ struct S2(&'static LocalKey<UnsafeCell<Option<S1>>>, &'static LocalKey<UnsafeCell<Option<S2>>>);
+ thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
+ thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell::new(None));
+ thread_local!(static K3: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
+ thread_local!(static K4: UnsafeCell<Option<S2>> = const { UnsafeCell::new(None) });
+ static mut HITS: usize = 0;
+
+ impl Drop for S1 {
+ fn drop(&mut self) {
+ unsafe {
+ HITS += 1;
+ if self.1.try_with(|_| ()).is_err() {
+ assert_eq!(HITS, 3);
+ } else {
+ if HITS == 1 {
+ self.1.with(|s| *s.get() = Some(S2(self.0, self.1)));
+ } else {
+ assert_eq!(HITS, 3);
+ }
+ }
+ }
+ }
+ }
+ impl Drop for S2 {
+ fn drop(&mut self) {
+ unsafe {
+ HITS += 1;
+ assert!(self.0.try_with(|_| ()).is_ok());
+ assert_eq!(HITS, 2);
+ self.0.with(|s| *s.get() = Some(S1(self.0, self.1)));
+ }
+ }
+ }
+
+ thread::spawn(move || {
+ drop(S1(&K1, &K2));
+ })
+ .join()
+ .unwrap();
+
+ unsafe {
+ HITS = 0;
+ }
+
+ thread::spawn(move || {
+ drop(S1(&K3, &K4));
+ })
+ .join()
+ .unwrap();
+}
+
+#[test]
+fn self_referential() {
+ struct S1(&'static LocalKey<UnsafeCell<Option<S1>>>);
+
+ thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
+ thread_local!(static K2: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
+
+ impl Drop for S1 {
+ fn drop(&mut self) {
+ assert!(self.0.try_with(|_| ()).is_err());
+ }
+ }
+
+ thread::spawn(move || unsafe {
+ K1.with(|s| *s.get() = Some(S1(&K1)));
+ })
+ .join()
+ .unwrap();
+
+ thread::spawn(move || unsafe {
+ K2.with(|s| *s.get() = Some(S1(&K2)));
+ })
+ .join()
+ .unwrap();
+}
+
+// Note that this test will deadlock if TLS destructors aren't run (the
+// destructor must run for the test to pass).
+#[test]
+fn dtors_in_dtors_in_dtors() {
+ struct S1(Sender<()>);
+ thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
+ thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
+
+ impl Drop for S1 {
+ fn drop(&mut self) {
+ let S1(ref tx) = *self;
+ unsafe {
+ let _ = K2.try_with(|s| *s.get() = Some(Foo(tx.clone())));
+ }
+ }
+ }
+
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || unsafe {
+ let mut tx = Some(tx);
+ K1.with(|s| *s.get() = Some(S1(tx.take().unwrap())));
+ });
+ rx.recv().unwrap();
+}
+
+#[test]
+fn dtors_in_dtors_in_dtors_const_init() {
+ struct S1(Sender<()>);
+ thread_local!(static K1: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
+ thread_local!(static K2: UnsafeCell<Option<Foo>> = const { UnsafeCell::new(None) });
+
+ impl Drop for S1 {
+ fn drop(&mut self) {
+ let S1(ref tx) = *self;
+ unsafe {
+ let _ = K2.try_with(|s| *s.get() = Some(Foo(tx.clone())));
+ }
+ }
+ }
+
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || unsafe {
+ let mut tx = Some(tx);
+ K1.with(|s| *s.get() = Some(S1(tx.take().unwrap())));
+ });
+ rx.recv().unwrap();
+}
+
+// This test checks that TLS destructors have run before the thread joins. The
+// test has no false positives (meaning: if the test fails, there's actually
+// an ordering problem). It may have false negatives, where the test passes but
+// join is not guaranteed to be after the TLS destructors. However, false
+// negatives should be exceedingly rare due to judicious use of
+// thread::yield_now and running the test several times.
+#[test]
+fn join_orders_after_tls_destructors() {
+ // We emulate a synchronous MPSC rendezvous channel using only atomics and
+ // thread::yield_now. We can't use std::mpsc as the implementation itself
+ // may rely on thread locals.
+ //
+ // The basic state machine for an SPSC rendezvous channel is:
+ // FRESH -> THREAD1_WAITING -> MAIN_THREAD_RENDEZVOUS
+ // where the first transition is done by the “receiving” thread and the 2nd
+ // transition is done by the “sending” thread.
+ //
+ // We add an additional state `THREAD2_LAUNCHED` between `FRESH` and
+ // `THREAD1_WAITING` to block until all threads are actually running.
+ //
+ // A thread that joins on the “receiving” thread completion should never
+ // observe the channel in the `THREAD1_WAITING` state. If this does occur,
+ // we switch to the “poison” state `THREAD2_JOINED` and panic all around.
+ // (This is equivalent to “sending” from an alternate producer thread.)
+ const FRESH: u8 = 0;
+ const THREAD2_LAUNCHED: u8 = 1;
+ const THREAD1_WAITING: u8 = 2;
+ const MAIN_THREAD_RENDEZVOUS: u8 = 3;
+ const THREAD2_JOINED: u8 = 4;
+ static SYNC_STATE: AtomicU8 = AtomicU8::new(FRESH);
+
+ for _ in 0..10 {
+ SYNC_STATE.store(FRESH, Ordering::SeqCst);
+
+ let jh = thread::Builder::new()
+ .name("thread1".into())
+ .spawn(move || {
+ struct TlDrop;
+
+ impl Drop for TlDrop {
+ fn drop(&mut self) {
+ let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::SeqCst);
+ loop {
+ match sync_state {
+ THREAD2_LAUNCHED | THREAD1_WAITING => thread::yield_now(),
+ MAIN_THREAD_RENDEZVOUS => break,
+ THREAD2_JOINED => panic!(
+ "Thread 1 still running after thread 2 joined on thread 1"
+ ),
+ v => unreachable!("sync state: {}", v),
+ }
+ sync_state = SYNC_STATE.load(Ordering::SeqCst);
+ }
+ }
+ }
+
+ thread_local! {
+ static TL_DROP: TlDrop = TlDrop;
+ }
+
+ TL_DROP.with(|_| {});
+
+ loop {
+ match SYNC_STATE.load(Ordering::SeqCst) {
+ FRESH => thread::yield_now(),
+ THREAD2_LAUNCHED => break,
+ v => unreachable!("sync state: {}", v),
+ }
+ }
+ })
+ .unwrap();
+
+ let jh2 = thread::Builder::new()
+ .name("thread2".into())
+ .spawn(move || {
+ assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::SeqCst), FRESH);
+ jh.join().unwrap();
+ match SYNC_STATE.swap(THREAD2_JOINED, Ordering::SeqCst) {
+ MAIN_THREAD_RENDEZVOUS => return,
+ THREAD2_LAUNCHED | THREAD1_WAITING => {
+ panic!("Thread 2 running after thread 1 join before main thread rendezvous")
+ }
+ v => unreachable!("sync state: {:?}", v),
+ }
+ })
+ .unwrap();
+
+ loop {
+ match SYNC_STATE.compare_exchange(
+ THREAD1_WAITING,
+ MAIN_THREAD_RENDEZVOUS,
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ ) {
+ Ok(_) => break,
+ Err(FRESH) => thread::yield_now(),
+ Err(THREAD2_LAUNCHED) => thread::yield_now(),
+ Err(THREAD2_JOINED) => {
+ panic!("Main thread rendezvous after thread 2 joined thread 1")
+ }
+ v => unreachable!("sync state: {:?}", v),
+ }
+ }
+ jh2.join().unwrap();
+ }
+}
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
new file mode 100644
index 000000000..44c8a50fd
--- /dev/null
+++ b/library/std/src/thread/mod.rs
@@ -0,0 +1,1621 @@
+//! Native threads.
+//!
+//! ## The threading model
+//!
+//! An executing Rust program consists of a collection of native OS threads,
+//! each with their own stack and local state. Threads can be named, and
+//! provide some built-in support for low-level synchronization.
+//!
+//! Communication between threads can be done through
+//! [channels], Rust's message-passing types, along with [other forms of thread
+//! synchronization](../../std/sync/index.html) and shared-memory data
+//! structures. In particular, types that are guaranteed to be
+//! threadsafe are easily shared between threads using the
+//! atomically-reference-counted container, [`Arc`].
+//!
+//! Fatal logic errors in Rust cause *thread panic*, during which
+//! a thread will unwind the stack, running destructors and freeing
+//! owned resources. While not meant as a 'try/catch' mechanism, panics
+//! in Rust can nonetheless be caught (unless compiling with `panic=abort`) with
+//! [`catch_unwind`](../../std/panic/fn.catch_unwind.html) and recovered
+//! from, or alternatively be resumed with
+//! [`resume_unwind`](../../std/panic/fn.resume_unwind.html). If the panic
+//! is not caught the thread will exit, but the panic may optionally be
+//! detected from a different thread with [`join`]. If the main thread panics
+//! without the panic being caught, the application will exit with a
+//! non-zero exit code.
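+//!
+//! For instance (a minimal sketch), a panic can be caught on the thread where
+//! it occurs:
+//!
+//! ```rust
+//! use std::panic;
+//!
+//! let result = panic::catch_unwind(|| {
+//! panic!("oops");
+//! });
+//! assert!(result.is_err());
+//! ```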
+//!
+//! When the main thread of a Rust program terminates, the entire program shuts
+//! down, even if other threads are still running. However, this module provides
+//! convenient facilities for automatically waiting for the termination of a
+//! thread (i.e., join).
+//!
+//! ## Spawning a thread
+//!
+//! A new thread can be spawned using the [`thread::spawn`][`spawn`] function:
+//!
+//! ```rust
+//! use std::thread;
+//!
+//! thread::spawn(move || {
+//! // some work here
+//! });
+//! ```
+//!
+//! In this example, the spawned thread is "detached," which means that there is
+//! no way for the program to learn when the spawned thread completes or otherwise
+//! terminates.
+//!
+//! To learn when a thread completes, it is necessary to capture the [`JoinHandle`]
+//! object that is returned by the call to [`spawn`], which provides
+//! a `join` method that allows the caller to wait for the completion of the
+//! spawned thread:
+//!
+//! ```rust
+//! use std::thread;
+//!
+//! let thread_join_handle = thread::spawn(move || {
+//! // some work here
+//! });
+//! // some work here
+//! let res = thread_join_handle.join();
+//! ```
+//!
+//! The [`join`] method returns a [`thread::Result`] containing [`Ok`] of the final
+//! value produced by the spawned thread, or [`Err`] of the value given to
+//! a call to [`panic!`] if the thread panicked.
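+//!
+//! For example (a minimal sketch), joining a thread that panicked yields an
+//! [`Err`] carrying the panic payload:
+//!
+//! ```rust
+//! use std::thread;
+//!
+//! let res = thread::spawn(|| {
+//! panic!("oops");
+//! }).join();
+//! assert!(res.is_err());
+//! ```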
+//!
+//! Note that there is no parent/child relationship between a thread that spawns a
+//! new thread and the thread being spawned. In particular, the spawned thread may or
+//! may not outlive the spawning thread, unless the spawning thread is the main thread.
+//!
+//! ## Configuring threads
+//!
+//! A new thread can be configured before it is spawned via the [`Builder`] type,
+//! which currently allows you to set the name and stack size for the thread:
+//!
+//! ```rust
+//! # #![allow(unused_must_use)]
+//! use std::thread;
+//!
+//! thread::Builder::new().name("thread1".to_string()).spawn(move || {
+//! println!("Hello, world!");
+//! });
+//! ```
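+//!
+//! A stack size can be requested in the same way (a minimal sketch; the 4 MiB
+//! figure is an arbitrary example, not a recommendation):
+//!
+//! ```rust
+//! use std::thread;
+//!
+//! let handle = thread::Builder::new()
+//! .name("worker".to_string())
+//! .stack_size(4 * 1024 * 1024)
+//! .spawn(|| {
+//! // thread code
+//! })
+//! .unwrap();
+//! handle.join().unwrap();
+//! ```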
+//!
+//! ## The `Thread` type
+//!
+//! Threads are represented via the [`Thread`] type, which you can get in one of
+//! two ways:
+//!
+//! * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
+//! function, and calling [`thread`][`JoinHandle::thread`] on the [`JoinHandle`].
+//! * By requesting the current thread, using the [`thread::current`] function.
+//!
+//! The [`thread::current`] function is available even for threads not spawned
+//! by the APIs of this module.
+//!
+//! ## Thread-local storage
+//!
+//! This module also provides an implementation of thread-local storage for Rust
+//! programs. Thread-local storage is a method of storing data into a global
+//! variable that each thread in the program will have its own copy of.
+//! Threads do not share this data, so accesses do not need to be synchronized.
+//!
+//! A thread-local key owns the value it contains and will destroy the value when the
+//! thread exits. It is created with the [`thread_local!`] macro and can contain any
+//! value that is `'static` (no borrowed pointers). It provides an accessor function,
+//! [`with`], that yields a shared reference to the value to the specified
+//! closure. Thread-local keys allow only shared access to values, as there would be no
+//! way to guarantee uniqueness if mutable borrows were allowed. Most values
+//! will want to make use of some form of **interior mutability** through the
+//! [`Cell`] or [`RefCell`] types.
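+//!
+//! For example, a minimal sketch of a per-thread counter (the `COUNTER` key is
+//! only for illustration; each thread gets its own copy, so no synchronization
+//! is needed):
+//!
+//! ```rust
+//! use std::cell::Cell;
+//! use std::thread;
+//!
+//! thread_local! {
+//!     // Every thread starts with its own `COUNTER` initialized to 0.
+//!     static COUNTER: Cell<u32> = Cell::new(0);
+//! }
+//!
+//! // Increment this thread's copy.
+//! COUNTER.with(|c| c.set(c.get() + 1));
+//!
+//! thread::spawn(|| {
+//!     // The spawned thread sees a fresh copy, still 0.
+//!     COUNTER.with(|c| assert_eq!(c.get(), 0));
+//! }).join().unwrap();
+//!
+//! // The main thread's copy was not affected by the spawned thread.
+//! COUNTER.with(|c| assert_eq!(c.get(), 1));
+//! ```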
+//!
+//! ## Naming threads
+//!
+//! Threads are able to have associated names for identification purposes. By default, spawned
+//! threads are unnamed. To specify a name for a thread, build the thread with [`Builder`] and pass
+//! the desired thread name to [`Builder::name`]. To retrieve the thread name from within the
+//! thread, use [`Thread::name`]. A couple of examples of where the name of a thread gets used:
+//!
+//! * If a panic occurs in a named thread, the thread name will be printed in the panic message.
+//! * The thread name is provided to the OS where applicable (e.g., `pthread_setname_np` in
+//! on unix-like platforms).
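+//!
+//! For example, a panic in a named thread reports that name (a small sketch;
+//! the `"worker"` name is only for illustration):
+//!
+//! ```should_panic
+//! use std::thread;
+//!
+//! thread::Builder::new()
+//!     .name("worker".to_string())
+//!     .spawn(|| {
+//!         // The panic message printed to stderr mentions the thread name,
+//!         // e.g. "thread 'worker' panicked at ...".
+//!         panic!("something went wrong");
+//!     })
+//!     .unwrap()
+//!     .join()
+//!     .unwrap();
+//! ```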
+//!
+//! ## Stack size
+//!
+//! The default stack size for spawned threads is 2 MiB, though this particular stack size is
+//! subject to change in the future. There are two ways to manually specify the stack size for
+//! spawned threads:
+//!
+//! * Build the thread with [`Builder`] and pass the desired stack size to [`Builder::stack_size`].
+//! * Set the `RUST_MIN_STACK` environment variable to an integer representing the desired stack
+//! size (in bytes). Note that setting [`Builder::stack_size`] will override this.
+//!
+//! Note that the stack size of the main thread is *not* determined by Rust.
+//!
+//! [channels]: crate::sync::mpsc
+//! [`join`]: JoinHandle::join
+//! [`Result`]: crate::result::Result
+//! [`Ok`]: crate::result::Result::Ok
+//! [`Err`]: crate::result::Result::Err
+//! [`thread::current`]: current
+//! [`thread::Result`]: Result
+//! [`unpark`]: Thread::unpark
+//! [`thread::park_timeout`]: park_timeout
+//! [`Cell`]: crate::cell::Cell
+//! [`RefCell`]: crate::cell::RefCell
+//! [`with`]: LocalKey::with
+//! [`thread_local!`]: crate::thread_local
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::any::Any;
+use crate::cell::UnsafeCell;
+use crate::ffi::{CStr, CString};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::mem;
+use crate::num::NonZeroU64;
+use crate::num::NonZeroUsize;
+use crate::panic;
+use crate::panicking;
+use crate::pin::Pin;
+use crate::ptr::addr_of_mut;
+use crate::str;
+use crate::sync::Arc;
+use crate::sys::thread as imp;
+use crate::sys_common::mutex;
+use crate::sys_common::thread;
+use crate::sys_common::thread_info;
+use crate::sys_common::thread_parker::Parker;
+use crate::sys_common::{AsInner, IntoInner};
+use crate::time::Duration;
+
+////////////////////////////////////////////////////////////////////////////////
+// Thread-local storage
+////////////////////////////////////////////////////////////////////////////////
+
+#[macro_use]
+mod local;
+
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+mod scoped;
+
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+pub use scoped::{scope, Scope, ScopedJoinHandle};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::local::{AccessError, LocalKey};
+
+// The types used by the thread_local! macro to access TLS keys. Note that there
+// are two types, the "OS" type and the "fast" type. The OS thread local key
+// type is accessed via platform-specific API calls and is slow, while the fast
+// key type is accessed via code generated via LLVM, where TLS keys are set up
+// by the elf linker. Note that the OS TLS type is always available: on macOS
+// the standard library is compiled with support for older platform versions
+// where fast TLS was not available; end-user code is compiled with fast TLS
+// where available, but both are needed.
+
+#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(target_thread_local)]
+#[doc(hidden)]
+pub use self::local::fast::Key as __FastLocalKeyInner;
+#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[doc(hidden)]
+pub use self::local::os::Key as __OsLocalKeyInner;
+#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
+#[doc(hidden)]
+pub use self::local::statik::Key as __StaticLocalKeyInner;
+
+////////////////////////////////////////////////////////////////////////////////
+// Builder
+////////////////////////////////////////////////////////////////////////////////
+
+/// Thread factory, which can be used in order to configure the properties of
+/// a new thread.
+///
+/// Methods can be chained on it in order to configure it.
+///
+/// The two configurations available are:
+///
+/// - [`name`]: specifies an [associated name for the thread][naming-threads]
+/// - [`stack_size`]: specifies the [desired stack size for the thread][stack-size]
+///
+/// The [`spawn`] method will take ownership of the builder and return an
+/// [`io::Result`] containing the handle to the thread spawned with the given
+/// configuration.
+///
+/// The [`thread::spawn`] free function uses a `Builder` with default
+/// configuration and [`unwrap`]s its return value.
+///
+/// You may want to use [`spawn`] instead of [`thread::spawn`] when you want
+/// to recover from a failure to launch a thread: the free function will
+/// panic, whereas the `Builder` method will return an [`io::Result`].
+///
+/// # Examples
+///
+/// ```
+/// use std::thread;
+///
+/// let builder = thread::Builder::new();
+///
+/// let handler = builder.spawn(|| {
+/// // thread code
+/// }).unwrap();
+///
+/// handler.join().unwrap();
+/// ```
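+///
+/// Handling a failure to launch the thread instead of panicking (a minimal
+/// sketch of the recovery case mentioned above):
+///
+/// ```
+/// use std::thread;
+///
+/// let builder = thread::Builder::new();
+///
+/// match builder.spawn(|| {
+///     // thread code
+/// }) {
+///     Ok(handle) => handle.join().unwrap(),
+///     Err(e) => eprintln!("failed to spawn thread: {e}"),
+/// }
+/// ```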
+///
+/// [`stack_size`]: Builder::stack_size
+/// [`name`]: Builder::name
+/// [`spawn`]: Builder::spawn
+/// [`thread::spawn`]: spawn
+/// [`io::Result`]: crate::io::Result
+/// [`unwrap`]: crate::result::Result::unwrap
+/// [naming-threads]: ./index.html#naming-threads
+/// [stack-size]: ./index.html#stack-size
+#[must_use = "must eventually spawn the thread"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Builder {
+ // A name for the thread-to-be, for identification in panic messages
+ name: Option<String>,
+ // The size of the stack for the spawned thread in bytes
+ stack_size: Option<usize>,
+}
+
+impl Builder {
+ /// Generates the base configuration for spawning a thread, from which
+ /// configuration methods can be chained.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new()
+ /// .name("foo".into())
+ /// .stack_size(32 * 1024);
+ ///
+ /// let handler = builder.spawn(|| {
+ /// // thread code
+ /// }).unwrap();
+ ///
+ /// handler.join().unwrap();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new() -> Builder {
+ Builder { name: None, stack_size: None }
+ }
+
+ /// Names the thread-to-be. Currently the name is used for identification
+ /// only in panic messages.
+ ///
+ /// The name must not contain null bytes (`\0`).
+ ///
+ /// For more information about named threads, see
+ /// [this module-level documentation][naming-threads].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new()
+ /// .name("foo".into());
+ ///
+ /// let handler = builder.spawn(|| {
+ /// assert_eq!(thread::current().name(), Some("foo"))
+ /// }).unwrap();
+ ///
+ /// handler.join().unwrap();
+ /// ```
+ ///
+ /// [naming-threads]: ./index.html#naming-threads
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn name(mut self, name: String) -> Builder {
+ self.name = Some(name);
+ self
+ }
+
+ /// Sets the size of the stack (in bytes) for the new thread.
+ ///
+ /// The actual stack size may be greater than this value if
+ /// the platform specifies a minimum stack size.
+ ///
+ /// For more information about the stack size for threads, see
+ /// [this module-level documentation][stack-size].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new().stack_size(32 * 1024);
+ /// ```
+ ///
+ /// [stack-size]: ./index.html#stack-size
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn stack_size(mut self, size: usize) -> Builder {
+ self.stack_size = Some(size);
+ self
+ }
+
+ /// Spawns a new thread by taking ownership of the `Builder`, and returns an
+ /// [`io::Result`] to its [`JoinHandle`].
+ ///
+ /// The spawned thread may outlive the caller (unless the caller thread
+ /// is the main thread; the whole process is terminated when the main
+ /// thread finishes). The join handle can be used to block on
+ /// termination of the spawned thread, including recovering its panics.
+ ///
+ /// For more complete documentation, see [`thread::spawn`][`spawn`].
+ ///
+ /// # Errors
+ ///
+ /// Unlike the [`spawn`] free function, this method yields an
+ /// [`io::Result`] to capture any failure to create the thread at
+ /// the OS level.
+ ///
+ /// [`io::Result`]: crate::io::Result
+ ///
+ /// # Panics
+ ///
+ /// Panics if a thread name was set and it contained null bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new();
+ ///
+ /// let handler = builder.spawn(|| {
+ /// // thread code
+ /// }).unwrap();
+ ///
+ /// handler.join().unwrap();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn spawn<F, T>(self, f: F) -> io::Result<JoinHandle<T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ unsafe { self.spawn_unchecked(f) }
+ }
+
+ /// Spawns a new thread without any lifetime restrictions by taking ownership
+ /// of the `Builder`, and returns an [`io::Result`] to its [`JoinHandle`].
+ ///
+ /// The spawned thread may outlive the caller (unless the caller thread
+ /// is the main thread; the whole process is terminated when the main
+ /// thread finishes). The join handle can be used to block on
+ /// termination of the spawned thread, including recovering its panics.
+ ///
+ /// This method is identical to [`thread::Builder::spawn`][`Builder::spawn`],
+ /// except for the relaxed lifetime bounds, which render it unsafe.
+ /// For more complete documentation, see [`thread::spawn`][`spawn`].
+ ///
+ /// # Errors
+ ///
+ /// Unlike the [`spawn`] free function, this method yields an
+ /// [`io::Result`] to capture any failure to create the thread at
+ /// the OS level.
+ ///
+ /// # Panics
+ ///
+ /// Panics if a thread name was set and it contained null bytes.
+ ///
+ /// # Safety
+ ///
+ /// The caller has to ensure that the spawned thread does not outlive any
+ /// references in the supplied thread closure and its return type.
+ /// This can be guaranteed in two ways:
+ ///
+ /// - ensure that [`join`][`JoinHandle::join`] is called before any referenced
+ /// data is dropped
+ /// - use only types with `'static` lifetime bounds, i.e., those with no or only
+ /// `'static` references (both [`thread::Builder::spawn`][`Builder::spawn`]
+ /// and [`thread::spawn`][`spawn`] enforce this property statically)
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(thread_spawn_unchecked)]
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new();
+ ///
+ /// let x = 1;
+ /// let thread_x = &x;
+ ///
+ /// let handler = unsafe {
+ /// builder.spawn_unchecked(move || {
+ /// println!("x = {}", *thread_x);
+ /// }).unwrap()
+ /// };
+ ///
+ /// // caller has to ensure `join()` is called, otherwise
+ /// // it is possible to access freed memory if `x` gets
+ /// // dropped before the thread closure is executed!
+ /// handler.join().unwrap();
+ /// ```
+ ///
+ /// [`io::Result`]: crate::io::Result
+ #[unstable(feature = "thread_spawn_unchecked", issue = "55132")]
+ pub unsafe fn spawn_unchecked<'a, F, T>(self, f: F) -> io::Result<JoinHandle<T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'a,
+ T: Send + 'a,
+ {
+ Ok(JoinHandle(unsafe { self.spawn_unchecked_(f, None) }?))
+ }
+
+ unsafe fn spawn_unchecked_<'a, 'scope, F, T>(
+ self,
+ f: F,
+ scope_data: Option<Arc<scoped::ScopeData>>,
+ ) -> io::Result<JoinInner<'scope, T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'a,
+ T: Send + 'a,
+ 'scope: 'a,
+ {
+ let Builder { name, stack_size } = self;
+
+ let stack_size = stack_size.unwrap_or_else(thread::min_stack);
+
+ let my_thread = Thread::new(name.map(|name| {
+ CString::new(name).expect("thread name may not contain interior null bytes")
+ }));
+ let their_thread = my_thread.clone();
+
+ let my_packet: Arc<Packet<'scope, T>> = Arc::new(Packet {
+ scope: scope_data,
+ result: UnsafeCell::new(None),
+ _marker: PhantomData,
+ });
+ let their_packet = my_packet.clone();
+
+ let output_capture = crate::io::set_output_capture(None);
+ crate::io::set_output_capture(output_capture.clone());
+
+ let main = move || {
+ if let Some(name) = their_thread.cname() {
+ imp::Thread::set_name(name);
+ }
+
+ crate::io::set_output_capture(output_capture);
+
+ // SAFETY: the stack guard passed is the one for the current thread.
+ // This means the current thread's stack and the new thread's stack
+ // are properly set and protected from each other.
+ thread_info::set(unsafe { imp::guard::current() }, their_thread);
+ let try_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ crate::sys_common::backtrace::__rust_begin_short_backtrace(f)
+ }));
+ // SAFETY: `their_packet` has been built just above and moved into the
+ // closure (it is an `Arc<...>`), and `my_packet` will be stored in the
+ // same `JoinInner` as this closure, so the mutation is safe: nothing
+ // reads the result until `join()` has synchronized with this thread.
+ unsafe { *their_packet.result.get() = Some(try_result) };
+ };
+
+ if let Some(scope_data) = &my_packet.scope {
+ scope_data.increment_num_running_threads();
+ }
+
+ Ok(JoinInner {
+ // SAFETY:
+ //
+ // `imp::Thread::new` takes a closure with a `'static` lifetime, since it's passed
+ // through FFI or otherwise used with low-level threading primitives that have no
+ // notion of or way to enforce lifetimes.
+ //
+ // As mentioned in the `Safety` section of this function's documentation, the caller of
+ // this function needs to guarantee that the passed-in lifetime is sufficiently long
+ // for the lifetime of the thread.
+ //
+ // Similarly, the `sys` implementation must guarantee that no references to the closure
+ // exist after the thread has terminated, which is signaled by `Thread::join`
+ // returning.
+ native: unsafe {
+ imp::Thread::new(
+ stack_size,
+ mem::transmute::<Box<dyn FnOnce() + 'a>, Box<dyn FnOnce() + 'static>>(
+ Box::new(main),
+ ),
+ )?
+ },
+ thread: my_thread,
+ packet: my_packet,
+ })
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Free functions
+////////////////////////////////////////////////////////////////////////////////
+
+/// Spawns a new thread, returning a [`JoinHandle`] for it.
+///
+/// The join handle provides a [`join`] method that can be used to join the spawned
+/// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing
+/// the argument given to [`panic!`].
+///
+/// If the join handle is dropped, the spawned thread will implicitly be *detached*.
+/// In this case, the spawned thread may no longer be joined.
+/// (It is the responsibility of the program to either eventually join threads it
+/// creates or detach them; otherwise, a resource leak will result.)
+///
+/// This call will create a thread using default parameters of [`Builder`]. If you
+/// want to specify the stack size or the name of the thread, use the [`Builder`]
+/// API instead.
+///
+/// As you can see in the signature of `spawn`, there are two constraints on
+/// both the closure given to `spawn` and its return value; let's explain them:
+///
+/// - The `'static` constraint means that the closure and its return value
+/// must have a lifetime of the whole program execution. The reason for this
+/// is that threads can outlive the lifetime they have been created in.
+///
+/// Indeed, if the thread (and by extension its return value) can outlive its
+/// caller, we need to make sure that it will be valid afterwards, and since
+/// we *can't* know when it will return, it needs to remain valid as long as
+/// possible, that is, until the end of the program, hence the `'static`
+/// lifetime.
+/// - The [`Send`] constraint is because the closure will need to be passed
+/// *by value* from the thread where it is spawned to the new thread. Its
+/// return value will need to be passed from the new thread to the thread
+/// where it is `join`ed.
+/// As a reminder, the [`Send`] marker trait expresses that it is safe to be
+/// passed from thread to thread. [`Sync`] expresses that it is safe to have a
+/// reference be passed from thread to thread.
+///
+/// # Panics
+///
+/// Panics if the OS fails to create a thread; use [`Builder::spawn`]
+/// to recover from such errors.
+///
+/// # Examples
+///
+/// Creating a thread.
+///
+/// ```
+/// use std::thread;
+///
+/// let handler = thread::spawn(|| {
+/// // thread code
+/// });
+///
+/// handler.join().unwrap();
+/// ```
+///
+/// As mentioned in the module documentation, threads are usually made to
+/// communicate using [`channels`]; here is how that usually looks.
+///
+/// This example also shows how to use `move`, in order to give ownership
+/// of values to a thread.
+///
+/// ```
+/// use std::thread;
+/// use std::sync::mpsc::channel;
+///
+/// let (tx, rx) = channel();
+///
+/// let sender = thread::spawn(move || {
+/// tx.send("Hello, thread".to_owned())
+/// .expect("Unable to send on channel");
+/// });
+///
+/// let receiver = thread::spawn(move || {
+/// let value = rx.recv().expect("Unable to receive from channel");
+/// println!("{value}");
+/// });
+///
+/// sender.join().expect("The sender thread has panicked");
+/// receiver.join().expect("The receiver thread has panicked");
+/// ```
+///
+/// A thread can also return a value through its [`JoinHandle`]; you can use
+/// this to perform asynchronous computations (futures might be more appropriate,
+/// though).
+///
+/// ```
+/// use std::thread;
+///
+/// let computation = thread::spawn(|| {
+/// // Some expensive computation.
+/// 42
+/// });
+///
+/// let result = computation.join().unwrap();
+/// println!("{result}");
+/// ```
+///
+/// [`channels`]: crate::sync::mpsc
+/// [`join`]: JoinHandle::join
+/// [`Err`]: crate::result::Result::Err
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn spawn<F, T>(f: F) -> JoinHandle<T>
+where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+{
+ Builder::new().spawn(f).expect("failed to spawn thread")
+}
+
+/// Gets a handle to the thread that invokes it.
+///
+/// # Examples
+///
+/// Getting a handle to the current thread with `thread::current()`:
+///
+/// ```
+/// use std::thread;
+///
+/// let handler = thread::Builder::new()
+/// .name("named thread".into())
+/// .spawn(|| {
+/// let handle = thread::current();
+/// assert_eq!(handle.name(), Some("named thread"));
+/// })
+/// .unwrap();
+///
+/// handler.join().unwrap();
+/// ```
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn current() -> Thread {
+ thread_info::current_thread().expect(
+ "use of std::thread::current() is not possible \
+ after the thread's local data has been destroyed",
+ )
+}
+
+/// Cooperatively gives up a timeslice to the OS scheduler.
+///
+/// This calls the underlying OS scheduler's yield primitive, signaling
+/// that the calling thread is willing to give up its remaining timeslice
+/// so that the OS may schedule other threads on the CPU.
+///
+/// A drawback of yielding in a loop is that if the OS does not have any
+/// other ready threads to run on the current CPU, the thread will effectively
+/// busy-wait, which wastes CPU time and energy.
+///
+/// Therefore, when waiting for events of interest, a programmer's first
+/// choice should be to use synchronization devices such as [`channel`]s,
+/// [`Condvar`]s, [`Mutex`]es or [`join`] since these primitives are
+/// implemented in a blocking manner, giving up the CPU until the event
+/// of interest has occurred which avoids repeated yielding.
+///
+/// `yield_now` should thus be used only rarely, mostly in situations where
+/// repeated polling is required because there is no other suitable way to
+/// learn when an event of interest has occurred.
+///
+/// # Examples
+///
+/// ```
+/// use std::thread;
+///
+/// thread::yield_now();
+/// ```
+///
+/// [`channel`]: crate::sync::mpsc
+/// [`join`]: JoinHandle::join
+/// [`Condvar`]: crate::sync::Condvar
+/// [`Mutex`]: crate::sync::Mutex
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn yield_now() {
+ imp::Thread::yield_now()
+}
+
+/// Determines whether the current thread is unwinding because of a panic.
+///
+/// A common use of this feature is to poison shared resources when writing
+/// unsafe code, by checking `panicking` when `drop` is called.
+///
+/// This is usually not needed when writing safe code, as [`Mutex`es][Mutex]
+/// already poison themselves when a thread panics while holding the lock.
+///
+/// This can also be used in multithreaded applications, in order to send a
+/// message to other threads warning that a thread has panicked (e.g., for
+/// monitoring purposes).
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::thread;
+///
+/// struct SomeStruct;
+///
+/// impl Drop for SomeStruct {
+/// fn drop(&mut self) {
+/// if thread::panicking() {
+/// println!("dropped while unwinding");
+/// } else {
+/// println!("dropped while not unwinding");
+/// }
+/// }
+/// }
+///
+/// {
+/// print!("a: ");
+/// let a = SomeStruct;
+/// }
+///
+/// {
+/// print!("b: ");
+/// let b = SomeStruct;
+/// panic!()
+/// }
+/// ```
+///
+/// [Mutex]: crate::sync::Mutex
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn panicking() -> bool {
+ panicking::panicking()
+}
+
+/// Puts the current thread to sleep for at least the specified amount of time.
+///
+/// The thread may sleep longer than the duration specified due to scheduling
+/// specifics or platform-dependent functionality. It will never sleep less.
+///
+/// This function is blocking, and should not be used in `async` functions.
+///
+/// # Platform-specific behavior
+///
+/// On Unix platforms, the underlying syscall may be interrupted by a
+/// spurious wakeup or signal handler. To ensure the sleep occurs for at least
+/// the specified duration, this function may invoke that system call multiple
+/// times.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::thread;
+///
+/// // Let's sleep for 2 seconds:
+/// thread::sleep_ms(2000);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "1.6.0", note = "replaced by `std::thread::sleep`")]
+pub fn sleep_ms(ms: u32) {
+ sleep(Duration::from_millis(ms as u64))
+}
+
+/// Puts the current thread to sleep for at least the specified amount of time.
+///
+/// The thread may sleep longer than the duration specified due to scheduling
+/// specifics or platform-dependent functionality. It will never sleep less.
+///
+/// This function is blocking, and should not be used in `async` functions.
+///
+/// # Platform-specific behavior
+///
+/// On Unix platforms, the underlying syscall may be interrupted by a
+/// spurious wakeup or signal handler. To ensure the sleep occurs for at least
+/// the specified duration, this function may invoke that system call multiple
+/// times.
+/// Platforms which do not support nanosecond precision for sleeping will
+/// have `dur` rounded up to the nearest granularity of time they can sleep for.
+///
+/// Currently, specifying a zero duration on Unix platforms returns immediately
+/// without invoking the underlying [`nanosleep`] syscall, whereas on Windows
+/// platforms the underlying [`Sleep`] syscall is always invoked.
+/// If the intention is to yield the current time-slice you may want to use
+/// [`yield_now`] instead.
+///
+/// [`nanosleep`]: https://linux.die.net/man/2/nanosleep
+/// [`Sleep`]: https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-sleep
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::{thread, time};
+///
+/// let ten_millis = time::Duration::from_millis(10);
+/// let now = time::Instant::now();
+///
+/// thread::sleep(ten_millis);
+///
+/// assert!(now.elapsed() >= ten_millis);
+/// ```
+#[stable(feature = "thread_sleep", since = "1.4.0")]
+pub fn sleep(dur: Duration) {
+ imp::Thread::sleep(dur)
+}
+
+/// Blocks unless or until the current thread's token is made available.
+///
+/// A call to `park` does not guarantee that the thread will remain parked
+/// forever, and callers should be prepared for this possibility.
+///
+/// # park and unpark
+///
+/// Every thread is equipped with some basic low-level blocking support, via the
+/// [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`]
+/// method. [`park`] blocks the current thread, which can then be resumed from
+/// another thread by calling the [`unpark`] method on the blocked thread's
+/// handle.
+///
+/// Conceptually, each [`Thread`] handle has an associated token, which is
+/// initially not present:
+///
+/// * The [`thread::park`][`park`] function blocks the current thread unless or
+/// until the token is available for its thread handle, at which point it
+/// atomically consumes the token. It may also return *spuriously*, without
+/// consuming the token. [`thread::park_timeout`] does the same, but allows
+/// specifying a maximum time to block the thread for.
+///
+/// * The [`unpark`] method on a [`Thread`] atomically makes the token available
+/// if it wasn't already. Because the token is initially absent, [`unpark`]
+/// followed by [`park`] will result in the second call returning immediately.
+///
+/// In other words, each [`Thread`] acts a bit like a spinlock that can be
+/// locked and unlocked using `park` and `unpark`.
+///
+/// Notice that being unblocked does not imply any synchronization with someone
+/// that unparked this thread; the wakeup could also be spurious.
+/// For example, it would be a valid, but inefficient, implementation to make both [`park`] and
+/// [`unpark`] return immediately without doing anything.
+///
+/// The API is typically used by acquiring a handle to the current thread,
+/// placing that handle in a shared data structure so that other threads can
+/// find it, and then `park`ing in a loop. When some desired condition is met, another
+/// thread calls [`unpark`] on the handle.
+///
+/// The motivation for this design is twofold:
+///
+/// * It avoids the need to allocate mutexes and condvars when building new
+/// synchronization primitives; the threads already provide basic
+/// blocking/signaling.
+///
+/// * It can be implemented very efficiently on many platforms.
+///
+/// # Examples
+///
+/// ```
+/// use std::thread;
+/// use std::sync::{Arc, atomic::{Ordering, AtomicBool}};
+/// use std::time::Duration;
+///
+/// let flag = Arc::new(AtomicBool::new(false));
+/// let flag2 = Arc::clone(&flag);
+///
+/// let parked_thread = thread::spawn(move || {
+/// // We want to wait until the flag is set. We *could* just spin, but using
+/// // park/unpark is more efficient.
+/// while !flag2.load(Ordering::Acquire) {
+/// println!("Parking thread");
+/// thread::park();
+/// // We *could* get here spuriously, i.e., way before the 10ms below are over!
+/// // But that is no problem, we are in a loop until the flag is set anyway.
+/// println!("Thread unparked");
+/// }
+/// println!("Flag received");
+/// });
+///
+/// // Let some time pass for the thread to be spawned.
+/// thread::sleep(Duration::from_millis(10));
+///
+/// // Set the flag, and let the thread wake up.
+/// // There is no race condition here, if `unpark`
+/// // happens first, `park` will return immediately.
+/// // Hence there is no risk of a deadlock.
+/// flag.store(true, Ordering::Release);
+/// println!("Unpark the thread");
+/// parked_thread.thread().unpark();
+///
+/// parked_thread.join().unwrap();
+/// ```
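+///
+/// A minimal sketch of the handle-sharing pattern described above (the
+/// `Mutex`-protected slot is only for illustration):
+///
+/// ```
+/// use std::sync::{Arc, Mutex};
+/// use std::thread;
+///
+/// // A shared slot where the parked thread publishes its handle.
+/// let slot: Arc<Mutex<Option<thread::Thread>>> = Arc::new(Mutex::new(None));
+/// let slot2 = Arc::clone(&slot);
+///
+/// let worker = thread::spawn(move || {
+///     // Publish our handle so another thread can find and unpark us ...
+///     *slot2.lock().unwrap() = Some(thread::current());
+///     // ... then park. A spurious wakeup only ends this example early;
+///     // real code would re-check its condition in a loop.
+///     thread::park();
+/// });
+///
+/// // Wait for the handle to appear, then wake the worker up.
+/// loop {
+///     if let Some(t) = slot.lock().unwrap().take() {
+///         t.unpark();
+///         break;
+///     }
+/// }
+///
+/// worker.join().unwrap();
+/// ```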
+///
+/// [`unpark`]: Thread::unpark
+/// [`thread::park_timeout`]: park_timeout
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn park() {
+ // SAFETY: park_timeout is called on the parker owned by this thread.
+ unsafe {
+ current().inner.as_ref().parker().park();
+ }
+}
+
+/// Use [`park_timeout`].
+///
+/// Blocks unless or until the current thread's token is made available or
+/// the specified duration has been reached (may wake spuriously).
+///
+/// The semantics of this function are equivalent to [`park`] except
+/// that the thread will be blocked for roughly no longer than `dur`. This
+/// method should not be used for precise timing due to anomalies such as
+/// preemption or platform differences that might not cause the maximum
+/// amount of time waited to be precisely `ms` long.
+///
+/// See the [park documentation][`park`] for more detail.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(since = "1.6.0", note = "replaced by `std::thread::park_timeout`")]
+pub fn park_timeout_ms(ms: u32) {
+ park_timeout(Duration::from_millis(ms as u64))
+}
+
+/// Blocks unless or until the current thread's token is made available or
+/// the specified duration has been reached (may wake spuriously).
+///
+/// The semantics of this function are equivalent to [`park`][park] except
+/// that the thread will be blocked for roughly no longer than `dur`. This
+/// method should not be used for precise timing due to anomalies such as
+/// preemption or platform differences that might not cause the maximum
+/// amount of time waited to be precisely `dur` long.
+///
+/// See the [park documentation][park] for more details.
+///
+/// # Platform-specific behavior
+///
+/// Platforms which do not support nanosecond precision for sleeping will have
+/// `dur` rounded up to the nearest granularity of time they can sleep for.
+///
+/// # Examples
+///
+/// Waiting for the complete expiration of the timeout:
+///
+/// ```rust,no_run
+/// use std::thread::park_timeout;
+/// use std::time::{Instant, Duration};
+///
+/// let timeout = Duration::from_secs(2);
+/// let beginning_park = Instant::now();
+///
+/// let mut timeout_remaining = timeout;
+/// loop {
+/// park_timeout(timeout_remaining);
+/// let elapsed = beginning_park.elapsed();
+/// if elapsed >= timeout {
+/// break;
+/// }
+/// println!("restarting park_timeout after {elapsed:?}");
+/// timeout_remaining = timeout - elapsed;
+/// }
+/// ```
+#[stable(feature = "park_timeout", since = "1.4.0")]
+pub fn park_timeout(dur: Duration) {
+ // SAFETY: park_timeout is called on the parker owned by this thread.
+ unsafe {
+ current().inner.as_ref().parker().park_timeout(dur);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ThreadId
+////////////////////////////////////////////////////////////////////////////////
+
+/// A unique identifier for a running thread.
+///
+/// A `ThreadId` is an opaque object that uniquely identifies each thread
+/// created during the lifetime of a process. `ThreadId`s are guaranteed not to
+/// be reused, even when a thread terminates. `ThreadId`s are under the control
+/// of Rust's standard library and there may not be any relationship between
+/// `ThreadId` and the underlying platform's notion of a thread identifier --
+/// the two concepts cannot, therefore, be used interchangeably. A `ThreadId`
+/// can be retrieved from the [`id`] method on a [`Thread`].
+///
+/// # Examples
+///
+/// ```
+/// use std::thread;
+///
+/// let other_thread = thread::spawn(|| {
+/// thread::current().id()
+/// });
+///
+/// let other_thread_id = other_thread.join().unwrap();
+/// assert!(thread::current().id() != other_thread_id);
+/// ```
+///
+/// [`id`]: Thread::id
+#[stable(feature = "thread_id", since = "1.19.0")]
+#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)]
+pub struct ThreadId(NonZeroU64);
+
+impl ThreadId {
+ // Generate a new unique thread ID.
+ fn new() -> ThreadId {
+ // It is UB to attempt to acquire this mutex reentrantly!
+ static GUARD: mutex::StaticMutex = mutex::StaticMutex::new();
+ static mut COUNTER: u64 = 1;
+
+ unsafe {
+ let guard = GUARD.lock();
+
+ // If we somehow use up all our bits, panic so that we're not
+ // covering up subtle bugs of IDs being reused.
+ if COUNTER == u64::MAX {
+ drop(guard); // in case the panic handler ends up calling `ThreadId::new()`, avoid reentrant lock acquire.
+ panic!("failed to generate unique thread ID: bitspace exhausted");
+ }
+
+ let id = COUNTER;
+ COUNTER += 1;
+
+ ThreadId(NonZeroU64::new(id).unwrap())
+ }
+ }
+
+ /// This returns a numeric identifier for the thread identified by this
+ /// `ThreadId`.
+ ///
+ /// As noted in the documentation for the type itself, it is essentially an
+ /// opaque ID, but is guaranteed to be unique for each thread. The returned
+ /// value is entirely opaque -- only equality testing is stable. Note that
+ /// it is not guaranteed which values new threads will return, and this may
+ /// change across Rust versions.
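+ ///
+ /// # Examples
+ ///
+ /// A small sketch (requires the unstable `thread_id_value` feature):
+ ///
+ /// ```
+ /// #![feature(thread_id_value)]
+ /// use std::thread;
+ ///
+ /// let id = thread::current().id();
+ /// // The value is opaque, but it is the same each time it is queried for
+ /// // the same thread, so it can be used e.g. as a map key or in logs.
+ /// assert_eq!(id.as_u64(), id.as_u64());
+ /// ```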
+ #[must_use]
+ #[unstable(feature = "thread_id_value", issue = "67939")]
+ pub fn as_u64(&self) -> NonZeroU64 {
+ self.0
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Thread
+////////////////////////////////////////////////////////////////////////////////
+
+/// The internal representation of a `Thread` handle
+struct Inner {
+ name: Option<CString>, // Guaranteed to be UTF-8
+ id: ThreadId,
+ parker: Parker,
+}
+
+impl Inner {
+ fn parker(self: Pin<&Self>) -> Pin<&Parker> {
+ unsafe { Pin::map_unchecked(self, |inner| &inner.parker) }
+ }
+}
+
+#[derive(Clone)]
+#[stable(feature = "rust1", since = "1.0.0")]
+/// A handle to a thread.
+///
+/// Threads are represented via the `Thread` type, which you can get in one of
+/// two ways:
+///
+/// * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
+/// function, and calling [`thread`][`JoinHandle::thread`] on the
+/// [`JoinHandle`].
+/// * By requesting the current thread, using the [`thread::current`] function.
+///
+/// The [`thread::current`] function is available even for threads not spawned
+/// by the APIs of this module.
+///
+/// There is usually no need to create a `Thread` struct yourself; one
+/// should instead use a function like `spawn` to create new threads. See the
+/// docs of [`Builder`] and [`spawn`] for more details.
+///
+/// [`thread::current`]: current
+pub struct Thread {
+ inner: Pin<Arc<Inner>>,
+}
+
+impl Thread {
+ // Used only internally to construct a thread object without spawning
+ // Panics if the name contains nuls.
+ pub(crate) fn new(name: Option<CString>) -> Thread {
+ // We have to use `unsafe` here to construct the `Parker` in-place,
+ // which is required for the UNIX implementation.
+ //
+ // SAFETY: We pin the Arc immediately after creation, so its address never
+ // changes.
+ let inner = unsafe {
+ let mut arc = Arc::<Inner>::new_uninit();
+ let ptr = Arc::get_mut_unchecked(&mut arc).as_mut_ptr();
+ addr_of_mut!((*ptr).name).write(name);
+ addr_of_mut!((*ptr).id).write(ThreadId::new());
+ Parker::new(addr_of_mut!((*ptr).parker));
+ Pin::new_unchecked(arc.assume_init())
+ };
+
+ Thread { inner }
+ }
+
+ /// Atomically makes the handle's token available if it is not already.
+ ///
+ /// Every thread is equipped with some basic low-level blocking support, via
+ /// the [`park`][park] function and the `unpark()` method. These can be
+ /// used as a more CPU-efficient implementation of a spinlock.
+ ///
+ /// See the [park documentation][park] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let parked_thread = thread::Builder::new()
+ /// .spawn(|| {
+ /// println!("Parking thread");
+ /// thread::park();
+ /// println!("Thread unparked");
+ /// })
+ /// .unwrap();
+ ///
+ /// // Let some time pass for the thread to be spawned.
+ /// thread::sleep(Duration::from_millis(10));
+ ///
+ /// println!("Unpark the thread");
+ /// parked_thread.thread().unpark();
+ ///
+ /// parked_thread.join().unwrap();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn unpark(&self) {
+ self.inner.as_ref().parker().unpark();
+ }
+
+ /// Gets the thread's unique identifier.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let other_thread = thread::spawn(|| {
+ /// thread::current().id()
+ /// });
+ ///
+ /// let other_thread_id = other_thread.join().unwrap();
+ /// assert!(thread::current().id() != other_thread_id);
+ /// ```
+ #[stable(feature = "thread_id", since = "1.19.0")]
+ #[must_use]
+ pub fn id(&self) -> ThreadId {
+ self.inner.id
+ }
+
+ /// Gets the thread's name.
+ ///
+ /// For more information about named threads, see
+ /// [this module-level documentation][naming-threads].
+ ///
+ /// # Examples
+ ///
+ /// Threads by default have no name specified:
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new();
+ ///
+ /// let handler = builder.spawn(|| {
+ /// assert!(thread::current().name().is_none());
+ /// }).unwrap();
+ ///
+ /// handler.join().unwrap();
+ /// ```
+ ///
+ /// Thread with a specified name:
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new()
+ /// .name("foo".into());
+ ///
+ /// let handler = builder.spawn(|| {
+ /// assert_eq!(thread::current().name(), Some("foo"))
+ /// }).unwrap();
+ ///
+ /// handler.join().unwrap();
+ /// ```
+ ///
+ /// [naming-threads]: ./index.html#naming-threads
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn name(&self) -> Option<&str> {
+ self.cname().map(|s| unsafe { str::from_utf8_unchecked(s.to_bytes()) })
+ }
+
+ fn cname(&self) -> Option<&CStr> {
+ self.inner.name.as_deref()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for Thread {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Thread")
+ .field("id", &self.id())
+ .field("name", &self.name())
+ .finish_non_exhaustive()
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// JoinHandle
+////////////////////////////////////////////////////////////////////////////////
+
+/// A specialized [`Result`] type for threads.
+///
+/// Indicates the manner in which a thread exited.
+///
+/// The value contained in the `Result::Err` variant
+/// is the value the thread panicked with;
+/// that is, the argument the `panic!` macro was called with.
+/// Unlike with normal errors, this value doesn't implement
+/// the [`Error`](crate::error::Error) trait.
+///
+/// Thus, a sensible way to handle a thread panic is to either:
+///
+/// 1. propagate the panic with [`std::panic::resume_unwind`]
+/// 2. or in case the thread is intended to be a subsystem boundary
+/// that is supposed to isolate system-level failures,
+/// match on the `Err` variant and handle the panic in an appropriate way
+///
+/// A thread that completes without panicking is considered to exit successfully.
+///
+/// # Examples
+///
+/// Matching on the result of a joined thread:
+///
+/// ```no_run
+/// use std::{fs, thread, panic};
+///
+/// fn copy_in_thread() -> thread::Result<()> {
+/// thread::spawn(|| {
+/// fs::copy("foo.txt", "bar.txt").unwrap();
+/// }).join()
+/// }
+///
+/// fn main() {
+/// match copy_in_thread() {
+/// Ok(_) => println!("copy succeeded"),
+/// Err(e) => panic::resume_unwind(e),
+/// }
+/// }
+/// ```
+///
+/// [`Result`]: crate::result::Result
+/// [`std::panic::resume_unwind`]: crate::panic::resume_unwind
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type Result<T> = crate::result::Result<T, Box<dyn Any + Send + 'static>>;
+
+// This packet is used to communicate the return value between the spawned
+// thread and the rest of the program. It is shared through an `Arc` and
+// there's no need for a mutex here because synchronization happens with `join()`
+// (the caller will never read this packet until the thread has exited).
+//
+// An Arc to the packet is stored into a `JoinInner` which in turn is placed
+// in `JoinHandle`.
+struct Packet<'scope, T> {
+ scope: Option<Arc<scoped::ScopeData>>,
+ result: UnsafeCell<Option<Result<T>>>,
+ _marker: PhantomData<Option<&'scope scoped::ScopeData>>,
+}
+
+// Due to the usage of `UnsafeCell` we need to manually implement Sync.
+// The type `T` should already always be Send (otherwise the thread could not
+// have been created) and the Packet is Sync because all access to the
+// `UnsafeCell` is synchronized (by the `join()` boundary), and `ScopeData` is Sync.
+unsafe impl<'scope, T: Sync> Sync for Packet<'scope, T> {}
+
+impl<'scope, T> Drop for Packet<'scope, T> {
+ fn drop(&mut self) {
+ // If this packet was for a thread that ran in a scope, the thread
+ // panicked, and nobody consumed the panic payload, we make sure
+ // the scope function will panic.
+ let unhandled_panic = matches!(self.result.get_mut(), Some(Err(_)));
+ // Drop the result without causing unwinding.
+ // This is only relevant for threads that aren't join()ed, as
+ // join() will take the `result` and set it to None, such that
+ // there is nothing left to drop here.
+ // If this panics, we should handle that, because we're outside the
+ // outermost `catch_unwind` of our thread.
+ // We just abort in that case, since there's nothing else we can do.
+ // (And even if we tried to handle it somehow, we'd also need to handle
+ // the case where the panic payload we get out of it also panics on
+ // drop, and so on. See issue #86027.)
+ if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ *self.result.get_mut() = None;
+ })) {
+ rtabort!("thread result panicked on drop");
+ }
+ // Book-keeping so the scope knows when it's done.
+ if let Some(scope) = &self.scope {
+ // Now that there will be no more user code running on this thread
+ // that can use 'scope, mark the thread as 'finished'.
+ // It's important we only do this after the `result` has been dropped,
+ // since dropping it might still use things it borrowed from 'scope.
+ scope.decrement_num_running_threads(unhandled_panic);
+ }
+ }
+}
+
+/// Inner representation for JoinHandle
+struct JoinInner<'scope, T> {
+ native: imp::Thread,
+ thread: Thread,
+ packet: Arc<Packet<'scope, T>>,
+}
+
+impl<'scope, T> JoinInner<'scope, T> {
+ fn join(mut self) -> Result<T> {
+ self.native.join();
+ Arc::get_mut(&mut self.packet).unwrap().result.get_mut().take().unwrap()
+ }
+}
+
+/// An owned permission to join on a thread (block on its termination).
+///
+/// A `JoinHandle` *detaches* the associated thread when it is dropped, which
+/// means that there is no longer any handle to the thread and no way to `join`
+/// on it.
+///
+/// Due to platform restrictions, it is not possible to [`Clone`] this
+/// handle: the ability to join a thread is a uniquely-owned permission.
+///
+/// This `struct` is created by the [`thread::spawn`] function and the
+/// [`thread::Builder::spawn`] method.
+///
+/// # Examples
+///
+/// Creation from [`thread::spawn`]:
+///
+/// ```
+/// use std::thread;
+///
+/// let join_handle: thread::JoinHandle<_> = thread::spawn(|| {
+/// // some work here
+/// });
+/// ```
+///
+/// Creation from [`thread::Builder::spawn`]:
+///
+/// ```
+/// use std::thread;
+///
+/// let builder = thread::Builder::new();
+///
+/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
+/// // some work here
+/// }).unwrap();
+/// ```
+///
+/// A thread being detached and outliving the thread that spawned it:
+///
+/// ```no_run
+/// use std::thread;
+/// use std::time::Duration;
+///
+/// let original_thread = thread::spawn(|| {
+/// let _detached_thread = thread::spawn(|| {
+/// // Here we sleep to make sure that the first thread returns before this one finishes.
+/// thread::sleep(Duration::from_millis(10));
+/// // This will be called, even though the JoinHandle is dropped.
+/// println!("♫ Still alive ♫");
+/// });
+/// });
+///
+/// original_thread.join().expect("The thread being joined has panicked");
+/// println!("Original thread is joined.");
+///
+/// // We make sure that the new thread has time to run, before the main
+/// // thread returns.
+///
+/// thread::sleep(Duration::from_millis(1000));
+/// ```
+///
+/// [`thread::Builder::spawn`]: Builder::spawn
+/// [`thread::spawn`]: spawn
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct JoinHandle<T>(JoinInner<'static, T>);
+
+#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")]
+unsafe impl<T> Send for JoinHandle<T> {}
+#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")]
+unsafe impl<T> Sync for JoinHandle<T> {}
+
+impl<T> JoinHandle<T> {
+ /// Extracts a handle to the underlying thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new();
+ ///
+ /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
+ /// // some work here
+ /// }).unwrap();
+ ///
+ /// let thread = join_handle.thread();
+ /// println!("thread id: {:?}", thread.id());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn thread(&self) -> &Thread {
+ &self.0.thread
+ }
+
+ /// Waits for the associated thread to finish.
+ ///
+ /// This function will return immediately if the associated thread has already finished.
+ ///
+ /// In terms of [atomic memory orderings], the completion of the associated
+ /// thread synchronizes with this function returning. In other words, all
+ /// operations performed by that thread [happen
+ /// before](https://doc.rust-lang.org/nomicon/atomics.html#data-accesses) all
+ /// operations that happen after `join` returns.
+ ///
+ /// If the associated thread panics, [`Err`] is returned with the parameter given
+ /// to [`panic!`].
+ ///
+ /// [`Err`]: crate::result::Result::Err
+ /// [atomic memory orderings]: crate::sync::atomic
+ ///
+ /// # Panics
+ ///
+ /// This function may panic on some platforms if a thread attempts to join
+ /// itself or otherwise may create a deadlock with joining threads.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let builder = thread::Builder::new();
+ ///
+ /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
+ /// // some work here
+ /// }).unwrap();
+ /// join_handle.join().expect("Couldn't join on the associated thread");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn join(self) -> Result<T> {
+ self.0.join()
+ }
+
+ /// Checks if the associated thread has finished running its main function.
+ ///
+ /// `is_finished` supports implementing a non-blocking join operation, by checking
+ /// `is_finished`, and calling `join` if it returns `true`. This function does not block. To
+ /// block while waiting on the thread to finish, use [`join`][Self::join].
+ ///
+ /// This might return `true` for a brief moment after the thread's main
+ /// function has returned, but before the thread itself has stopped running.
+ /// However, once this returns `true`, [`join`][Self::join] can be expected
+ /// to return quickly, without blocking for any significant amount of time.
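+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of a polling loop built on `is_finished` (the sleep
+ /// durations here are arbitrary):
+ ///
+ /// ```
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let handle = thread::spawn(|| {
+ ///     // Simulate some work.
+ ///     thread::sleep(Duration::from_millis(100));
+ /// });
+ ///
+ /// // Poll until the thread's main function has returned ...
+ /// while !handle.is_finished() {
+ ///     thread::sleep(Duration::from_millis(10));
+ /// }
+ /// // ... then join without blocking for long.
+ /// handle.join().unwrap();
+ /// ```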
+ #[stable(feature = "thread_is_running", since = "1.61.0")]
+ pub fn is_finished(&self) -> bool {
+ Arc::strong_count(&self.0.packet) == 1
+ }
+}
+
+impl<T> AsInner<imp::Thread> for JoinHandle<T> {
+ fn as_inner(&self) -> &imp::Thread {
+ &self.0.native
+ }
+}
+
+impl<T> IntoInner<imp::Thread> for JoinHandle<T> {
+ fn into_inner(self) -> imp::Thread {
+ self.0.native
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T> fmt::Debug for JoinHandle<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("JoinHandle").finish_non_exhaustive()
+ }
+}
+
+fn _assert_sync_and_send() {
+ fn _assert_both<T: Send + Sync>() {}
+ _assert_both::<JoinHandle<()>>();
+ _assert_both::<Thread>();
+}
+
+/// Returns an estimate of the default amount of parallelism a program should use.
+///
+/// Parallelism is a resource. A given machine provides a certain capacity for
+/// parallelism, i.e., a bound on the number of computations it can perform
+/// simultaneously. This number often corresponds to the number of CPUs a
+/// computer has, but it may diverge in various cases.
+///
+/// Host environments such as VMs or container orchestrators may want to
+/// restrict the amount of parallelism made available to programs in them. This
+/// is often done to limit the potential impact of (unintentionally)
+/// resource-intensive programs on other programs running on the same machine.
+///
+/// # Limitations
+///
+/// The purpose of this API is to provide an easy and portable way to query
+/// the default amount of parallelism the program should use. Among other things it
+/// does not expose information on NUMA regions, does not account for
+/// differences in (co)processor capabilities or current system load,
+/// and will not modify the program's global state in order to more accurately
+/// query the amount of available parallelism.
+///
+/// Where both fixed steady-state and burst limits are available the steady-state
+/// capacity will be used to ensure more predictable latencies.
+///
+/// Resource limits can be changed during the runtime of a program, therefore the value is
+/// not cached and instead recomputed every time this function is called. It should not be
+/// called from hot code.
+///
+/// The value returned by this function should be considered a simplified
+/// approximation of the actual amount of parallelism available at any given
+/// time. To get a more detailed or precise overview of the amount of
+/// parallelism available to the program, you may wish to use
+/// platform-specific APIs as well. The following platform limitations currently
+/// apply to `available_parallelism`:
+///
+/// On Windows:
+/// - It may undercount the amount of parallelism available on systems with more
+/// than 64 logical CPUs. However, programs typically need specific support to
+/// take advantage of more than 64 logical CPUs, and in the absence of such
+/// support, the number returned by this function accurately reflects the
+/// number of logical CPUs the program can use by default.
+/// - It may overcount the amount of parallelism available on systems limited by
+/// process-wide affinity masks, or job object limitations.
+///
+/// On Linux:
+/// - It may overcount the amount of parallelism available when limited by a
+/// process-wide affinity mask or cgroup quotas and `sched_getaffinity()` or cgroup fs can't be
+/// queried, e.g. due to sandboxing.
+/// - It may undercount the amount of parallelism if the current thread's affinity mask
+/// does not reflect the process' cpuset, e.g. due to pinned threads.
+/// - If the process is in a cgroup v1 cpu controller, this may need to
+/// scan mountpoints to find the corresponding cgroup v1 controller,
+/// which may take time on systems with large numbers of mountpoints.
+/// (This does not apply to cgroup v2, or to processes not in a
+/// cgroup.)
+///
+/// On all targets:
+/// - It may overcount the amount of parallelism available when running in a VM
+/// with CPU usage limits (e.g. an overcommitted host).
+///
+/// # Errors
+///
+/// This function will return an error in cases including, but not limited to,
+/// the following:
+///
+/// - If the amount of parallelism is not known for the target platform.
+/// - If the program lacks permission to query the amount of parallelism made
+/// available to it.
+///
+/// # Examples
+///
+/// ```
+/// # #![allow(dead_code)]
+/// use std::{io, thread};
+///
+/// fn main() -> io::Result<()> {
+/// let count = thread::available_parallelism()?.get();
+/// assert!(count >= 1_usize);
+/// Ok(())
+/// }
+/// ```
+#[doc(alias = "available_concurrency")] // Alias for a previous name we gave this API on unstable.
+#[doc(alias = "hardware_concurrency")] // Alias for C++ `std::thread::hardware_concurrency`.
+#[doc(alias = "num_cpus")] // Alias for a popular ecosystem crate which provides similar functionality.
+#[stable(feature = "available_parallelism", since = "1.59.0")]
+pub fn available_parallelism() -> io::Result<NonZeroUsize> {
+ imp::available_parallelism()
+}
diff --git a/library/std/src/thread/scoped.rs b/library/std/src/thread/scoped.rs
new file mode 100644
index 000000000..e6dbf35bd
--- /dev/null
+++ b/library/std/src/thread/scoped.rs
@@ -0,0 +1,343 @@
+use super::{current, park, Builder, JoinInner, Result, Thread};
+use crate::fmt;
+use crate::io;
+use crate::marker::PhantomData;
+use crate::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use crate::sync::Arc;
+
+/// A scope to spawn scoped threads in.
+///
+/// See [`scope`] for details.
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+pub struct Scope<'scope, 'env: 'scope> {
+ data: Arc<ScopeData>,
+ /// Invariance over 'scope, to make sure 'scope cannot shrink,
+ /// which is necessary for soundness.
+ ///
+ /// Without invariance, this would compile fine but be unsound:
+ ///
+ /// ```compile_fail,E0373
+ /// std::thread::scope(|s| {
+ /// s.spawn(|| {
+ /// let a = String::from("abcd");
+ /// s.spawn(|| println!("{a:?}")); // might run after `a` is dropped
+ /// });
+ /// });
+ /// ```
+ scope: PhantomData<&'scope mut &'scope ()>,
+ env: PhantomData<&'env mut &'env ()>,
+}
+
+/// An owned permission to join on a scoped thread (block on its termination).
+///
+/// See [`Scope::spawn`] for details.
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+pub struct ScopedJoinHandle<'scope, T>(JoinInner<'scope, T>);
+
+pub(super) struct ScopeData {
+ num_running_threads: AtomicUsize,
+ a_thread_panicked: AtomicBool,
+ main_thread: Thread,
+}
+
+impl ScopeData {
+ pub(super) fn increment_num_running_threads(&self) {
+ // We check for 'overflow' with usize::MAX / 2, to make sure there's no
+ // chance it overflows to 0, which would result in unsoundness.
+ if self.num_running_threads.fetch_add(1, Ordering::Relaxed) > usize::MAX / 2 {
+ // This can only reasonably happen by mem::forget()'ing many many ScopedJoinHandles.
+ self.decrement_num_running_threads(false);
+ panic!("too many running threads in thread scope");
+ }
+ }
+ pub(super) fn decrement_num_running_threads(&self, panic: bool) {
+ if panic {
+ self.a_thread_panicked.store(true, Ordering::Relaxed);
+ }
+ if self.num_running_threads.fetch_sub(1, Ordering::Release) == 1 {
+ self.main_thread.unpark();
+ }
+ }
+}
+
+/// Create a scope for spawning scoped threads.
+///
+/// The function passed to `scope` will be provided a [`Scope`] object,
+/// through which scoped threads can be [spawned][`Scope::spawn`].
+///
+/// Unlike non-scoped threads, scoped threads can borrow non-`'static` data,
+/// as the scope guarantees all threads will be joined at the end of the scope.
+///
+/// All threads spawned within the scope that haven't been manually joined
+/// will be automatically joined before this function returns.
+///
+/// # Panics
+///
+/// If any of the automatically joined threads panicked, this function will panic.
+///
+/// If you want to handle panics from spawned threads,
+/// [`join`][ScopedJoinHandle::join] them before the end of the scope.
+///
+/// # Example
+///
+/// ```
+/// use std::thread;
+///
+/// let mut a = vec![1, 2, 3];
+/// let mut x = 0;
+///
+/// thread::scope(|s| {
+/// s.spawn(|| {
+/// println!("hello from the first scoped thread");
+/// // We can borrow `a` here.
+/// dbg!(&a);
+/// });
+/// s.spawn(|| {
+/// println!("hello from the second scoped thread");
+/// // We can even mutably borrow `x` here,
+/// // because no other threads are using it.
+/// x += a[0] + a[2];
+/// });
+/// println!("hello from the main thread");
+/// });
+///
+/// // After the scope, we can modify and access our variables again:
+/// a.push(4);
+/// assert_eq!(x, a.len());
+/// ```
+///
+/// # Lifetimes
+///
+/// Scoped threads involve two lifetimes: `'scope` and `'env`.
+///
+/// The `'scope` lifetime represents the lifetime of the scope itself.
+/// That is: the time during which new scoped threads may be spawned,
+/// and also the time during which they might still be running.
+/// Once this lifetime ends, all scoped threads are joined.
+/// This lifetime starts within the `scope` function, before `f` (the argument to `scope`) starts.
+/// It ends after `f` returns and all scoped threads have been joined, but before `scope` returns.
+///
+/// The `'env` lifetime represents the lifetime of whatever is borrowed by the scoped threads.
+/// This lifetime must outlast the call to `scope`, and thus cannot be smaller than `'scope`.
+/// It can be as small as the call to `scope`, meaning that anything that outlives this call,
+/// such as local variables defined right before the scope, can be borrowed by the scoped threads.
+///
+/// The `'env: 'scope` bound is part of the definition of the `Scope` type.
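+///
+/// A minimal sketch of these lifetimes in action, borrowing a local defined
+/// just before the scope:
+///
+/// ```
+/// use std::thread;
+///
+/// let greeting = String::from("hello"); // lives for 'env
+/// thread::scope(|s| {
+///     // The borrow of `greeting` lasts for 'scope, which ends before 'env does.
+///     s.spawn(|| println!("{greeting} from a scoped thread"));
+/// });
+/// ```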
+#[track_caller]
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+pub fn scope<'env, F, T>(f: F) -> T
+where
+ F: for<'scope> FnOnce(&'scope Scope<'scope, 'env>) -> T,
+{
+ // We put the `ScopeData` into an `Arc` so that other threads can finish their
+ // `decrement_num_running_threads` even after this function returns.
+ let scope = Scope {
+ data: Arc::new(ScopeData {
+ num_running_threads: AtomicUsize::new(0),
+ main_thread: current(),
+ a_thread_panicked: AtomicBool::new(false),
+ }),
+ env: PhantomData,
+ scope: PhantomData,
+ };
+
+ // Run `f`, but catch panics so we can make sure to wait for all the threads to join.
+ let result = catch_unwind(AssertUnwindSafe(|| f(&scope)));
+
+ // Wait until all the threads are finished.
+ while scope.data.num_running_threads.load(Ordering::Acquire) != 0 {
+ park();
+ }
+
+ // Throw any panic from `f`, or the return value of `f` if no thread panicked.
+ match result {
+ Err(e) => resume_unwind(e),
+ Ok(_) if scope.data.a_thread_panicked.load(Ordering::Relaxed) => {
+ panic!("a scoped thread panicked")
+ }
+ Ok(result) => result,
+ }
+}
+
+impl<'scope, 'env> Scope<'scope, 'env> {
+ /// Spawns a new thread within a scope, returning a [`ScopedJoinHandle`] for it.
+ ///
+ /// Unlike non-scoped threads, threads spawned with this function may
+ /// borrow non-`'static` data from outside the scope. See [`scope`] for
+ /// details.
+ ///
+ /// The join handle provides a [`join`] method that can be used to join the spawned
+ /// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing
+ /// the panic payload.
+ ///
+ /// If the join handle is dropped, the spawned thread will be implicitly joined at the
+ /// end of the scope. In that case, if the spawned thread panics, [`scope`] will
+ /// panic after all threads are joined.
+ ///
+ /// This call will create a thread using default parameters of [`Builder`].
+ /// If you want to specify the stack size or the name of the thread, use
+ /// [`Builder::spawn_scoped`] instead.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the OS fails to create a thread; use [`Builder::spawn_scoped`]
+ /// to recover from such errors.
+ ///
+ /// [`join`]: ScopedJoinHandle::join
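+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, borrowing non-`'static` data from outside the scope:
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let message = String::from("borrowed, not 'static");
+ /// thread::scope(|s| {
+ ///     let handle = s.spawn(|| message.len());
+ ///     assert_eq!(handle.join().unwrap(), message.len());
+ /// });
+ /// ```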
+ #[stable(feature = "scoped_threads", since = "1.63.0")]
+ pub fn spawn<F, T>(&'scope self, f: F) -> ScopedJoinHandle<'scope, T>
+ where
+ F: FnOnce() -> T + Send + 'scope,
+ T: Send + 'scope,
+ {
+ Builder::new().spawn_scoped(self, f).expect("failed to spawn thread")
+ }
+}
+
+impl Builder {
+ /// Spawns a new scoped thread using the settings set through this `Builder`.
+ ///
+ /// Unlike [`Scope::spawn`], this method yields an [`io::Result`] to
+ /// capture any failure to create the thread at the OS level.
+ ///
+ /// [`io::Result`]: crate::io::Result
+ ///
+ /// # Panics
+ ///
+ /// Panics if a thread name was set and it contained null bytes.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// let mut a = vec![1, 2, 3];
+ /// let mut x = 0;
+ ///
+ /// thread::scope(|s| {
+ /// thread::Builder::new()
+ /// .name("first".to_string())
+ /// .spawn_scoped(s, ||
+ /// {
+ /// println!("hello from the {:?} scoped thread", thread::current().name());
+ /// // We can borrow `a` here.
+ /// dbg!(&a);
+ /// })
+ /// .unwrap();
+ /// thread::Builder::new()
+ /// .name("second".to_string())
+ /// .spawn_scoped(s, ||
+ /// {
+ /// println!("hello from the {:?} scoped thread", thread::current().name());
+ /// // We can even mutably borrow `x` here,
+ /// // because no other threads are using it.
+ /// x += a[0] + a[2];
+ /// })
+ /// .unwrap();
+ /// println!("hello from the main thread");
+ /// });
+ ///
+ /// // After the scope, we can modify and access our variables again:
+ /// a.push(4);
+ /// assert_eq!(x, a.len());
+ /// ```
+ #[stable(feature = "scoped_threads", since = "1.63.0")]
+ pub fn spawn_scoped<'scope, 'env, F, T>(
+ self,
+ scope: &'scope Scope<'scope, 'env>,
+ f: F,
+ ) -> io::Result<ScopedJoinHandle<'scope, T>>
+ where
+ F: FnOnce() -> T + Send + 'scope,
+ T: Send + 'scope,
+ {
+ Ok(ScopedJoinHandle(unsafe { self.spawn_unchecked_(f, Some(scope.data.clone())) }?))
+ }
+}
+
+impl<'scope, T> ScopedJoinHandle<'scope, T> {
+ /// Extracts a handle to the underlying thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// thread::scope(|s| {
+ /// let t = s.spawn(|| {
+ /// println!("hello");
+ /// });
+ /// println!("thread id: {:?}", t.thread().id());
+ /// });
+ /// ```
+ #[must_use]
+ #[stable(feature = "scoped_threads", since = "1.63.0")]
+ pub fn thread(&self) -> &Thread {
+ &self.0.thread
+ }
+
+ /// Waits for the associated thread to finish.
+ ///
+ /// This function will return immediately if the associated thread has already finished.
+ ///
+ /// In terms of [atomic memory orderings], the completion of the associated
+ /// thread synchronizes with this function returning.
+ /// In other words, all operations performed by that thread
+ /// [happen before](https://doc.rust-lang.org/nomicon/atomics.html#data-accesses)
+ /// all operations that happen after `join` returns.
+ ///
+ /// If the associated thread panics, [`Err`] is returned with the panic payload.
+ ///
+ /// [atomic memory orderings]: crate::sync::atomic
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// thread::scope(|s| {
+ /// let t = s.spawn(|| {
+ /// panic!("oh no");
+ /// });
+ /// assert!(t.join().is_err());
+ /// });
+ /// ```
+ #[stable(feature = "scoped_threads", since = "1.63.0")]
+ pub fn join(self) -> Result<T> {
+ self.0.join()
+ }
+
+ /// Checks if the associated thread has finished running its main function.
+ ///
+ /// `is_finished` supports implementing a non-blocking join operation, by checking
+ /// `is_finished`, and calling `join` if it returns `true`. This function does not block. To
+ /// block while waiting on the thread to finish, use [`join`][Self::join].
+ ///
+ /// This might return `true` for a brief moment after the thread's main
+ /// function has returned, but before the thread itself has stopped running.
+ /// However, once this returns `true`, [`join`][Self::join] can be expected
+ /// to return quickly, without blocking for any significant amount of time.
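+ ///
+ /// A minimal sketch of a non-blocking join built on `is_finished`:
+ ///
+ /// ```
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// thread::scope(|s| {
+ ///     let t = s.spawn(|| thread::sleep(Duration::from_millis(10)));
+ ///     while !t.is_finished() {
+ ///         // Do other useful work instead of blocking.
+ ///         thread::sleep(Duration::from_millis(1));
+ ///     }
+ ///     // The thread has finished, so this join will not block for long.
+ ///     t.join().unwrap();
+ /// });
+ /// ```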
+ #[stable(feature = "scoped_threads", since = "1.63.0")]
+ pub fn is_finished(&self) -> bool {
+ Arc::strong_count(&self.0.packet) == 1
+ }
+}
+
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+impl fmt::Debug for Scope<'_, '_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Scope")
+ .field("num_running_threads", &self.data.num_running_threads.load(Ordering::Relaxed))
+ .field("a_thread_panicked", &self.data.a_thread_panicked.load(Ordering::Relaxed))
+ .field("main_thread", &self.data.main_thread)
+ .finish_non_exhaustive()
+ }
+}
+
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+impl<'scope, T> fmt::Debug for ScopedJoinHandle<'scope, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ScopedJoinHandle").finish_non_exhaustive()
+ }
+}
diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs
new file mode 100644
index 000000000..ec68b5291
--- /dev/null
+++ b/library/std/src/thread/tests.rs
@@ -0,0 +1,331 @@
+use super::Builder;
+use crate::any::Any;
+use crate::mem;
+use crate::panic::panic_any;
+use crate::result;
+use crate::sync::{
+ atomic::{AtomicBool, Ordering},
+ mpsc::{channel, Sender},
+ Arc, Barrier,
+};
+use crate::thread::{self, Scope, ThreadId};
+use crate::time::Duration;
+use crate::time::Instant;
+
+// !!! These tests are dangerous. If something is buggy, they will hang, !!!
+// !!! instead of exiting cleanly. This might wedge the buildbots. !!!
+
+#[test]
+fn test_unnamed_thread() {
+ thread::spawn(move || {
+ assert!(thread::current().name().is_none());
+ })
+ .join()
+ .ok()
+ .expect("thread panicked");
+}
+
+#[test]
+fn test_named_thread() {
+ Builder::new()
+ .name("ada lovelace".to_string())
+ .spawn(move || {
+ assert!(thread::current().name().unwrap() == "ada lovelace".to_string());
+ })
+ .unwrap()
+ .join()
+ .unwrap();
+}
+
+#[test]
+#[should_panic]
+fn test_invalid_named_thread() {
+ let _ = Builder::new().name("ada l\0velace".to_string()).spawn(|| {});
+}
+
+#[test]
+fn test_run_basic() {
+ let (tx, rx) = channel();
+ thread::spawn(move || {
+ tx.send(()).unwrap();
+ });
+ rx.recv().unwrap();
+}
+
+#[test]
+fn test_is_finished() {
+ let b = Arc::new(Barrier::new(2));
+ let t = thread::spawn({
+ let b = b.clone();
+ move || {
+ b.wait();
+ 1234
+ }
+ });
+
+ // Thread is definitely running here, since it's still waiting for the barrier.
+ assert_eq!(t.is_finished(), false);
+
+ // Unblock the barrier.
+ b.wait();
+
+ // Now check that t.is_finished() becomes true within a reasonable time.
+ let start = Instant::now();
+ while !t.is_finished() {
+ assert!(start.elapsed() < Duration::from_secs(2));
+ thread::sleep(Duration::from_millis(15));
+ }
+
+ // Joining the thread should not block for a significant time now.
+ let join_time = Instant::now();
+ assert_eq!(t.join().unwrap(), 1234);
+ assert!(join_time.elapsed() < Duration::from_secs(2));
+}
+
+#[test]
+fn test_join_panic() {
+ match thread::spawn(move || panic!()).join() {
+ result::Result::Err(_) => (),
+ result::Result::Ok(()) => panic!(),
+ }
+}
+
+#[test]
+fn test_spawn_sched() {
+ let (tx, rx) = channel();
+
+ fn f(i: i32, tx: Sender<()>) {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ if i == 0 {
+ tx.send(()).unwrap();
+ } else {
+ f(i - 1, tx);
+ }
+ });
+ }
+ f(10, tx);
+ rx.recv().unwrap();
+}
+
+#[test]
+fn test_spawn_sched_childs_on_default_sched() {
+ let (tx, rx) = channel();
+
+ thread::spawn(move || {
+ thread::spawn(move || {
+ tx.send(()).unwrap();
+ });
+ });
+
+ rx.recv().unwrap();
+}
+
+fn avoid_copying_the_body<F>(spawnfn: F)
+where
+ F: FnOnce(Box<dyn Fn() + Send>),
+{
+ let (tx, rx) = channel();
+
+ let x: Box<_> = Box::new(1);
+ let x_in_parent = (&*x) as *const i32 as usize;
+
+ spawnfn(Box::new(move || {
+ let x_in_child = (&*x) as *const i32 as usize;
+ tx.send(x_in_child).unwrap();
+ }));
+
+ let x_in_child = rx.recv().unwrap();
+ assert_eq!(x_in_parent, x_in_child);
+}
+
+#[test]
+fn test_avoid_copying_the_body_spawn() {
+ avoid_copying_the_body(|v| {
+ thread::spawn(move || v());
+ });
+}
+
+#[test]
+fn test_avoid_copying_the_body_thread_spawn() {
+ avoid_copying_the_body(|f| {
+ thread::spawn(move || {
+ f();
+ });
+ })
+}
+
+#[test]
+fn test_avoid_copying_the_body_join() {
+ avoid_copying_the_body(|f| {
+ let _ = thread::spawn(move || f()).join();
+ })
+}
+
+#[test]
+fn test_child_doesnt_ref_parent() {
+ // If the child refcounts the parent thread, this will stack overflow when
+ // climbing the thread tree to dereference each ancestor. (See #1789)
+ // (well, it would if the constant were 8000+ - I lowered it to be more
+ // valgrind-friendly. try this at home, instead..!)
+ const GENERATIONS: u32 = 16;
+ fn child_no(x: u32) -> Box<dyn Fn() + Send> {
+ return Box::new(move || {
+ if x < GENERATIONS {
+ thread::spawn(move || child_no(x + 1)());
+ }
+ });
+ }
+ thread::spawn(|| child_no(0)());
+}
+
+#[test]
+fn test_simple_newsched_spawn() {
+ thread::spawn(move || {});
+}
+
+#[test]
+fn test_try_panic_message_string_literal() {
+ match thread::spawn(move || {
+ panic!("static string");
+ })
+ .join()
+ {
+ Err(e) => {
+ type T = &'static str;
+ assert!(e.is::<T>());
+ assert_eq!(*e.downcast::<T>().unwrap(), "static string");
+ }
+ Ok(()) => panic!(),
+ }
+}
+
+#[test]
+fn test_try_panic_any_message_owned_str() {
+ match thread::spawn(move || {
+ panic_any("owned string".to_string());
+ })
+ .join()
+ {
+ Err(e) => {
+ type T = String;
+ assert!(e.is::<T>());
+ assert_eq!(*e.downcast::<T>().unwrap(), "owned string".to_string());
+ }
+ Ok(()) => panic!(),
+ }
+}
+
+#[test]
+fn test_try_panic_any_message_any() {
+ match thread::spawn(move || {
+ panic_any(Box::new(413u16) as Box<dyn Any + Send>);
+ })
+ .join()
+ {
+ Err(e) => {
+ type T = Box<dyn Any + Send>;
+ assert!(e.is::<T>());
+ let any = e.downcast::<T>().unwrap();
+ assert!(any.is::<u16>());
+ assert_eq!(*any.downcast::<u16>().unwrap(), 413);
+ }
+ Ok(()) => panic!(),
+ }
+}
+
+#[test]
+fn test_try_panic_any_message_unit_struct() {
+ struct Juju;
+
+ match thread::spawn(move || panic_any(Juju)).join() {
+ Err(ref e) if e.is::<Juju>() => {}
+ Err(_) | Ok(()) => panic!(),
+ }
+}
+
+#[test]
+fn test_park_timeout_unpark_before() {
+ for _ in 0..10 {
+ thread::current().unpark();
+ thread::park_timeout(Duration::from_millis(u32::MAX as u64));
+ }
+}
+
+#[test]
+fn test_park_timeout_unpark_not_called() {
+ for _ in 0..10 {
+ thread::park_timeout(Duration::from_millis(10));
+ }
+}
+
+#[test]
+fn test_park_timeout_unpark_called_other_thread() {
+ for _ in 0..10 {
+ let th = thread::current();
+
+ let _guard = thread::spawn(move || {
+ super::sleep(Duration::from_millis(50));
+ th.unpark();
+ });
+
+ thread::park_timeout(Duration::from_millis(u32::MAX as u64));
+ }
+}
+
+#[test]
+fn sleep_ms_smoke() {
+ thread::sleep(Duration::from_millis(2));
+}
+
+#[test]
+fn test_size_of_option_thread_id() {
+ assert_eq!(mem::size_of::<Option<ThreadId>>(), mem::size_of::<ThreadId>());
+}
+
+#[test]
+fn test_thread_id_equal() {
+ assert!(thread::current().id() == thread::current().id());
+}
+
+#[test]
+fn test_thread_id_not_equal() {
+ let spawned_id = thread::spawn(|| thread::current().id()).join().unwrap();
+ assert!(thread::current().id() != spawned_id);
+}
+
+#[test]
+fn test_scoped_threads_drop_result_before_join() {
+ let actually_finished = &AtomicBool::new(false);
+ struct X<'scope, 'env>(&'scope Scope<'scope, 'env>, &'env AtomicBool);
+ impl Drop for X<'_, '_> {
+ fn drop(&mut self) {
+ thread::sleep(Duration::from_millis(20));
+ let actually_finished = self.1;
+ self.0.spawn(move || {
+ thread::sleep(Duration::from_millis(20));
+ actually_finished.store(true, Ordering::Relaxed);
+ });
+ }
+ }
+ thread::scope(|s| {
+ s.spawn(move || {
+ thread::sleep(Duration::from_millis(20));
+ X(s, actually_finished)
+ });
+ });
+ assert!(actually_finished.load(Ordering::Relaxed));
+}
+
+#[test]
+fn test_scoped_threads_nll() {
+ // this is mostly a *compilation test* for this exact function:
+ fn foo(x: &u8) {
+ thread::scope(|s| {
+ s.spawn(|| drop(x));
+ });
+ }
+ // let's also run it for good measure
+ let x = 42_u8;
+ foo(&x);
+}
diff --git a/library/std/src/time.rs b/library/std/src/time.rs
new file mode 100644
index 000000000..759a59e1f
--- /dev/null
+++ b/library/std/src/time.rs
@@ -0,0 +1,694 @@
+//! Temporal quantification.
+//!
+//! # Examples:
+//!
+//! There are multiple ways to create a new [`Duration`]:
+//!
+//! ```
+//! # use std::time::Duration;
+//! let five_seconds = Duration::from_secs(5);
+//! assert_eq!(five_seconds, Duration::from_millis(5_000));
+//! assert_eq!(five_seconds, Duration::from_micros(5_000_000));
+//! assert_eq!(five_seconds, Duration::from_nanos(5_000_000_000));
+//!
+//! let ten_seconds = Duration::from_secs(10);
+//! let seven_nanos = Duration::from_nanos(7);
+//! let total = ten_seconds + seven_nanos;
+//! assert_eq!(total, Duration::new(10, 7));
+//! ```
+//!
+//! Using [`Instant`] to calculate how long a function took to run:
+//!
+//! ```ignore (incomplete)
+//! let now = Instant::now();
+//!
+//! // Calling a slow function, it may take a while
+//! slow_function();
+//!
+//! let elapsed_time = now.elapsed();
+//! println!("Running slow_function() took {} seconds.", elapsed_time.as_secs());
+//! ```
+
+#![stable(feature = "time", since = "1.3.0")]
+
+#[cfg(test)]
+mod tests;
+
+use crate::error::Error;
+use crate::fmt;
+use crate::ops::{Add, AddAssign, Sub, SubAssign};
+use crate::sys::time;
+use crate::sys_common::{FromInner, IntoInner};
+
+#[stable(feature = "time", since = "1.3.0")]
+pub use core::time::Duration;
+
+#[unstable(feature = "duration_checked_float", issue = "83400")]
+pub use core::time::FromFloatSecsError;
+
+/// A measurement of a monotonically nondecreasing clock.
+/// Opaque and useful only with [`Duration`].
+///
+/// Instants are always guaranteed, barring [platform bugs], to be no less than any previously
+/// measured instant when created, and are often useful for tasks such as measuring
+/// benchmarks or timing how long an operation takes.
+///
+/// Note, however, that instants are **not** guaranteed to be **steady**. In other
+/// words, each tick of the underlying clock might not be the same length (e.g.
+/// some seconds may be longer than others). An instant may jump forwards or
+/// experience time dilation (slow down or speed up), but it will never go
+/// backwards.
+///
+/// Instants are opaque types that can only be compared to one another. There is
+/// no method to get "the number of seconds" from an instant. Instead, it only
+/// allows measuring the duration between two instants (or comparing two
+/// instants).
+///
+/// The size of an `Instant` struct may vary depending on the target operating
+/// system.
+///
+/// Example:
+///
+/// ```no_run
+/// use std::time::{Duration, Instant};
+/// use std::thread::sleep;
+///
+/// fn main() {
+/// let now = Instant::now();
+///
+/// // we sleep for 2 seconds
+/// sleep(Duration::new(2, 0));
+/// // it prints '2'
+/// println!("{}", now.elapsed().as_secs());
+/// }
+/// ```
+///
+/// [platform bugs]: Instant#monotonicity
+///
+/// # OS-specific behaviors
+///
+/// An `Instant` is a wrapper around system-specific types and it may behave
+/// differently depending on the underlying operating system. For example,
+/// the following snippet is fine on Linux but panics on macOS:
+///
+/// ```no_run
+/// use std::time::{Instant, Duration};
+///
+/// let now = Instant::now();
+/// let max_seconds = u64::MAX / 1_000_000_000;
+/// let duration = Duration::new(max_seconds, 0);
+/// println!("{:?}", now + duration);
+/// ```
+///
+/// # Underlying System calls
+///
+/// The following system calls are [currently] being used by `now()` to find out
+/// the current time:
+///
+/// | Platform | System call |
+/// |-----------|----------------------------------------------------------------------|
+/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] |
+/// | UNIX | [clock_gettime (Monotonic Clock)] |
+/// | Darwin | [mach_absolute_time] |
+/// | VXWorks | [clock_gettime (Monotonic Clock)] |
+/// | SOLID | `get_tim` |
+/// | WASI | [__wasi_clock_time_get (Monotonic Clock)] |
+/// | Windows | [QueryPerformanceCounter] |
+///
+/// [currently]: crate::io#platform-specific-behavior
+/// [QueryPerformanceCounter]: https://docs.microsoft.com/en-us/windows/win32/api/profileapi/nf-profileapi-queryperformancecounter
+/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time
+/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
+/// [__wasi_clock_time_get (Monotonic Clock)]: https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/docs.md#clock_time_get
+/// [clock_gettime (Monotonic Clock)]: https://linux.die.net/man/3/clock_gettime
+/// [mach_absolute_time]: https://developer.apple.com/library/archive/documentation/Darwin/Conceptual/KernelProgramming/services/services.html
+///
+/// **Disclaimer:** These system calls might change over time.
+///
+/// > Note: mathematical operations like [`add`] may panic if the underlying
+/// > structure cannot represent the new point in time.
+///
+/// [`add`]: Instant::add
+///
+/// ## Monotonicity
+///
+/// On all platforms `Instant` will try to use an OS API that guarantees monotonic behavior
+/// if available, which is the case for all [tier 1] platforms.
+/// In practice such guarantees are – under rare circumstances – broken by hardware, virtualization
+/// or operating system bugs. To work around these bugs and platforms not offering monotonic clocks
+/// [`duration_since`], [`elapsed`] and [`sub`] saturate to zero. In older Rust versions this
+/// led to a panic instead. [`checked_duration_since`] can be used to detect and handle situations
+/// where monotonicity is violated, or `Instant`s are subtracted in the wrong order.
+///
+/// This workaround obscures programming errors where earlier and later instants are accidentally
+/// swapped. For this reason future Rust versions may reintroduce panics.
+///
+/// [tier 1]: https://doc.rust-lang.org/rustc/platform-support.html
+/// [`duration_since`]: Instant::duration_since
+/// [`elapsed`]: Instant::elapsed
+/// [`sub`]: Instant::sub
+/// [`checked_duration_since`]: Instant::checked_duration_since
+///
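+/// A minimal sketch of detecting such a violation (or accidentally swapped operands)
+/// with [`checked_duration_since`]:
+///
+/// ```
+/// use std::time::Instant;
+///
+/// let a = Instant::now();
+/// let b = Instant::now();
+/// match b.checked_duration_since(a) {
+///     Some(elapsed) => println!("elapsed: {elapsed:?}"),
+///     None => println!("monotonicity violated or operands swapped"),
+/// }
+/// ```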
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[stable(feature = "time2", since = "1.8.0")]
+pub struct Instant(time::Instant);
+
+/// A measurement of the system clock, useful for talking to
+/// external entities like the file system or other processes.
+///
+/// Distinct from the [`Instant`] type, this time measurement **is not
+/// monotonic**. This means that you can save a file to the file system, then
+/// save another file to the file system, **and the second file has a
+/// `SystemTime` measurement earlier than the first**. In other words, an
+/// operation that happens after another operation in real time may have an
+/// earlier `SystemTime`!
+///
+/// Consequently, comparing two `SystemTime` instances to learn about the
+/// duration between them returns a [`Result`] instead of an infallible [`Duration`]
+/// to indicate that this sort of time drift may happen and needs to be handled.
+///
+/// Although a `SystemTime` cannot be directly inspected, the [`UNIX_EPOCH`]
+/// constant is provided in this module as an anchor in time to learn
+/// information about a `SystemTime`. By calculating the duration from this
+/// fixed point in time, a `SystemTime` can be converted to a human-readable time,
+/// or perhaps some other string representation.
+///
+/// The size of a `SystemTime` struct may vary depending on the target operating
+/// system.
+///
+/// Example:
+///
+/// ```no_run
+/// use std::time::{Duration, SystemTime};
+/// use std::thread::sleep;
+///
+/// fn main() {
+/// let now = SystemTime::now();
+///
+/// // we sleep for 2 seconds
+/// sleep(Duration::new(2, 0));
+/// match now.elapsed() {
+/// Ok(elapsed) => {
+/// // it prints '2'
+/// println!("{}", elapsed.as_secs());
+/// }
+/// Err(e) => {
+/// // an error occurred!
+/// println!("Error: {e:?}");
+/// }
+/// }
+/// }
+/// ```
+///
+/// # Platform-specific behavior
+///
+/// The precision of `SystemTime` can depend on the underlying OS-specific time format.
+/// For example, on Windows the time is represented in 100 nanosecond intervals whereas Linux
+/// can represent nanosecond intervals.
+///
+/// The following system calls are [currently] being used by `now()` to find out
+/// the current time:
+///
+/// | Platform | System call |
+/// |-----------|----------------------------------------------------------------------|
+/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] |
+/// | UNIX | [clock_gettime (Realtime Clock)] |
+/// | Darwin | [gettimeofday] |
+/// | VXWorks | [clock_gettime (Realtime Clock)] |
+/// | SOLID | `SOLID_RTC_ReadTime` |
+/// | WASI | [__wasi_clock_time_get (Realtime Clock)] |
+/// | Windows | [GetSystemTimePreciseAsFileTime] / [GetSystemTimeAsFileTime] |
+///
+/// [currently]: crate::io#platform-specific-behavior
+/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time
+/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
+/// [gettimeofday]: https://man7.org/linux/man-pages/man2/gettimeofday.2.html
+/// [clock_gettime (Realtime Clock)]: https://linux.die.net/man/3/clock_gettime
+/// [__wasi_clock_time_get (Realtime Clock)]: https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/docs.md#clock_time_get
+/// [GetSystemTimePreciseAsFileTime]: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimepreciseasfiletime
+/// [GetSystemTimeAsFileTime]: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimeasfiletime
+///
+/// **Disclaimer:** These system calls might change over time.
+///
+/// > Note: mathematical operations like [`add`] may panic if the underlying
+/// > structure cannot represent the new point in time.
+///
+/// [`add`]: SystemTime::add
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[stable(feature = "time2", since = "1.8.0")]
+pub struct SystemTime(time::SystemTime);
+
+/// An error returned from the `duration_since` and `elapsed` methods on
+/// `SystemTime`, used to learn how far in the opposite direction a system time
+/// lies.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::thread::sleep;
+/// use std::time::{Duration, SystemTime};
+///
+/// let sys_time = SystemTime::now();
+/// sleep(Duration::from_secs(1));
+/// let new_sys_time = SystemTime::now();
+/// match sys_time.duration_since(new_sys_time) {
+/// Ok(_) => {}
+/// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
+/// }
+/// ```
+#[derive(Clone, Debug)]
+#[stable(feature = "time2", since = "1.8.0")]
+pub struct SystemTimeError(Duration);
+
+impl Instant {
+ /// Returns an instant corresponding to "now".
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Instant;
+ ///
+ /// let now = Instant::now();
+ /// ```
+ #[must_use]
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn now() -> Instant {
+ Instant(time::Instant::now())
+ }
+
+ /// Returns the amount of time elapsed from another instant to this one,
+ /// or zero duration if that instant is later than this one.
+ ///
+ /// # Panics
+ ///
+ /// Previous Rust versions panicked when `earlier` was later than `self`. Currently this
+ /// method saturates. Future versions may reintroduce the panic in some circumstances.
+ /// See [Monotonicity].
+ ///
+ /// [Monotonicity]: Instant#monotonicity
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::time::{Duration, Instant};
+ /// use std::thread::sleep;
+ ///
+ /// let now = Instant::now();
+ /// sleep(Duration::new(1, 0));
+ /// let new_now = Instant::now();
+ /// println!("{:?}", new_now.duration_since(now));
+ /// println!("{:?}", now.duration_since(new_now)); // 0ns
+ /// ```
+ #[must_use]
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn duration_since(&self, earlier: Instant) -> Duration {
+ self.checked_duration_since(earlier).unwrap_or_default()
+ }
+
+ /// Returns the amount of time elapsed from another instant to this one,
+ /// or None if that instant is later than this one.
+ ///
+ /// Due to [monotonicity bugs], even under correct logical ordering of the passed `Instant`s,
+ /// this method can return `None`.
+ ///
+ /// [monotonicity bugs]: Instant#monotonicity
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::time::{Duration, Instant};
+ /// use std::thread::sleep;
+ ///
+ /// let now = Instant::now();
+ /// sleep(Duration::new(1, 0));
+ /// let new_now = Instant::now();
+ /// println!("{:?}", new_now.checked_duration_since(now));
+ /// println!("{:?}", now.checked_duration_since(new_now)); // None
+ /// ```
+ #[must_use]
+ #[stable(feature = "checked_duration_since", since = "1.39.0")]
+ pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> {
+ self.0.checked_sub_instant(&earlier.0)
+ }
+
+ /// Returns the amount of time elapsed from another instant to this one,
+ /// or zero duration if that instant is later than this one.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::time::{Duration, Instant};
+ /// use std::thread::sleep;
+ ///
+ /// let now = Instant::now();
+ /// sleep(Duration::new(1, 0));
+ /// let new_now = Instant::now();
+ /// println!("{:?}", new_now.saturating_duration_since(now));
+ /// println!("{:?}", now.saturating_duration_since(new_now)); // 0ns
+ /// ```
+ #[must_use]
+ #[stable(feature = "checked_duration_since", since = "1.39.0")]
+ pub fn saturating_duration_since(&self, earlier: Instant) -> Duration {
+ self.checked_duration_since(earlier).unwrap_or_default()
+ }
+
+ /// Returns the amount of time elapsed since this instant was created.
+ ///
+ /// # Panics
+ ///
+ /// Previous Rust versions panicked when the current time was earlier than `self`. Currently this
+ /// method returns a Duration of zero in that case. Future versions may reintroduce the panic.
+ /// See [Monotonicity].
+ ///
+ /// [Monotonicity]: Instant#monotonicity
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::thread::sleep;
+ /// use std::time::{Duration, Instant};
+ ///
+ /// let instant = Instant::now();
+ /// let three_secs = Duration::from_secs(3);
+ /// sleep(three_secs);
+ /// assert!(instant.elapsed() >= three_secs);
+ /// ```
+ #[must_use]
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn elapsed(&self) -> Duration {
+ Instant::now() - *self
+ }
+
+ /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as
+ /// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
+ /// otherwise.
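+ ///
+ /// A minimal sketch of the non-panicking form:
+ ///
+ /// ```
+ /// use std::time::{Duration, Instant};
+ ///
+ /// let now = Instant::now();
+ /// // Adding one second to the current instant is expected to be representable.
+ /// assert!(now.checked_add(Duration::from_secs(1)).is_some());
+ /// ```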
+ #[stable(feature = "time_checked_add", since = "1.34.0")]
+ pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
+ self.0.checked_add_duration(&duration).map(Instant)
+ }
+
+ /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as
+ /// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
+ /// otherwise.
+ #[stable(feature = "time_checked_add", since = "1.34.0")]
+ pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
+ self.0.checked_sub_duration(&duration).map(Instant)
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Add<Duration> for Instant {
+ type Output = Instant;
+
+ /// # Panics
+ ///
+ /// This function may panic if the resulting point in time cannot be represented by the
+ /// underlying data structure. See [`Instant::checked_add`] for a version without panic.
+ fn add(self, other: Duration) -> Instant {
+ self.checked_add(other).expect("overflow when adding duration to instant")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign<Duration> for Instant {
+ fn add_assign(&mut self, other: Duration) {
+ *self = *self + other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Sub<Duration> for Instant {
+ type Output = Instant;
+
+ fn sub(self, other: Duration) -> Instant {
+ self.checked_sub(other).expect("overflow when subtracting duration from instant")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign<Duration> for Instant {
+ fn sub_assign(&mut self, other: Duration) {
+ *self = *self - other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Sub<Instant> for Instant {
+ type Output = Duration;
+
+ /// Returns the amount of time elapsed from another instant to this one,
+ /// or zero duration if that instant is later than this one.
+ ///
+ /// # Panics
+ ///
+ /// Previous Rust versions panicked when `other` was later than `self`. Currently this
+ /// method saturates. Future versions may reintroduce the panic in some circumstances.
+ /// See [Monotonicity].
+ ///
+ /// [Monotonicity]: Instant#monotonicity
+ fn sub(self, other: Instant) -> Duration {
+ self.duration_since(other)
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl fmt::Debug for Instant {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl SystemTime {
+ /// An anchor in time which can be used to create new `SystemTime` instances or
+ /// learn about where in time a `SystemTime` lies.
+ ///
+ /// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
+ /// respect to the system clock. Using `duration_since` on an existing
+ /// `SystemTime` instance can tell how far away from this point in time a
+ /// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
+ /// `SystemTime` instance to represent another fixed point in time.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::time::SystemTime;
+ ///
+ /// match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
+ /// Ok(n) => println!("1970-01-01 00:00:00 UTC was {} seconds ago!", n.as_secs()),
+ /// Err(_) => panic!("SystemTime before UNIX EPOCH!"),
+ /// }
+ /// ```
+ #[stable(feature = "assoc_unix_epoch", since = "1.28.0")]
+ pub const UNIX_EPOCH: SystemTime = UNIX_EPOCH;
+
+ /// Returns the system time corresponding to "now".
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::SystemTime;
+ ///
+ /// let sys_time = SystemTime::now();
+ /// ```
+ #[must_use]
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn now() -> SystemTime {
+ SystemTime(time::SystemTime::now())
+ }
+
+ /// Returns the amount of time elapsed from an earlier point in time.
+ ///
+ /// This function may fail because measurements taken earlier are not
+ /// guaranteed to always be before later measurements (due to anomalies such
+ /// as the system clock being adjusted either forwards or backwards).
+ /// [`Instant`] can be used to measure elapsed time without this risk of failure.
+ ///
+ /// If successful, <code>[Ok]\([Duration])</code> is returned where the duration represents
+ /// the amount of time elapsed from the specified measurement to this one.
+ ///
+ /// Returns an [`Err`] if `earlier` is later than `self`, and the error
+ /// contains how far from `self` the time is.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::time::SystemTime;
+ ///
+ /// let sys_time = SystemTime::now();
+ /// let new_sys_time = SystemTime::now();
+ /// let difference = new_sys_time.duration_since(sys_time)
+ /// .expect("Clock may have gone backwards");
+ /// println!("{difference:?}");
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn duration_since(&self, earlier: SystemTime) -> Result<Duration, SystemTimeError> {
+ self.0.sub_time(&earlier.0).map_err(SystemTimeError)
+ }
+
+ /// Returns the difference between the clock time when this
+ /// system time was created, and the current clock time.
+ ///
+ /// This function may fail as the underlying system clock is susceptible to
+ /// drift and updates (e.g., the system clock could go backwards), so this
+ /// function might not always succeed. If successful, <code>[Ok]\([Duration])</code> is
+ /// returned where the duration represents the amount of time elapsed from
+ /// this time measurement to the current time.
+ ///
+ /// To measure elapsed time reliably, use [`Instant`] instead.
+ ///
+ /// Returns an [`Err`] if `self` is later than the current system time, and
+ /// the error contains how far from the current system time `self` is.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::thread::sleep;
+ /// use std::time::{Duration, SystemTime};
+ ///
+ /// let sys_time = SystemTime::now();
+ /// let one_sec = Duration::from_secs(1);
+ /// sleep(one_sec);
+ /// assert!(sys_time.elapsed().unwrap() >= one_sec);
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn elapsed(&self) -> Result<Duration, SystemTimeError> {
+ SystemTime::now().duration_since(*self)
+ }
+
+ /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as
+ /// `SystemTime` (which means it's inside the bounds of the underlying data structure), `None`
+ /// otherwise.
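+ ///
+ /// A minimal sketch of the non-panicking form:
+ ///
+ /// ```
+ /// use std::time::{Duration, SystemTime};
+ ///
+ /// // One second past the epoch is representable, so this yields `Some`.
+ /// let t = SystemTime::UNIX_EPOCH.checked_add(Duration::from_secs(1));
+ /// assert!(t.is_some());
+ /// ```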
+ #[stable(feature = "time_checked_add", since = "1.34.0")]
+ pub fn checked_add(&self, duration: Duration) -> Option<SystemTime> {
+ self.0.checked_add_duration(&duration).map(SystemTime)
+ }
+
+ /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as
+ /// `SystemTime` (which means it's inside the bounds of the underlying data structure), `None`
+ /// otherwise.
+ #[stable(feature = "time_checked_add", since = "1.34.0")]
+ pub fn checked_sub(&self, duration: Duration) -> Option<SystemTime> {
+ self.0.checked_sub_duration(&duration).map(SystemTime)
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Add<Duration> for SystemTime {
+ type Output = SystemTime;
+
+ /// # Panics
+ ///
+ /// This function may panic if the resulting point in time cannot be represented by the
+ /// underlying data structure. See [`SystemTime::checked_add`] for a version without panic.
+ fn add(self, dur: Duration) -> SystemTime {
+ self.checked_add(dur).expect("overflow when adding duration to system time")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign<Duration> for SystemTime {
+ fn add_assign(&mut self, other: Duration) {
+ *self = *self + other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Sub<Duration> for SystemTime {
+ type Output = SystemTime;
+
+ fn sub(self, dur: Duration) -> SystemTime {
+ self.checked_sub(dur).expect("overflow when subtracting duration from system time")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign<Duration> for SystemTime {
+ fn sub_assign(&mut self, other: Duration) {
+ *self = *self - other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl fmt::Debug for SystemTime {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+/// An anchor in time which can be used to create new `SystemTime` instances or
+/// learn about where in time a `SystemTime` lies.
+///
+/// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
+/// respect to the system clock. Using `duration_since` on an existing
+/// [`SystemTime`] instance can tell how far away from this point in time a
+/// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
+/// [`SystemTime`] instance to represent another fixed point in time.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::time::{SystemTime, UNIX_EPOCH};
+///
+/// match SystemTime::now().duration_since(UNIX_EPOCH) {
+/// Ok(n) => println!("1970-01-01 00:00:00 UTC was {} seconds ago!", n.as_secs()),
+/// Err(_) => panic!("SystemTime before UNIX EPOCH!"),
+/// }
+/// ```
+#[stable(feature = "time2", since = "1.8.0")]
+pub const UNIX_EPOCH: SystemTime = SystemTime(time::UNIX_EPOCH);
+
+impl SystemTimeError {
+ /// Returns the positive duration which represents how far forward the
+ /// second system time was from the first.
+ ///
+ /// A `SystemTimeError` is returned from the [`SystemTime::duration_since`]
+ /// and [`SystemTime::elapsed`] methods whenever the second system time
+ /// represents a point later in time than the `self` of the method call.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::thread::sleep;
+ /// use std::time::{Duration, SystemTime};
+ ///
+ /// let sys_time = SystemTime::now();
+ /// sleep(Duration::from_secs(1));
+ /// let new_sys_time = SystemTime::now();
+ /// match sys_time.duration_since(new_sys_time) {
+ /// Ok(_) => {}
+ /// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn duration(&self) -> Duration {
+ self.0
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Error for SystemTimeError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "other time was not earlier than self"
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl fmt::Display for SystemTimeError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "second time provided was later than self")
+ }
+}
+
+impl FromInner<time::SystemTime> for SystemTime {
+ fn from_inner(time: time::SystemTime) -> SystemTime {
+ SystemTime(time)
+ }
+}
+
+impl IntoInner<time::SystemTime> for SystemTime {
+ fn into_inner(self) -> time::SystemTime {
+ self.0
+ }
+}
diff --git a/library/std/src/time/tests.rs b/library/std/src/time/tests.rs
new file mode 100644
index 000000000..d710a5744
--- /dev/null
+++ b/library/std/src/time/tests.rs
@@ -0,0 +1,236 @@
+use super::{Duration, Instant, SystemTime, UNIX_EPOCH};
+#[cfg(not(target_arch = "wasm32"))]
+use test::{black_box, Bencher};
+
+macro_rules! assert_almost_eq {
+ ($a:expr, $b:expr) => {{
+ let (a, b) = ($a, $b);
+ if a != b {
+ let (a, b) = if a > b { (a, b) } else { (b, a) };
+ assert!(a - Duration::from_micros(1) <= b, "{:?} is not almost equal to {:?}", a, b);
+ }
+ }};
+}
+
+#[test]
+fn instant_monotonic() {
+ let a = Instant::now();
+ loop {
+ let b = Instant::now();
+ assert!(b >= a);
+ if b > a {
+ break;
+ }
+ }
+}
+
+#[test]
+#[cfg(not(target_arch = "wasm32"))]
+fn instant_monotonic_concurrent() -> crate::thread::Result<()> {
+ let threads: Vec<_> = (0..8)
+ .map(|_| {
+ crate::thread::spawn(|| {
+ let mut old = Instant::now();
+ for _ in 0..5_000_000 {
+ let new = Instant::now();
+ assert!(new >= old);
+ old = new;
+ }
+ })
+ })
+ .collect();
+ for t in threads {
+ t.join()?;
+ }
+ Ok(())
+}
+
+#[test]
+fn instant_elapsed() {
+ let a = Instant::now();
+ let _ = a.elapsed();
+}
+
+#[test]
+fn instant_math() {
+ let a = Instant::now();
+ let b = Instant::now();
+ println!("a: {a:?}");
+ println!("b: {b:?}");
+ let dur = b.duration_since(a);
+ println!("dur: {dur:?}");
+ assert_almost_eq!(b - dur, a);
+ assert_almost_eq!(a + dur, b);
+
+ let second = Duration::SECOND;
+ assert_almost_eq!(a - second + second, a);
+ assert_almost_eq!(a.checked_sub(second).unwrap().checked_add(second).unwrap(), a);
+
+ // checked_add_duration will not panic on overflow
+ let mut maybe_t = Some(Instant::now());
+ let max_duration = Duration::from_secs(u64::MAX);
+ // in case `Instant` can store `>= now + max_duration`.
+ for _ in 0..2 {
+ maybe_t = maybe_t.and_then(|t| t.checked_add(max_duration));
+ }
+ assert_eq!(maybe_t, None);
+
+ // checked_add_duration calculates the right time and will work for another year
+ let year = Duration::from_secs(60 * 60 * 24 * 365);
+ assert_eq!(a + year, a.checked_add(year).unwrap());
+}
+
+#[test]
+fn instant_math_is_associative() {
+ let now = Instant::now();
+ let offset = Duration::from_millis(5);
+ // Changing the order of instant math shouldn't change the results,
+ // especially when the expression reduces to X + identity.
+ assert_eq!((now + offset) - now, (now - now) + offset);
+}
+
+#[test]
+fn instant_duration_since_saturates() {
+ let a = Instant::now();
+ assert_eq!((a - Duration::SECOND).duration_since(a), Duration::ZERO);
+}
+
+#[test]
+fn instant_checked_duration_since_nopanic() {
+ let now = Instant::now();
+ let earlier = now - Duration::SECOND;
+ let later = now + Duration::SECOND;
+ assert_eq!(earlier.checked_duration_since(now), None);
+ assert_eq!(later.checked_duration_since(now), Some(Duration::SECOND));
+ assert_eq!(now.checked_duration_since(now), Some(Duration::ZERO));
+}
+
+#[test]
+fn instant_saturating_duration_since_nopanic() {
+ let a = Instant::now();
+ #[allow(deprecated, deprecated_in_future)]
+ let ret = (a - Duration::SECOND).saturating_duration_since(a);
+ assert_eq!(ret, Duration::ZERO);
+}
+
+#[test]
+fn system_time_math() {
+ let a = SystemTime::now();
+ let b = SystemTime::now();
+ match b.duration_since(a) {
+ Ok(Duration::ZERO) => {
+ assert_almost_eq!(a, b);
+ }
+ Ok(dur) => {
+ assert!(b > a);
+ assert_almost_eq!(b - dur, a);
+ assert_almost_eq!(a + dur, b);
+ }
+ Err(dur) => {
+ let dur = dur.duration();
+ assert!(a > b);
+ assert_almost_eq!(b + dur, a);
+ assert_almost_eq!(a - dur, b);
+ }
+ }
+
+ let second = Duration::SECOND;
+ assert_almost_eq!(a.duration_since(a - second).unwrap(), second);
+ assert_almost_eq!(a.duration_since(a + second).unwrap_err().duration(), second);
+
+ assert_almost_eq!(a - second + second, a);
+ assert_almost_eq!(a.checked_sub(second).unwrap().checked_add(second).unwrap(), a);
+
+ let one_second_from_epoch = UNIX_EPOCH + Duration::SECOND;
+ let one_second_from_epoch2 =
+ UNIX_EPOCH + Duration::from_millis(500) + Duration::from_millis(500);
+ assert_eq!(one_second_from_epoch, one_second_from_epoch2);
+
+ // checked_add_duration will not panic on overflow
+ let mut maybe_t = Some(SystemTime::UNIX_EPOCH);
+ let max_duration = Duration::from_secs(u64::MAX);
+ // in case `SystemTime` can store `>= UNIX_EPOCH + max_duration`.
+ for _ in 0..2 {
+ maybe_t = maybe_t.and_then(|t| t.checked_add(max_duration));
+ }
+ assert_eq!(maybe_t, None);
+
+ // checked_add_duration calculates the right time and will work for another year
+ let year = Duration::from_secs(60 * 60 * 24 * 365);
+ assert_eq!(a + year, a.checked_add(year).unwrap());
+}
+
+#[test]
+fn system_time_elapsed() {
+ let a = SystemTime::now();
+ drop(a.elapsed());
+}
+
+#[test]
+fn since_epoch() {
+ let ts = SystemTime::now();
+ let a = ts.duration_since(UNIX_EPOCH + Duration::SECOND).unwrap();
+ let b = ts.duration_since(UNIX_EPOCH).unwrap();
+ assert!(b > a);
+ assert_eq!(b - a, Duration::SECOND);
+
+ let thirty_years = Duration::SECOND * 60 * 60 * 24 * 365 * 30;
+
+ // Right now for CI this test is run in an emulator, and apparently the
+ // aarch64 emulator's sense of time is that we're still living in the
+ // 70s. This is also true for riscv (also qemu)
+ //
+ // Otherwise let's assume that we're all running computers later than
+ // 2000.
+ if !cfg!(target_arch = "aarch64") && !cfg!(target_arch = "riscv64") {
+ assert!(a > thirty_years);
+ }
+
+ // let's assume that we're all running computers earlier than 2090.
+ // Should give us ~70 years to fix this!
+ let hundred_twenty_years = thirty_years * 4;
+ assert!(a < hundred_twenty_years);
+}
+
+macro_rules! bench_instant_threaded {
+ ($bench_name:ident, $thread_count:expr) => {
+ #[bench]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn $bench_name(b: &mut Bencher) -> crate::thread::Result<()> {
+ use crate::sync::atomic::{AtomicBool, Ordering};
+ use crate::sync::Arc;
+
+ let running = Arc::new(AtomicBool::new(true));
+
+ let threads: Vec<_> = (0..$thread_count)
+ .map(|_| {
+ let flag = Arc::clone(&running);
+ crate::thread::spawn(move || {
+ while flag.load(Ordering::Relaxed) {
+ black_box(Instant::now());
+ }
+ })
+ })
+ .collect();
+
+ b.iter(|| {
+ let a = Instant::now();
+ let b = Instant::now();
+ assert!(b >= a);
+ });
+
+ running.store(false, Ordering::Relaxed);
+
+ for t in threads {
+ t.join()?;
+ }
+ Ok(())
+ }
+ };
+}
+
+bench_instant_threaded!(instant_contention_01_threads, 0);
+bench_instant_threaded!(instant_contention_02_threads, 1);
+bench_instant_threaded!(instant_contention_04_threads, 3);
+bench_instant_threaded!(instant_contention_08_threads, 7);
+bench_instant_threaded!(instant_contention_16_threads, 15);
diff --git a/library/std/tests/env.rs b/library/std/tests/env.rs
new file mode 100644
index 000000000..b095c2dde
--- /dev/null
+++ b/library/std/tests/env.rs
@@ -0,0 +1,140 @@
+use std::env::*;
+use std::ffi::{OsStr, OsString};
+
+use rand::distributions::Alphanumeric;
+use rand::{thread_rng, Rng};
+
+fn make_rand_name() -> OsString {
+ let rng = thread_rng();
+ let n = format!("TEST{}", rng.sample_iter(&Alphanumeric).take(10).collect::<String>());
+ let n = OsString::from(n);
+ assert!(var_os(&n).is_none());
+ n
+}
+
+fn eq(a: Option<OsString>, b: Option<&str>) {
+ assert_eq!(a.as_ref().map(|s| &**s), b.map(OsStr::new).map(|s| &*s));
+}
+
+#[test]
+fn test_set_var() {
+ let n = make_rand_name();
+ set_var(&n, "VALUE");
+ eq(var_os(&n), Some("VALUE"));
+}
+
+#[test]
+fn test_remove_var() {
+ let n = make_rand_name();
+ set_var(&n, "VALUE");
+ remove_var(&n);
+ eq(var_os(&n), None);
+}
+
+#[test]
+fn test_set_var_overwrite() {
+ let n = make_rand_name();
+ set_var(&n, "1");
+ set_var(&n, "2");
+ eq(var_os(&n), Some("2"));
+ set_var(&n, "");
+ eq(var_os(&n), Some(""));
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_var_big() {
+ let mut s = "".to_string();
+ let mut i = 0;
+ while i < 100 {
+ s.push_str("aaaaaaaaaa");
+ i += 1;
+ }
+ let n = make_rand_name();
+ set_var(&n, &s);
+ eq(var_os(&n), Some(&s));
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_env_set_get_huge() {
+ let n = make_rand_name();
+ let s = "x".repeat(10000);
+ set_var(&n, &s);
+ eq(var_os(&n), Some(&s));
+ remove_var(&n);
+ eq(var_os(&n), None);
+}
+
+#[test]
+fn test_env_set_var() {
+ let n = make_rand_name();
+
+ let mut e = vars_os();
+ set_var(&n, "VALUE");
+ assert!(!e.any(|(k, v)| { &*k == &*n && &*v == "VALUE" }));
+
+ assert!(vars_os().any(|(k, v)| { &*k == &*n && &*v == "VALUE" }));
+}
+
+#[test]
+#[cfg_attr(not(any(unix, windows)), ignore, allow(unused))]
+#[allow(deprecated)]
+fn env_home_dir() {
+ use std::path::PathBuf;
+
+ fn var_to_os_string(var: Result<String, VarError>) -> Option<OsString> {
+ match var {
+ Ok(var) => Some(OsString::from(var)),
+ Err(VarError::NotUnicode(var)) => Some(var),
+ _ => None,
+ }
+ }
+
+ cfg_if::cfg_if! {
+ if #[cfg(unix)] {
+ let oldhome = var_to_os_string(var("HOME"));
+
+ set_var("HOME", "/home/MountainView");
+ assert_eq!(home_dir(), Some(PathBuf::from("/home/MountainView")));
+
+ remove_var("HOME");
+ if cfg!(target_os = "android") {
+ assert!(home_dir().is_none());
+ } else {
+ // When HOME is not set, some platforms return `None`,
+ // but others return `Some` with a default.
+ // Just check that it is not "/home/MountainView".
+ assert_ne!(home_dir(), Some(PathBuf::from("/home/MountainView")));
+ }
+
+ if let Some(oldhome) = oldhome { set_var("HOME", oldhome); }
+ } else if #[cfg(windows)] {
+ let oldhome = var_to_os_string(var("HOME"));
+ let olduserprofile = var_to_os_string(var("USERPROFILE"));
+
+ remove_var("HOME");
+ remove_var("USERPROFILE");
+
+ assert!(home_dir().is_some());
+
+ set_var("HOME", "/home/MountainView");
+ assert_eq!(home_dir(), Some(PathBuf::from("/home/MountainView")));
+
+ remove_var("HOME");
+
+ set_var("USERPROFILE", "/home/MountainView");
+ assert_eq!(home_dir(), Some(PathBuf::from("/home/MountainView")));
+
+ set_var("HOME", "/home/MountainView");
+ set_var("USERPROFILE", "/home/PaloAlto");
+ assert_eq!(home_dir(), Some(PathBuf::from("/home/MountainView")));
+
+ remove_var("HOME");
+ remove_var("USERPROFILE");
+
+ if let Some(oldhome) = oldhome { set_var("HOME", oldhome); }
+ if let Some(olduserprofile) = olduserprofile { set_var("USERPROFILE", olduserprofile); }
+ }
+ }
+}
diff --git a/library/std/tests/run-time-detect.rs b/library/std/tests/run-time-detect.rs
new file mode 100644
index 000000000..a57a52d9b
--- /dev/null
+++ b/library/std/tests/run-time-detect.rs
@@ -0,0 +1,153 @@
+//! These tests just check that the macros are available in libstd.
+
+#![cfg_attr(
+ any(
+ all(target_arch = "arm", any(target_os = "linux", target_os = "android")),
+ all(bootstrap, target_arch = "aarch64", any(target_os = "linux", target_os = "android")),
+ all(target_arch = "powerpc", target_os = "linux"),
+ all(target_arch = "powerpc64", target_os = "linux"),
+ ),
+ feature(stdsimd)
+)]
+
+#[test]
+#[cfg(all(target_arch = "arm", any(target_os = "linux", target_os = "android")))]
+fn arm_linux() {
+ use std::arch::is_arm_feature_detected;
+ println!("neon: {}", is_arm_feature_detected!("neon"));
+ println!("pmull: {}", is_arm_feature_detected!("pmull"));
+ println!("crypto: {}", is_arm_feature_detected!("crypto"));
+ println!("crc: {}", is_arm_feature_detected!("crc"));
+ println!("aes: {}", is_arm_feature_detected!("aes"));
+ println!("sha2: {}", is_arm_feature_detected!("sha2"));
+}
+
+#[test]
+#[cfg(all(target_arch = "aarch64", any(target_os = "linux", target_os = "android")))]
+fn aarch64_linux() {
+ use std::arch::is_aarch64_feature_detected;
+ println!("neon: {}", is_aarch64_feature_detected!("neon"));
+ println!("asimd: {}", is_aarch64_feature_detected!("asimd"));
+ println!("pmull: {}", is_aarch64_feature_detected!("pmull"));
+ println!("fp16: {}", is_aarch64_feature_detected!("fp16"));
+ println!("sve: {}", is_aarch64_feature_detected!("sve"));
+ println!("crc: {}", is_aarch64_feature_detected!("crc"));
+ println!("lse: {}", is_aarch64_feature_detected!("lse"));
+ println!("lse2: {}", is_aarch64_feature_detected!("lse2"));
+ println!("rdm: {}", is_aarch64_feature_detected!("rdm"));
+ println!("rcpc: {}", is_aarch64_feature_detected!("rcpc"));
+ println!("rcpc2: {}", is_aarch64_feature_detected!("rcpc2"));
+ println!("dotprod: {}", is_aarch64_feature_detected!("dotprod"));
+ println!("tme: {}", is_aarch64_feature_detected!("tme"));
+ println!("fhm: {}", is_aarch64_feature_detected!("fhm"));
+ println!("dit: {}", is_aarch64_feature_detected!("dit"));
+ println!("flagm: {}", is_aarch64_feature_detected!("flagm"));
+ println!("ssbs: {}", is_aarch64_feature_detected!("ssbs"));
+ println!("sb: {}", is_aarch64_feature_detected!("sb"));
+ println!("paca: {}", is_aarch64_feature_detected!("paca"));
+ println!("pacg: {}", is_aarch64_feature_detected!("pacg"));
+ println!("dpb: {}", is_aarch64_feature_detected!("dpb"));
+ println!("dpb2: {}", is_aarch64_feature_detected!("dpb2"));
+ println!("sve2: {}", is_aarch64_feature_detected!("sve2"));
+ println!("sve2-aes: {}", is_aarch64_feature_detected!("sve2-aes"));
+ println!("sve2-sm4: {}", is_aarch64_feature_detected!("sve2-sm4"));
+ println!("sve2-sha3: {}", is_aarch64_feature_detected!("sve2-sha3"));
+ println!("sve2-bitperm: {}", is_aarch64_feature_detected!("sve2-bitperm"));
+ println!("frintts: {}", is_aarch64_feature_detected!("frintts"));
+ println!("i8mm: {}", is_aarch64_feature_detected!("i8mm"));
+ println!("f32mm: {}", is_aarch64_feature_detected!("f32mm"));
+ println!("f64mm: {}", is_aarch64_feature_detected!("f64mm"));
+ println!("bf16: {}", is_aarch64_feature_detected!("bf16"));
+ println!("rand: {}", is_aarch64_feature_detected!("rand"));
+ println!("bti: {}", is_aarch64_feature_detected!("bti"));
+ println!("mte: {}", is_aarch64_feature_detected!("mte"));
+ println!("jsconv: {}", is_aarch64_feature_detected!("jsconv"));
+ println!("fcma: {}", is_aarch64_feature_detected!("fcma"));
+ println!("aes: {}", is_aarch64_feature_detected!("aes"));
+ println!("sha2: {}", is_aarch64_feature_detected!("sha2"));
+ println!("sha3: {}", is_aarch64_feature_detected!("sha3"));
+ println!("sm4: {}", is_aarch64_feature_detected!("sm4"));
+}
+
+#[test]
+#[cfg(all(target_arch = "powerpc", target_os = "linux"))]
+fn powerpc_linux() {
+ use std::arch::is_powerpc_feature_detected;
+ println!("altivec: {}", is_powerpc_feature_detected!("altivec"));
+ println!("vsx: {}", is_powerpc_feature_detected!("vsx"));
+ println!("power8: {}", is_powerpc_feature_detected!("power8"));
+}
+
+#[test]
+#[cfg(all(target_arch = "powerpc64", target_os = "linux"))]
+fn powerpc64_linux() {
+ use std::arch::is_powerpc64_feature_detected;
+ println!("altivec: {}", is_powerpc64_feature_detected!("altivec"));
+ println!("vsx: {}", is_powerpc64_feature_detected!("vsx"));
+ println!("power8: {}", is_powerpc64_feature_detected!("power8"));
+}
+
+#[test]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn x86_all() {
+ use std::arch::is_x86_feature_detected;
+
+ // the below is the set of features we can test at runtime, but don't actually
+ // use to gate anything and are thus not part of the X86_ALLOWED_FEATURES list
+
+ println!("abm: {:?}", is_x86_feature_detected!("abm")); // this is a synonym for lzcnt but we test it anyways
+ println!("mmx: {:?}", is_x86_feature_detected!("mmx"));
+ println!("tsc: {:?}", is_x86_feature_detected!("tsc"));
+
+ // the below is in alphabetical order and matches
+ // the order of X86_ALLOWED_FEATURES in rustc_codegen_ssa's target_features.rs
+
+ println!("adx: {:?}", is_x86_feature_detected!("adx"));
+ println!("aes: {:?}", is_x86_feature_detected!("aes"));
+ println!("avx: {:?}", is_x86_feature_detected!("avx"));
+ println!("avx2: {:?}", is_x86_feature_detected!("avx2"));
+ println!("avx512bf16: {:?}", is_x86_feature_detected!("avx512bf16"));
+ println!("avx512bitalg: {:?}", is_x86_feature_detected!("avx512bitalg"));
+ println!("avx512bw: {:?}", is_x86_feature_detected!("avx512bw"));
+ println!("avx512cd: {:?}", is_x86_feature_detected!("avx512cd"));
+ println!("avx512dq: {:?}", is_x86_feature_detected!("avx512dq"));
+ println!("avx512er: {:?}", is_x86_feature_detected!("avx512er"));
+ println!("avx512f: {:?}", is_x86_feature_detected!("avx512f"));
+ println!("avx512gfni: {:?}", is_x86_feature_detected!("avx512gfni"));
+ println!("avx512ifma: {:?}", is_x86_feature_detected!("avx512ifma"));
+ println!("avx512pf: {:?}", is_x86_feature_detected!("avx512pf"));
+ println!("avx512vaes: {:?}", is_x86_feature_detected!("avx512vaes"));
+ println!("avx512vbmi: {:?}", is_x86_feature_detected!("avx512vbmi"));
+ println!("avx512vbmi2: {:?}", is_x86_feature_detected!("avx512vbmi2"));
+ println!("avx512vl: {:?}", is_x86_feature_detected!("avx512vl"));
+ println!("avx512vnni: {:?}", is_x86_feature_detected!("avx512vnni"));
+ println!("avx512vp2intersect: {:?}", is_x86_feature_detected!("avx512vp2intersect"));
+ println!("avx512vpclmulqdq: {:?}", is_x86_feature_detected!("avx512vpclmulqdq"));
+ println!("avx512vpopcntdq: {:?}", is_x86_feature_detected!("avx512vpopcntdq"));
+ println!("bmi1: {:?}", is_x86_feature_detected!("bmi1"));
+ println!("bmi2: {:?}", is_x86_feature_detected!("bmi2"));
+ println!("cmpxchg16b: {:?}", is_x86_feature_detected!("cmpxchg16b"));
+ println!("f16c: {:?}", is_x86_feature_detected!("f16c"));
+ println!("fma: {:?}", is_x86_feature_detected!("fma"));
+ println!("fxsr: {:?}", is_x86_feature_detected!("fxsr"));
+ println!("lzcnt: {:?}", is_x86_feature_detected!("lzcnt"));
+ //println!("movbe: {:?}", is_x86_feature_detected!("movbe")); // movbe is unsupported as a target feature
+ println!("pclmulqdq: {:?}", is_x86_feature_detected!("pclmulqdq"));
+ println!("popcnt: {:?}", is_x86_feature_detected!("popcnt"));
+ println!("rdrand: {:?}", is_x86_feature_detected!("rdrand"));
+ println!("rdseed: {:?}", is_x86_feature_detected!("rdseed"));
+ println!("rtm: {:?}", is_x86_feature_detected!("rtm"));
+ println!("sha: {:?}", is_x86_feature_detected!("sha"));
+ println!("sse: {:?}", is_x86_feature_detected!("sse"));
+ println!("sse2: {:?}", is_x86_feature_detected!("sse2"));
+ println!("sse3: {:?}", is_x86_feature_detected!("sse3"));
+ println!("sse4.1: {:?}", is_x86_feature_detected!("sse4.1"));
+ println!("sse4.2: {:?}", is_x86_feature_detected!("sse4.2"));
+ println!("sse4a: {:?}", is_x86_feature_detected!("sse4a"));
+ println!("ssse3: {:?}", is_x86_feature_detected!("ssse3"));
+ println!("tbm: {:?}", is_x86_feature_detected!("tbm"));
+ println!("xsave: {:?}", is_x86_feature_detected!("xsave"));
+ println!("xsavec: {:?}", is_x86_feature_detected!("xsavec"));
+ println!("xsaveopt: {:?}", is_x86_feature_detected!("xsaveopt"));
+ println!("xsaves: {:?}", is_x86_feature_detected!("xsaves"));
+}
diff --git a/library/std/tests/thread.rs b/library/std/tests/thread.rs
new file mode 100644
index 000000000..754b264c6
--- /dev/null
+++ b/library/std/tests/thread.rs
@@ -0,0 +1,16 @@
+use std::sync::{Arc, Mutex};
+use std::thread;
+use std::time::Duration;
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn sleep() {
+ let finished = Arc::new(Mutex::new(false));
+ let t_finished = finished.clone();
+ thread::spawn(move || {
+ thread::sleep(Duration::new(u64::MAX, 0));
+ *t_finished.lock().unwrap() = true;
+ });
+ thread::sleep(Duration::from_millis(100));
+ assert_eq!(*finished.lock().unwrap(), false);
+}