summaryrefslogtreecommitdiffstats
path: root/library
diff options
context:
space:
mode:
Diffstat (limited to 'library')
-rw-r--r--library/alloc/src/alloc.rs22
-rw-r--r--library/alloc/src/boxed.rs48
-rw-r--r--library/alloc/src/collections/btree/map.rs62
-rw-r--r--library/alloc/src/collections/btree/set.rs13
-rw-r--r--library/alloc/src/collections/linked_list.rs24
-rw-r--r--library/alloc/src/collections/vec_deque/mod.rs46
-rw-r--r--library/alloc/src/collections/vec_deque/tests.rs4
-rw-r--r--library/alloc/src/lib.rs5
-rw-r--r--library/alloc/src/raw_vec.rs30
-rw-r--r--library/alloc/src/rc.rs899
-rw-r--r--library/alloc/src/str.rs8
-rw-r--r--library/alloc/src/string.rs98
-rw-r--r--library/alloc/src/sync.rs945
-rw-r--r--library/alloc/src/vec/mod.rs4
-rw-r--r--library/alloc/src/vec/spec_extend.rs4
-rw-r--r--library/alloc/tests/str.rs31
-rw-r--r--library/alloc/tests/string.rs48
-rw-r--r--library/alloc/tests/vec.rs65
-rw-r--r--library/core/benches/iter.rs1
-rw-r--r--library/core/src/any.rs590
-rw-r--r--library/core/src/array/mod.rs2
-rw-r--r--library/core/src/ascii/ascii_char.rs4
-rw-r--r--library/core/src/borrow.rs2
-rw-r--r--library/core/src/cell.rs3
-rw-r--r--library/core/src/cell/once.rs8
-rw-r--r--library/core/src/clone.rs40
-rw-r--r--library/core/src/cmp.rs16
-rw-r--r--library/core/src/default.rs45
-rw-r--r--library/core/src/error.md6
-rw-r--r--library/core/src/error.rs745
-rw-r--r--library/core/src/escape.rs4
-rw-r--r--library/core/src/ffi/c_str.rs35
-rw-r--r--library/core/src/ffi/mod.rs8
-rw-r--r--library/core/src/fmt/builders.rs6
-rw-r--r--library/core/src/fmt/mod.rs20
-rw-r--r--library/core/src/intrinsics.rs56
-rw-r--r--library/core/src/intrinsics/mir.rs12
-rw-r--r--library/core/src/iter/adapters/flatten.rs7
-rw-r--r--library/core/src/iter/adapters/map_windows.rs293
-rw-r--r--library/core/src/iter/adapters/mod.rs4
-rw-r--r--library/core/src/iter/mod.rs8
-rw-r--r--library/core/src/iter/traits/collect.rs6
-rw-r--r--library/core/src/iter/traits/double_ended.rs62
-rw-r--r--library/core/src/iter/traits/iterator.rs222
-rw-r--r--library/core/src/lib.rs11
-rw-r--r--library/core/src/macros/mod.rs1
-rw-r--r--library/core/src/marker.rs19
-rw-r--r--library/core/src/mem/transmutability.rs4
-rw-r--r--library/core/src/net/ip_addr.rs170
-rw-r--r--library/core/src/num/int_macros.rs1
-rw-r--r--library/core/src/num/uint_macros.rs13
-rw-r--r--library/core/src/option.rs36
-rw-r--r--library/core/src/panic/panic_info.rs10
-rw-r--r--library/core/src/panicking.rs14
-rw-r--r--library/core/src/ptr/const_ptr.rs17
-rw-r--r--library/core/src/ptr/metadata.rs3
-rw-r--r--library/core/src/ptr/mod.rs1
-rw-r--r--library/core/src/ptr/mut_ptr.rs17
-rw-r--r--library/core/src/ptr/non_null.rs29
-rw-r--r--library/core/src/ptr/unique.rs2
-rw-r--r--library/core/src/result.rs3
-rw-r--r--library/core/src/slice/cmp.rs21
-rw-r--r--library/core/src/slice/index.rs6
-rw-r--r--library/core/src/slice/iter.rs25
-rw-r--r--library/core/src/slice/iter/macros.rs146
-rw-r--r--library/core/src/slice/mod.rs4
-rw-r--r--library/core/src/str/iter.rs21
-rw-r--r--library/core/src/str/mod.rs20
-rw-r--r--library/core/src/str/pattern.rs4
-rw-r--r--library/core/src/str/traits.rs52
-rw-r--r--library/core/src/sync/atomic.rs147
-rw-r--r--library/core/src/tuple.rs2
-rw-r--r--library/core/tests/any.rs62
-rw-r--r--library/core/tests/error.rs66
-rw-r--r--library/core/tests/iter/adapters/map_windows.rs283
-rw-r--r--library/core/tests/iter/adapters/mod.rs1
-rw-r--r--library/core/tests/iter/traits/iterator.rs31
-rw-r--r--library/core/tests/lib.rs7
-rw-r--r--library/core/tests/manually_drop.rs2
-rw-r--r--library/core/tests/slice.rs28
-rw-r--r--library/panic_abort/src/lib.rs1
-rw-r--r--library/panic_unwind/src/lib.rs3
-rw-r--r--library/portable-simd/crates/core_simd/examples/dot_product.rs2
-rw-r--r--library/proc_macro/src/lib.rs48
-rw-r--r--library/profiler_builtins/build.rs6
-rw-r--r--library/profiler_builtins/src/lib.rs1
-rw-r--r--library/std/Cargo.toml18
-rw-r--r--library/std/src/alloc.rs1
-rw-r--r--library/std/src/backtrace.rs59
-rw-r--r--library/std/src/backtrace/tests.rs13
-rw-r--r--library/std/src/collections/hash/map.rs6
-rw-r--r--library/std/src/collections/hash/set.rs11
-rw-r--r--library/std/src/env.rs13
-rw-r--r--library/std/src/env/tests.rs20
-rw-r--r--library/std/src/error.rs16
-rw-r--r--library/std/src/error/tests.rs4
-rw-r--r--library/std/src/f32.rs57
-rw-r--r--library/std/src/f32/tests.rs32
-rw-r--r--library/std/src/f64.rs83
-rw-r--r--library/std/src/f64/tests.rs32
-rw-r--r--library/std/src/ffi/mod.rs2
-rw-r--r--library/std/src/ffi/os_str.rs73
-rw-r--r--library/std/src/fs.rs100
-rw-r--r--library/std/src/fs/tests.rs2
-rw-r--r--library/std/src/io/copy.rs81
-rw-r--r--library/std/src/io/copy/tests.rs38
-rw-r--r--library/std/src/io/readbuf.rs8
-rw-r--r--library/std/src/io/util.rs100
-rw-r--r--library/std/src/io/util/tests.rs13
-rw-r--r--library/std/src/lib.rs20
-rw-r--r--library/std/src/net/tcp.rs2
-rw-r--r--library/std/src/os/android/raw.rs107
-rw-r--r--library/std/src/os/l4re/raw.rs1
-rw-r--r--library/std/src/os/linux/raw.rs4
-rw-r--r--library/std/src/os/raw/mod.rs5
-rw-r--r--library/std/src/os/unix/fs.rs40
-rw-r--r--library/std/src/os/unix/net/stream.rs1
-rw-r--r--library/std/src/os/unix/net/tests.rs18
-rw-r--r--library/std/src/panicking.rs6
-rw-r--r--library/std/src/path.rs40
-rw-r--r--library/std/src/path/tests.rs50
-rw-r--r--library/std/src/process.rs19
-rw-r--r--library/std/src/sync/barrier.rs7
-rw-r--r--library/std/src/sync/condvar.rs18
-rw-r--r--library/std/src/sync/lazy_lock.rs37
-rw-r--r--library/std/src/sync/mpmc/utils.rs4
-rw-r--r--library/std/src/sync/mpsc/mod.rs5
-rw-r--r--library/std/src/sync/mutex.rs8
-rw-r--r--library/std/src/sync/once_lock.rs8
-rw-r--r--library/std/src/sync/rwlock.rs8
-rw-r--r--library/std/src/sys/common/alloc.rs3
-rw-r--r--library/std/src/sys/common/thread_local/fast_local.rs4
-rw-r--r--library/std/src/sys/common/thread_local/os_local.rs5
-rw-r--r--library/std/src/sys/common/thread_local/static_local.rs4
-rw-r--r--library/std/src/sys/hermit/fs.rs1
-rw-r--r--library/std/src/sys/hermit/os.rs28
-rw-r--r--library/std/src/sys/hermit/thread.rs3
-rw-r--r--library/std/src/sys/hermit/time.rs4
-rw-r--r--library/std/src/sys/mod.rs50
-rw-r--r--library/std/src/sys/personality/dwarf/eh.rs (renamed from library/std/src/personality/dwarf/eh.rs)0
-rw-r--r--library/std/src/sys/personality/dwarf/mod.rs (renamed from library/std/src/personality/dwarf/mod.rs)0
-rw-r--r--library/std/src/sys/personality/dwarf/tests.rs (renamed from library/std/src/personality/dwarf/tests.rs)0
-rw-r--r--library/std/src/sys/personality/emcc.rs (renamed from library/std/src/personality/emcc.rs)0
-rw-r--r--library/std/src/sys/personality/gcc.rs (renamed from library/std/src/personality/gcc.rs)10
-rw-r--r--library/std/src/sys/personality/mod.rs (renamed from library/std/src/personality.rs)0
-rw-r--r--library/std/src/sys/sgx/os.rs51
-rw-r--r--library/std/src/sys/sgx/thread.rs10
-rw-r--r--library/std/src/sys/solid/os.rs58
-rw-r--r--library/std/src/sys/unix/cmath.rs4
-rw-r--r--library/std/src/sys/unix/fs.rs28
-rw-r--r--library/std/src/sys/unix/kernel_copy.rs18
-rw-r--r--library/std/src/sys/unix/mod.rs14
-rw-r--r--library/std/src/sys/unix/os.rs49
-rw-r--r--library/std/src/sys/unix/os_str.rs10
-rw-r--r--library/std/src/sys/unix/process/process_fuchsia.rs2
-rw-r--r--library/std/src/sys/unix/process/process_unix.rs223
-rw-r--r--library/std/src/sys/unix/process/process_unix/tests.rs25
-rw-r--r--library/std/src/sys/unix/process/process_unsupported.rs2
-rw-r--r--library/std/src/sys/unix/process/process_vxworks.rs2
-rw-r--r--library/std/src/sys/unix/rand.rs22
-rw-r--r--library/std/src/sys/unix/stdio.rs2
-rw-r--r--library/std/src/sys/unsupported/os.rs18
-rw-r--r--library/std/src/sys/unsupported/process.rs37
-rw-r--r--library/std/src/sys/wasi/fd.rs10
-rw-r--r--library/std/src/sys/wasi/mod.rs22
-rw-r--r--library/std/src/sys/wasi/os.rs57
-rw-r--r--library/std/src/sys/wasi/thread.rs132
-rw-r--r--library/std/src/sys/windows/cmath.rs6
-rw-r--r--library/std/src/sys/windows/compat.rs5
-rw-r--r--library/std/src/sys/windows/os.rs62
-rw-r--r--library/std/src/sys/windows/os_str.rs10
-rw-r--r--library/std/src/sys/windows/process.rs2
-rw-r--r--library/std/src/sys/windows/thread_local_dtor.rs27
-rw-r--r--library/std/src/sys/windows/thread_local_key.rs46
-rw-r--r--library/std/src/sys/windows/thread_local_key/tests.rs4
-rw-r--r--library/std/src/sys_common/backtrace.rs19
-rw-r--r--library/std/src/sys_common/thread_info.rs1
-rw-r--r--library/std/src/sys_common/thread_parking/id.rs17
-rw-r--r--library/std/src/sys_common/wtf8.rs17
-rw-r--r--library/std/src/thread/local.rs27
-rw-r--r--library/std/tests/process_spawning.rs38
-rw-r--r--library/std/tests/switch-stdout.rs53
-rw-r--r--library/test/src/formatters/junit.rs2
-rw-r--r--library/test/src/lib.rs4
-rw-r--r--library/test/src/term/terminfo/searcher/tests.rs12
-rw-r--r--library/test/src/types.rs2
-rw-r--r--library/unwind/src/lib.rs1
-rw-r--r--library/unwind/src/libunwind.rs7
188 files changed, 6043 insertions, 2318 deletions
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index e24a0fe51..5205ed9fb 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -4,11 +4,7 @@
#[cfg(not(test))]
use core::intrinsics;
-#[cfg(all(bootstrap, not(test)))]
-use core::intrinsics::{min_align_of_val, size_of_val};
-#[cfg(all(bootstrap, not(test)))]
-use core::ptr::Unique;
#[cfg(not(test))]
use core::ptr::{self, NonNull};
@@ -337,23 +333,6 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
}
}
-#[cfg(all(bootstrap, not(test)))]
-#[lang = "box_free"]
-#[inline]
-// This signature has to be the same as `Box`, otherwise an ICE will happen.
-// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
-// well.
-// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
-// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
- unsafe {
- let size = size_of_val(ptr.as_ref());
- let align = min_align_of_val(ptr.as_ref());
- let layout = Layout::from_size_align_unchecked(size, align);
- alloc.deallocate(From::from(ptr.cast()), layout)
- }
-}
-
// # Allocation error handler
#[cfg(not(no_global_oom_handling))]
@@ -413,7 +392,6 @@ pub mod __alloc_error_handler {
static __rust_alloc_error_handler_should_panic: u8;
}
- #[allow(unused_unsafe)]
if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
panic!("memory allocation of {size} bytes failed")
} else {
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 8ef2bac92..96b93830f 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -157,12 +157,12 @@ use core::hash::{Hash, Hasher};
use core::iter::FusedIterator;
use core::marker::Tuple;
use core::marker::Unsize;
-use core::mem;
+use core::mem::{self, SizedTypeProperties};
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
};
use core::pin::Pin;
-use core::ptr::{self, Unique};
+use core::ptr::{self, NonNull, Unique};
use core::task::{Context, Poll};
#[cfg(not(no_global_oom_handling))]
@@ -479,8 +479,12 @@ impl<T, A: Allocator> Box<T, A> {
where
A: Allocator,
{
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- let ptr = alloc.allocate(layout)?.cast();
+ let ptr = if T::IS_ZST {
+ NonNull::dangling()
+ } else {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ alloc.allocate(layout)?.cast()
+ };
unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
}
@@ -549,8 +553,12 @@ impl<T, A: Allocator> Box<T, A> {
where
A: Allocator,
{
- let layout = Layout::new::<mem::MaybeUninit<T>>();
- let ptr = alloc.allocate_zeroed(layout)?.cast();
+ let ptr = if T::IS_ZST {
+ NonNull::dangling()
+ } else {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ alloc.allocate_zeroed(layout)?.cast()
+ };
unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
}
@@ -675,14 +683,16 @@ impl<T> Box<[T]> {
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
- unsafe {
+ let ptr = if T::IS_ZST || len == 0 {
+ NonNull::dangling()
+ } else {
let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
Ok(l) => l,
Err(_) => return Err(AllocError),
};
- let ptr = Global.allocate(layout)?;
- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
- }
+ Global.allocate(layout)?.cast()
+ };
+ unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
}
/// Constructs a new boxed slice with uninitialized contents, with the memory
@@ -707,14 +717,16 @@ impl<T> Box<[T]> {
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
- unsafe {
+ let ptr = if T::IS_ZST || len == 0 {
+ NonNull::dangling()
+ } else {
let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
Ok(l) => l,
Err(_) => return Err(AllocError),
};
- let ptr = Global.allocate_zeroed(layout)?;
- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
- }
+ Global.allocate_zeroed(layout)?.cast()
+ };
+ unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
}
}
@@ -1219,7 +1231,9 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
unsafe {
let layout = Layout::for_value_raw(ptr.as_ptr());
- self.1.deallocate(From::from(ptr.cast()), layout)
+ if layout.size() != 0 {
+ self.1.deallocate(From::from(ptr.cast()), layout);
+ }
}
}
}
@@ -2169,7 +2183,7 @@ impl dyn Error + Send {
let err: Box<dyn Error> = self;
<dyn Error>::downcast(err).map_err(|s| unsafe {
// Reapply the `Send` marker.
- mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
+ Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send))
})
}
}
@@ -2183,7 +2197,7 @@ impl dyn Error + Send + Sync {
let err: Box<dyn Error> = self;
<dyn Error>::downcast(err).map_err(|s| unsafe {
// Reapply the `Send + Sync` marker.
- mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
+ Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send + Sync))
})
}
}
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index ff908ec12..5481b327d 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -613,8 +613,6 @@ impl<K, V> BTreeMap<K, V> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -636,8 +634,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -661,8 +657,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// # #![feature(allocator_api)]
/// # #![feature(btreemap_alloc)]
@@ -688,8 +682,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -744,8 +736,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -830,8 +820,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -917,8 +905,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -943,8 +929,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -982,8 +966,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -1017,8 +999,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// #![feature(map_try_insert)]
///
@@ -1051,8 +1031,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -1078,8 +1056,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -1208,8 +1184,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
/// use std::ops::Bound::Included;
@@ -1251,8 +1225,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -1283,8 +1255,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -1336,8 +1306,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2388,8 +2356,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2420,8 +2386,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2453,8 +2417,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2474,8 +2436,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2495,8 +2455,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2521,8 +2479,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2546,8 +2502,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::collections::BTreeMap;
///
@@ -2578,8 +2532,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// #![feature(btree_cursors)]
///
@@ -2591,6 +2543,8 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// a.insert(2, "b");
/// a.insert(3, "c");
/// a.insert(4, "c");
+ /// let cursor = a.lower_bound(Bound::Included(&2));
+ /// assert_eq!(cursor.key(), Some(&2));
/// let cursor = a.lower_bound(Bound::Excluded(&2));
/// assert_eq!(cursor.key(), Some(&3));
/// ```
@@ -2619,8 +2573,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// #![feature(btree_cursors)]
///
@@ -2632,6 +2584,8 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// a.insert(2, "b");
/// a.insert(3, "c");
/// a.insert(4, "c");
+ /// let cursor = a.lower_bound_mut(Bound::Included(&2));
+ /// assert_eq!(cursor.key(), Some(&2));
/// let cursor = a.lower_bound_mut(Bound::Excluded(&2));
/// assert_eq!(cursor.key(), Some(&3));
/// ```
@@ -2673,8 +2627,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// #![feature(btree_cursors)]
///
@@ -2686,6 +2638,8 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// a.insert(2, "b");
/// a.insert(3, "c");
/// a.insert(4, "c");
+ /// let cursor = a.upper_bound(Bound::Included(&3));
+ /// assert_eq!(cursor.key(), Some(&3));
/// let cursor = a.upper_bound(Bound::Excluded(&3));
/// assert_eq!(cursor.key(), Some(&2));
/// ```
@@ -2714,8 +2668,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// #![feature(btree_cursors)]
///
@@ -2727,6 +2679,8 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// a.insert(2, "b");
/// a.insert(3, "c");
/// a.insert(4, "c");
+ /// let cursor = a.upper_bound_mut(Bound::Included(&3));
+ /// assert_eq!(cursor.key(), Some(&3));
/// let cursor = a.upper_bound_mut(Bound::Excluded(&3));
/// assert_eq!(cursor.key(), Some(&2));
/// ```
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
index c4461040b..9da230915 100644
--- a/library/alloc/src/collections/btree/set.rs
+++ b/library/alloc/src/collections/btree/set.rs
@@ -1121,19 +1121,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// ```
/// use std::collections::BTreeSet;
///
- /// let set = BTreeSet::from([1, 2, 3]);
- /// let mut set_iter = set.iter();
- /// assert_eq!(set_iter.next(), Some(&1));
- /// assert_eq!(set_iter.next(), Some(&2));
- /// assert_eq!(set_iter.next(), Some(&3));
- /// assert_eq!(set_iter.next(), None);
- /// ```
- ///
- /// Values returned by the iterator are returned in ascending order:
- ///
- /// ```
- /// use std::collections::BTreeSet;
- ///
/// let set = BTreeSet::from([3, 1, 2]);
/// let mut set_iter = set.iter();
/// assert_eq!(set_iter.next(), Some(&1));
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index 052edf453..2c26f9e03 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -18,7 +18,7 @@ use core::hash::{Hash, Hasher};
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
-use core::ptr::{NonNull, Unique};
+use core::ptr::NonNull;
use super::SpecExtend;
use crate::alloc::{Allocator, Global};
@@ -168,15 +168,16 @@ impl<T, A: Allocator> LinkedList<T, A> {
/// Adds the given node to the front of the list.
///
/// # Safety
- /// `node` must point to a valid node that was boxed using the list's allocator.
+ /// `node` must point to a valid node that was boxed and leaked using the list's allocator.
+ /// This method takes ownership of the node, so the pointer should not be used again.
#[inline]
- unsafe fn push_front_node(&mut self, node: Unique<Node<T>>) {
+ unsafe fn push_front_node(&mut self, node: NonNull<Node<T>>) {
// This method takes care not to create mutable references to whole nodes,
// to maintain validity of aliasing pointers into `element`.
unsafe {
(*node.as_ptr()).next = self.head;
(*node.as_ptr()).prev = None;
- let node = Some(NonNull::from(node));
+ let node = Some(node);
match self.head {
None => self.tail = node,
@@ -212,15 +213,16 @@ impl<T, A: Allocator> LinkedList<T, A> {
/// Adds the given node to the back of the list.
///
/// # Safety
- /// `node` must point to a valid node that was boxed using the list's allocator.
+ /// `node` must point to a valid node that was boxed and leaked using the list's allocator.
+ /// This method takes ownership of the node, so the pointer should not be used again.
#[inline]
- unsafe fn push_back_node(&mut self, node: Unique<Node<T>>) {
+ unsafe fn push_back_node(&mut self, node: NonNull<Node<T>>) {
// This method takes care not to create mutable references to whole nodes,
// to maintain validity of aliasing pointers into `element`.
unsafe {
(*node.as_ptr()).next = None;
(*node.as_ptr()).prev = self.tail;
- let node = Some(NonNull::from(node));
+ let node = Some(node);
match self.tail {
None => self.head = node,
@@ -842,8 +844,8 @@ impl<T, A: Allocator> LinkedList<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, elt: T) {
let node = Box::new_in(Node::new(elt), &self.alloc);
- let node_ptr = Unique::from(Box::leak(node));
- // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc
+ let node_ptr = NonNull::from(Box::leak(node));
+ // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked
unsafe {
self.push_front_node(node_ptr);
}
@@ -890,8 +892,8 @@ impl<T, A: Allocator> LinkedList<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, elt: T) {
let node = Box::new_in(Node::new(elt), &self.alloc);
- let node_ptr = Unique::from(Box::leak(node));
- // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc
+ let node_ptr = NonNull::from(Box::leak(node));
+ // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked
unsafe {
self.push_back_node(node_ptr);
}
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 896da37f9..5965ec2af 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -2283,21 +2283,21 @@ impl<T, A: Allocator> VecDeque<T, A> {
unsafe { slice::from_raw_parts_mut(ptr.add(self.head), self.len) }
}
- /// Rotates the double-ended queue `mid` places to the left.
+ /// Rotates the double-ended queue `n` places to the left.
///
/// Equivalently,
- /// - Rotates item `mid` into the first position.
- /// - Pops the first `mid` items and pushes them to the end.
- /// - Rotates `len() - mid` places to the right.
+ /// - Rotates item `n` into the first position.
+ /// - Pops the first `n` items and pushes them to the end.
+ /// - Rotates `len() - n` places to the right.
///
/// # Panics
///
- /// If `mid` is greater than `len()`. Note that `mid == len()`
+ /// If `n` is greater than `len()`. Note that `n == len()`
/// does _not_ panic and is a no-op rotation.
///
/// # Complexity
///
- /// Takes `*O*(min(mid, len() - mid))` time and no extra space.
+ /// Takes `*O*(min(n, len() - n))` time and no extra space.
///
/// # Examples
///
@@ -2316,31 +2316,31 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// ```
#[stable(feature = "vecdeque_rotate", since = "1.36.0")]
- pub fn rotate_left(&mut self, mid: usize) {
- assert!(mid <= self.len());
- let k = self.len - mid;
- if mid <= k {
- unsafe { self.rotate_left_inner(mid) }
+ pub fn rotate_left(&mut self, n: usize) {
+ assert!(n <= self.len());
+ let k = self.len - n;
+ if n <= k {
+ unsafe { self.rotate_left_inner(n) }
} else {
unsafe { self.rotate_right_inner(k) }
}
}
- /// Rotates the double-ended queue `k` places to the right.
+ /// Rotates the double-ended queue `n` places to the right.
///
/// Equivalently,
- /// - Rotates the first item into position `k`.
- /// - Pops the last `k` items and pushes them to the front.
- /// - Rotates `len() - k` places to the left.
+ /// - Rotates the first item into position `n`.
+ /// - Pops the last `n` items and pushes them to the front.
+ /// - Rotates `len() - n` places to the left.
///
/// # Panics
///
- /// If `k` is greater than `len()`. Note that `k == len()`
+ /// If `n` is greater than `len()`. Note that `n == len()`
/// does _not_ panic and is a no-op rotation.
///
/// # Complexity
///
- /// Takes `*O*(min(k, len() - k))` time and no extra space.
+ /// Takes `*O*(min(n, len() - n))` time and no extra space.
///
/// # Examples
///
@@ -2359,13 +2359,13 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// ```
#[stable(feature = "vecdeque_rotate", since = "1.36.0")]
- pub fn rotate_right(&mut self, k: usize) {
- assert!(k <= self.len());
- let mid = self.len - k;
- if k <= mid {
- unsafe { self.rotate_right_inner(k) }
+ pub fn rotate_right(&mut self, n: usize) {
+ assert!(n <= self.len());
+ let k = self.len - n;
+ if n <= k {
+ unsafe { self.rotate_right_inner(n) }
} else {
- unsafe { self.rotate_left_inner(mid) }
+ unsafe { self.rotate_left_inner(k) }
}
}
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
index 205a8ff3c..b7fdebfa6 100644
--- a/library/alloc/src/collections/vec_deque/tests.rs
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -351,14 +351,14 @@ fn test_rotate_left_right() {
}
#[test]
-#[should_panic = "assertion failed: mid <= self.len()"]
+#[should_panic = "assertion failed: n <= self.len()"]
fn test_rotate_left_panic() {
let mut tester: VecDeque<_> = (1..=10).collect();
tester.rotate_left(tester.len() + 1);
}
#[test]
-#[should_panic = "assertion failed: k <= self.len()"]
+#[should_panic = "assertion failed: n <= self.len()"]
fn test_rotate_right_panic() {
let mut tester: VecDeque<_> = (1..=10).collect();
tester.rotate_right(tester.len() + 1);
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 967ad3a0e..ffe6d6373 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -58,7 +58,7 @@
// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no affect there.
+// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
//
#![allow(unused_attributes)]
@@ -88,6 +88,8 @@
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
#![warn(multiple_supertrait_upcastable)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
//
// Library features:
// tidy-alphabetical-start
@@ -137,7 +139,6 @@
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
-#![feature(provide_any)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index dfd30d99c..01b03de6a 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -432,16 +432,26 @@ impl<T, A: Allocator> RawVec<T, A> {
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
// See current_memory() why this assert is here
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
- let ptr = unsafe {
- // `Layout::array` cannot overflow here because it would have
- // overflowed earlier when capacity was larger.
- let new_size = mem::size_of::<T>().unchecked_mul(cap);
- let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
- self.alloc
- .shrink(ptr, layout, new_layout)
- .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
- };
- self.set_ptr_and_cap(ptr, cap);
+
+ // If shrinking to 0, deallocate the buffer. We don't reach this point
+ // for the T::IS_ZST case since current_memory() will have returned
+ // None.
+ if cap == 0 {
+ unsafe { self.alloc.deallocate(ptr, layout) };
+ self.ptr = Unique::dangling();
+ self.cap = 0;
+ } else {
+ let ptr = unsafe {
+ // `Layout::array` cannot overflow here because it would have
+ // overflowed earlier when capacity was larger.
+ let new_size = mem::size_of::<T>().unchecked_mul(cap);
+ let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+ self.alloc
+ .shrink(ptr, layout, new_layout)
+ .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+ };
+ self.set_ptr_and_cap(ptr, cap);
+ }
Ok(())
}
}
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index b3305b8ca..c485680f9 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -313,13 +313,17 @@ fn rcbox_layout_for_value_layout(layout: Layout) -> Layout {
#[cfg_attr(not(test), rustc_diagnostic_item = "Rc")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
-pub struct Rc<T: ?Sized> {
+pub struct Rc<
+ T: ?Sized,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
ptr: NonNull<RcBox<T>>,
phantom: PhantomData<RcBox<T>>,
+ alloc: A,
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> !Send for Rc<T> {}
+impl<T: ?Sized, A: Allocator> !Send for Rc<T, A> {}
// Note that this negative impl isn't strictly necessary for correctness,
// as `Rc` transitively contains a `Cell`, which is itself `!Sync`.
@@ -327,20 +331,32 @@ impl<T: ?Sized> !Send for Rc<T> {}
// having an explicit negative impl is nice for documentation purposes
// and results in nicer error messages.
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> !Sync for Rc<T> {}
+impl<T: ?Sized, A: Allocator> !Sync for Rc<T, A> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
-impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
+impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Rc<T, A> {}
#[stable(feature = "rc_ref_unwind_safe", since = "1.58.0")]
-impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for Rc<T> {}
+impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> RefUnwindSafe for Rc<T, A> {}
#[unstable(feature = "coerce_unsized", issue = "18598")]
-impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Rc<U, A>> for Rc<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T> {}
impl<T: ?Sized> Rc<T> {
+ #[inline]
+ unsafe fn from_inner(ptr: NonNull<RcBox<T>>) -> Self {
+ unsafe { Self::from_inner_in(ptr, Global) }
+ }
+
+ #[inline]
+ unsafe fn from_ptr(ptr: *mut RcBox<T>) -> Self {
+ unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Rc<T, A> {
#[inline(always)]
fn inner(&self) -> &RcBox<T> {
// This unsafety is ok because while this Rc is alive we're guaranteed
@@ -348,12 +364,14 @@ impl<T: ?Sized> Rc<T> {
unsafe { self.ptr.as_ref() }
}
- unsafe fn from_inner(ptr: NonNull<RcBox<T>>) -> Self {
- Self { ptr, phantom: PhantomData }
+ #[inline]
+ unsafe fn from_inner_in(ptr: NonNull<RcBox<T>>, alloc: A) -> Self {
+ Self { ptr, phantom: PhantomData, alloc }
}
- unsafe fn from_ptr(ptr: *mut RcBox<T>) -> Self {
- unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ #[inline]
+ unsafe fn from_ptr_in(ptr: *mut RcBox<T>, alloc: A) -> Self {
+ unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
}
}
@@ -450,7 +468,7 @@ impl<T> Rc<T> {
let init_ptr: NonNull<RcBox<T>> = uninit_ptr.cast();
- let weak = Weak { ptr: init_ptr };
+ let weak = Weak { ptr: init_ptr, alloc: Global };
// It's important we don't give up ownership of the weak pointer, or
// else the memory might be freed by the time `data_fn` returns. If
@@ -504,7 +522,7 @@ impl<T> Rc<T> {
Rc::from_ptr(Rc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate(layout),
- |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
))
}
}
@@ -537,7 +555,7 @@ impl<T> Rc<T> {
Rc::from_ptr(Rc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate_zeroed(layout),
- |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
))
}
}
@@ -594,7 +612,7 @@ impl<T> Rc<T> {
Ok(Rc::from_ptr(Rc::try_allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate(layout),
- |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
)?))
}
}
@@ -627,7 +645,7 @@ impl<T> Rc<T> {
Ok(Rc::from_ptr(Rc::try_allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate_zeroed(layout),
- |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
)?))
}
}
@@ -639,6 +657,235 @@ impl<T> Rc<T> {
pub fn pin(value: T) -> Pin<Rc<T>> {
unsafe { Pin::new_unchecked(Rc::new(value)) }
}
+}
+
+impl<T, A: Allocator> Rc<T, A> {
+ /// Returns a reference to the underlying allocator.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Rc::allocator(&r)` instead of `r.allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn allocator(this: &Self) -> &A {
+ &this.alloc
+ }
+ /// Constructs a new `Rc` in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let five = Rc::new_in(5, System);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn new_in(value: T, alloc: A) -> Rc<T, A> {
+ // NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
+ // That would make code size bigger.
+ match Self::try_new_in(value, alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(Layout::new::<RcBox<T>>()),
+ }
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Rc::<u32, _>::new_uninit_in(System);
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_uninit_in(alloc: A) -> Rc<mem::MaybeUninit<T>, A> {
+ unsafe {
+ Rc::from_ptr_in(
+ Rc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate(layout),
+ <*mut u8>::cast,
+ ),
+ alloc,
+ )
+ }
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, in the provided allocator.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let zero = Rc::<u32, _>::new_zeroed_in(System);
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_zeroed_in(alloc: A) -> Rc<mem::MaybeUninit<T>, A> {
+ unsafe {
+ Rc::from_ptr_in(
+ Rc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate_zeroed(layout),
+ <*mut u8>::cast,
+ ),
+ alloc,
+ )
+ }
+ }
+
+ /// Constructs a new `Rc<T>` in the provided allocator, returning an error if the allocation
+ /// fails
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let five = Rc::try_new_in(5, System);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_in(value: T, alloc: A) -> Result<Self, AllocError> {
+ // There is an implicit weak pointer owned by all the strong
+ // pointers, which ensures that the weak destructor never frees
+ // the allocation while the strong destructor is running, even
+ // if the weak pointer is stored inside the strong one.
+ let (ptr, alloc) = Box::into_unique(Box::try_new_in(
+ RcBox { strong: Cell::new(1), weak: Cell::new(1), value },
+ alloc,
+ )?);
+ Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, in the provided allocator, returning an
+ /// error if the allocation fails
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Rc::<u32, _>::try_new_uninit_in(System)?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_uninit_in(alloc: A) -> Result<Rc<mem::MaybeUninit<T>, A>, AllocError> {
+ unsafe {
+ Ok(Rc::from_ptr_in(
+ Rc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate(layout),
+ <*mut u8>::cast,
+ )?,
+ alloc,
+ ))
+ }
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, in the provided allocator, returning an error if the allocation
+ /// fails
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let zero = Rc::<u32, _>::try_new_zeroed_in(System)?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ //#[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Rc<mem::MaybeUninit<T>, A>, AllocError> {
+ unsafe {
+ Ok(Rc::from_ptr_in(
+ Rc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate_zeroed(layout),
+ <*mut u8>::cast,
+ )?,
+ alloc,
+ ))
+ }
+ }
+
+ /// Constructs a new `Pin<Rc<T>>` in the provided allocator. If `T` does not implement `Unpin`, then
+ /// `value` will be pinned in memory and unable to be moved.
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn pin_in(value: T, alloc: A) -> Pin<Self> {
+ unsafe { Pin::new_unchecked(Rc::new_in(value, alloc)) }
+ }
/// Returns the inner value, if the `Rc` has exactly one strong reference.
///
@@ -665,13 +912,14 @@ impl<T> Rc<T> {
if Rc::strong_count(&this) == 1 {
unsafe {
let val = ptr::read(&*this); // copy the contained object
+ let alloc = ptr::read(&this.alloc); // copy the allocator
// Indicate to Weaks that they can't be promoted by decrementing
// the strong count, and then remove the implicit "strong weak"
// pointer while also handling drop logic by just crafting a
// fake Weak.
this.inner().dec_strong();
- let _weak = Weak { ptr: this.ptr };
+ let _weak = Weak { ptr: this.ptr, alloc };
forget(this);
Ok(val)
}
@@ -758,7 +1006,7 @@ impl<T> Rc<[T]> {
Layout::array::<T>(len).unwrap(),
|layout| Global.allocate_zeroed(layout),
|mem| {
- ptr::slice_from_raw_parts_mut(mem as *mut T, len)
+ ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
as *mut RcBox<[mem::MaybeUninit<T>]>
},
))
@@ -766,7 +1014,84 @@ impl<T> Rc<[T]> {
}
}
-impl<T> Rc<mem::MaybeUninit<T>> {
+impl<T, A: Allocator> Rc<[T], A> {
+ /// Constructs a new reference-counted slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let mut values = Rc::<[u32], _>::new_uninit_slice_in(3, System);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// Rc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
+ /// Rc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
+ /// Rc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_uninit_slice_in(len: usize, alloc: A) -> Rc<[mem::MaybeUninit<T>], A> {
+ unsafe { Rc::from_ptr_in(Rc::allocate_for_slice_in(len, &alloc), alloc) }
+ }
+
+ /// Constructs a new reference-counted slice with uninitialized contents, with the memory being
+ /// filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::rc::Rc;
+ /// use std::alloc::System;
+ ///
+ /// let values = Rc::<[u32], _>::new_zeroed_slice_in(3, System);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Rc<[mem::MaybeUninit<T>], A> {
+ unsafe {
+ Rc::from_ptr_in(
+ Rc::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| alloc.allocate_zeroed(layout),
+ |mem| {
+ ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
+ as *mut RcBox<[mem::MaybeUninit<T>]>
+ },
+ ),
+ alloc,
+ )
+ }
+ }
+}
+
+impl<T, A: Allocator> Rc<mem::MaybeUninit<T>, A> {
/// Converts to `Rc<T>`.
///
/// # Safety
@@ -798,12 +1123,16 @@ impl<T> Rc<mem::MaybeUninit<T>> {
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
#[inline]
- pub unsafe fn assume_init(self) -> Rc<T> {
- unsafe { Rc::from_inner(mem::ManuallyDrop::new(self).ptr.cast()) }
+ pub unsafe fn assume_init(self) -> Rc<T, A>
+ where
+ A: Clone,
+ {
+ let md_self = mem::ManuallyDrop::new(self);
+ unsafe { Rc::from_inner_in(md_self.ptr.cast(), md_self.alloc.clone()) }
}
}
-impl<T> Rc<[mem::MaybeUninit<T>]> {
+impl<T, A: Allocator> Rc<[mem::MaybeUninit<T>], A> {
/// Converts to `Rc<[T]>`.
///
/// # Safety
@@ -838,12 +1167,128 @@ impl<T> Rc<[mem::MaybeUninit<T>]> {
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
#[inline]
- pub unsafe fn assume_init(self) -> Rc<[T]> {
- unsafe { Rc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
+ pub unsafe fn assume_init(self) -> Rc<[T], A>
+ where
+ A: Clone,
+ {
+ let md_self = mem::ManuallyDrop::new(self);
+ unsafe { Rc::from_ptr_in(md_self.ptr.as_ptr() as _, md_self.alloc.clone()) }
}
}
impl<T: ?Sized> Rc<T> {
+ /// Constructs an `Rc<T>` from a raw pointer.
+ ///
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Rc<U>::into_raw`][into_raw] where `U` must have the same size
+ /// and alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`][transmute] for more information on what
+ /// restrictions apply in this case.
+ ///
+ /// The raw pointer must point to a block of memory allocated by the global allocator
+ ///
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Rc<T>` is never accessed.
+ ///
+ /// [into_raw]: Rc::into_raw
+ /// [transmute]: core::mem::transmute
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new("hello".to_owned());
+ /// let x_ptr = Rc::into_raw(x);
+ ///
+ /// unsafe {
+ /// // Convert back to an `Rc` to prevent leak.
+ /// let x = Rc::from_raw(x_ptr);
+ /// assert_eq!(&*x, "hello");
+ ///
+ /// // Further calls to `Rc::from_raw(x_ptr)` would be memory-unsafe.
+ /// }
+ ///
+ /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ unsafe { Self::from_raw_in(ptr, Global) }
+ }
+
+ /// Increments the strong reference count on the `Rc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Rc::into_raw`, the
+ /// associated `Rc` instance must be valid (i.e. the strong count must be at
+ /// least 1) for the duration of this method, and `ptr` must point to a block of memory
+ /// allocated by the global allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Rc::into_raw(five);
+ /// Rc::increment_strong_count(ptr);
+ ///
+ /// let five = Rc::from_raw(ptr);
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
+ pub unsafe fn increment_strong_count(ptr: *const T) {
+ unsafe { Self::increment_strong_count_in(ptr, Global) }
+ }
+
+ /// Decrements the strong reference count on the `Rc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Rc::into_raw`, the
+ /// associated `Rc` instance must be valid (i.e. the strong count must be at
+ /// least 1) when invoking this method, and `ptr` must point to a block of memory
+ /// allocated by the global allocator. This method can be used to release the final `Rc` and
+ /// backing storage, but **should not** be called after the final `Rc` has been released.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Rc::into_raw(five);
+ /// Rc::increment_strong_count(ptr);
+ ///
+ /// let five = Rc::from_raw(ptr);
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// Rc::decrement_strong_count(ptr);
+ /// assert_eq!(1, Rc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
+ pub unsafe fn decrement_strong_count(ptr: *const T) {
+ unsafe { Self::decrement_strong_count_in(ptr, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Rc<T, A> {
/// Consumes the `Rc`, returning the wrapped pointer.
///
/// To avoid a memory leak the pointer must be converted back to an `Rc` using
@@ -891,16 +1336,18 @@ impl<T: ?Sized> Rc<T> {
unsafe { ptr::addr_of_mut!((*ptr).value) }
}
- /// Constructs an `Rc<T>` from a raw pointer.
+ /// Constructs an `Rc<T, A>` from a raw pointer in the provided allocator.
///
/// The raw pointer must have been previously returned by a call to
- /// [`Rc<U>::into_raw`][into_raw] where `U` must have the same size
+ /// [`Rc<U, A>::into_raw`][into_raw] where `U` must have the same size
/// and alignment as `T`. This is trivially true if `U` is `T`.
/// Note that if `U` is not `T` but has the same size and alignment, this is
/// basically like transmuting references of different types. See
/// [`mem::transmute`] for more information on what
/// restrictions apply in this case.
///
+ /// The raw pointer must point to a block of memory allocated by `alloc`
+ ///
/// The user of `from_raw` has to make sure a specific value of `T` is only
/// dropped once.
///
@@ -912,14 +1359,17 @@ impl<T: ?Sized> Rc<T> {
/// # Examples
///
/// ```
+ /// #![feature(allocator_api)]
+ ///
/// use std::rc::Rc;
+ /// use std::alloc::System;
///
- /// let x = Rc::new("hello".to_owned());
+ /// let x = Rc::new_in("hello".to_owned(), System);
/// let x_ptr = Rc::into_raw(x);
///
/// unsafe {
/// // Convert back to an `Rc` to prevent leak.
- /// let x = Rc::from_raw(x_ptr);
+ /// let x = Rc::from_raw_in(x_ptr, System);
/// assert_eq!(&*x, "hello");
///
/// // Further calls to `Rc::from_raw(x_ptr)` would be memory-unsafe.
@@ -927,14 +1377,14 @@ impl<T: ?Sized> Rc<T> {
///
/// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
/// ```
- #[stable(feature = "rc_raw", since = "1.17.0")]
- pub unsafe fn from_raw(ptr: *const T) -> Self {
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
let offset = unsafe { data_offset(ptr) };
// Reverse the offset to find the original RcBox.
let rc_ptr = unsafe { ptr.byte_sub(offset) as *mut RcBox<T> };
- unsafe { Self::from_ptr(rc_ptr) }
+ unsafe { Self::from_ptr_in(rc_ptr, alloc) }
}
/// Creates a new [`Weak`] pointer to this allocation.
@@ -951,11 +1401,14 @@ impl<T: ?Sized> Rc<T> {
#[must_use = "this returns a new `Weak` pointer, \
without modifying the original `Rc`"]
#[stable(feature = "rc_weak", since = "1.4.0")]
- pub fn downgrade(this: &Self) -> Weak<T> {
+ pub fn downgrade(this: &Self) -> Weak<T, A>
+ where
+ A: Clone,
+ {
this.inner().inc_weak();
// Make sure we do not create a dangling Weak
debug_assert!(!is_dangling(this.ptr.as_ptr()));
- Weak { ptr: this.ptr }
+ Weak { ptr: this.ptr, alloc: this.alloc.clone() }
}
/// Gets the number of [`Weak`] pointers to this allocation.
@@ -999,30 +1452,37 @@ impl<T: ?Sized> Rc<T> {
///
/// # Safety
///
- /// The pointer must have been obtained through `Rc::into_raw`, and the
+ /// The pointer must have been obtained through `Rc::into_raw`, the
/// associated `Rc` instance must be valid (i.e. the strong count must be at
- /// least 1) for the duration of this method.
+ /// least 1) for the duration of this method, and `ptr` must point to a block of memory
+ /// allocated by `alloc`
///
/// # Examples
///
/// ```
+ /// #![feature(allocator_api)]
+ ///
/// use std::rc::Rc;
+ /// use std::alloc::System;
///
- /// let five = Rc::new(5);
+ /// let five = Rc::new_in(5, System);
///
/// unsafe {
/// let ptr = Rc::into_raw(five);
- /// Rc::increment_strong_count(ptr);
+ /// Rc::increment_strong_count_in(ptr, System);
///
- /// let five = Rc::from_raw(ptr);
+ /// let five = Rc::from_raw_in(ptr, System);
/// assert_eq!(2, Rc::strong_count(&five));
/// }
/// ```
#[inline]
- #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
- pub unsafe fn increment_strong_count(ptr: *const T) {
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
+ where
+ A: Clone,
+ {
// Retain Rc, but don't touch refcount by wrapping in ManuallyDrop
- let rc = unsafe { mem::ManuallyDrop::new(Rc::<T>::from_raw(ptr)) };
+ let rc = unsafe { mem::ManuallyDrop::new(Rc::<T, A>::from_raw_in(ptr, alloc)) };
// Now increase refcount, but don't drop new refcount either
let _rc_clone: mem::ManuallyDrop<_> = rc.clone();
}
@@ -1032,33 +1492,36 @@ impl<T: ?Sized> Rc<T> {
///
/// # Safety
///
- /// The pointer must have been obtained through `Rc::into_raw`, and the
+ /// The pointer must have been obtained through `Rc::into_raw`, the
/// associated `Rc` instance must be valid (i.e. the strong count must be at
- /// least 1) when invoking this method. This method can be used to release
- /// the final `Rc` and backing storage, but **should not** be called after
- /// the final `Rc` has been released.
+ /// least 1) when invoking this method, and `ptr` must point to a block of memory
+ /// allocated by `alloc`. This method can be used to release the final `Rc` and backing storage,
+ /// but **should not** be called after the final `Rc` has been released.
///
/// # Examples
///
/// ```
+ /// #![feature(allocator_api)]
+ ///
/// use std::rc::Rc;
+ /// use std::alloc::System;
///
- /// let five = Rc::new(5);
+ /// let five = Rc::new_in(5, System);
///
/// unsafe {
/// let ptr = Rc::into_raw(five);
- /// Rc::increment_strong_count(ptr);
+ /// Rc::increment_strong_count_in(ptr, System);
///
- /// let five = Rc::from_raw(ptr);
+ /// let five = Rc::from_raw_in(ptr, System);
/// assert_eq!(2, Rc::strong_count(&five));
- /// Rc::decrement_strong_count(ptr);
+ /// Rc::decrement_strong_count_in(ptr, System);
/// assert_eq!(1, Rc::strong_count(&five));
/// }
/// ```
#[inline]
- #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
- pub unsafe fn decrement_strong_count(ptr: *const T) {
- unsafe { drop(Rc::from_raw(ptr)) };
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
+ unsafe { drop(Rc::from_raw_in(ptr, alloc)) };
}
/// Returns `true` if there are no other `Rc` or [`Weak`] pointers to
@@ -1188,7 +1651,7 @@ impl<T: ?Sized> Rc<T> {
}
}
-impl<T: Clone> Rc<T> {
+impl<T: Clone, A: Allocator + Clone> Rc<T, A> {
/// Makes a mutable reference into the given `Rc`.
///
/// If there are other `Rc` pointers to the same allocation, then `make_mut` will
@@ -1246,7 +1709,7 @@ impl<T: Clone> Rc<T> {
if Rc::strong_count(this) != 1 {
// Gotta clone the data, there are other Rcs.
// Pre-allocate memory to allow writing the cloned value directly.
- let mut rc = Self::new_uninit();
+ let mut rc = Self::new_uninit_in(this.alloc.clone());
unsafe {
let data = Rc::get_mut_unchecked(&mut rc);
(**this).write_clone_into_raw(data.as_mut_ptr());
@@ -1254,7 +1717,7 @@ impl<T: Clone> Rc<T> {
}
} else if Rc::weak_count(this) != 0 {
// Can just steal the data, all that's left is Weaks
- let mut rc = Self::new_uninit();
+ let mut rc = Self::new_uninit_in(this.alloc.clone());
unsafe {
let data = Rc::get_mut_unchecked(&mut rc);
data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
@@ -1310,7 +1773,7 @@ impl<T: Clone> Rc<T> {
}
}
-impl Rc<dyn Any> {
+impl<A: Allocator + Clone> Rc<dyn Any, A> {
/// Attempt to downcast the `Rc<dyn Any>` to a concrete type.
///
/// # Examples
@@ -1331,12 +1794,13 @@ impl Rc<dyn Any> {
/// ```
#[inline]
#[stable(feature = "rc_downcast", since = "1.29.0")]
- pub fn downcast<T: Any>(self) -> Result<Rc<T>, Rc<dyn Any>> {
+ pub fn downcast<T: Any>(self) -> Result<Rc<T, A>, Self> {
if (*self).is::<T>() {
unsafe {
let ptr = self.ptr.cast::<RcBox<T>>();
+ let alloc = self.alloc.clone();
forget(self);
- Ok(Rc::from_inner(ptr))
+ Ok(Rc::from_inner_in(ptr, alloc))
}
} else {
Err(self)
@@ -1371,11 +1835,12 @@ impl Rc<dyn Any> {
/// [`downcast`]: Self::downcast
#[inline]
#[unstable(feature = "downcast_unchecked", issue = "90850")]
- pub unsafe fn downcast_unchecked<T: Any>(self) -> Rc<T> {
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Rc<T, A> {
unsafe {
let ptr = self.ptr.cast::<RcBox<T>>();
+ let alloc = self.alloc.clone();
mem::forget(self);
- Rc::from_inner(ptr)
+ Rc::from_inner_in(ptr, alloc)
}
}
}
@@ -1427,25 +1892,27 @@ impl<T: ?Sized> Rc<T> {
Ok(inner)
}
+}
+impl<T: ?Sized, A: Allocator> Rc<T, A> {
/// Allocates an `RcBox<T>` with sufficient space for an unsized inner value
#[cfg(not(no_global_oom_handling))]
- unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
+ unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut RcBox<T> {
// Allocate for the `RcBox<T>` using the given value.
unsafe {
- Self::allocate_for_layout(
+ Rc::<T>::allocate_for_layout(
Layout::for_value(&*ptr),
- |layout| Global.allocate(layout),
+ |layout| alloc.allocate(layout),
|mem| mem.with_metadata_of(ptr as *const RcBox<T>),
)
}
}
#[cfg(not(no_global_oom_handling))]
- fn from_box(src: Box<T>) -> Rc<T> {
+ fn from_box_in(src: Box<T, A>) -> Rc<T, A> {
unsafe {
let value_size = size_of_val(&*src);
- let ptr = Self::allocate_for_ptr(&*src);
+ let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
// Copy value as bytes
ptr::copy_nonoverlapping(
@@ -1455,10 +1922,11 @@ impl<T: ?Sized> Rc<T> {
);
// Free the allocation without dropping its contents
- let src = Box::from_raw(Box::into_raw(src) as *mut mem::ManuallyDrop<T>);
+ let (bptr, alloc) = Box::into_raw_with_allocator(src);
+ let src = Box::from_raw(bptr as *mut mem::ManuallyDrop<T>);
drop(src);
- Self::from_ptr(ptr)
+ Self::from_ptr_in(ptr, alloc)
}
}
}
@@ -1471,7 +1939,7 @@ impl<T> Rc<[T]> {
Self::allocate_for_layout(
Layout::array::<T>(len).unwrap(),
|layout| Global.allocate(layout),
- |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>,
+ |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut RcBox<[T]>,
)
}
}
@@ -1538,6 +2006,21 @@ impl<T> Rc<[T]> {
}
}
+impl<T, A: Allocator> Rc<[T], A> {
+ /// Allocates an `RcBox<[T]>` with the given length.
+ #[inline]
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut RcBox<[T]> {
+ unsafe {
+ Rc::<[T]>::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| alloc.allocate(layout),
+ |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut RcBox<[T]>,
+ )
+ }
+ }
+}
+
/// Specialization trait used for `From<&[T]>`.
trait RcFromSlice<T> {
fn from_slice(slice: &[T]) -> Self;
@@ -1560,7 +2043,7 @@ impl<T: Copy> RcFromSlice<T> for Rc<[T]> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Deref for Rc<T> {
+impl<T: ?Sized, A: Allocator> Deref for Rc<T, A> {
type Target = T;
#[inline(always)]
@@ -1573,7 +2056,7 @@ impl<T: ?Sized> Deref for Rc<T> {
impl<T: ?Sized> Receiver for Rc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
+unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Rc<T, A> {
/// Drops the `Rc`.
///
/// This will decrement the strong reference count. If the strong reference
@@ -1611,7 +2094,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
self.inner().dec_weak();
if self.inner().weak() == 0 {
- Global.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()));
+ self.alloc.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()));
}
}
}
@@ -1619,7 +2102,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Clone for Rc<T> {
+impl<T: ?Sized, A: Allocator + Clone> Clone for Rc<T, A> {
/// Makes a clone of the `Rc` pointer.
///
/// This creates another pointer to the same allocation, increasing the
@@ -1635,10 +2118,10 @@ impl<T: ?Sized> Clone for Rc<T> {
/// let _ = Rc::clone(&five);
/// ```
#[inline]
- fn clone(&self) -> Rc<T> {
+ fn clone(&self) -> Self {
unsafe {
self.inner().inc_strong();
- Self::from_inner(self.ptr)
+ Self::from_inner_in(self.ptr, self.alloc.clone())
}
}
}
@@ -1663,20 +2146,20 @@ impl<T: Default> Default for Rc<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-trait RcEqIdent<T: ?Sized + PartialEq> {
- fn eq(&self, other: &Rc<T>) -> bool;
- fn ne(&self, other: &Rc<T>) -> bool;
+trait RcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
+ fn eq(&self, other: &Rc<T, A>) -> bool;
+ fn ne(&self, other: &Rc<T, A>) -> bool;
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
+impl<T: ?Sized + PartialEq, A: Allocator> RcEqIdent<T, A> for Rc<T, A> {
#[inline]
- default fn eq(&self, other: &Rc<T>) -> bool {
+ default fn eq(&self, other: &Rc<T, A>) -> bool {
**self == **other
}
#[inline]
- default fn ne(&self, other: &Rc<T>) -> bool {
+ default fn ne(&self, other: &Rc<T, A>) -> bool {
**self != **other
}
}
@@ -1695,20 +2178,20 @@ impl<T: Eq> MarkerEq for T {}
///
/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
+impl<T: ?Sized + MarkerEq, A: Allocator> RcEqIdent<T, A> for Rc<T, A> {
#[inline]
- fn eq(&self, other: &Rc<T>) -> bool {
+ fn eq(&self, other: &Rc<T, A>) -> bool {
Rc::ptr_eq(self, other) || **self == **other
}
#[inline]
- fn ne(&self, other: &Rc<T>) -> bool {
+ fn ne(&self, other: &Rc<T, A>) -> bool {
!Rc::ptr_eq(self, other) && **self != **other
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
+impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Rc<T, A> {
/// Equality for two `Rc`s.
///
/// Two `Rc`s are equal if their inner values are equal, even if they are
@@ -1728,7 +2211,7 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
/// assert!(five == Rc::new(5));
/// ```
#[inline]
- fn eq(&self, other: &Rc<T>) -> bool {
+ fn eq(&self, other: &Rc<T, A>) -> bool {
RcEqIdent::eq(self, other)
}
@@ -1750,16 +2233,16 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
/// assert!(five != Rc::new(6));
/// ```
#[inline]
- fn ne(&self, other: &Rc<T>) -> bool {
+ fn ne(&self, other: &Rc<T, A>) -> bool {
RcEqIdent::ne(self, other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> Eq for Rc<T> {}
+impl<T: ?Sized + Eq, A: Allocator> Eq for Rc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
+impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Rc<T, A> {
/// Partial comparison for two `Rc`s.
///
/// The two are compared by calling `partial_cmp()` on their inner values.
@@ -1775,7 +2258,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
/// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Rc::new(6)));
/// ```
#[inline(always)]
- fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
+ fn partial_cmp(&self, other: &Rc<T, A>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
@@ -1793,7 +2276,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
/// assert!(five < Rc::new(6));
/// ```
#[inline(always)]
- fn lt(&self, other: &Rc<T>) -> bool {
+ fn lt(&self, other: &Rc<T, A>) -> bool {
**self < **other
}
@@ -1811,7 +2294,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
/// assert!(five <= Rc::new(5));
/// ```
#[inline(always)]
- fn le(&self, other: &Rc<T>) -> bool {
+ fn le(&self, other: &Rc<T, A>) -> bool {
**self <= **other
}
@@ -1829,7 +2312,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
/// assert!(five > Rc::new(4));
/// ```
#[inline(always)]
- fn gt(&self, other: &Rc<T>) -> bool {
+ fn gt(&self, other: &Rc<T, A>) -> bool {
**self > **other
}
@@ -1847,13 +2330,13 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
/// assert!(five >= Rc::new(5));
/// ```
#[inline(always)]
- fn ge(&self, other: &Rc<T>) -> bool {
+ fn ge(&self, other: &Rc<T, A>) -> bool {
**self >= **other
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Ord> Ord for Rc<T> {
+impl<T: ?Sized + Ord, A: Allocator> Ord for Rc<T, A> {
/// Comparison for two `Rc`s.
///
/// The two are compared by calling `cmp()` on their inner values.
@@ -1869,34 +2352,34 @@ impl<T: ?Sized + Ord> Ord for Rc<T> {
/// assert_eq!(Ordering::Less, five.cmp(&Rc::new(6)));
/// ```
#[inline]
- fn cmp(&self, other: &Rc<T>) -> Ordering {
+ fn cmp(&self, other: &Rc<T, A>) -> Ordering {
(**self).cmp(&**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Hash> Hash for Rc<T> {
+impl<T: ?Sized + Hash, A: Allocator> Hash for Rc<T, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + fmt::Display> fmt::Display for Rc<T> {
+impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Rc<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Rc<T> {
+impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Rc<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> fmt::Pointer for Rc<T> {
+impl<T: ?Sized, A: Allocator> fmt::Pointer for Rc<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&(&**self as *const T), f)
}
@@ -1982,7 +2465,7 @@ impl From<String> for Rc<str> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
-impl<T: ?Sized> From<Box<T>> for Rc<T> {
+impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Rc<T, A> {
/// Move a boxed object to a new, reference counted, allocation.
///
/// # Example
@@ -1994,31 +2477,37 @@ impl<T: ?Sized> From<Box<T>> for Rc<T> {
/// assert_eq!(1, *shared);
/// ```
#[inline]
- fn from(v: Box<T>) -> Rc<T> {
- Rc::from_box(v)
+ fn from(v: Box<T, A>) -> Rc<T, A> {
+ Rc::from_box_in(v)
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
-impl<T> From<Vec<T>> for Rc<[T]> {
+impl<T, A: Allocator> From<Vec<T, A>> for Rc<[T], A> {
/// Allocate a reference-counted slice and move `v`'s items into it.
///
/// # Example
///
/// ```
/// # use std::rc::Rc;
- /// let original: Box<Vec<i32>> = Box::new(vec![1, 2, 3]);
- /// let shared: Rc<Vec<i32>> = Rc::from(original);
- /// assert_eq!(vec![1, 2, 3], *shared);
+ /// let unique: Vec<i32> = vec![1, 2, 3];
+ /// let shared: Rc<[i32]> = Rc::from(unique);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
/// ```
#[inline]
- fn from(mut v: Vec<T>) -> Rc<[T]> {
+ fn from(v: Vec<T, A>) -> Rc<[T], A> {
unsafe {
- let rc = Rc::copy_from_slice(&v);
- // Allow the Vec to free its memory, but not destroy its contents
- v.set_len(0);
- rc
+ let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
+
+ let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
+ ptr::copy_nonoverlapping(vec_ptr, &mut (*rc_ptr).value as *mut [T] as *mut T, len);
+
+ // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
+ // without dropping its contents or the allocator
+ let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
+
+ Self::from_ptr_in(rc_ptr, alloc)
}
}
}
@@ -2189,7 +2678,10 @@ impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "rc_weak", since = "1.4.0")]
-pub struct Weak<T: ?Sized> {
+pub struct Weak<
+ T: ?Sized,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
@@ -2197,15 +2689,16 @@ pub struct Weak<T: ?Sized> {
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<RcBox<T>>,
+ alloc: A,
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> !Send for Weak<T> {}
+impl<T: ?Sized, A: Allocator> !Send for Weak<T, A> {}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> !Sync for Weak<T> {}
+impl<T: ?Sized, A: Allocator> !Sync for Weak<T, A> {}
#[unstable(feature = "coerce_unsized", issue = "18598")]
-impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
@@ -2224,16 +2717,45 @@ impl<T> Weak<T> {
/// let empty: Weak<i64> = Weak::new();
/// assert!(empty.upgrade().is_none());
/// ```
+ #[inline]
#[stable(feature = "downgraded_weak", since = "1.10.0")]
- #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
+ #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
#[must_use]
pub const fn new() -> Weak<T> {
- Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<RcBox<T>>(usize::MAX)) } }
+ Weak {
+ ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<RcBox<T>>(usize::MAX)) },
+ alloc: Global,
+ }
+ }
+}
+
+impl<T, A: Allocator> Weak<T, A> {
+    /// Constructs a new `Weak<T>` associated with the provided allocator, without allocating
+    /// any memory.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Weak;
+ ///
+ /// let empty: Weak<i64> = Weak::new();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn new_in(alloc: A) -> Weak<T, A> {
+ Weak {
+ ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<RcBox<T>>(usize::MAX)) },
+ alloc,
+ }
}
}
pub(crate) fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool {
- (ptr as *mut ()).addr() == usize::MAX
+ (ptr.cast::<()>()).addr() == usize::MAX
}
/// Helper type to allow accessing the reference counts without
@@ -2244,6 +2766,56 @@ struct WeakInner<'a> {
}
impl<T: ?Sized> Weak<T> {
+ /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+ ///
+ /// This can be used to safely get a strong reference (by calling [`upgrade`]
+ /// later) or to deallocate the weak count by dropping the `Weak<T>`.
+ ///
+ /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
+ /// as these don't own anything; the method still works on them).
+ ///
+ /// # Safety
+ ///
+    /// The pointer must have originated from [`into_raw`] and must still own its potential
+    /// weak reference, and `ptr` must point to a block of memory allocated by the global allocator.
+ ///
+ /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
+ /// takes ownership of one weak reference currently represented as a raw pointer (the weak
+ /// count is not modified by this operation) and therefore it must be paired with a previous
+ /// call to [`into_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ ///
+ /// let raw_1 = Rc::downgrade(&strong).into_raw();
+ /// let raw_2 = Rc::downgrade(&strong).into_raw();
+ ///
+ /// assert_eq!(2, Rc::weak_count(&strong));
+ ///
+ /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+ /// assert_eq!(1, Rc::weak_count(&strong));
+ ///
+ /// drop(strong);
+ ///
+ /// // Decrement the last weak count.
+ /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+ /// ```
+ ///
+ /// [`into_raw`]: Weak::into_raw
+ /// [`upgrade`]: Weak::upgrade
+ /// [`new`]: Weak::new
+ #[inline]
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ unsafe { Self::from_raw_in(ptr, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Weak<T, A> {
/// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
///
/// The pointer is valid only if there are some strong references. The pointer may be dangling,
@@ -2321,6 +2893,45 @@ impl<T: ?Sized> Weak<T> {
result
}
+ /// Consumes the `Weak<T>` and turns it into a raw pointer.
+ ///
+ /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
+ /// one weak reference (the weak count is not modified by this operation). It can be turned
+ /// back into the `Weak<T>` with [`from_raw`].
+ ///
+ /// The same restrictions of accessing the target of the pointer as with
+ /// [`as_ptr`] apply.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ /// let weak = Rc::downgrade(&strong);
+ /// let raw = weak.into_raw();
+ ///
+ /// assert_eq!(1, Rc::weak_count(&strong));
+ /// assert_eq!("hello", unsafe { &*raw });
+ ///
+ /// drop(unsafe { Weak::from_raw(raw) });
+ /// assert_eq!(0, Rc::weak_count(&strong));
+ /// ```
+ ///
+ /// [`from_raw`]: Weak::from_raw
+ /// [`as_ptr`]: Weak::as_ptr
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn into_raw_and_alloc(self) -> (*const T, A)
+ where
+ A: Clone,
+ {
+ let result = self.as_ptr();
+ let alloc = self.alloc.clone();
+ mem::forget(self);
+ (result, alloc)
+ }
+
/// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
///
/// This can be used to safely get a strong reference (by calling [`upgrade`]
@@ -2332,7 +2943,7 @@ impl<T: ?Sized> Weak<T> {
/// # Safety
///
/// The pointer must have originated from the [`into_raw`] and must still own its potential
- /// weak reference.
+ /// weak reference, and `ptr` must point to a block of memory allocated by `alloc`.
///
/// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
/// takes ownership of one weak reference currently represented as a raw pointer (the weak
@@ -2363,8 +2974,9 @@ impl<T: ?Sized> Weak<T> {
/// [`into_raw`]: Weak::into_raw
/// [`upgrade`]: Weak::upgrade
/// [`new`]: Weak::new
- #[stable(feature = "weak_into_raw", since = "1.45.0")]
- pub unsafe fn from_raw(ptr: *const T) -> Self {
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
// See Weak::as_ptr for context on how the input pointer is derived.
let ptr = if is_dangling(ptr as *mut T) {
@@ -2380,7 +2992,7 @@ impl<T: ?Sized> Weak<T> {
};
// SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
- Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
}
/// Attempts to upgrade the `Weak` pointer to an [`Rc`], delaying
@@ -2409,7 +3021,10 @@ impl<T: ?Sized> Weak<T> {
#[must_use = "this returns a new `Rc`, \
without modifying the original weak pointer"]
#[stable(feature = "rc_weak", since = "1.4.0")]
- pub fn upgrade(&self) -> Option<Rc<T>> {
+ pub fn upgrade(&self) -> Option<Rc<T, A>>
+ where
+ A: Clone,
+ {
let inner = self.inner()?;
if inner.strong() == 0 {
@@ -2417,7 +3032,7 @@ impl<T: ?Sized> Weak<T> {
} else {
unsafe {
inner.inc_strong();
- Some(Rc::from_inner(self.ptr))
+ Some(Rc::from_inner_in(self.ptr, self.alloc.clone()))
}
}
}
@@ -2437,15 +3052,15 @@ impl<T: ?Sized> Weak<T> {
#[must_use]
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn weak_count(&self) -> usize {
- self.inner()
- .map(|inner| {
- if inner.strong() > 0 {
- inner.weak() - 1 // subtract the implicit weak ptr
- } else {
- 0
- }
- })
- .unwrap_or(0)
+ if let Some(inner) = self.inner() {
+ if inner.strong() > 0 {
+ inner.weak() - 1 // subtract the implicit weak ptr
+ } else {
+ 0
+ }
+ } else {
+ 0
+ }
}
/// Returns `None` when the pointer is dangling and there is no allocated `RcBox`,
@@ -2513,7 +3128,7 @@ impl<T: ?Sized> Weak<T> {
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
+unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
/// Drops the `Weak` pointer.
///
/// # Examples
@@ -2546,14 +3161,14 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
// the strong pointers have disappeared.
if inner.weak() == 0 {
unsafe {
- Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()));
+ self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()));
}
}
}
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> Clone for Weak<T> {
+impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
/// Makes a clone of the `Weak` pointer that points to the same allocation.
///
/// # Examples
@@ -2566,16 +3181,16 @@ impl<T: ?Sized> Clone for Weak<T> {
/// let _ = Weak::clone(&weak_five);
/// ```
#[inline]
- fn clone(&self) -> Weak<T> {
+ fn clone(&self) -> Weak<T, A> {
if let Some(inner) = self.inner() {
inner.inc_weak()
}
- Weak { ptr: self.ptr }
+ Weak { ptr: self.ptr, alloc: self.alloc.clone() }
}
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> fmt::Debug for Weak<T> {
+impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(Weak)")
}
@@ -2707,21 +3322,21 @@ impl<'a> RcInnerPtr for WeakInner<'a> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> borrow::Borrow<T> for Rc<T> {
+impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Rc<T, A> {
fn borrow(&self) -> &T {
&**self
}
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
-impl<T: ?Sized> AsRef<T> for Rc<T> {
+impl<T: ?Sized, A: Allocator> AsRef<T> for Rc<T, A> {
fn as_ref(&self) -> &T {
&**self
}
}
#[stable(feature = "pin", since = "1.33.0")]
-impl<T: ?Sized> Unpin for Rc<T> {}
+impl<T: ?Sized, A: Allocator> Unpin for Rc<T, A> {}
/// Get the offset within an `RcBox` for the payload behind a pointer.
///
@@ -2822,7 +3437,7 @@ impl<T> UniqueRc<T> {
unsafe {
this.ptr.as_ref().inc_weak();
}
- Weak { ptr: this.ptr }
+ Weak { ptr: this.ptr, alloc: Global }
}
/// Converts the `UniqueRc` into a regular [`Rc`]
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
index 849774099..38f9f39fb 100644
--- a/library/alloc/src/str.rs
+++ b/library/alloc/src/str.rs
@@ -223,8 +223,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = "this is a string";
/// let boxed_str = s.to_owned().into_boxed_str();
@@ -487,8 +485,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let string = String::from("birthday gift");
/// let boxed_str = string.clone().into_boxed_str();
@@ -602,8 +598,6 @@ impl str {
///
/// # Examples
///
-/// Basic usage:
-///
/// ```
/// let smile_utf8 = Box::new([226, 152, 186]);
/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) };
@@ -618,7 +612,7 @@ pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
}
/// Converts the bytes while the bytes are still ascii.
-/// For better average performance, this is happens in chunks of `2*size_of::<usize>()`.
+/// For better average performance, this happens in chunks of `2*size_of::<usize>()`.
/// Returns a vec with the converted bytes.
#[inline]
#[cfg(not(test))]
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index ad7b77f54..ed43244eb 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -388,8 +388,6 @@ pub struct String {
///
/// # Examples
///
-/// Basic usage:
-///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
@@ -412,9 +410,8 @@ pub struct FromUtf8Error {
/// This type is the error type for the [`from_utf16`] method on [`String`].
///
/// [`from_utf16`]: String::from_utf16
-/// # Examples
///
-/// Basic usage:
+/// # Examples
///
/// ```
/// // 𝄞mu<invalid>ic
@@ -441,8 +438,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = String::new();
/// ```
@@ -472,8 +467,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::with_capacity(10);
///
@@ -661,8 +654,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// // 𝄞music
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
@@ -704,8 +695,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// // 𝄞mus<invalid>ic<invalid>
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
@@ -784,8 +773,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// use std::mem;
///
@@ -827,8 +814,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
@@ -852,8 +837,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = String::from("hello");
/// let bytes = s.into_bytes();
@@ -871,8 +854,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = String::from("foo");
///
@@ -889,8 +870,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("foobar");
/// let s_mut_str = s.as_mut_str();
@@ -910,8 +889,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("foo");
///
@@ -966,8 +943,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = String::with_capacity(10);
///
@@ -1157,8 +1132,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("foo");
///
@@ -1206,8 +1179,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("abc");
///
@@ -1235,8 +1206,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = String::from("hello");
///
@@ -1263,8 +1232,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("hello");
///
@@ -1287,14 +1254,12 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
- /// let mut s = String::from("foo");
+ /// let mut s = String::from("abč");
///
- /// assert_eq!(s.pop(), Some('o'));
- /// assert_eq!(s.pop(), Some('o'));
- /// assert_eq!(s.pop(), Some('f'));
+ /// assert_eq!(s.pop(), Some('č'));
+ /// assert_eq!(s.pop(), Some('b'));
+ /// assert_eq!(s.pop(), Some('a'));
///
/// assert_eq!(s.pop(), None);
/// ```
@@ -1321,14 +1286,12 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
- /// let mut s = String::from("foo");
+ /// let mut s = String::from("abç");
///
- /// assert_eq!(s.remove(0), 'f');
- /// assert_eq!(s.remove(1), 'o');
- /// assert_eq!(s.remove(0), 'o');
+ /// assert_eq!(s.remove(0), 'a');
+ /// assert_eq!(s.remove(1), 'ç');
+ /// assert_eq!(s.remove(0), 'b');
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -1514,8 +1477,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::with_capacity(3);
///
@@ -1563,8 +1524,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("bar");
///
@@ -1595,8 +1554,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("hello");
///
@@ -1620,8 +1577,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let a = String::from("foo");
/// assert_eq!(a.len(), 3);
@@ -1641,8 +1596,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut v = String::new();
/// assert!(v.is_empty());
@@ -1697,8 +1650,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("foo");
///
@@ -1734,8 +1685,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("α is alpha, β is beta");
/// let beta_offset = s.find('β').unwrap_or(s.len());
@@ -1784,8 +1733,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = String::from("α is alpha, β is beta");
/// let beta_offset = s.find('β').unwrap_or(s.len());
@@ -1834,8 +1781,6 @@ impl String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = String::from("hello");
///
@@ -1866,8 +1811,6 @@ impl String {
///
/// # Examples
///
- /// Simple usage:
- ///
/// ```
/// let x = String::from("bucket");
/// let static_ref: &'static mut str = x.leak();
@@ -1886,8 +1829,6 @@ impl FromUtf8Error {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
@@ -1910,8 +1851,6 @@ impl FromUtf8Error {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
@@ -1938,8 +1877,6 @@ impl FromUtf8Error {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
@@ -2490,8 +2427,6 @@ pub trait ToString {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let i = 5;
/// let five = String::from("5");
@@ -2527,6 +2462,7 @@ impl<T: fmt::Display + ?Sized> ToString for T {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "ascii_char", issue = "110998")]
impl ToString for core::ascii::Char {
@@ -2536,6 +2472,7 @@ impl ToString for core::ascii::Char {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "char_to_string_specialization", since = "1.46.0")]
impl ToString for char {
@@ -2545,6 +2482,7 @@ impl ToString for char {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "bool_to_string_specialization", since = "1.68.0")]
impl ToString for bool {
@@ -2554,6 +2492,7 @@ impl ToString for bool {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
impl ToString for u8 {
@@ -2574,6 +2513,7 @@ impl ToString for u8 {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "i8_to_string_specialization", since = "1.54.0")]
impl ToString for i8 {
@@ -2597,6 +2537,7 @@ impl ToString for i8 {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "str_to_string_specialization", since = "1.9.0")]
impl ToString for str {
@@ -2606,6 +2547,7 @@ impl ToString for str {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")]
impl ToString for Cow<'_, str> {
@@ -2615,6 +2557,7 @@ impl ToString for Cow<'_, str> {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "string_to_string_specialization", since = "1.17.0")]
impl ToString for String {
@@ -2624,6 +2567,7 @@ impl ToString for String {
}
}
+#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "fmt_arguments_to_string_specialization", since = "1.71.0")]
impl ToString for fmt::Arguments<'_> {
@@ -2702,8 +2646,6 @@ impl From<Box<str>> for String {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s1: String = String::from("hello world");
/// let s2: Box<str> = s1.into_boxed_str();
@@ -2723,8 +2665,6 @@ impl From<String> for Box<str> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s1: String = String::from("hello world");
/// let s2: Box<str> = Box::from(s1);
@@ -2857,8 +2797,6 @@ impl From<String> for Vec<u8> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s1 = String::from("hello world");
/// let v1 = Vec::from(s1);
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 5bb1a93ae..d3b755844 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -153,7 +153,7 @@ macro_rules! acquire {
///
/// ## `Deref` behavior
///
-/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
+/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
@@ -187,7 +187,6 @@ macro_rules! acquire {
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
-/// [deref]: core::ops::Deref
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
@@ -246,32 +245,48 @@ macro_rules! acquire {
/// [rc_examples]: crate::rc#examples
#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
#[stable(feature = "rust1", since = "1.0.0")]
-pub struct Arc<T: ?Sized> {
+pub struct Arc<
+ T: ?Sized,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
ptr: NonNull<ArcInner<T>>,
phantom: PhantomData<ArcInner<T>>,
+ alloc: A,
}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
-impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
+impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}
#[unstable(feature = "coerce_unsized", issue = "18598")]
-impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
impl<T: ?Sized> Arc<T> {
unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
- Self { ptr, phantom: PhantomData }
+ unsafe { Self::from_inner_in(ptr, Global) }
}
unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
- unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ unsafe { Self::from_ptr_in(ptr, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Arc<T, A> {
+ #[inline]
+ unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
+ Self { ptr, phantom: PhantomData, alloc }
+ }
+
+ #[inline]
+ unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
+ unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
}
}
@@ -296,7 +311,10 @@ impl<T: ?Sized> Arc<T> {
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
-pub struct Weak<T: ?Sized> {
+pub struct Weak<
+ T: ?Sized,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
@@ -304,15 +322,16 @@ pub struct Weak<T: ?Sized> {
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<ArcInner<T>>,
+ alloc: A,
}
#[stable(feature = "arc_weak", since = "1.4.0")]
-unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
+unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
-unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
+unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}
#[unstable(feature = "coerce_unsized", issue = "18598")]
-impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
@@ -442,7 +461,7 @@ impl<T> Arc<T> {
.into();
let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
- let weak = Weak { ptr: init_ptr };
+ let weak = Weak { ptr: init_ptr, alloc: Global };
// It's important we don't give up ownership of the weak pointer, or
// else the memory might be freed by the time `data_fn` returns. If
@@ -510,7 +529,7 @@ impl<T> Arc<T> {
Arc::from_ptr(Arc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate(layout),
- |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
))
}
}
@@ -544,7 +563,7 @@ impl<T> Arc<T> {
Arc::from_ptr(Arc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate_zeroed(layout),
- |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
))
}
}
@@ -617,7 +636,7 @@ impl<T> Arc<T> {
Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate(layout),
- |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
)?))
}
}
@@ -650,10 +669,258 @@ impl<T> Arc<T> {
Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate_zeroed(layout),
- |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ <*mut u8>::cast,
)?))
}
}
+}
+
+impl<T, A: Allocator> Arc<T, A> {
+ /// Returns a reference to the underlying allocator.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn allocator(this: &Self) -> &A {
+ &this.alloc
+ }
+ /// Constructs a new `Arc<T>` in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let five = Arc::new_in(5, System);
+ /// ```
+ #[inline]
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
+ // Start the weak pointer count as 1 which is the weak pointer that's
+ // held by all the strong pointers (kinda), see std/rc.rs for more info
+ let x = Box::new_in(
+ ArcInner {
+ strong: atomic::AtomicUsize::new(1),
+ weak: atomic::AtomicUsize::new(1),
+ data,
+ },
+ alloc,
+ );
+ let (ptr, alloc) = Box::into_unique(x);
+ unsafe { Self::from_inner_in(ptr.into(), alloc) }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Arc::<u32, _>::new_uninit_in(System);
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
+ unsafe {
+ Arc::from_ptr_in(
+ Arc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate(layout),
+ <*mut u8>::cast,
+ ),
+ alloc,
+ )
+ }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, in the provided allocator.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let zero = Arc::<u32, _>::new_zeroed_in(System);
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
+ unsafe {
+ Arc::from_ptr_in(
+ Arc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate_zeroed(layout),
+ <*mut u8>::cast,
+ ),
+ alloc,
+ )
+ }
+ }
+
+ /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
+ /// then `data` will be pinned in memory and unable to be moved.
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>> {
+ unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
+ }
+
+ /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, return an error if allocation
+ /// fails.
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError> {
+ unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
+ }
+
+ /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let five = Arc::try_new_in(5, System)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
+ // Start the weak pointer count as 1 which is the weak pointer that's
+ // held by all the strong pointers (kinda), see std/rc.rs for more info
+ let x = Box::try_new_in(
+ ArcInner {
+ strong: atomic::AtomicUsize::new(1),
+ weak: atomic::AtomicUsize::new(1),
+ data,
+ },
+ alloc,
+ )?;
+ let (ptr, alloc) = Box::into_unique(x);
+ Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
+ /// error if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr_in(
+ Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate(layout),
+ <*mut u8>::cast,
+ )?,
+ alloc,
+ ))
+ }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
+ /// fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr_in(
+ Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| alloc.allocate_zeroed(layout),
+ <*mut u8>::cast,
+ )?,
+ alloc,
+ ))
+ }
+ }
/// Returns the inner value, if the `Arc` has exactly one strong reference.
///
/// Otherwise, an [`Err`] is returned with the same `Arc` that was
@@ -695,9 +962,10 @@ impl<T> Arc<T> {
unsafe {
let elem = ptr::read(&this.ptr.as_ref().data);
+ let alloc = ptr::read(&this.alloc); // copy the allocator
// Make a weak pointer to clean up the implicit strong-weak reference
- let _weak = Weak { ptr: this.ptr };
+ let _weak = Weak { ptr: this.ptr, alloc };
mem::forget(this);
Ok(elem)
@@ -814,9 +1082,11 @@ impl<T> Arc<T> {
// in `drop_slow`. Instead of dropping the value behind the pointer,
// it is read and eventually returned; `ptr::read` has the same
// safety conditions as `ptr::drop_in_place`.
+
let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
+ let alloc = unsafe { ptr::read(&this.alloc) };
- drop(Weak { ptr: this.ptr });
+ drop(Weak { ptr: this.ptr, alloc });
Some(inner)
}
@@ -891,7 +1161,83 @@ impl<T> Arc<[T]> {
}
}
-impl<T> Arc<mem::MaybeUninit<T>> {
+impl<T, A: Allocator> Arc<[T], A> {
+ /// Constructs a new atomically reference-counted slice with uninitialized contents in the
+ /// provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
+ /// Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
+ /// Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
+ unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
+ }
+
+ /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
+ /// filled with `0` bytes, in the provided allocator.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::alloc::System;
+ ///
+ /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
+ unsafe {
+ Arc::from_ptr_in(
+ Arc::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| alloc.allocate_zeroed(layout),
+ |mem| {
+ ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
+ as *mut ArcInner<[mem::MaybeUninit<T>]>
+ },
+ ),
+ alloc,
+ )
+ }
+ }
+}
+
+impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
/// Converts to `Arc<T>`.
///
/// # Safety
@@ -924,12 +1270,16 @@ impl<T> Arc<mem::MaybeUninit<T>> {
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
- pub unsafe fn assume_init(self) -> Arc<T> {
- unsafe { Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast()) }
+ pub unsafe fn assume_init(self) -> Arc<T, A>
+ where
+ A: Clone,
+ {
+ let md_self = mem::ManuallyDrop::new(self);
+ unsafe { Arc::from_inner_in(md_self.ptr.cast(), md_self.alloc.clone()) }
}
}
-impl<T> Arc<[mem::MaybeUninit<T>]> {
+impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
/// Converts to `Arc<[T]>`.
///
/// # Safety
@@ -965,12 +1315,129 @@ impl<T> Arc<[mem::MaybeUninit<T>]> {
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
- pub unsafe fn assume_init(self) -> Arc<[T]> {
- unsafe { Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
+ pub unsafe fn assume_init(self) -> Arc<[T], A>
+ where
+ A: Clone,
+ {
+ let md_self = mem::ManuallyDrop::new(self);
+ unsafe { Arc::from_ptr_in(md_self.ptr.as_ptr() as _, md_self.alloc.clone()) }
}
}
impl<T: ?Sized> Arc<T> {
+ /// Constructs an `Arc<T>` from a raw pointer.
+ ///
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
+ /// alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`][transmute] for more information on what
+ /// restrictions apply in this case.
+ ///
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Arc<T>` is never accessed.
+ ///
+ /// [into_raw]: Arc::into_raw
+ /// [transmute]: core::mem::transmute
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new("hello".to_owned());
+ /// let x_ptr = Arc::into_raw(x);
+ ///
+ /// unsafe {
+ /// // Convert back to an `Arc` to prevent leak.
+ /// let x = Arc::from_raw(x_ptr);
+ /// assert_eq!(&*x, "hello");
+ ///
+ /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
+ /// }
+ ///
+ /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ unsafe { Arc::from_raw_in(ptr, Global) }
+ }
+
+ /// Increments the strong reference count on the `Arc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Arc::into_raw`, and the
+ /// associated `Arc` instance must be valid (i.e. the strong count must be at
+ /// least 1) for the duration of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Arc::into_raw(five);
+ /// Arc::increment_strong_count(ptr);
+ ///
+ /// // This assertion is deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// let five = Arc::from_raw(ptr);
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn increment_strong_count(ptr: *const T) {
+ unsafe { Arc::increment_strong_count_in(ptr, Global) }
+ }
+
+ /// Decrements the strong reference count on the `Arc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Arc::into_raw`, and the
+ /// associated `Arc` instance must be valid (i.e. the strong count must be at
+ /// least 1) when invoking this method. This method can be used to release the final
+ /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
+ /// released.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Arc::into_raw(five);
+ /// Arc::increment_strong_count(ptr);
+ ///
+ /// // Those assertions are deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// let five = Arc::from_raw(ptr);
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// Arc::decrement_strong_count(ptr);
+ /// assert_eq!(1, Arc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn decrement_strong_count(ptr: *const T) {
+ unsafe { Arc::decrement_strong_count_in(ptr, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Arc<T, A> {
/// Consumes the `Arc`, returning the wrapped pointer.
///
/// To avoid a memory leak the pointer must be converted back to an `Arc` using
@@ -1020,16 +1487,18 @@ impl<T: ?Sized> Arc<T> {
unsafe { ptr::addr_of_mut!((*ptr).data) }
}
- /// Constructs an `Arc<T>` from a raw pointer.
+ /// Constructs an `Arc<T, A>` from a raw pointer.
///
/// The raw pointer must have been previously returned by a call to
- /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
+ /// [`Arc<U, A>::into_raw`][into_raw] where `U` must have the same size and
/// alignment as `T`. This is trivially true if `U` is `T`.
/// Note that if `U` is not `T` but has the same size and alignment, this is
/// basically like transmuting references of different types. See
- /// [`mem::transmute`][transmute] for more information on what
+ /// [`mem::transmute`] for more information on what
/// restrictions apply in this case.
///
+ /// The raw pointer must point to a block of memory allocated by `alloc`.
+ ///
/// The user of `from_raw` has to make sure a specific value of `T` is only
/// dropped once.
///
@@ -1037,19 +1506,21 @@ impl<T: ?Sized> Arc<T> {
/// even if the returned `Arc<T>` is never accessed.
///
/// [into_raw]: Arc::into_raw
- /// [transmute]: core::mem::transmute
///
/// # Examples
///
/// ```
+ /// #![feature(allocator_api)]
+ ///
/// use std::sync::Arc;
+ /// use std::alloc::System;
///
- /// let x = Arc::new("hello".to_owned());
+ /// let x = Arc::new_in("hello".to_owned(), System);
/// let x_ptr = Arc::into_raw(x);
///
/// unsafe {
/// // Convert back to an `Arc` to prevent leak.
- /// let x = Arc::from_raw(x_ptr);
+ /// let x = Arc::from_raw_in(x_ptr, System);
/// assert_eq!(&*x, "hello");
///
/// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
@@ -1057,15 +1528,16 @@ impl<T: ?Sized> Arc<T> {
///
/// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
/// ```
- #[stable(feature = "rc_raw", since = "1.17.0")]
- pub unsafe fn from_raw(ptr: *const T) -> Self {
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
unsafe {
let offset = data_offset(ptr);
// Reverse the offset to find the original ArcInner.
let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
- Self::from_ptr(arc_ptr)
+ Self::from_ptr_in(arc_ptr, alloc)
}
}
@@ -1083,7 +1555,10 @@ impl<T: ?Sized> Arc<T> {
#[must_use = "this returns a new `Weak` pointer, \
without modifying the original `Arc`"]
#[stable(feature = "arc_weak", since = "1.4.0")]
- pub fn downgrade(this: &Self) -> Weak<T> {
+ pub fn downgrade(this: &Self) -> Weak<T, A>
+ where
+ A: Clone,
+ {
// This Relaxed is OK because we're checking the value in the CAS
// below.
let mut cur = this.inner().weak.load(Relaxed);
@@ -1110,7 +1585,7 @@ impl<T: ?Sized> Arc<T> {
Ok(_) => {
// Make sure we do not create a dangling Weak
debug_assert!(!is_dangling(this.ptr.as_ptr()));
- return Weak { ptr: this.ptr };
+ return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
}
Err(old) => cur = old,
}
@@ -1181,30 +1656,37 @@ impl<T: ?Sized> Arc<T> {
///
/// The pointer must have been obtained through `Arc::into_raw`, and the
/// associated `Arc` instance must be valid (i.e. the strong count must be at
- /// least 1) for the duration of this method.
+ /// least 1) for the duration of this method, and `ptr` must point to a block of memory
+ /// allocated by `alloc`.
///
/// # Examples
///
/// ```
+ /// #![feature(allocator_api)]
+ ///
/// use std::sync::Arc;
+ /// use std::alloc::System;
///
- /// let five = Arc::new(5);
+ /// let five = Arc::new_in(5, System);
///
/// unsafe {
/// let ptr = Arc::into_raw(five);
- /// Arc::increment_strong_count(ptr);
+ /// Arc::increment_strong_count_in(ptr, System);
///
/// // This assertion is deterministic because we haven't shared
/// // the `Arc` between threads.
- /// let five = Arc::from_raw(ptr);
+ /// let five = Arc::from_raw_in(ptr, System);
/// assert_eq!(2, Arc::strong_count(&five));
/// }
/// ```
#[inline]
- #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
- pub unsafe fn increment_strong_count(ptr: *const T) {
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
+ where
+ A: Clone,
+ {
// Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
- let arc = unsafe { mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr)) };
+ let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
// Now increase refcount, but don't drop new refcount either
let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
}
@@ -1214,35 +1696,39 @@ impl<T: ?Sized> Arc<T> {
///
/// # Safety
///
- /// The pointer must have been obtained through `Arc::into_raw`, and the
+ /// The pointer must have been obtained through `Arc::into_raw`, the
/// associated `Arc` instance must be valid (i.e. the strong count must be at
- /// least 1) when invoking this method. This method can be used to release the final
+ /// least 1) when invoking this method, and `ptr` must point to a block of memory
+ /// allocated by `alloc`. This method can be used to release the final
/// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
/// released.
///
/// # Examples
///
/// ```
+ /// #![feature(allocator_api)]
+ ///
/// use std::sync::Arc;
+ /// use std::alloc::System;
///
- /// let five = Arc::new(5);
+ /// let five = Arc::new_in(5, System);
///
/// unsafe {
/// let ptr = Arc::into_raw(five);
- /// Arc::increment_strong_count(ptr);
+ /// Arc::increment_strong_count_in(ptr, System);
///
/// // Those assertions are deterministic because we haven't shared
/// // the `Arc` between threads.
- /// let five = Arc::from_raw(ptr);
+ /// let five = Arc::from_raw_in(ptr, System);
/// assert_eq!(2, Arc::strong_count(&five));
- /// Arc::decrement_strong_count(ptr);
+ /// Arc::decrement_strong_count_in(ptr, System);
/// assert_eq!(1, Arc::strong_count(&five));
/// }
/// ```
#[inline]
- #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
- pub unsafe fn decrement_strong_count(ptr: *const T) {
- unsafe { drop(Arc::from_raw(ptr)) };
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
+ unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
}
#[inline]
@@ -1263,7 +1749,10 @@ impl<T: ?Sized> Arc<T> {
unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
// Drop the weak ref collectively held by all strong references
- drop(Weak { ptr: self.ptr });
+ // Take a reference to `self.alloc` instead of cloning because 1. it'll
+ // last long enough, and 2. you should be able to drop `Arc`s with
+ // unclonable allocators
+ drop(Weak { ptr: self.ptr, alloc: &self.alloc });
}
/// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
@@ -1345,25 +1834,28 @@ impl<T: ?Sized> Arc<T> {
inner
}
+}
+impl<T: ?Sized, A: Allocator> Arc<T, A> {
/// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
+ #[inline]
#[cfg(not(no_global_oom_handling))]
- unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
+ unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
// Allocate for the `ArcInner<T>` using the given value.
unsafe {
- Self::allocate_for_layout(
+ Arc::allocate_for_layout(
Layout::for_value(&*ptr),
- |layout| Global.allocate(layout),
+ |layout| alloc.allocate(layout),
|mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
)
}
}
#[cfg(not(no_global_oom_handling))]
- fn from_box(src: Box<T>) -> Arc<T> {
+ fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
unsafe {
let value_size = size_of_val(&*src);
- let ptr = Self::allocate_for_ptr(&*src);
+ let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
// Copy value as bytes
ptr::copy_nonoverlapping(
@@ -1373,10 +1865,11 @@ impl<T: ?Sized> Arc<T> {
);
// Free the allocation without dropping its contents
- let src = Box::from_raw(Box::into_raw(src) as *mut mem::ManuallyDrop<T>);
+ let (bptr, alloc) = Box::into_raw_with_allocator(src);
+ let src = Box::from_raw(bptr as *mut mem::ManuallyDrop<T>);
drop(src);
- Self::from_ptr(ptr)
+ Self::from_ptr_in(ptr, alloc)
}
}
}
@@ -1389,7 +1882,7 @@ impl<T> Arc<[T]> {
Self::allocate_for_layout(
Layout::array::<T>(len).unwrap(),
|layout| Global.allocate(layout),
- |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
+ |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
)
}
}
@@ -1458,6 +1951,21 @@ impl<T> Arc<[T]> {
}
}
+impl<T, A: Allocator> Arc<[T], A> {
+ /// Allocates an `ArcInner<[T]>` with the given length.
+ #[inline]
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
+ unsafe {
+ Arc::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| alloc.allocate(layout),
+ |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
+ )
+ }
+ }
+}
+
/// Specialization trait used for `From<&[T]>`.
#[cfg(not(no_global_oom_handling))]
trait ArcFromSlice<T> {
@@ -1481,7 +1989,7 @@ impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Clone for Arc<T> {
+impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
/// Makes a clone of the `Arc` pointer.
///
/// This creates another pointer to the same allocation, increasing the
@@ -1497,7 +2005,7 @@ impl<T: ?Sized> Clone for Arc<T> {
/// let _ = Arc::clone(&five);
/// ```
#[inline]
- fn clone(&self) -> Arc<T> {
+ fn clone(&self) -> Arc<T, A> {
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
@@ -1530,12 +2038,12 @@ impl<T: ?Sized> Clone for Arc<T> {
abort();
}
- unsafe { Self::from_inner(self.ptr) }
+ unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Deref for Arc<T> {
+impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
type Target = T;
#[inline]
@@ -1547,7 +2055,7 @@ impl<T: ?Sized> Deref for Arc<T> {
#[unstable(feature = "receiver_trait", issue = "none")]
impl<T: ?Sized> Receiver for Arc<T> {}
-impl<T: Clone> Arc<T> {
+impl<T: Clone, A: Allocator + Clone> Arc<T, A> {
/// Makes a mutable reference into the given `Arc`.
///
/// If there are other `Arc` pointers to the same allocation, then `make_mut` will
@@ -1613,7 +2121,7 @@ impl<T: Clone> Arc<T> {
if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
// Another strong pointer exists, so we must clone.
// Pre-allocate memory to allow writing the cloned value directly.
- let mut arc = Self::new_uninit();
+ let mut arc = Self::new_uninit_in(this.alloc.clone());
unsafe {
let data = Arc::get_mut_unchecked(&mut arc);
(**this).write_clone_into_raw(data.as_mut_ptr());
@@ -1634,10 +2142,10 @@ impl<T: Clone> Arc<T> {
// Materialize our own implicit weak pointer, so that it can clean
// up the ArcInner as needed.
- let _weak = Weak { ptr: this.ptr };
+ let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
// Can just steal the data, all that's left is Weaks
- let mut arc = Self::new_uninit();
+ let mut arc = Self::new_uninit_in(this.alloc.clone());
unsafe {
let data = Arc::get_mut_unchecked(&mut arc);
data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
@@ -1690,7 +2198,7 @@ impl<T: Clone> Arc<T> {
}
}
-impl<T: ?Sized> Arc<T> {
+impl<T: ?Sized, A: Allocator> Arc<T, A> {
/// Returns a mutable reference into the given `Arc`, if there are
/// no other `Arc` or [`Weak`] pointers to the same allocation.
///
@@ -1828,7 +2336,7 @@ impl<T: ?Sized> Arc<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
+unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
/// Drops the `Arc`.
///
/// This will decrement the strong reference count. If the strong reference
@@ -1899,7 +2407,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
}
}
-impl Arc<dyn Any + Send + Sync> {
+impl<A: Allocator + Clone> Arc<dyn Any + Send + Sync, A> {
/// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
///
/// # Examples
@@ -1920,15 +2428,16 @@ impl Arc<dyn Any + Send + Sync> {
/// ```
#[inline]
#[stable(feature = "rc_downcast", since = "1.29.0")]
- pub fn downcast<T>(self) -> Result<Arc<T>, Self>
+ pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
where
T: Any + Send + Sync,
{
if (*self).is::<T>() {
unsafe {
let ptr = self.ptr.cast::<ArcInner<T>>();
+ let alloc = self.alloc.clone();
mem::forget(self);
- Ok(Arc::from_inner(ptr))
+ Ok(Arc::from_inner_in(ptr, alloc))
}
} else {
Err(self)
@@ -1963,14 +2472,15 @@ impl Arc<dyn Any + Send + Sync> {
/// [`downcast`]: Self::downcast
#[inline]
#[unstable(feature = "downcast_unchecked", issue = "90850")]
- pub unsafe fn downcast_unchecked<T>(self) -> Arc<T>
+ pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
where
T: Any + Send + Sync,
{
unsafe {
let ptr = self.ptr.cast::<ArcInner<T>>();
+ let alloc = self.alloc.clone();
mem::forget(self);
- Arc::from_inner(ptr)
+ Arc::from_inner_in(ptr, alloc)
}
}
}
@@ -1989,11 +2499,43 @@ impl<T> Weak<T> {
/// let empty: Weak<i64> = Weak::new();
/// assert!(empty.upgrade().is_none());
/// ```
+ #[inline]
#[stable(feature = "downgraded_weak", since = "1.10.0")]
- #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
+ #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
#[must_use]
pub const fn new() -> Weak<T> {
- Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) } }
+ Weak {
+ ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) },
+ alloc: Global,
+ }
+ }
+}
+
+impl<T, A: Allocator> Weak<T, A> {
+ /// Constructs a new `Weak<T, A>`, without allocating any memory, technically in the provided
+ /// allocator.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::sync::Weak;
+ /// use std::alloc::System;
+ ///
+ /// let empty: Weak<i64, _> = Weak::new_in(System);
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn new_in(alloc: A) -> Weak<T, A> {
+ Weak {
+ ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) },
+ alloc,
+ }
}
}
@@ -2005,6 +2547,55 @@ struct WeakInner<'a> {
}
impl<T: ?Sized> Weak<T> {
+ /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+ ///
+ /// This can be used to safely get a strong reference (by calling [`upgrade`]
+ /// later) or to deallocate the weak count by dropping the `Weak<T>`.
+ ///
+ /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
+ /// as these don't own anything; the method still works on them).
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have originated from the [`into_raw`] and must still own its potential
+ /// weak reference.
+ ///
+ /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
+ /// takes ownership of one weak reference currently represented as a raw pointer (the weak
+ /// count is not modified by this operation) and therefore it must be paired with a previous
+ /// call to [`into_raw`].
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let strong = Arc::new("hello".to_owned());
+ ///
+ /// let raw_1 = Arc::downgrade(&strong).into_raw();
+ /// let raw_2 = Arc::downgrade(&strong).into_raw();
+ ///
+ /// assert_eq!(2, Arc::weak_count(&strong));
+ ///
+ /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+ /// assert_eq!(1, Arc::weak_count(&strong));
+ ///
+ /// drop(strong);
+ ///
+ /// // Decrement the last weak count.
+ /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+ /// ```
+ ///
+ /// [`new`]: Weak::new
+ /// [`into_raw`]: Weak::into_raw
+ /// [`upgrade`]: Weak::upgrade
+ #[inline]
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ unsafe { Weak::from_raw_in(ptr, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Weak<T, A> {
/// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
///
/// The pointer is valid only if there are some strong references. The pointer may be dangling,
@@ -2082,7 +2673,8 @@ impl<T: ?Sized> Weak<T> {
result
}
- /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+ /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
+ /// allocator.
///
/// This can be used to safely get a strong reference (by calling [`upgrade`]
/// later) or to deallocate the weak count by dropping the `Weak<T>`.
@@ -2093,7 +2685,7 @@ impl<T: ?Sized> Weak<T> {
/// # Safety
///
/// The pointer must have originated from the [`into_raw`] and must still own its potential
- /// weak reference.
+ /// weak reference, and must point to a block of memory allocated by `alloc`.
///
/// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
/// takes ownership of one weak reference currently represented as a raw pointer (the weak
@@ -2123,8 +2715,9 @@ impl<T: ?Sized> Weak<T> {
/// [`new`]: Weak::new
/// [`into_raw`]: Weak::into_raw
/// [`upgrade`]: Weak::upgrade
- #[stable(feature = "weak_into_raw", since = "1.45.0")]
- pub unsafe fn from_raw(ptr: *const T) -> Self {
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
// See Weak::as_ptr for context on how the input pointer is derived.
let ptr = if is_dangling(ptr as *mut T) {
@@ -2140,11 +2733,11 @@ impl<T: ?Sized> Weak<T> {
};
// SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
- Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
}
}
-impl<T: ?Sized> Weak<T> {
+impl<T: ?Sized, A: Allocator> Weak<T, A> {
/// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
/// dropping of the inner value if successful.
///
@@ -2171,28 +2764,35 @@ impl<T: ?Sized> Weak<T> {
#[must_use = "this returns a new `Arc`, \
without modifying the original weak pointer"]
#[stable(feature = "arc_weak", since = "1.4.0")]
- pub fn upgrade(&self) -> Option<Arc<T>> {
+ pub fn upgrade(&self) -> Option<Arc<T, A>>
+ where
+ A: Clone,
+ {
+ #[inline]
+ fn checked_increment(n: usize) -> Option<usize> {
+ // Any write of 0 we can observe leaves the field in a permanently-zero state.
+ if n == 0 {
+ return None;
+ }
+ // See comments in `Arc::clone` for why we do this (for `mem::forget`).
+ assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
+ Some(n + 1)
+ }
+
// We use a CAS loop to increment the strong count instead of a
// fetch_add as this function should never take the reference count
// from zero to one.
- self.inner()?
- .strong
- // Relaxed is fine for the failure case because we don't have any expectations about the new state.
- // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
- // value can be initialized after `Weak` references have already been created. In that case, we
- // expect to observe the fully initialized value.
- .fetch_update(Acquire, Relaxed, |n| {
- // Any write of 0 we can observe leaves the field in permanently zero state.
- if n == 0 {
- return None;
- }
- // See comments in `Arc::clone` for why we do this (for `mem::forget`).
- assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
- Some(n + 1)
- })
- .ok()
- // null checked above
- .map(|_| unsafe { Arc::from_inner(self.ptr) })
+ //
+ // Relaxed is fine for the failure case because we don't have any expectations about the new state.
+ // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
+ // value can be initialized after `Weak` references have already been created. In that case, we
+ // expect to observe the fully initialized value.
+ if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
+ // SAFETY: pointer is not null, verified in checked_increment
+ unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
+ } else {
+ None
+ }
}
/// Gets the number of strong (`Arc`) pointers pointing to this allocation.
@@ -2218,22 +2818,22 @@ impl<T: ?Sized> Weak<T> {
#[must_use]
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn weak_count(&self) -> usize {
- self.inner()
- .map(|inner| {
- let weak = inner.weak.load(Acquire);
- let strong = inner.strong.load(Acquire);
- if strong == 0 {
- 0
- } else {
- // Since we observed that there was at least one strong pointer
- // after reading the weak count, we know that the implicit weak
- // reference (present whenever any strong references are alive)
- // was still around when we observed the weak count, and can
- // therefore safely subtract it.
- weak - 1
- }
- })
- .unwrap_or(0)
+ if let Some(inner) = self.inner() {
+ let weak = inner.weak.load(Acquire);
+ let strong = inner.strong.load(Acquire);
+ if strong == 0 {
+ 0
+ } else {
+ // Since we observed that there was at least one strong pointer
+ // after reading the weak count, we know that the implicit weak
+ // reference (present whenever any strong references are alive)
+ // was still around when we observed the weak count, and can
+ // therefore safely subtract it.
+ weak - 1
+ }
+ } else {
+ 0
+ }
}
/// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`,
@@ -2303,7 +2903,7 @@ impl<T: ?Sized> Weak<T> {
}
#[stable(feature = "arc_weak", since = "1.4.0")]
-impl<T: ?Sized> Clone for Weak<T> {
+impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
/// Makes a clone of the `Weak` pointer that points to the same allocation.
///
/// # Examples
@@ -2316,11 +2916,11 @@ impl<T: ?Sized> Clone for Weak<T> {
/// let _ = Weak::clone(&weak_five);
/// ```
#[inline]
- fn clone(&self) -> Weak<T> {
+ fn clone(&self) -> Weak<T, A> {
let inner = if let Some(inner) = self.inner() {
inner
} else {
- return Weak { ptr: self.ptr };
+ return Weak { ptr: self.ptr, alloc: self.alloc.clone() };
};
// See comments in Arc::clone() for why this is relaxed. This can use a
// fetch_add (ignoring the lock) because the weak count is only locked
@@ -2333,7 +2933,7 @@ impl<T: ?Sized> Clone for Weak<T> {
abort();
}
- Weak { ptr: self.ptr }
+ Weak { ptr: self.ptr, alloc: self.alloc.clone() }
}
}
@@ -2359,7 +2959,7 @@ impl<T> Default for Weak<T> {
}
#[stable(feature = "arc_weak", since = "1.4.0")]
-unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
+unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
/// Drops the `Weak` pointer.
///
/// # Examples
@@ -2397,25 +2997,27 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
if inner.weak.fetch_sub(1, Release) == 1 {
acquire!(inner.weak);
- unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) }
+ unsafe {
+ self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
+ }
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-trait ArcEqIdent<T: ?Sized + PartialEq> {
- fn eq(&self, other: &Arc<T>) -> bool;
- fn ne(&self, other: &Arc<T>) -> bool;
+trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
+ fn eq(&self, other: &Arc<T, A>) -> bool;
+ fn ne(&self, other: &Arc<T, A>) -> bool;
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
#[inline]
- default fn eq(&self, other: &Arc<T>) -> bool {
+ default fn eq(&self, other: &Arc<T, A>) -> bool {
**self == **other
}
#[inline]
- default fn ne(&self, other: &Arc<T>) -> bool {
+ default fn ne(&self, other: &Arc<T, A>) -> bool {
**self != **other
}
}
@@ -2428,20 +3030,20 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
///
/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
#[inline]
- fn eq(&self, other: &Arc<T>) -> bool {
+ fn eq(&self, other: &Arc<T, A>) -> bool {
Arc::ptr_eq(self, other) || **self == **other
}
#[inline]
- fn ne(&self, other: &Arc<T>) -> bool {
+ fn ne(&self, other: &Arc<T, A>) -> bool {
!Arc::ptr_eq(self, other) && **self != **other
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
+impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
/// Equality for two `Arc`s.
///
/// Two `Arc`s are equal if their inner values are equal, even if they are
@@ -2460,7 +3062,7 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
/// assert!(five == Arc::new(5));
/// ```
#[inline]
- fn eq(&self, other: &Arc<T>) -> bool {
+ fn eq(&self, other: &Arc<T, A>) -> bool {
ArcEqIdent::eq(self, other)
}
@@ -2481,13 +3083,13 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
/// assert!(five != Arc::new(6));
/// ```
#[inline]
- fn ne(&self, other: &Arc<T>) -> bool {
+ fn ne(&self, other: &Arc<T, A>) -> bool {
ArcEqIdent::ne(self, other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
+impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
/// Partial comparison for two `Arc`s.
///
/// The two are compared by calling `partial_cmp()` on their inner values.
@@ -2502,7 +3104,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
///
/// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
/// ```
- fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
+ fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
@@ -2519,7 +3121,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
///
/// assert!(five < Arc::new(6));
/// ```
- fn lt(&self, other: &Arc<T>) -> bool {
+ fn lt(&self, other: &Arc<T, A>) -> bool {
*(*self) < *(*other)
}
@@ -2536,7 +3138,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
///
/// assert!(five <= Arc::new(5));
/// ```
- fn le(&self, other: &Arc<T>) -> bool {
+ fn le(&self, other: &Arc<T, A>) -> bool {
*(*self) <= *(*other)
}
@@ -2553,7 +3155,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
///
/// assert!(five > Arc::new(4));
/// ```
- fn gt(&self, other: &Arc<T>) -> bool {
+ fn gt(&self, other: &Arc<T, A>) -> bool {
*(*self) > *(*other)
}
@@ -2570,12 +3172,12 @@ impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
///
/// assert!(five >= Arc::new(5));
/// ```
- fn ge(&self, other: &Arc<T>) -> bool {
+ fn ge(&self, other: &Arc<T, A>) -> bool {
*(*self) >= *(*other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Ord> Ord for Arc<T> {
+impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
/// Comparison for two `Arc`s.
///
/// The two are compared by calling `cmp()` on their inner values.
@@ -2590,29 +3192,29 @@ impl<T: ?Sized + Ord> Ord for Arc<T> {
///
/// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
/// ```
- fn cmp(&self, other: &Arc<T>) -> Ordering {
+ fn cmp(&self, other: &Arc<T, A>) -> Ordering {
(**self).cmp(&**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> Eq for Arc<T> {}
+impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
+impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
+impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> fmt::Pointer for Arc<T> {
+impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&(&**self as *const T), f)
}
@@ -2637,7 +3239,7 @@ impl<T: Default> Default for Arc<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Hash> Hash for Arc<T> {
+impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state)
}
@@ -2724,7 +3326,7 @@ impl From<String> for Arc<str> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
-impl<T: ?Sized> From<Box<T>> for Arc<T> {
+impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
/// Move a boxed object to a new, reference-counted allocation.
///
/// # Example
@@ -2736,14 +3338,14 @@ impl<T: ?Sized> From<Box<T>> for Arc<T> {
/// assert_eq!("eggplant", &shared[..]);
/// ```
#[inline]
- fn from(v: Box<T>) -> Arc<T> {
- Arc::from_box(v)
+ fn from(v: Box<T, A>) -> Arc<T, A> {
+ Arc::from_box_in(v)
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
-impl<T> From<Vec<T>> for Arc<[T]> {
+impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
/// Allocate a reference-counted slice and move `v`'s items into it.
///
/// # Example
@@ -2755,12 +3357,18 @@ impl<T> From<Vec<T>> for Arc<[T]> {
/// assert_eq!(&[1, 2, 3], &shared[..]);
/// ```
#[inline]
- fn from(mut v: Vec<T>) -> Arc<[T]> {
+ fn from(v: Vec<T, A>) -> Arc<[T], A> {
unsafe {
- let rc = Arc::copy_from_slice(&v);
- // Allow the Vec to free its memory, but not destroy its contents
- v.set_len(0);
- rc
+ let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
+
+ let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
+ ptr::copy_nonoverlapping(vec_ptr, &mut (*rc_ptr).data as *mut [T] as *mut T, len);
+
+ // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
+ // without dropping its contents or the allocator
+ let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
+
+ Self::from_ptr_in(rc_ptr, alloc)
}
}
}
@@ -2812,12 +3420,13 @@ impl From<Arc<str>> for Arc<[u8]> {
}
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
-impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
- type Error = Arc<[T]>;
+impl<T, A: Allocator + Clone, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
+ type Error = Arc<[T], A>;
- fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
+ fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
if boxed_slice.len() == N {
- Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
+ let alloc = boxed_slice.alloc.clone();
+ Ok(unsafe { Arc::from_raw_in(Arc::into_raw(boxed_slice) as *mut [T; N], alloc) })
} else {
Err(boxed_slice)
}
@@ -2910,21 +3519,21 @@ impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
+impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
fn borrow(&self) -> &T {
&**self
}
}
#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
-impl<T: ?Sized> AsRef<T> for Arc<T> {
+impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
fn as_ref(&self) -> &T {
&**self
}
}
#[stable(feature = "pin", since = "1.33.0")]
-impl<T: ?Sized> Unpin for Arc<T> {}
+impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
/// Get the offset within an `ArcInner` for the payload behind a pointer.
///
@@ -2964,7 +3573,7 @@ impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
core::error::Error::source(&**self)
}
- fn provide<'a>(&'a self, req: &mut core::any::Demand<'a>) {
+ fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
core::error::Error::provide(&**self, req);
}
}
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 598ecf05e..e45ddc789 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -213,7 +213,7 @@ mod spec_extend;
///
/// # Indexing
///
-/// The `Vec` type allows to access values by index, because it implements the
+/// The `Vec` type allows access to values by index, because it implements the
/// [`Index`] trait. An example will be more explicit:
///
/// ```
@@ -2961,7 +2961,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`copy_from_slice`]: slice::copy_from_slice
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "extend_ref", since = "1.2.0")]
-impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
+impl<'a, T: Copy + 'a, A: Allocator> Extend<&'a T> for Vec<T, A> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.spec_extend(iter.into_iter())
}
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 56065ce56..e2f865d0f 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -36,7 +36,7 @@ impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
}
}
-impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for Vec<T, A>
where
I: Iterator<Item = &'a T>,
T: Clone,
@@ -46,7 +46,7 @@ where
}
}
-impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
where
T: Copy,
{
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index 0ba5d088f..cb59a9d4a 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1,4 +1,4 @@
-#![cfg_attr(not(bootstrap), allow(invalid_from_utf8))]
+#![allow(invalid_from_utf8)]
use std::assert_matches::assert_matches;
use std::borrow::Cow;
@@ -1739,6 +1739,28 @@ fn test_utf16_code_units() {
}
#[test]
+fn test_utf16_size_hint() {
+ assert_eq!("".encode_utf16().size_hint(), (0, Some(0)));
+ assert_eq!("123".encode_utf16().size_hint(), (1, Some(3)));
+ assert_eq!("1234".encode_utf16().size_hint(), (2, Some(4)));
+ assert_eq!("12345678".encode_utf16().size_hint(), (3, Some(8)));
+
+ fn hint_vec(src: &str) -> Vec<(usize, Option<usize>)> {
+ let mut it = src.encode_utf16();
+ let mut result = Vec::new();
+ result.push(it.size_hint());
+ while it.next().is_some() {
+ result.push(it.size_hint())
+ }
+ result
+ }
+
+ assert_eq!(hint_vec("12"), [(1, Some(2)), (1, Some(1)), (0, Some(0))]);
+ assert_eq!(hint_vec("\u{101234}"), [(2, Some(4)), (1, Some(1)), (0, Some(0))]);
+ assert_eq!(hint_vec("\u{101234}a"), [(2, Some(5)), (2, Some(2)), (1, Some(1)), (0, Some(0))]);
+}
+
+#[test]
fn starts_with_in_unicode() {
assert!(!"├── Cargo.toml".starts_with("# "));
}
@@ -2416,10 +2438,7 @@ fn ceil_char_boundary() {
check_many("🇯🇵", 0..=0, 0);
check_many("🇯🇵", 1..=4, 4);
check_many("🇯🇵", 5..=8, 8);
-}
-#[test]
-#[should_panic]
-fn ceil_char_boundary_above_len_panic() {
- let _ = "x".ceil_char_boundary(2);
+ // above len
+ check_many("hello", 5..=10, 5);
}
diff --git a/library/alloc/tests/string.rs b/library/alloc/tests/string.rs
index 17d56d491..711e4eef2 100644
--- a/library/alloc/tests/string.rs
+++ b/library/alloc/tests/string.rs
@@ -368,29 +368,73 @@ fn remove_bad() {
#[test]
fn test_remove_matches() {
+ // test_single_pattern_occurrence
let mut s = "abc".to_string();
-
s.remove_matches('b');
assert_eq!(s, "ac");
+ // repeat_test_single_pattern_occurrence
s.remove_matches('b');
assert_eq!(s, "ac");
+ // test_single_character_pattern
let mut s = "abcb".to_string();
-
s.remove_matches('b');
assert_eq!(s, "ac");
+ // test_pattern_with_special_characters
let mut s = "ศไทย中华Việt Nam; foobarศ".to_string();
s.remove_matches('ศ');
assert_eq!(s, "ไทย中华Việt Nam; foobar");
+ // test_pattern_empty_text_and_pattern
let mut s = "".to_string();
s.remove_matches("");
assert_eq!(s, "");
+ // test_pattern_empty_text
+ let mut s = "".to_string();
+ s.remove_matches("something");
+ assert_eq!(s, "");
+
+ // test_empty_pattern
+ let mut s = "Testing with empty pattern.".to_string();
+ s.remove_matches("");
+ assert_eq!(s, "Testing with empty pattern.");
+
+ // test_multiple_consecutive_patterns_1
let mut s = "aaaaa".to_string();
s.remove_matches('a');
assert_eq!(s, "");
+
+ // test_multiple_consecutive_patterns_2
+ let mut s = "Hello **world****today!**".to_string();
+ s.remove_matches("**");
+ assert_eq!(s, "Hello worldtoday!");
+
+ // test_case_insensitive_pattern
+ let mut s = "CASE ** SeNsItIvE ** PaTtErN.".to_string();
+ s.remove_matches("sEnSiTiVe");
+ assert_eq!(s, "CASE ** SeNsItIvE ** PaTtErN.");
+
+ // test_pattern_with_digits
+ let mut s = "123 ** 456 ** 789".to_string();
+ s.remove_matches("**");
+ assert_eq!(s, "123 456 789");
+
+ // test_pattern_occurs_after_empty_string
+ let mut s = "abc X defXghi".to_string();
+ s.remove_matches("X");
+ assert_eq!(s, "abc defghi");
+
+ // test_large_pattern
+ let mut s = "aaaXbbbXcccXdddXeee".to_string();
+ s.remove_matches("X");
+ assert_eq!(s, "aaabbbcccdddeee");
+
+ // test_pattern_at_multiple_positions
+ let mut s = "Pattern ** found ** multiple ** times ** in ** text.".to_string();
+ s.remove_matches("**");
+ assert_eq!(s, "Pattern found multiple times in text.");
}
#[test]
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index ddd93e9a4..183dd8e6e 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -2498,3 +2498,68 @@ fn test_into_flattened_size_overflow() {
let v = vec![[(); usize::MAX]; 2];
let _ = v.into_flattened();
}
+
+#[cfg(not(bootstrap))]
+#[test]
+fn test_box_zero_allocator() {
+ use core::{alloc::AllocError, cell::RefCell};
+ use std::collections::HashSet;
+
+ // Track ZST allocations and ensure that they all have a matching free.
+ struct ZstTracker {
+ state: RefCell<(HashSet<usize>, usize)>,
+ }
+ unsafe impl Allocator for ZstTracker {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let ptr = if layout.size() == 0 {
+ let mut state = self.state.borrow_mut();
+ let addr = state.1;
+ assert!(state.0.insert(addr));
+ state.1 += 1;
+ std::println!("allocating {addr}");
+ std::ptr::invalid_mut(addr)
+ } else {
+ unsafe { std::alloc::alloc(layout) }
+ };
+ Ok(NonNull::slice_from_raw_parts(NonNull::new(ptr).ok_or(AllocError)?, layout.size()))
+ }
+
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() == 0 {
+ let addr = ptr.as_ptr() as usize;
+ let mut state = self.state.borrow_mut();
+ std::println!("freeing {addr}");
+ assert!(state.0.remove(&addr), "ZST free that wasn't allocated");
+ } else {
+ unsafe { std::alloc::dealloc(ptr.as_ptr(), layout) }
+ }
+ }
+ }
+
+ // Start the state at 100 to avoid returning null pointers.
+ let alloc = ZstTracker { state: RefCell::new((HashSet::new(), 100)) };
+
+ // Ensure that unsizing retains the same behavior.
+ {
+ let b1: Box<[u8; 0], &ZstTracker> = Box::new_in([], &alloc);
+ let b2: Box<[u8], &ZstTracker> = b1.clone();
+ let _b3: Box<[u8], &ZstTracker> = b2.clone();
+ }
+
+ // Ensure that shrinking doesn't leak a ZST allocation.
+ {
+ let mut v1: Vec<u8, &ZstTracker> = Vec::with_capacity_in(100, &alloc);
+ v1.shrink_to_fit();
+ }
+
+ // Ensure that conversion to/from vec works.
+ {
+ let v1: Vec<(), &ZstTracker> = Vec::with_capacity_in(100, &alloc);
+ let _b1: Box<[()], &ZstTracker> = v1.into_boxed_slice();
+ let b2: Box<[()], &ZstTracker> = Box::new_in([(), (), ()], &alloc);
+ let _v2: Vec<(), &ZstTracker> = b2.into();
+ }
+
+ // Ensure all ZSTs have been freed.
+ assert!(alloc.state.borrow().0.is_empty());
+}
diff --git a/library/core/benches/iter.rs b/library/core/benches/iter.rs
index 5ec22e514..05fec0c4b 100644
--- a/library/core/benches/iter.rs
+++ b/library/core/benches/iter.rs
@@ -473,6 +473,7 @@ fn bench_next_chunk_copied(b: &mut Bencher) {
/// Exercises the TrustedRandomAccess specialization in ArrayChunks
#[bench]
+#[allow(noop_method_call)]
fn bench_next_chunk_trusted_random_access(b: &mut Bencher) {
let v = vec![1u8; 1024];
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 09f52d692..8f5404d97 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -83,72 +83,6 @@
//! }
//! ```
//!
-//! # `Provider` and `Demand`
-//!
-//! `Provider` and the associated APIs support generic, type-driven access to data, and a mechanism
-//! for implementers to provide such data. The key parts of the interface are the `Provider`
-//! trait for objects which can provide data, and the [`request_value`] and [`request_ref`]
-//! functions for requesting data from an object which implements `Provider`. Generally, end users
-//! should not call `request_*` directly, they are helper functions for intermediate implementers
-//! to use to implement a user-facing interface. This is purely for the sake of ergonomics, there is
-//! no safety concern here; intermediate implementers can typically support methods rather than
-//! free functions and use more specific names.
-//!
-//! Typically, a data provider is a trait object of a trait which extends `Provider`. A user will
-//! request data from a trait object by specifying the type of the data.
-//!
-//! ## Data flow
-//!
-//! * A user requests an object of a specific type, which is delegated to `request_value` or
-//! `request_ref`
-//! * `request_*` creates a `Demand` object and passes it to `Provider::provide`
-//! * The data provider's implementation of `Provider::provide` tries providing values of
-//! different types using `Demand::provide_*`. If the type matches the type requested by
-//! the user, the value will be stored in the `Demand` object.
-//! * `request_*` unpacks the `Demand` object and returns any stored value to the user.
-//!
-//! ## Examples
-//!
-//! ```
-//! # #![feature(provide_any)]
-//! use std::any::{Provider, Demand, request_ref};
-//!
-//! // Definition of MyTrait, a data provider.
-//! trait MyTrait: Provider {
-//! // ...
-//! }
-//!
-//! // Methods on `MyTrait` trait objects.
-//! impl dyn MyTrait + '_ {
-//! /// Get a reference to a field of the implementing struct.
-//! pub fn get_context_by_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
-//! request_ref::<T>(self)
-//! }
-//! }
-//!
-//! // Downstream implementation of `MyTrait` and `Provider`.
-//! # struct SomeConcreteType { some_string: String }
-//! impl MyTrait for SomeConcreteType {
-//! // ...
-//! }
-//!
-//! impl Provider for SomeConcreteType {
-//! fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
-//! // Provide a string reference. We could provide multiple values with
-//! // different types here.
-//! demand.provide_ref::<String>(&self.some_string);
-//! }
-//! }
-//!
-//! // Downstream usage of `MyTrait`.
-//! fn use_my_trait(obj: &dyn MyTrait) {
-//! // Request a &String from obj.
-//! let _ = obj.get_context_by_ref::<String>().unwrap();
-//! }
-//! ```
-//!
-//! In this example, if the concrete type of `obj` in `use_my_trait` is `SomeConcreteType`, then
-//! the `get_context_by_ref` call will return a reference to `obj.some_string` with type `&String`.
#![stable(feature = "rust1", since = "1.0.0")]
@@ -697,9 +631,6 @@ impl TypeId {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
pub const fn of<T: ?Sized + 'static>() -> TypeId {
- #[cfg(bootstrap)]
- let t = intrinsics::type_id::<T>() as u128;
- #[cfg(not(bootstrap))]
let t: u128 = intrinsics::type_id::<T>();
TypeId { t }
}
@@ -801,524 +732,3 @@ pub const fn type_name<T: ?Sized>() -> &'static str {
pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
type_name::<T>()
}
-
-///////////////////////////////////////////////////////////////////////////////
-// Provider trait
-///////////////////////////////////////////////////////////////////////////////
-
-/// Trait implemented by a type which can dynamically provide values based on type.
-#[unstable(feature = "provide_any", issue = "96024")]
-pub trait Provider {
- /// Data providers should implement this method to provide *all* values they are able to
- /// provide by using `demand`.
- ///
- /// Note that the `provide_*` methods on `Demand` have short-circuit semantics: if an earlier
- /// method has successfully provided a value, then later methods will not get an opportunity to
- /// provide.
- ///
- /// # Examples
- ///
- /// Provides a reference to a field with type `String` as a `&str`, and a value of
- /// type `i32`.
- ///
- /// ```rust
- /// # #![feature(provide_any)]
- /// use std::any::{Provider, Demand};
- /// # struct SomeConcreteType { field: String, num_field: i32 }
- ///
- /// impl Provider for SomeConcreteType {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand.provide_ref::<str>(&self.field)
- /// .provide_value::<i32>(self.num_field);
- /// }
- /// }
- /// ```
- #[unstable(feature = "provide_any", issue = "96024")]
- fn provide<'a>(&'a self, demand: &mut Demand<'a>);
-}
-
-/// Request a value from the `Provider`.
-///
-/// # Examples
-///
-/// Get a string value from a provider.
-///
-/// ```rust
-/// # #![feature(provide_any)]
-/// use std::any::{Provider, request_value};
-///
-/// fn get_string(provider: &impl Provider) -> String {
-/// request_value::<String>(provider).unwrap()
-/// }
-/// ```
-#[unstable(feature = "provide_any", issue = "96024")]
-pub fn request_value<'a, T>(provider: &'a (impl Provider + ?Sized)) -> Option<T>
-where
- T: 'static,
-{
- request_by_type_tag::<'a, tags::Value<T>>(provider)
-}
-
-/// Request a reference from the `Provider`.
-///
-/// # Examples
-///
-/// Get a string reference from a provider.
-///
-/// ```rust
-/// # #![feature(provide_any)]
-/// use std::any::{Provider, request_ref};
-///
-/// fn get_str(provider: &impl Provider) -> &str {
-/// request_ref::<str>(provider).unwrap()
-/// }
-/// ```
-#[unstable(feature = "provide_any", issue = "96024")]
-pub fn request_ref<'a, T>(provider: &'a (impl Provider + ?Sized)) -> Option<&'a T>
-where
- T: 'static + ?Sized,
-{
- request_by_type_tag::<'a, tags::Ref<tags::MaybeSizedValue<T>>>(provider)
-}
-
-/// Request a specific value by tag from the `Provider`.
-fn request_by_type_tag<'a, I>(provider: &'a (impl Provider + ?Sized)) -> Option<I::Reified>
-where
- I: tags::Type<'a>,
-{
- let mut tagged = TaggedOption::<'a, I>(None);
- provider.provide(tagged.as_demand());
- tagged.0
-}
-
-///////////////////////////////////////////////////////////////////////////////
-// Demand and its methods
-///////////////////////////////////////////////////////////////////////////////
-
-/// A helper object for providing data by type.
-///
-/// A data provider provides values by calling this type's provide methods.
-#[unstable(feature = "provide_any", issue = "96024")]
-#[cfg_attr(not(doc), repr(transparent))] // work around https://github.com/rust-lang/rust/issues/90435
-pub struct Demand<'a>(dyn Erased<'a> + 'a);
-
-impl<'a> Demand<'a> {
- /// Create a new `&mut Demand` from a `&mut dyn Erased` trait object.
- fn new<'b>(erased: &'b mut (dyn Erased<'a> + 'a)) -> &'b mut Demand<'a> {
- // SAFETY: transmuting `&mut (dyn Erased<'a> + 'a)` to `&mut Demand<'a>` is safe since
- // `Demand` is repr(transparent).
- unsafe { &mut *(erased as *mut dyn Erased<'a> as *mut Demand<'a>) }
- }
-
- /// Provide a value or other type with only static lifetimes.
- ///
- /// # Examples
- ///
- /// Provides an `u8`.
- ///
- /// ```rust
- /// #![feature(provide_any)]
- ///
- /// use std::any::{Provider, Demand};
- /// # struct SomeConcreteType { field: u8 }
- ///
- /// impl Provider for SomeConcreteType {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand.provide_value::<u8>(self.field);
- /// }
- /// }
- /// ```
- #[unstable(feature = "provide_any", issue = "96024")]
- pub fn provide_value<T>(&mut self, value: T) -> &mut Self
- where
- T: 'static,
- {
- self.provide::<tags::Value<T>>(value)
- }
-
- /// Provide a value or other type with only static lifetimes computed using a closure.
- ///
- /// # Examples
- ///
- /// Provides a `String` by cloning.
- ///
- /// ```rust
- /// #![feature(provide_any)]
- ///
- /// use std::any::{Provider, Demand};
- /// # struct SomeConcreteType { field: String }
- ///
- /// impl Provider for SomeConcreteType {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand.provide_value_with::<String>(|| self.field.clone());
- /// }
- /// }
- /// ```
- #[unstable(feature = "provide_any", issue = "96024")]
- pub fn provide_value_with<T>(&mut self, fulfil: impl FnOnce() -> T) -> &mut Self
- where
- T: 'static,
- {
- self.provide_with::<tags::Value<T>>(fulfil)
- }
-
- /// Provide a reference. The referee type must be bounded by `'static`,
- /// but may be unsized.
- ///
- /// # Examples
- ///
- /// Provides a reference to a field as a `&str`.
- ///
- /// ```rust
- /// #![feature(provide_any)]
- ///
- /// use std::any::{Provider, Demand};
- /// # struct SomeConcreteType { field: String }
- ///
- /// impl Provider for SomeConcreteType {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand.provide_ref::<str>(&self.field);
- /// }
- /// }
- /// ```
- #[unstable(feature = "provide_any", issue = "96024")]
- pub fn provide_ref<T: ?Sized + 'static>(&mut self, value: &'a T) -> &mut Self {
- self.provide::<tags::Ref<tags::MaybeSizedValue<T>>>(value)
- }
-
- /// Provide a reference computed using a closure. The referee type
- /// must be bounded by `'static`, but may be unsized.
- ///
- /// # Examples
- ///
- /// Provides a reference to a field as a `&str`.
- ///
- /// ```rust
- /// #![feature(provide_any)]
- ///
- /// use std::any::{Provider, Demand};
- /// # struct SomeConcreteType { business: String, party: String }
- /// # fn today_is_a_weekday() -> bool { true }
- ///
- /// impl Provider for SomeConcreteType {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand.provide_ref_with::<str>(|| {
- /// if today_is_a_weekday() {
- /// &self.business
- /// } else {
- /// &self.party
- /// }
- /// });
- /// }
- /// }
- /// ```
- #[unstable(feature = "provide_any", issue = "96024")]
- pub fn provide_ref_with<T: ?Sized + 'static>(
- &mut self,
- fulfil: impl FnOnce() -> &'a T,
- ) -> &mut Self {
- self.provide_with::<tags::Ref<tags::MaybeSizedValue<T>>>(fulfil)
- }
-
- /// Provide a value with the given `Type` tag.
- fn provide<I>(&mut self, value: I::Reified) -> &mut Self
- where
- I: tags::Type<'a>,
- {
- if let Some(res @ TaggedOption(None)) = self.0.downcast_mut::<I>() {
- res.0 = Some(value);
- }
- self
- }
-
- /// Provide a value with the given `Type` tag, using a closure to prevent unnecessary work.
- fn provide_with<I>(&mut self, fulfil: impl FnOnce() -> I::Reified) -> &mut Self
- where
- I: tags::Type<'a>,
- {
- if let Some(res @ TaggedOption(None)) = self.0.downcast_mut::<I>() {
- res.0 = Some(fulfil());
- }
- self
- }
-
- /// Check if the `Demand` would be satisfied if provided with a
- /// value of the specified type. If the type does not match or has
- /// already been provided, returns false.
- ///
- /// # Examples
- ///
- /// Check if an `u8` still needs to be provided and then provides
- /// it.
- ///
- /// ```rust
- /// #![feature(provide_any)]
- ///
- /// use std::any::{Provider, Demand};
- ///
- /// struct Parent(Option<u8>);
- ///
- /// impl Provider for Parent {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// if let Some(v) = self.0 {
- /// demand.provide_value::<u8>(v);
- /// }
- /// }
- /// }
- ///
- /// struct Child {
- /// parent: Parent,
- /// }
- ///
- /// impl Child {
- /// // Pretend that this takes a lot of resources to evaluate.
- /// fn an_expensive_computation(&self) -> Option<u8> {
- /// Some(99)
- /// }
- /// }
- ///
- /// impl Provider for Child {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// // In general, we don't know if this call will provide
- /// // an `u8` value or not...
- /// self.parent.provide(demand);
- ///
- /// // ...so we check to see if the `u8` is needed before
- /// // we run our expensive computation.
- /// if demand.would_be_satisfied_by_value_of::<u8>() {
- /// if let Some(v) = self.an_expensive_computation() {
- /// demand.provide_value::<u8>(v);
- /// }
- /// }
- ///
- /// // The demand will be satisfied now, regardless of if
- /// // the parent provided the value or we did.
- /// assert!(!demand.would_be_satisfied_by_value_of::<u8>());
- /// }
- /// }
- ///
- /// let parent = Parent(Some(42));
- /// let child = Child { parent };
- /// assert_eq!(Some(42), std::any::request_value::<u8>(&child));
- ///
- /// let parent = Parent(None);
- /// let child = Child { parent };
- /// assert_eq!(Some(99), std::any::request_value::<u8>(&child));
- /// ```
- #[unstable(feature = "provide_any", issue = "96024")]
- pub fn would_be_satisfied_by_value_of<T>(&self) -> bool
- where
- T: 'static,
- {
- self.would_be_satisfied_by::<tags::Value<T>>()
- }
-
- /// Check if the `Demand` would be satisfied if provided with a
- /// reference to a value of the specified type. If the type does
- /// not match or has already been provided, returns false.
- ///
- /// # Examples
- ///
- /// Check if a `&str` still needs to be provided and then provides
- /// it.
- ///
- /// ```rust
- /// #![feature(provide_any)]
- ///
- /// use std::any::{Provider, Demand};
- ///
- /// struct Parent(Option<String>);
- ///
- /// impl Provider for Parent {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// if let Some(v) = &self.0 {
- /// demand.provide_ref::<str>(v);
- /// }
- /// }
- /// }
- ///
- /// struct Child {
- /// parent: Parent,
- /// name: String,
- /// }
- ///
- /// impl Child {
- /// // Pretend that this takes a lot of resources to evaluate.
- /// fn an_expensive_computation(&self) -> Option<&str> {
- /// Some(&self.name)
- /// }
- /// }
- ///
- /// impl Provider for Child {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// // In general, we don't know if this call will provide
- /// // a `str` reference or not...
- /// self.parent.provide(demand);
- ///
- /// // ...so we check to see if the `&str` is needed before
- /// // we run our expensive computation.
- /// if demand.would_be_satisfied_by_ref_of::<str>() {
- /// if let Some(v) = self.an_expensive_computation() {
- /// demand.provide_ref::<str>(v);
- /// }
- /// }
- ///
- /// // The demand will be satisfied now, regardless of if
- /// // the parent provided the reference or we did.
- /// assert!(!demand.would_be_satisfied_by_ref_of::<str>());
- /// }
- /// }
- ///
- /// let parent = Parent(Some("parent".into()));
- /// let child = Child { parent, name: "child".into() };
- /// assert_eq!(Some("parent"), std::any::request_ref::<str>(&child));
- ///
- /// let parent = Parent(None);
- /// let child = Child { parent, name: "child".into() };
- /// assert_eq!(Some("child"), std::any::request_ref::<str>(&child));
- /// ```
- #[unstable(feature = "provide_any", issue = "96024")]
- pub fn would_be_satisfied_by_ref_of<T>(&self) -> bool
- where
- T: ?Sized + 'static,
- {
- self.would_be_satisfied_by::<tags::Ref<tags::MaybeSizedValue<T>>>()
- }
-
- fn would_be_satisfied_by<I>(&self) -> bool
- where
- I: tags::Type<'a>,
- {
- matches!(self.0.downcast::<I>(), Some(TaggedOption(None)))
- }
-}
-
-#[unstable(feature = "provide_any", issue = "96024")]
-impl<'a> fmt::Debug for Demand<'a> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Demand").finish_non_exhaustive()
- }
-}
-
-///////////////////////////////////////////////////////////////////////////////
-// Type tags
-///////////////////////////////////////////////////////////////////////////////
-
-mod tags {
- //! Type tags are used to identify a type using a separate value. This module includes type tags
- //! for some very common types.
- //!
- //! Currently type tags are not exposed to the user. But in the future, if you want to use the
- //! Provider API with more complex types (typically those including lifetime parameters), you
- //! will need to write your own tags.
-
- use crate::marker::PhantomData;
-
- /// This trait is implemented by specific tag types in order to allow
- /// describing a type which can be requested for a given lifetime `'a`.
- ///
- /// A few example implementations for type-driven tags can be found in this
- /// module, although crates may also implement their own tags for more
- /// complex types with internal lifetimes.
- pub trait Type<'a>: Sized + 'static {
- /// The type of values which may be tagged by this tag for the given
- /// lifetime.
- type Reified: 'a;
- }
-
- /// Similar to the [`Type`] trait, but represents a type which may be unsized (i.e., has a
- /// `?Sized` bound). E.g., `str`.
- pub trait MaybeSizedType<'a>: Sized + 'static {
- type Reified: 'a + ?Sized;
- }
-
- impl<'a, T: Type<'a>> MaybeSizedType<'a> for T {
- type Reified = T::Reified;
- }
-
- /// Type-based tag for types bounded by `'static`, i.e., with no borrowed elements.
- #[derive(Debug)]
- pub struct Value<T: 'static>(PhantomData<T>);
-
- impl<'a, T: 'static> Type<'a> for Value<T> {
- type Reified = T;
- }
-
- /// Type-based tag similar to [`Value`] but which may be unsized (i.e., has a `?Sized` bound).
- #[derive(Debug)]
- pub struct MaybeSizedValue<T: ?Sized + 'static>(PhantomData<T>);
-
- impl<'a, T: ?Sized + 'static> MaybeSizedType<'a> for MaybeSizedValue<T> {
- type Reified = T;
- }
-
- /// Type-based tag for reference types (`&'a T`, where T is represented by
- /// `<I as MaybeSizedType<'a>>::Reified`.
- #[derive(Debug)]
- pub struct Ref<I>(PhantomData<I>);
-
- impl<'a, I: MaybeSizedType<'a>> Type<'a> for Ref<I> {
- type Reified = &'a I::Reified;
- }
-}
-
-/// An `Option` with a type tag `I`.
-///
-/// Since this struct implements `Erased`, the type can be erased to make a dynamically typed
-/// option. The type can be checked dynamically using `Erased::tag_id` and since this is statically
-/// checked for the concrete type, there is some degree of type safety.
-#[repr(transparent)]
-struct TaggedOption<'a, I: tags::Type<'a>>(Option<I::Reified>);
-
-impl<'a, I: tags::Type<'a>> TaggedOption<'a, I> {
- fn as_demand(&mut self) -> &mut Demand<'a> {
- Demand::new(self as &mut (dyn Erased<'a> + 'a))
- }
-}
-
-/// Represents a type-erased but identifiable object.
-///
-/// This trait is exclusively implemented by the `TaggedOption` type.
-unsafe trait Erased<'a>: 'a {
- /// The `TypeId` of the erased type.
- fn tag_id(&self) -> TypeId;
-}
-
-unsafe impl<'a, I: tags::Type<'a>> Erased<'a> for TaggedOption<'a, I> {
- fn tag_id(&self) -> TypeId {
- TypeId::of::<I>()
- }
-}
-
-#[unstable(feature = "provide_any", issue = "96024")]
-impl<'a> dyn Erased<'a> + 'a {
- /// Returns some reference to the dynamic value if it is tagged with `I`,
- /// or `None` otherwise.
- #[inline]
- fn downcast<I>(&self) -> Option<&TaggedOption<'a, I>>
- where
- I: tags::Type<'a>,
- {
- if self.tag_id() == TypeId::of::<I>() {
- // SAFETY: Just checked whether we're pointing to an I.
- Some(unsafe { &*(self as *const Self).cast::<TaggedOption<'a, I>>() })
- } else {
- None
- }
- }
-
- /// Returns some mutable reference to the dynamic value if it is tagged with `I`,
- /// or `None` otherwise.
- #[inline]
- fn downcast_mut<I>(&mut self) -> Option<&mut TaggedOption<'a, I>>
- where
- I: tags::Type<'a>,
- {
- if self.tag_id() == TypeId::of::<I>() {
- // SAFETY: Just checked whether we're pointing to an I.
- Some(unsafe { &mut *(self as *mut Self).cast::<TaggedOption<'a, I>>() })
- } else {
- None
- }
- }
-}
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index 76b3589b9..ebd4a8c05 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -925,7 +925,7 @@ fn iter_next_chunk_erased<T>(
// so we need to defuse the guard instead of using `?`.
let initialized = guard.initialized;
mem::forget(guard);
- return Err(initialized)
+ return Err(initialized);
};
// SAFETY: The loop condition ensures we have space to push the item
diff --git a/library/core/src/ascii/ascii_char.rs b/library/core/src/ascii/ascii_char.rs
index f093a0990..5378b210e 100644
--- a/library/core/src/ascii/ascii_char.rs
+++ b/library/core/src/ascii/ascii_char.rs
@@ -518,14 +518,14 @@ impl AsciiChar {
/// Gets this ASCII character as a byte.
#[unstable(feature = "ascii_char", issue = "110998")]
#[inline]
- pub const fn as_u8(self) -> u8 {
+ pub const fn to_u8(self) -> u8 {
self as u8
}
/// Gets this ASCII character as a `char` Unicode Scalar Value.
#[unstable(feature = "ascii_char", issue = "110998")]
#[inline]
- pub const fn as_char(self) -> char {
+ pub const fn to_char(self) -> char {
self as u8 as char
}
diff --git a/library/core/src/borrow.rs b/library/core/src/borrow.rs
index efc9ada38..bc026d0a4 100644
--- a/library/core/src/borrow.rs
+++ b/library/core/src/borrow.rs
@@ -22,7 +22,7 @@
/// Types express that they can be borrowed as some type `T` by implementing
/// `Borrow<T>`, providing a reference to a `T` in the trait’s
/// [`borrow`] method. A type is free to borrow as several different types.
-/// If it wishes to mutably borrow as the type – allowing the underlying data
+/// If it wishes to mutably borrow as the type, allowing the underlying data
/// to be modified, it can additionally implement [`BorrowMut<T>`].
///
/// Further, when providing implementations for additional traits, it needs
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 909b32547..bf4c682d3 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -1893,7 +1893,8 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
/// on an _exclusive_ `UnsafeCell<T>`. Even though `T` and `UnsafeCell<T>` have the
/// same memory layout, the following is not allowed and undefined behavior:
///
-/// ```rust,no_run
+#[cfg_attr(bootstrap, doc = "```rust,no_run")]
+#[cfg_attr(not(bootstrap), doc = "```rust,compile_fail")]
/// # use std::cell::UnsafeCell;
/// unsafe fn not_allowed<T>(ptr: &UnsafeCell<T>) -> &mut T {
/// let t = ptr as *const UnsafeCell<T> as *mut T;
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
index 5f06a7b07..2e8534f65 100644
--- a/library/core/src/cell/once.rs
+++ b/library/core/src/cell/once.rs
@@ -250,10 +250,12 @@ impl<T> Default for OnceCell<T> {
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut d = f.debug_tuple("OnceCell");
match self.get() {
- Some(v) => f.debug_tuple("OnceCell").field(v).finish(),
- None => f.write_str("OnceCell(Uninit)"),
- }
+ Some(v) => d.field(v),
+ None => d.field(&format_args!("<uninit>")),
+ };
+ d.finish()
}
}
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index a6d6230d3..d7ca9c22d 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -86,6 +86,46 @@
/// }
/// ```
///
+/// If we `derive`:
+///
+/// ```
+/// #[derive(Copy, Clone)]
+/// struct Generate<T>(fn() -> T);
+/// ```
+///
+/// the auto-derived implementations will have unnecessary `T: Copy` and `T: Clone` bounds:
+///
+/// ```
+/// # struct Generate<T>(fn() -> T);
+///
+/// // Automatically derived
+/// impl<T: Copy> Copy for Generate<T> { }
+///
+/// // Automatically derived
+/// impl<T: Clone> Clone for Generate<T> {
+/// fn clone(&self) -> Generate<T> {
+/// Generate(Clone::clone(&self.0))
+/// }
+/// }
+/// ```
+///
+/// The bounds are unnecessary because clearly the function itself should be
+/// copy- and cloneable even if its return type is not:
+///
+/// ```compile_fail,E0599
+/// #[derive(Copy, Clone)]
+/// struct Generate<T>(fn() -> T);
+///
+/// struct NotCloneable;
+///
+/// fn generate_not_cloneable() -> NotCloneable {
+/// NotCloneable
+/// }
+///
+/// Generate(generate_not_cloneable).clone(); // error: trait bounds were not satisfied
+/// // Note: With the manual implementations the above line will compile.
+/// ```
+///
/// ## Additional implementors
///
/// In addition to the [implementors listed below][impls],
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index faf48ae57..3c127efb3 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -1406,6 +1406,22 @@ mod impls {
_ => unsafe { unreachable_unchecked() },
}
}
+
+ #[inline]
+ fn min(self, other: bool) -> bool {
+ self & other
+ }
+
+ #[inline]
+ fn max(self, other: bool) -> bool {
+ self | other
+ }
+
+ #[inline]
+ fn clamp(self, min: bool, max: bool) -> bool {
+ assert!(min <= max);
+ self.max(min).min(max)
+ }
}
ord_impl! { char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
diff --git a/library/core/src/default.rs b/library/core/src/default.rs
index 1f7be85d3..5242e97eb 100644
--- a/library/core/src/default.rs
+++ b/library/core/src/default.rs
@@ -133,51 +133,6 @@ pub trait Default: Sized {
fn default() -> Self;
}
-/// Return the default value of a type according to the `Default` trait.
-///
-/// The type to return is inferred from context; this is equivalent to
-/// `Default::default()` but shorter to type.
-///
-/// For example:
-/// ```
-/// #![feature(default_free_fn)]
-///
-/// use std::default::default;
-///
-/// #[derive(Default)]
-/// struct AppConfig {
-/// foo: FooConfig,
-/// bar: BarConfig,
-/// }
-///
-/// #[derive(Default)]
-/// struct FooConfig {
-/// foo: i32,
-/// }
-///
-/// #[derive(Default)]
-/// struct BarConfig {
-/// bar: f32,
-/// baz: u8,
-/// }
-///
-/// fn main() {
-/// let options = AppConfig {
-/// foo: default(),
-/// bar: BarConfig {
-/// bar: 10.1,
-/// ..default()
-/// },
-/// };
-/// }
-/// ```
-#[unstable(feature = "default_free_fn", issue = "73014")]
-#[must_use]
-#[inline]
-pub fn default<T: Default>() -> T {
- Default::default()
-}
-
/// Derive macro generating an impl of the trait `Default`.
#[rustc_builtin_macro(Default, attributes(default))]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
diff --git a/library/core/src/error.md b/library/core/src/error.md
index 78808d489..7771b8adc 100644
--- a/library/core/src/error.md
+++ b/library/core/src/error.md
@@ -93,7 +93,8 @@ information that is already communicated by the source error being
unwrapped:
```text
-thread 'main' panicked at 'env variable `IMPORTANT_PATH` is not set: NotPresent', src/main.rs:4:6
+thread 'main' panicked at src/main.rs:4:6:
+env variable `IMPORTANT_PATH` is not set: NotPresent
```
In this example we end up mentioning that an env variable is not set,
@@ -109,7 +110,8 @@ prevent the source error, we end up introducing new information that is
independent from our source error.
```text
-thread 'main' panicked at 'env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`: NotPresent', src/main.rs:4:6
+thread 'main' panicked at src/main.rs:4:6:
+env variable `IMPORTANT_PATH` should be set by `wrapper_script.sh`: NotPresent
```
In this example we are communicating not only the name of the
diff --git a/library/core/src/error.rs b/library/core/src/error.rs
index 11cb08275..1170221c1 100644
--- a/library/core/src/error.rs
+++ b/library/core/src/error.rs
@@ -4,8 +4,8 @@
#[cfg(test)]
mod tests;
-use crate::any::{Demand, Provider, TypeId};
-use crate::fmt::{Debug, Display};
+use crate::any::TypeId;
+use crate::fmt::{Debug, Display, Formatter, Result};
/// `Error` is a trait representing the basic expectations for error values,
/// i.e., values of type `E` in [`Result<T, E>`].
@@ -123,16 +123,21 @@ pub trait Error: Debug + Display {
/// Provides type based access to context intended for error reports.
///
- /// Used in conjunction with [`Demand::provide_value`] and [`Demand::provide_ref`] to extract
+ /// Used in conjunction with [`Request::provide_value`] and [`Request::provide_ref`] to extract
/// references to member variables from `dyn Error` trait objects.
///
/// # Example
///
/// ```rust
- /// #![feature(provide_any)]
/// #![feature(error_generic_member_access)]
+ /// #![feature(error_in_core)]
/// use core::fmt;
- /// use core::any::Demand;
+ /// use core::error::{request_ref, Request};
+ ///
+ /// #[derive(Debug)]
+ /// enum MyLittleTeaPot {
+ /// Empty,
+ /// }
///
/// #[derive(Debug)]
/// struct MyBacktrace {
@@ -147,21 +152,7 @@ pub trait Error: Debug + Display {
/// }
///
/// #[derive(Debug)]
- /// struct SourceError {
- /// // ...
- /// }
- ///
- /// impl fmt::Display for SourceError {
- /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- /// write!(f, "Example Source Error")
- /// }
- /// }
- ///
- /// impl std::error::Error for SourceError {}
- ///
- /// #[derive(Debug)]
/// struct Error {
- /// source: SourceError,
/// backtrace: MyBacktrace,
/// }
///
@@ -172,38 +163,26 @@ pub trait Error: Debug + Display {
/// }
///
/// impl std::error::Error for Error {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand
- /// .provide_ref::<MyBacktrace>(&self.backtrace)
- /// .provide_ref::<dyn std::error::Error + 'static>(&self.source);
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// request
+ /// .provide_ref::<MyBacktrace>(&self.backtrace);
/// }
/// }
///
/// fn main() {
/// let backtrace = MyBacktrace::new();
- /// let source = SourceError {};
- /// let error = Error { source, backtrace };
+ /// let error = Error { backtrace };
/// let dyn_error = &error as &dyn std::error::Error;
- /// let backtrace_ref = dyn_error.request_ref::<MyBacktrace>().unwrap();
+ /// let backtrace_ref = request_ref::<MyBacktrace>(dyn_error).unwrap();
///
/// assert!(core::ptr::eq(&error.backtrace, backtrace_ref));
+ /// assert!(request_ref::<MyLittleTeaPot>(dyn_error).is_none());
/// }
/// ```
#[unstable(feature = "error_generic_member_access", issue = "99301")]
#[allow(unused_variables)]
- fn provide<'a>(&'a self, demand: &mut Demand<'a>) {}
-}
-
-#[unstable(feature = "error_generic_member_access", issue = "99301")]
-impl<E> Provider for E
-where
- E: Error + ?Sized,
-{
- fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- self.provide(demand)
- }
+ fn provide<'a>(&'a self, request: &mut Request<'a>) {}
}
-
mod private {
// This is a hack to prevent `type_id` from being overridden by `Error`
// implementations, since that can enable unsound downcasting.
@@ -215,20 +194,6 @@ mod private {
#[unstable(feature = "never_type", issue = "35121")]
impl Error for ! {}
-impl<'a> dyn Error + 'a {
- /// Request a reference of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_ref<T: ?Sized + 'static>(&'a self) -> Option<&'a T> {
- core::any::request_ref(self)
- }
-
- /// Request a value of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_value<T: 'static>(&'a self) -> Option<T> {
- core::any::request_value(self)
- }
-}
-
// Copied from `any.rs`.
impl dyn Error + 'static {
/// Returns `true` if the inner type is the same as `T`.
@@ -293,18 +258,6 @@ impl dyn Error + 'static + Send {
pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
<dyn Error + 'static>::downcast_mut::<T>(self)
}
-
- /// Request a reference of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
- <dyn Error>::request_ref(self)
- }
-
- /// Request a value of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_value<T: 'static>(&self) -> Option<T> {
- <dyn Error>::request_value(self)
- }
}
impl dyn Error + 'static + Send + Sync {
@@ -328,18 +281,6 @@ impl dyn Error + 'static + Send + Sync {
pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
<dyn Error + 'static>::downcast_mut::<T>(self)
}
-
- /// Request a reference of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> {
- <dyn Error>::request_ref(self)
- }
-
- /// Request a value of type `T` as context about this error.
- #[unstable(feature = "error_generic_member_access", issue = "99301")]
- pub fn request_value<T: 'static>(&self) -> Option<T> {
- <dyn Error>::request_value(self)
- }
}
impl dyn Error {
@@ -412,6 +353,654 @@ impl dyn Error {
}
}
+/// Request a value of type `T` from the given `impl Error`.
+///
+/// # Examples
+///
+/// Get a string value from an error.
+///
+/// ```rust
+/// # #![feature(error_generic_member_access)]
+/// # #![feature(error_in_core)]
+/// use std::error::Error;
+/// use core::error::request_value;
+///
+/// fn get_string(err: &impl Error) -> String {
+/// request_value::<String>(err).unwrap()
+/// }
+/// ```
+#[unstable(feature = "error_generic_member_access", issue = "99301")]
+pub fn request_value<'a, T>(err: &'a (impl Error + ?Sized)) -> Option<T>
+where
+ T: 'static,
+{
+ request_by_type_tag::<'a, tags::Value<T>>(err)
+}
+
+/// Request a reference of type `T` from the given `impl Error`.
+///
+/// # Examples
+///
+/// Get a string reference from an error.
+///
+/// ```rust
+/// # #![feature(error_generic_member_access)]
+/// # #![feature(error_in_core)]
+/// use core::error::Error;
+/// use core::error::request_ref;
+///
+/// fn get_str(err: &impl Error) -> &str {
+/// request_ref::<str>(err).unwrap()
+/// }
+/// ```
+#[unstable(feature = "error_generic_member_access", issue = "99301")]
+pub fn request_ref<'a, T>(err: &'a (impl Error + ?Sized)) -> Option<&'a T>
+where
+ T: 'static + ?Sized,
+{
+ request_by_type_tag::<'a, tags::Ref<tags::MaybeSizedValue<T>>>(err)
+}
+
+/// Request a specific value by tag from the `Error`.
+fn request_by_type_tag<'a, I>(err: &'a (impl Error + ?Sized)) -> Option<I::Reified>
+where
+ I: tags::Type<'a>,
+{
+ let mut tagged = TaggedOption::<'a, I>(None);
+ err.provide(tagged.as_request());
+ tagged.0
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Request and its methods
+///////////////////////////////////////////////////////////////////////////////
+
+/// `Request` supports generic, type-driven access to data. Its use is currently restricted to the
+/// standard library in cases where trait authors wish to allow trait implementors to share generic
+/// information across trait boundaries. The motivating and prototypical use case is
+/// `core::error::Error` which would otherwise require a method per concrete type (eg. a
+/// `std::backtrace::Backtrace` instance that implementors want to expose to users).
+///
+/// # Data flow
+///
+/// To describe the intended data flow for Request objects, let's consider two conceptual users
+/// separated by API boundaries:
+///
+/// * Consumer - the consumer requests objects using a Request instance; eg a crate that offers
+/// fancy `Error`/`Result` reporting to users wants to request a Backtrace from a given `dyn Error`.
+///
+/// * Producer - the producer provides objects when requested via Request; eg. a library with an
+/// `Error` implementation that automatically captures backtraces at the time instances are
+/// created.
+///
+/// The consumer only needs to know where to submit their request and is expected to handle the
+/// request not being fulfilled by the use of `Option<T>` in the responses offered by the producer.
+///
+/// * A Producer initializes the value of one of its fields of a specific type. (or is otherwise
+/// prepared to generate a value requested). eg, `backtrace::Backtrace` or
+/// `std::backtrace::Backtrace`
+/// * A Consumer requests an object of a specific type (say `std::backtrace::Backtrace`). In the
+/// case of a `dyn Error` trait object (the Producer), the methods `request_ref` and
+/// `request_value` are available to simplify obtaining an `Option<T>` for a given type.
+/// * The Producer, when requested, populates the given Request object which is given as a
+/// mutable reference.
+/// * The Consumer extracts a value or reference to the requested type from the `Request` object
+/// wrapped in an `Option<T>`; in the case of `dyn Error` the aforementioned `request_ref` and `
+/// request_value` methods mean that `dyn Error` users don't have to deal with the `Request` type at
+/// all (but `Error` implementors do). The `None` case of the `Option` suggests only that the
+/// Producer cannot currently offer an instance of the requested type, not that it can't or never will.
+///
+/// # Examples
+///
+/// The best way to demonstrate this is using an example implementation of `Error`'s `provide` trait
+/// method:
+///
+/// ```
+/// #![feature(error_generic_member_access)]
+/// #![feature(error_in_core)]
+/// use core::fmt;
+/// use core::error::Request;
+/// use core::error::request_ref;
+///
+/// #[derive(Debug)]
+/// enum MyLittleTeaPot {
+/// Empty,
+/// }
+///
+/// #[derive(Debug)]
+/// struct MyBacktrace {
+/// // ...
+/// }
+///
+/// impl MyBacktrace {
+/// fn new() -> MyBacktrace {
+/// // ...
+/// # MyBacktrace {}
+/// }
+/// }
+///
+/// #[derive(Debug)]
+/// struct Error {
+/// backtrace: MyBacktrace,
+/// }
+///
+/// impl fmt::Display for Error {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// write!(f, "Example Error")
+/// }
+/// }
+///
+/// impl std::error::Error for Error {
+/// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+/// request
+/// .provide_ref::<MyBacktrace>(&self.backtrace);
+/// }
+/// }
+///
+/// fn main() {
+/// let backtrace = MyBacktrace::new();
+/// let error = Error { backtrace };
+/// let dyn_error = &error as &dyn std::error::Error;
+/// let backtrace_ref = request_ref::<MyBacktrace>(dyn_error).unwrap();
+///
+/// assert!(core::ptr::eq(&error.backtrace, backtrace_ref));
+/// assert!(request_ref::<MyLittleTeaPot>(dyn_error).is_none());
+/// }
+/// ```
+///
+#[unstable(feature = "error_generic_member_access", issue = "99301")]
+#[cfg_attr(not(doc), repr(transparent))] // work around https://github.com/rust-lang/rust/issues/90435
+pub struct Request<'a>(dyn Erased<'a> + 'a);
+
+impl<'a> Request<'a> {
+ /// Create a new `&mut Request` from a `&mut dyn Erased` trait object.
+ fn new<'b>(erased: &'b mut (dyn Erased<'a> + 'a)) -> &'b mut Request<'a> {
+ // SAFETY: transmuting `&mut (dyn Erased<'a> + 'a)` to `&mut Request<'a>` is safe since
+ // `Request` is repr(transparent).
+ unsafe { &mut *(erased as *mut dyn Erased<'a> as *mut Request<'a>) }
+ }
+
+ /// Provide a value or other type with only static lifetimes.
+ ///
+ /// # Examples
+ ///
+ /// Provides an `u8`.
+ ///
+ /// ```rust
+ /// #![feature(error_generic_member_access)]
+ /// #![feature(error_in_core)]
+ ///
+ /// use core::error::Request;
+ ///
+ /// #[derive(Debug)]
+ /// struct SomeConcreteType { field: u8 }
+ ///
+ /// impl std::fmt::Display for SomeConcreteType {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "{} failed", self.field)
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for SomeConcreteType {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// request.provide_value::<u8>(self.field);
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn provide_value<T>(&mut self, value: T) -> &mut Self
+ where
+ T: 'static,
+ {
+ self.provide::<tags::Value<T>>(value)
+ }
+
+ /// Provide a value or other type with only static lifetimes computed using a closure.
+ ///
+ /// # Examples
+ ///
+ /// Provides a `String` by cloning.
+ ///
+ /// ```rust
+ /// #![feature(error_generic_member_access)]
+ /// #![feature(error_in_core)]
+ ///
+ /// use core::error::Request;
+ ///
+ /// #[derive(Debug)]
+ /// struct SomeConcreteType { field: String }
+ ///
+ /// impl std::fmt::Display for SomeConcreteType {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "{} failed", self.field)
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for SomeConcreteType {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// request.provide_value_with::<String>(|| self.field.clone());
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn provide_value_with<T>(&mut self, fulfil: impl FnOnce() -> T) -> &mut Self
+ where
+ T: 'static,
+ {
+ self.provide_with::<tags::Value<T>>(fulfil)
+ }
+
+ /// Provide a reference. The referee type must be bounded by `'static`,
+ /// but may be unsized.
+ ///
+ /// # Examples
+ ///
+ /// Provides a reference to a field as a `&str`.
+ ///
+ /// ```rust
+ /// #![feature(error_generic_member_access)]
+ /// #![feature(error_in_core)]
+ ///
+ /// use core::error::Request;
+ ///
+ /// #[derive(Debug)]
+ /// struct SomeConcreteType { field: String }
+ ///
+ /// impl std::fmt::Display for SomeConcreteType {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "{} failed", self.field)
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for SomeConcreteType {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// request.provide_ref::<str>(&self.field);
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn provide_ref<T: ?Sized + 'static>(&mut self, value: &'a T) -> &mut Self {
+ self.provide::<tags::Ref<tags::MaybeSizedValue<T>>>(value)
+ }
+
+ /// Provide a reference computed using a closure. The referee type
+ /// must be bounded by `'static`, but may be unsized.
+ ///
+ /// # Examples
+ ///
+ /// Provides a reference to a field as a `&str`.
+ ///
+ /// ```rust
+ /// #![feature(error_generic_member_access)]
+ /// #![feature(error_in_core)]
+ ///
+ /// use core::error::Request;
+ ///
+ /// #[derive(Debug)]
+ /// struct SomeConcreteType { business: String, party: String }
+ /// fn today_is_a_weekday() -> bool { true }
+ ///
+ /// impl std::fmt::Display for SomeConcreteType {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "{} failed", self.business)
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for SomeConcreteType {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// request.provide_ref_with::<str>(|| {
+ /// if today_is_a_weekday() {
+ /// &self.business
+ /// } else {
+ /// &self.party
+ /// }
+ /// });
+ /// }
+ /// }
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn provide_ref_with<T: ?Sized + 'static>(
+ &mut self,
+ fulfil: impl FnOnce() -> &'a T,
+ ) -> &mut Self {
+ self.provide_with::<tags::Ref<tags::MaybeSizedValue<T>>>(fulfil)
+ }
+
+ /// Provide a value with the given `Type` tag.
+ fn provide<I>(&mut self, value: I::Reified) -> &mut Self
+ where
+ I: tags::Type<'a>,
+ {
+ if let Some(res @ TaggedOption(None)) = self.0.downcast_mut::<I>() {
+ res.0 = Some(value);
+ }
+ self
+ }
+
+ /// Provide a value with the given `Type` tag, using a closure to prevent unnecessary work.
+ fn provide_with<I>(&mut self, fulfil: impl FnOnce() -> I::Reified) -> &mut Self
+ where
+ I: tags::Type<'a>,
+ {
+ if let Some(res @ TaggedOption(None)) = self.0.downcast_mut::<I>() {
+ res.0 = Some(fulfil());
+ }
+ self
+ }
+
+ /// Check if the `Request` would be satisfied if provided with a
+ /// value of the specified type. If the type does not match or has
+ /// already been provided, returns false.
+ ///
+ /// # Examples
+ ///
+ /// Check if an `u8` still needs to be provided and then provides
+ /// it.
+ ///
+ /// ```rust
+ /// #![feature(error_generic_member_access)]
+ /// #![feature(error_in_core)]
+ ///
+ /// use core::error::Request;
+ /// use core::error::request_value;
+ ///
+ /// #[derive(Debug)]
+ /// struct Parent(Option<u8>);
+ ///
+ /// impl std::fmt::Display for Parent {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "a parent failed")
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for Parent {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// if let Some(v) = self.0 {
+ /// request.provide_value::<u8>(v);
+ /// }
+ /// }
+ /// }
+ ///
+ /// #[derive(Debug)]
+ /// struct Child {
+ /// parent: Parent,
+ /// }
+ ///
+ /// impl Child {
+ /// // Pretend that this takes a lot of resources to evaluate.
+ /// fn an_expensive_computation(&self) -> Option<u8> {
+ /// Some(99)
+ /// }
+ /// }
+ ///
+ /// impl std::fmt::Display for Child {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "child failed: \n because of parent: {}", self.parent)
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for Child {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// // In general, we don't know if this call will provide
+ /// // an `u8` value or not...
+ /// self.parent.provide(request);
+ ///
+ /// // ...so we check to see if the `u8` is needed before
+ /// // we run our expensive computation.
+ /// if request.would_be_satisfied_by_value_of::<u8>() {
+ /// if let Some(v) = self.an_expensive_computation() {
+ /// request.provide_value::<u8>(v);
+ /// }
+ /// }
+ ///
+ /// // The request will be satisfied now, regardless of if
+ /// // the parent provided the value or we did.
+ /// assert!(!request.would_be_satisfied_by_value_of::<u8>());
+ /// }
+ /// }
+ ///
+ /// let parent = Parent(Some(42));
+ /// let child = Child { parent };
+ /// assert_eq!(Some(42), request_value::<u8>(&child));
+ ///
+ /// let parent = Parent(None);
+ /// let child = Child { parent };
+ /// assert_eq!(Some(99), request_value::<u8>(&child));
+ ///
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn would_be_satisfied_by_value_of<T>(&self) -> bool
+ where
+ T: 'static,
+ {
+ self.would_be_satisfied_by::<tags::Value<T>>()
+ }
+
+ /// Check if the `Request` would be satisfied if provided with a
+ /// reference to a value of the specified type. If the type does
+ /// not match or has already been provided, returns false.
+ ///
+ /// # Examples
+ ///
+ /// Check if a `&str` still needs to be provided and then provides
+ /// it.
+ ///
+ /// ```rust
+ /// #![feature(error_generic_member_access)]
+ /// #![feature(error_in_core)]
+ ///
+ /// use core::error::Request;
+ /// use core::error::request_ref;
+ ///
+ /// #[derive(Debug)]
+ /// struct Parent(Option<String>);
+ ///
+ /// impl std::fmt::Display for Parent {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "a parent failed")
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for Parent {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// if let Some(v) = &self.0 {
+ /// request.provide_ref::<str>(v);
+ /// }
+ /// }
+ /// }
+ ///
+ /// #[derive(Debug)]
+ /// struct Child {
+ /// parent: Parent,
+ /// name: String,
+ /// }
+ ///
+ /// impl Child {
+ /// // Pretend that this takes a lot of resources to evaluate.
+ /// fn an_expensive_computation(&self) -> Option<&str> {
+ /// Some(&self.name)
+ /// }
+ /// }
+ ///
+ /// impl std::fmt::Display for Child {
+ /// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// write!(f, "{} failed: \n {}", self.name, self.parent)
+ /// }
+ /// }
+ ///
+ /// impl std::error::Error for Child {
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// // In general, we don't know if this call will provide
+ /// // a `str` reference or not...
+ /// self.parent.provide(request);
+ ///
+ /// // ...so we check to see if the `&str` is needed before
+ /// // we run our expensive computation.
+ /// if request.would_be_satisfied_by_ref_of::<str>() {
+ /// if let Some(v) = self.an_expensive_computation() {
+ /// request.provide_ref::<str>(v);
+ /// }
+ /// }
+ ///
+ /// // The request will be satisfied now, regardless of if
+ /// // the parent provided the reference or we did.
+ /// assert!(!request.would_be_satisfied_by_ref_of::<str>());
+ /// }
+ /// }
+ ///
+ /// let parent = Parent(Some("parent".into()));
+ /// let child = Child { parent, name: "child".into() };
+ /// assert_eq!(Some("parent"), request_ref::<str>(&child));
+ ///
+ /// let parent = Parent(None);
+ /// let child = Child { parent, name: "child".into() };
+ /// assert_eq!(Some("child"), request_ref::<str>(&child));
+ /// ```
+ #[unstable(feature = "error_generic_member_access", issue = "99301")]
+ pub fn would_be_satisfied_by_ref_of<T>(&self) -> bool
+ where
+ T: ?Sized + 'static,
+ {
+ self.would_be_satisfied_by::<tags::Ref<tags::MaybeSizedValue<T>>>()
+ }
+
+ fn would_be_satisfied_by<I>(&self) -> bool
+ where
+ I: tags::Type<'a>,
+ {
+ matches!(self.0.downcast::<I>(), Some(TaggedOption(None)))
+ }
+}
+
+#[unstable(feature = "error_generic_member_access", issue = "99301")]
+impl<'a> Debug for Request<'a> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_struct("Request").finish_non_exhaustive()
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Type tags
+///////////////////////////////////////////////////////////////////////////////
+
+pub(crate) mod tags {
+ //! Type tags are used to identify a type using a separate value. This module includes type tags
+ //! for some very common types.
+ //!
+ //! Currently type tags are not exposed to the user. But in the future, if you want to use the
+ //! Request API with more complex types (typically those including lifetime parameters), you
+ //! will need to write your own tags.
+
+ use crate::marker::PhantomData;
+
+ /// This trait is implemented by specific tag types in order to allow
+ /// describing a type which can be requested for a given lifetime `'a`.
+ ///
+ /// A few example implementations for type-driven tags can be found in this
+ /// module, although crates may also implement their own tags for more
+ /// complex types with internal lifetimes.
+ pub(crate) trait Type<'a>: Sized + 'static {
+ /// The type of values which may be tagged by this tag for the given
+ /// lifetime.
+ type Reified: 'a;
+ }
+
+ /// Similar to the [`Type`] trait, but represents a type which may be unsized (i.e., has a
+ /// `?Sized` bound). E.g., `str`.
+ pub(crate) trait MaybeSizedType<'a>: Sized + 'static {
+ type Reified: 'a + ?Sized;
+ }
+
+ impl<'a, T: Type<'a>> MaybeSizedType<'a> for T {
+ type Reified = T::Reified;
+ }
+
+ /// Type-based tag for types bounded by `'static`, i.e., with no borrowed elements.
+ #[derive(Debug)]
+ pub(crate) struct Value<T: 'static>(PhantomData<T>);
+
+ impl<'a, T: 'static> Type<'a> for Value<T> {
+ type Reified = T;
+ }
+
+ /// Type-based tag similar to [`Value`] but which may be unsized (i.e., has a `?Sized` bound).
+ #[derive(Debug)]
+ pub(crate) struct MaybeSizedValue<T: ?Sized + 'static>(PhantomData<T>);
+
+ impl<'a, T: ?Sized + 'static> MaybeSizedType<'a> for MaybeSizedValue<T> {
+ type Reified = T;
+ }
+
+ /// Type-based tag for reference types (`&'a T`, where T is represented by
+ /// `<I as MaybeSizedType<'a>>::Reified`.
+ #[derive(Debug)]
+ pub(crate) struct Ref<I>(PhantomData<I>);
+
+ impl<'a, I: MaybeSizedType<'a>> Type<'a> for Ref<I> {
+ type Reified = &'a I::Reified;
+ }
+}
+
+/// An `Option` with a type tag `I`.
+///
+/// Since this struct implements `Erased`, the type can be erased to make a dynamically typed
+/// option. The type can be checked dynamically using `Erased::tag_id` and since this is statically
+/// checked for the concrete type, there is some degree of type safety.
+#[repr(transparent)]
+pub(crate) struct TaggedOption<'a, I: tags::Type<'a>>(pub Option<I::Reified>);
+
+impl<'a, I: tags::Type<'a>> TaggedOption<'a, I> {
+ pub(crate) fn as_request(&mut self) -> &mut Request<'a> {
+ Request::new(self as &mut (dyn Erased<'a> + 'a))
+ }
+}
+
+/// Represents a type-erased but identifiable object.
+///
+/// This trait is exclusively implemented by the `TaggedOption` type.
+unsafe trait Erased<'a>: 'a {
+ /// The `TypeId` of the erased type.
+ fn tag_id(&self) -> TypeId;
+}
+
+unsafe impl<'a, I: tags::Type<'a>> Erased<'a> for TaggedOption<'a, I> {
+ fn tag_id(&self) -> TypeId {
+ TypeId::of::<I>()
+ }
+}
+
+impl<'a> dyn Erased<'a> + 'a {
+ /// Returns some reference to the dynamic value if it is tagged with `I`,
+ /// or `None` otherwise.
+ #[inline]
+ fn downcast<I>(&self) -> Option<&TaggedOption<'a, I>>
+ where
+ I: tags::Type<'a>,
+ {
+ if self.tag_id() == TypeId::of::<I>() {
+ // SAFETY: Just checked whether we're pointing to an I.
+ Some(unsafe { &*(self as *const Self).cast::<TaggedOption<'a, I>>() })
+ } else {
+ None
+ }
+ }
+
+ /// Returns some mutable reference to the dynamic value if it is tagged with `I`,
+ /// or `None` otherwise.
+ #[inline]
+ fn downcast_mut<I>(&mut self) -> Option<&mut TaggedOption<'a, I>>
+ where
+ I: tags::Type<'a>,
+ {
+ if self.tag_id() == TypeId::of::<I>() {
+ // SAFETY: Just checked whether we're pointing to an I.
+ Some(unsafe { &mut *(self as *mut Self).cast::<TaggedOption<'a, I>>() })
+ } else {
+ None
+ }
+ }
+}
+
/// An iterator over an [`Error`] and its sources.
///
/// If you want to omit the initial error and only process
@@ -449,8 +1038,8 @@ impl<'a, T: Error + ?Sized> Error for &'a T {
Error::source(&**self)
}
- fn provide<'b>(&'b self, demand: &mut Demand<'b>) {
- Error::provide(&**self, demand);
+ fn provide<'b>(&'b self, request: &mut Request<'b>) {
+ Error::provide(&**self, request);
}
}
diff --git a/library/core/src/escape.rs b/library/core/src/escape.rs
index 3d471419b..24bb9ad1a 100644
--- a/library/core/src/escape.rs
+++ b/library/core/src/escape.rs
@@ -95,11 +95,11 @@ impl<const N: usize> EscapeIterInner<N> {
}
pub fn next(&mut self) -> Option<u8> {
- self.alive.next().map(|i| self.data[usize::from(i)].as_u8())
+ self.alive.next().map(|i| self.data[usize::from(i)].to_u8())
}
pub fn next_back(&mut self) -> Option<u8> {
- self.alive.next_back().map(|i| self.data[usize::from(i)].as_u8())
+ self.alive.next_back().map(|i| self.data[usize::from(i)].to_u8())
}
pub fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index 39f795c1f..163a65c90 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -20,10 +20,10 @@ use crate::str;
/// in each pair are borrowed references; the latter are owned
/// strings.
///
-/// Note that this structure is **not** `repr(C)` and is not recommended to be
-/// placed in the signatures of FFI functions. Instead, safe wrappers of FFI
-/// functions may leverage the unsafe [`CStr::from_ptr`] constructor to provide
-/// a safe interface to other consumers.
+/// Note that this structure does **not** have a guaranteed layout (the `repr(transparent)`
+/// notwithstanding) and is not recommended to be placed in the signatures of FFI functions.
+/// Instead, safe wrappers of FFI functions may leverage the unsafe [`CStr::from_ptr`] constructor
+/// to provide a safe interface to other consumers.
///
/// [`CString`]: ../../std/ffi/struct.CString.html
/// [`String`]: ../../std/string/struct.String.html
@@ -82,12 +82,12 @@ use crate::str;
#[stable(feature = "core_c_str", since = "1.64.0")]
#[rustc_has_incoherent_inherent_impls]
#[lang = "CStr"]
-// FIXME:
// `fn from` in `impl From<&CStr> for Box<CStr>` current implementation relies
// on `CStr` being layout-compatible with `[u8]`.
-// When attribute privacy is implemented, `CStr` should be annotated as `#[repr(transparent)]`.
-// Anyway, `CStr` representation and layout are considered implementation detail, are
-// not documented and must not be relied upon.
+// However, `CStr` layout is considered an implementation detail and must not be relied upon. We
+// want `repr(transparent)` but we don't want it to show up in rustdoc, so we hide it under
+// `cfg(doc)`. This is an ad-hoc implementation of attribute privacy.
+#[cfg_attr(not(doc), repr(transparent))]
pub struct CStr {
// FIXME: this should not be represented with a DST slice but rather with
// just a raw `c_char` along with some form of marker to make
@@ -197,8 +197,8 @@ impl CStr {
///
/// This function will wrap the provided `ptr` with a `CStr` wrapper, which
/// allows inspection and interoperation of non-owned C strings. The total
- /// size of the raw C string must be smaller than `isize::MAX` **bytes**
- /// in memory due to calling the `slice::from_raw_parts` function.
+ /// size of the terminated buffer must be smaller than [`isize::MAX`] **bytes**
+ /// in memory (a restriction from [`slice::from_raw_parts`]).
///
/// # Safety
///
@@ -253,10 +253,10 @@ impl CStr {
/// ```
///
/// [valid]: core::ptr#safety
- #[inline]
+ #[inline] // inline is necessary for codegen to see strlen.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_cstr_from_ptr", issue = "101719")]
+ #[rustc_const_unstable(feature = "const_cstr_from_ptr", issue = "113219")]
pub const unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
// SAFETY: The caller has provided a pointer that points to a valid C
// string with a NUL terminator of size less than `isize::MAX`, whose
@@ -280,6 +280,8 @@ impl CStr {
len
}
+ // `inline` is necessary for codegen to see strlen.
+ #[inline]
fn strlen_rt(s: *const c_char) -> usize {
extern "C" {
/// Provided by libc or compiler_builtins.
@@ -295,11 +297,11 @@ impl CStr {
}
}
- /// Creates a C string wrapper from a byte slice.
+ /// Creates a C string wrapper from a byte slice with any number of nuls.
///
/// This method will create a `CStr` from any byte slice that contains at
- /// least one nul byte. The caller does not need to know or specify where
- /// the nul byte is located.
+ /// least one nul byte. Unlike with [`CStr::from_bytes_with_nul`], the caller
+ /// does not need to know where the nul byte is located.
///
/// If the first byte is a nul character, this method will return an
/// empty `CStr`. If multiple nul characters are present, the `CStr` will
@@ -341,7 +343,8 @@ impl CStr {
}
}
- /// Creates a C string wrapper from a byte slice.
+ /// Creates a C string wrapper from a byte slice with exactly one nul
+ /// terminator.
///
/// This function will cast the provided `bytes` to a `CStr`
/// wrapper after ensuring that the byte slice is nul-terminated
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index 0488c8076..b2c9a0800 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -52,11 +52,6 @@ macro_rules! type_alias {
}
type_alias! { "c_char.md", c_char = c_char_definition::c_char, NonZero_c_char = c_char_definition::NonZero_c_char;
-// Make this type alias appear cfg-dependent so that Clippy does not suggest
-// replacing `0 as c_char` with `0_i8`/`0_u8`. This #[cfg(all())] can be removed
-// after the false positive in https://github.com/rust-lang/rust-clippy/issues/8093
-// is fixed.
-#[cfg(all())]
#[doc(cfg(all()))] }
type_alias! { "c_schar.md", c_schar = i8, NonZero_c_schar = NonZeroI8; }
@@ -115,7 +110,8 @@ mod c_char_definition {
target_arch = "powerpc64",
target_arch = "s390x",
target_arch = "riscv64",
- target_arch = "riscv32"
+ target_arch = "riscv32",
+ target_arch = "csky"
)
),
all(target_os = "android", any(target_arch = "aarch64", target_arch = "arm")),
diff --git a/library/core/src/fmt/builders.rs b/library/core/src/fmt/builders.rs
index 36f49d51c..d2c9f9800 100644
--- a/library/core/src/fmt/builders.rs
+++ b/library/core/src/fmt/builders.rs
@@ -518,7 +518,7 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> {
/// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
/// fmt.debug_set()
/// .entries(self.0.iter())
- /// .finish() // Ends the struct formatting.
+ /// .finish() // Ends the set formatting.
/// }
/// }
///
@@ -648,7 +648,7 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> {
/// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
/// fmt.debug_list()
/// .entries(self.0.iter())
- /// .finish() // Ends the struct formatting.
+ /// .finish() // Ends the list formatting.
/// }
/// }
///
@@ -905,7 +905,7 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> {
/// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
/// fmt.debug_map()
/// .entries(self.0.iter().map(|&(ref k, ref v)| (k, v)))
- /// .finish() // Ends the struct formatting.
+ /// .finish() // Ends the map formatting.
/// }
/// }
///
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index 1786b309c..9ce6093f1 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -2521,22 +2521,12 @@ impl<T: Copy + Debug> Debug for Cell<T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Debug> Debug for RefCell<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ let mut d = f.debug_struct("RefCell");
match self.try_borrow() {
- Ok(borrow) => f.debug_struct("RefCell").field("value", &borrow).finish(),
- Err(_) => {
- // The RefCell is mutably borrowed so we can't look at its value
- // here. Show a placeholder instead.
- struct BorrowedPlaceholder;
-
- impl Debug for BorrowedPlaceholder {
- fn fmt(&self, f: &mut Formatter<'_>) -> Result {
- f.write_str("<borrowed>")
- }
- }
-
- f.debug_struct("RefCell").field("value", &BorrowedPlaceholder).finish()
- }
- }
+ Ok(borrow) => d.field("value", &borrow),
+ Err(_) => d.field("value", &format_args!("<borrowed>")),
+ };
+ d.finish()
}
}
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 5a9a7013a..676d4f2f3 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -9,7 +9,7 @@
//! This includes changes in the stability of the constness.
//!
//! In order to make an intrinsic usable at compile-time, one needs to copy the implementation
-//! from <https://github.com/rust-lang/miri/blob/master/src/shims/intrinsics.rs> to
+//! from <https://github.com/rust-lang/miri/blob/master/src/shims/intrinsics> to
//! <https://github.com/rust-lang/rust/blob/master/compiler/rustc_const_eval/src/interpret/intrinsics.rs> and add a
//! `#[rustc_const_unstable(feature = "const_such_and_such", issue = "01234")]` to the intrinsic declaration.
//!
@@ -1057,23 +1057,6 @@ extern "rust-intrinsic" {
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
#[rustc_safe_intrinsic]
#[rustc_nounwind]
- #[cfg(bootstrap)]
- pub fn type_id<T: ?Sized + 'static>() -> u64;
-
- /// Gets an identifier which is globally unique to the specified type. This
- /// function will return the same value for a type regardless of whichever
- /// crate it is invoked in.
- ///
- /// Note that, unlike most intrinsics, this is safe to call;
- /// it does not require an `unsafe` block.
- /// Therefore, implementations must not require the user to uphold
- /// any safety invariants.
- ///
- /// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
- #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
- #[rustc_safe_intrinsic]
- #[rustc_nounwind]
- #[cfg(not(bootstrap))]
pub fn type_id<T: ?Sized + 'static>() -> u128;
/// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
@@ -2402,6 +2385,25 @@ extern "rust-intrinsic" {
#[rustc_nounwind]
pub fn raw_eq<T>(a: &T, b: &T) -> bool;
+ /// Lexicographically compare `[left, left + bytes)` and `[right, right + bytes)`
+ /// as unsigned bytes, returning negative if `left` is less, zero if all the
+ /// bytes match, or positive if `right` is greater.
+ ///
+ /// This underlies things like `<[u8]>::cmp`, and will usually lower to `memcmp`.
+ ///
+ /// # Safety
+ ///
+ /// `left` and `right` must each be [valid] for reads of `bytes` bytes.
+ ///
+ /// Note that this applies to the whole range, not just until the first byte
+ /// that differs. That allows optimizations that can read in large chunks.
+ ///
+ /// [valid]: crate::ptr#safety
+ #[cfg(not(bootstrap))]
+ #[rustc_const_unstable(feature = "const_intrinsic_compare_bytes", issue = "none")]
+ #[rustc_nounwind]
+ pub fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32;
+
/// See documentation of [`std::hint::black_box`] for details.
///
/// [`std::hint::black_box`]: crate::hint::black_box
@@ -2541,12 +2543,14 @@ pub(crate) use assert_unsafe_precondition;
/// Checks whether `ptr` is properly aligned with respect to
/// `align_of::<T>()`.
+#[inline]
pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
!ptr.is_null() && ptr.is_aligned()
}
/// Checks whether an allocation of `len` instances of `T` exceeds
/// the maximum allowed allocation size.
+#[inline]
pub(crate) fn is_valid_allocation_size<T>(len: usize) -> bool {
let max_len = const {
let size = crate::mem::size_of::<T>();
@@ -2557,6 +2561,7 @@ pub(crate) fn is_valid_allocation_size<T>(len: usize) -> bool {
/// Checks whether the regions of memory starting at `src` and `dst` of size
/// `count * size_of::<T>()` do *not* overlap.
+#[inline]
pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
let src_usize = src.addr();
let dst_usize = dst.addr();
@@ -2839,3 +2844,18 @@ pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
write_bytes(dst, val, count)
}
}
+
+/// Backfill for bootstrap
+#[cfg(bootstrap)]
+pub unsafe fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32 {
+ extern "C" {
+ fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> crate::ffi::c_int;
+ }
+
+ if bytes != 0 {
+ // SAFETY: Since bytes is non-zero, the caller has met `memcmp`'s requirements.
+ unsafe { memcmp(left, right, bytes).into() }
+ } else {
+ 0
+ }
+}
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index 5944a0de1..ef0a2fd4e 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -14,6 +14,7 @@
//!
//! ```rust
//! #![feature(core_intrinsics, custom_mir)]
+#![cfg_attr(not(bootstrap), doc = "#![allow(internal_features)]")]
//!
//! use core::intrinsics::mir::*;
//!
@@ -63,6 +64,7 @@
//!
//! ```rust
//! #![feature(core_intrinsics, custom_mir)]
+#![cfg_attr(not(bootstrap), doc = "#![allow(internal_features)]")]
//!
//! use core::intrinsics::mir::*;
//!
@@ -102,17 +104,18 @@
//! }
//!
//! #[custom_mir(dialect = "runtime", phase = "optimized")]
+#![cfg_attr(bootstrap, doc = "#[cfg(any())]")] // disable the following function in doctests when `bootstrap` is set
//! fn push_and_pop<T>(v: &mut Vec<T>, value: T) {
//! mir!(
-//! let unused;
+//! let _unused;
//! let popped;
//!
//! {
-//! Call(unused, pop, Vec::push(v, value))
+//! Call(_unused = Vec::push(v, value), pop)
//! }
//!
//! pop = {
-//! Call(popped, drop, Vec::pop(v))
+//! Call(popped = Vec::pop(v), drop)
//! }
//!
//! drop = {
@@ -273,7 +276,7 @@ define!("mir_return", fn Return() -> BasicBlock);
define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
define!("mir_unreachable", fn Unreachable() -> BasicBlock);
define!("mir_drop", fn Drop<T>(place: T, goto: BasicBlock));
-define!("mir_call", fn Call<T>(place: T, goto: BasicBlock, call: T));
+define!("mir_call", fn Call(call: (), goto: BasicBlock));
define!("mir_storage_live", fn StorageLive<T>(local: T));
define!("mir_storage_dead", fn StorageDead<T>(local: T));
define!("mir_deinit", fn Deinit<T>(place: T));
@@ -315,6 +318,7 @@ define!(
/// # Examples
///
/// ```rust
+ #[cfg_attr(not(bootstrap), doc = "#![allow(internal_features)]")]
/// #![feature(custom_mir, core_intrinsics)]
///
/// use core::intrinsics::mir::*;
diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs
index d3e454563..eee6e5bcc 100644
--- a/library/core/src/iter/adapters/flatten.rs
+++ b/library/core/src/iter/adapters/flatten.rs
@@ -310,7 +310,7 @@ where
/// Real logic of both `Flatten` and `FlatMap` which simply delegate to
/// this type.
#[derive(Clone, Debug)]
-#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
+#[unstable(feature = "trusted_len", issue = "37572")]
struct FlattenCompat<I, U> {
iter: Fuse<I>,
frontiter: Option<U>,
@@ -464,7 +464,6 @@ where
}
}
-#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
impl<I, U> Iterator for FlattenCompat<I, U>
where
I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
@@ -579,7 +578,6 @@ where
}
}
-#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
impl<I, U> DoubleEndedIterator for FlattenCompat<I, U>
where
I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
@@ -649,7 +647,6 @@ where
}
}
-#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
unsafe impl<const N: usize, I, T> TrustedLen
for FlattenCompat<I, <[T; N] as IntoIterator>::IntoIter>
where
@@ -657,7 +654,6 @@ where
{
}
-#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
unsafe impl<'a, const N: usize, I, T> TrustedLen
for FlattenCompat<I, <&'a [T; N] as IntoIterator>::IntoIter>
where
@@ -665,7 +661,6 @@ where
{
}
-#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
unsafe impl<'a, const N: usize, I, T> TrustedLen
for FlattenCompat<I, <&'a mut [T; N] as IntoIterator>::IntoIter>
where
diff --git a/library/core/src/iter/adapters/map_windows.rs b/library/core/src/iter/adapters/map_windows.rs
new file mode 100644
index 000000000..3c0e80b25
--- /dev/null
+++ b/library/core/src/iter/adapters/map_windows.rs
@@ -0,0 +1,293 @@
+use crate::{
+ fmt,
+ iter::{ExactSizeIterator, FusedIterator},
+ mem::{self, MaybeUninit},
+ ptr,
+};
+
+/// An iterator over the mapped windows of another iterator.
+///
+/// This `struct` is created by the [`Iterator::map_windows`]. See its
+/// documentation for more information.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+pub struct MapWindows<I: Iterator, F, const N: usize> {
+ f: F,
+ inner: MapWindowsInner<I, N>,
+}
+
+struct MapWindowsInner<I: Iterator, const N: usize> {
+ // We fuse the inner iterator because there shouldn't be "holes" in
+ // the sliding window. Once the iterator returns a `None`, we make
+ // our `MapWindows` iterator return `None` forever.
+ iter: Option<I>,
+ // Since iterators are assumed lazy, i.e. it only yields an item when
+ // `Iterator::next()` is called, and `MapWindows` is not an exception.
+ //
+ // Before the first iteration, we keep the buffer `None`. When the user
+ // first call `next` or other methods that makes the iterator advance,
+ // we collect the first `N` items yielded from the inner iterator and
+ // put it into the buffer.
+ //
+ // When the inner iterator has returned a `None` (i.e. fused), we take
+ // away this `buffer` and leave it `None` to reclaim its resources.
+ //
+ // FIXME: should we shrink the size of `buffer` using niche optimization?
+ buffer: Option<Buffer<I::Item, N>>,
+}
+
+// `Buffer` uses twice the space to reduce moves between iterations.
+// `Buffer<T, N>` is semantically `[MaybeUninit<T>; 2 * N]`. However, due
+// to limitations of const generics, we use this different type. Note that
+// it has the same underlying memory layout.
+struct Buffer<T, const N: usize> {
+ // Invariant: `self.buffer[self.start..self.start + N]` is initialized,
+ // with all other elements being uninitialized. This also
+ // implies that `self.start <= N`.
+ buffer: [[MaybeUninit<T>; N]; 2],
+ start: usize,
+}
+
+impl<I: Iterator, F, const N: usize> MapWindows<I, F, N> {
+ pub(in crate::iter) fn new(iter: I, f: F) -> Self {
+ assert!(N != 0, "array in `Iterator::map_windows` must contain more than 0 elements");
+
+ // Only ZST arrays' length can be so large.
+ if mem::size_of::<I::Item>() == 0 {
+ assert!(
+ N.checked_mul(2).is_some(),
+ "array size of `Iterator::map_windows` is too large"
+ );
+ }
+
+ Self { inner: MapWindowsInner::new(iter), f }
+ }
+}
+
+impl<I: Iterator, const N: usize> MapWindowsInner<I, N> {
+ #[inline]
+ fn new(iter: I) -> Self {
+ Self { iter: Some(iter), buffer: None }
+ }
+
+ fn next_window(&mut self) -> Option<&[I::Item; N]> {
+ let iter = self.iter.as_mut()?;
+ match self.buffer {
+ // It is the first time to advance. We collect
+ // the first `N` items from `self.iter` to initialize `self.buffer`.
+ None => self.buffer = Buffer::try_from_iter(iter),
+ Some(ref mut buffer) => match iter.next() {
+ None => {
+ // Fuse the inner iterator since it yields a `None`.
+ self.iter.take();
+ self.buffer.take();
+ }
+ // Advance the iterator. We first call `next` before changing our buffer
+ // at all. This means that if `next` panics, our invariant is upheld and
+ // our `Drop` impl drops the correct elements.
+ Some(item) => buffer.push(item),
+ },
+ }
+ self.buffer.as_ref().map(Buffer::as_array_ref)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let Some(ref iter) = self.iter else { return (0, Some(0)) };
+ let (lo, hi) = iter.size_hint();
+ if self.buffer.is_some() {
+ // If the first `N` items are already yielded by the inner iterator,
+ // the size hint is then equal to that of the inner iterator.
+ (lo, hi)
+ } else {
+ // If the first `N` items are not yet yielded by the inner iterator,
+ // the first `N` elements should be counted as one window, so both bounds
+ // should subtract `N - 1`.
+ (lo.saturating_sub(N - 1), hi.map(|hi| hi.saturating_sub(N - 1)))
+ }
+ }
+}
+
+impl<T, const N: usize> Buffer<T, N> {
+ fn try_from_iter(iter: &mut impl Iterator<Item = T>) -> Option<Self> {
+ let first_half = crate::array::iter_next_chunk(iter).ok()?;
+ let buffer = [MaybeUninit::new(first_half).transpose(), MaybeUninit::uninit_array()];
+ Some(Self { buffer, start: 0 })
+ }
+
+ #[inline]
+ fn buffer_ptr(&self) -> *const MaybeUninit<T> {
+ self.buffer.as_ptr().cast()
+ }
+
+ #[inline]
+ fn buffer_mut_ptr(&mut self) -> *mut MaybeUninit<T> {
+ self.buffer.as_mut_ptr().cast()
+ }
+
+ #[inline]
+ fn as_array_ref(&self) -> &[T; N] {
+ debug_assert!(self.start + N <= 2 * N);
+
+ // SAFETY: our invariant guarantees these elements are initialized.
+ unsafe { &*self.buffer_ptr().add(self.start).cast() }
+ }
+
+ #[inline]
+ fn as_uninit_array_mut(&mut self) -> &mut MaybeUninit<[T; N]> {
+ debug_assert!(self.start + N <= 2 * N);
+
+ // SAFETY: our invariant guarantees these elements are in bounds.
+ unsafe { &mut *self.buffer_mut_ptr().add(self.start).cast() }
+ }
+
+ /// Pushes a new item `next` to the back, and pops the front-most one.
+ ///
+ /// All the elements will be shifted to the front end when pushing reaches
+ /// the back end.
+ fn push(&mut self, next: T) {
+ let buffer_mut_ptr = self.buffer_mut_ptr();
+ debug_assert!(self.start + N <= 2 * N);
+
+ let to_drop = if self.start == N {
+ // We have reached the end of our buffer and have to copy
+ // everything to the start. Example layout for N = 3.
+ //
+ // 0 1 2 3 4 5 0 1 2 3 4 5
+ // ┌───┬───┬───┬───┬───┬───┐ ┌───┬───┬───┬───┬───┬───┐
+ // │ - │ - │ - │ a │ b │ c │ -> │ b │ c │ n │ - │ - │ - │
+ // └───┴───┴───┴───┴───┴───┘ └───┴───┴───┴───┴───┴───┘
+ // ↑ ↑
+ // start start
+
+ // SAFETY: the two pointers are valid for reads/writes of N - 1
+ // elements because our array's size is semantically 2 * N. The
+ // regions also don't overlap for the same reason.
+ //
+ // We leave the old elements in place. As soon as `start` is set
+ // to 0, we treat them as uninitialized and treat their copies
+ // as initialized.
+ let to_drop = unsafe {
+ ptr::copy_nonoverlapping(buffer_mut_ptr.add(self.start + 1), buffer_mut_ptr, N - 1);
+ (*buffer_mut_ptr.add(N - 1)).write(next);
+ buffer_mut_ptr.add(self.start)
+ };
+ self.start = 0;
+ to_drop
+ } else {
+ // SAFETY: `self.start` is < N as guaranteed by the invariant
+ // plus the check above. Even if the drop at the end panics,
+ // the invariant is upheld.
+ //
+ // Example layout for N = 3:
+ //
+ // 0 1 2 3 4 5 0 1 2 3 4 5
+ // ┌───┬───┬───┬───┬───┬───┐ ┌───┬───┬───┬───┬───┬───┐
+ // │ - │ a │ b │ c │ - │ - │ -> │ - │ - │ b │ c │ n │ - │
+ // └───┴───┴───┴───┴───┴───┘ └───┴───┴───┴───┴───┴───┘
+ // ↑ ↑
+ // start start
+ //
+ let to_drop = unsafe {
+ (*buffer_mut_ptr.add(self.start + N)).write(next);
+ buffer_mut_ptr.add(self.start)
+ };
+ self.start += 1;
+ to_drop
+ };
+
+ // SAFETY: the index is valid and this is element `a` in the
+ // diagram above and has not been dropped yet.
+ unsafe { ptr::drop_in_place(to_drop.cast::<T>()) };
+ }
+}
+
+impl<T: Clone, const N: usize> Clone for Buffer<T, N> {
+ fn clone(&self) -> Self {
+ let mut buffer = Buffer {
+ buffer: [MaybeUninit::uninit_array(), MaybeUninit::uninit_array()],
+ start: self.start,
+ };
+ buffer.as_uninit_array_mut().write(self.as_array_ref().clone());
+ buffer
+ }
+}
+
+impl<I, const N: usize> Clone for MapWindowsInner<I, N>
+where
+ I: Iterator + Clone,
+ I::Item: Clone,
+{
+ fn clone(&self) -> Self {
+ Self { iter: self.iter.clone(), buffer: self.buffer.clone() }
+ }
+}
+
+impl<T, const N: usize> Drop for Buffer<T, N> {
+ fn drop(&mut self) {
+ // SAFETY: our invariant guarantees that N elements starting from
+ // `self.start` are initialized. We drop them here.
+ unsafe {
+ let initialized_part: *mut [T] = crate::ptr::slice_from_raw_parts_mut(
+ self.buffer_mut_ptr().add(self.start).cast(),
+ N,
+ );
+ ptr::drop_in_place(initialized_part);
+ }
+ }
+}
+
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+impl<I, F, R, const N: usize> Iterator for MapWindows<I, F, N>
+where
+ I: Iterator,
+ F: FnMut(&[I::Item; N]) -> R,
+{
+ type Item = R;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let window = self.inner.next_window()?;
+ let out = (self.f)(window);
+ Some(out)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+// Note that even if the inner iterator is not fused, the `MapWindows` is still fused,
+// because we don't allow "holes" in the mapping window.
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+impl<I, F, R, const N: usize> FusedIterator for MapWindows<I, F, N>
+where
+ I: Iterator,
+ F: FnMut(&[I::Item; N]) -> R,
+{
+}
+
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+impl<I, F, R, const N: usize> ExactSizeIterator for MapWindows<I, F, N>
+where
+ I: ExactSizeIterator,
+ F: FnMut(&[I::Item; N]) -> R,
+{
+}
+
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+impl<I: Iterator + fmt::Debug, F, const N: usize> fmt::Debug for MapWindows<I, F, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MapWindows").field("iter", &self.inner.iter).finish()
+ }
+}
+
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+impl<I, F, const N: usize> Clone for MapWindows<I, F, N>
+where
+ I: Iterator + Clone,
+ F: Clone,
+ I::Item: Clone,
+{
+ fn clone(&self) -> Self {
+ Self { f: self.f.clone(), inner: self.inner.clone() }
+ }
+}
diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs
index 8cc2b7cec..6f4fa7010 100644
--- a/library/core/src/iter/adapters/mod.rs
+++ b/library/core/src/iter/adapters/mod.rs
@@ -16,6 +16,7 @@ mod inspect;
mod intersperse;
mod map;
mod map_while;
+mod map_windows;
mod peekable;
mod rev;
mod scan;
@@ -57,6 +58,9 @@ pub use self::intersperse::{Intersperse, IntersperseWith};
#[stable(feature = "iter_map_while", since = "1.57.0")]
pub use self::map_while::MapWhile;
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+pub use self::map_windows::MapWindows;
+
#[unstable(feature = "trusted_random_access", issue = "none")]
pub use self::zip::TrustedRandomAccess;
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index de638552f..ca977d1ef 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -361,6 +361,12 @@ macro_rules! impl_fold_via_try_fold {
(rfold -> try_rfold) => {
impl_fold_via_try_fold! { @internal rfold -> try_rfold }
};
+ (spec_fold -> spec_try_fold) => {
+ impl_fold_via_try_fold! { @internal spec_fold -> spec_try_fold }
+ };
+ (spec_rfold -> spec_try_rfold) => {
+ impl_fold_via_try_fold! { @internal spec_rfold -> spec_try_rfold }
+ };
(@internal $fold:ident -> $try_fold:ident) => {
#[inline]
fn $fold<AAA, FFF>(mut self, init: AAA, fold: FFF) -> AAA
@@ -434,6 +440,8 @@ pub use self::adapters::Copied;
pub use self::adapters::Flatten;
#[stable(feature = "iter_map_while", since = "1.57.0")]
pub use self::adapters::MapWhile;
+#[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+pub use self::adapters::MapWindows;
#[unstable(feature = "inplace_iteration", issue = "none")]
pub use self::adapters::SourceIter;
#[stable(feature = "iterator_step_by", since = "1.28.0")]
diff --git a/library/core/src/iter/traits/collect.rs b/library/core/src/iter/traits/collect.rs
index 0675e5635..e0ef5071c 100644
--- a/library/core/src/iter/traits/collect.rs
+++ b/library/core/src/iter/traits/collect.rs
@@ -138,8 +138,6 @@ pub trait FromIterator<A>: Sized {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let five_fives = std::iter::repeat(5).take(5);
///
@@ -255,8 +253,6 @@ pub trait IntoIterator {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let v = [1, 2, 3];
/// let mut iter = v.into_iter();
@@ -363,8 +359,6 @@ pub trait Extend<A> {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// // You can extend a String with some chars:
/// let mut message = String::from("abc");
diff --git a/library/core/src/iter/traits/double_ended.rs b/library/core/src/iter/traits/double_ended.rs
index 182d9f758..4c8af4eba 100644
--- a/library/core/src/iter/traits/double_ended.rs
+++ b/library/core/src/iter/traits/double_ended.rs
@@ -379,4 +379,66 @@ impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I {
fn nth_back(&mut self, n: usize) -> Option<I::Item> {
(**self).nth_back(n)
}
+ fn rfold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.spec_rfold(init, f)
+ }
+ fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.spec_try_rfold(init, f)
+ }
+}
+
+/// Helper trait to specialize `rfold` and `try_rfold` for `&mut I where I: Sized`
+trait DoubleEndedIteratorRefSpec: DoubleEndedIterator {
+ fn spec_rfold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B;
+
+ fn spec_try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>;
+}
+
+impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIteratorRefSpec for &mut I {
+ default fn spec_rfold<B, F>(self, init: B, mut f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+
+ default fn spec_try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x)?;
+ }
+ try { accum }
+ }
+}
+
+impl<I: DoubleEndedIterator> DoubleEndedIteratorRefSpec for &mut I {
+ impl_fold_via_try_fold! { spec_rfold -> spec_try_rfold }
+
+ fn spec_try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ (**self).try_rfold(init, f)
+ }
}
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 988352283..ac1fc26a1 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -10,7 +10,8 @@ use super::super::{ArrayChunks, Chain, Cloned, Copied, Cycle, Enumerate, Filter,
use super::super::{FlatMap, Flatten};
use super::super::{FromIterator, Intersperse, IntersperseWith, Product, Sum, Zip};
use super::super::{
- Inspect, Map, MapWhile, Peekable, Rev, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile,
+ Inspect, Map, MapWhile, MapWindows, Peekable, Rev, Scan, Skip, SkipWhile, StepBy, Take,
+ TakeWhile,
};
fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
@@ -1591,6 +1592,163 @@ pub trait Iterator {
Flatten::new(self)
}
+ /// Calls the given function `f` for each contiguous window of size `N` over
+ /// `self` and returns an iterator over the outputs of `f`. Like [`slice::windows()`],
+ /// the windows during mapping overlap as well.
+ ///
+ /// In the following example, the closure is called three times with the
+ /// arguments `&['a', 'b']`, `&['b', 'c']` and `&['c', 'd']` respectively.
+ ///
+ /// ```
+ /// #![feature(iter_map_windows)]
+ ///
+ /// let strings = "abcd".chars()
+ /// .map_windows(|[x, y]| format!("{}+{}", x, y))
+ /// .collect::<Vec<String>>();
+ ///
+ /// assert_eq!(strings, vec!["a+b", "b+c", "c+d"]);
+ /// ```
+ ///
+ /// Note that the const parameter `N` is usually inferred by the
+ /// destructured argument in the closure.
+ ///
+ /// The returned iterator yields 𝑘 − `N` + 1 items (where 𝑘 is the number of
+ /// items yielded by `self`). If 𝑘 is less than `N`, this method yields an
+ /// empty iterator.
+ ///
+ /// The returned iterator implements [`FusedIterator`], because once `self`
+ /// returns `None`, even if it returns a `Some(T)` again in the next iterations,
+ /// we cannot put it into a contiguous array buffer, and thus the returned iterator
+ /// should be fused.
+ ///
+ /// [`slice::windows()`]: slice::windows
+ /// [`FusedIterator`]: crate::iter::FusedIterator
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N` is 0. This check will most probably get changed to a
+ /// compile time error before this method gets stabilized.
+ ///
+ /// ```should_panic
+ /// #![feature(iter_map_windows)]
+ ///
+ /// let iter = std::iter::repeat(0).map_windows(|&[]| ());
+ /// ```
+ ///
+ /// # Examples
+ ///
+ /// Building the sums of neighboring numbers.
+ ///
+ /// ```
+ /// #![feature(iter_map_windows)]
+ ///
+ /// let mut it = [1, 3, 8, 1].iter().map_windows(|&[a, b]| a + b);
+ /// assert_eq!(it.next(), Some(4)); // 1 + 3
+ /// assert_eq!(it.next(), Some(11)); // 3 + 8
+ /// assert_eq!(it.next(), Some(9)); // 8 + 1
+ /// assert_eq!(it.next(), None);
+ /// ```
+ ///
+ /// Since the elements in the following example implement `Copy`, we can
+ /// just copy the array and get an iterator over the windows.
+ ///
+ /// ```
+ /// #![feature(iter_map_windows)]
+ ///
+ /// let mut it = "ferris".chars().map_windows(|w: &[_; 3]| *w);
+ /// assert_eq!(it.next(), Some(['f', 'e', 'r']));
+ /// assert_eq!(it.next(), Some(['e', 'r', 'r']));
+ /// assert_eq!(it.next(), Some(['r', 'r', 'i']));
+ /// assert_eq!(it.next(), Some(['r', 'i', 's']));
+ /// assert_eq!(it.next(), None);
+ /// ```
+ ///
+ /// You can also use this function to check the sortedness of an iterator.
+ /// For the simple case, rather use [`Iterator::is_sorted`].
+ ///
+ /// ```
+ /// #![feature(iter_map_windows)]
+ ///
+ /// let mut it = [0.5, 1.0, 3.5, 3.0, 8.5, 8.5, f32::NAN].iter()
+ /// .map_windows(|[a, b]| a <= b);
+ ///
+ /// assert_eq!(it.next(), Some(true)); // 0.5 <= 1.0
+ /// assert_eq!(it.next(), Some(true)); // 1.0 <= 3.5
+ /// assert_eq!(it.next(), Some(false)); // 3.5 <= 3.0
+ /// assert_eq!(it.next(), Some(true)); // 3.0 <= 8.5
+ /// assert_eq!(it.next(), Some(true)); // 8.5 <= 8.5
+ /// assert_eq!(it.next(), Some(false)); // 8.5 <= NAN
+ /// assert_eq!(it.next(), None);
+ /// ```
+ ///
+ /// For non-fused iterators, they are fused after `map_windows`.
+ ///
+ /// ```
+ /// #![feature(iter_map_windows)]
+ ///
+ /// #[derive(Default)]
+ /// struct NonFusedIterator {
+ /// state: i32,
+ /// }
+ ///
+ /// impl Iterator for NonFusedIterator {
+ /// type Item = i32;
+ ///
+ /// fn next(&mut self) -> Option<i32> {
+ /// let val = self.state;
+ /// self.state = self.state + 1;
+ ///
+ /// // yields `0..5` first, then only even numbers since `6..`.
+ /// if val < 5 || val % 2 == 0 {
+ /// Some(val)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ /// let mut iter = NonFusedIterator::default();
+ ///
+ /// // yields 0..5 first.
+ /// assert_eq!(iter.next(), Some(0));
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), Some(3));
+ /// assert_eq!(iter.next(), Some(4));
+ /// // then we can see our iterator going back and forth
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), Some(6));
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), Some(8));
+ /// assert_eq!(iter.next(), None);
+ ///
+ /// // however, with `.map_windows()`, it is fused.
+ /// let mut iter = NonFusedIterator::default()
+ /// .map_windows(|arr: &[_; 2]| *arr);
+ ///
+ /// assert_eq!(iter.next(), Some([0, 1]));
+ /// assert_eq!(iter.next(), Some([1, 2]));
+ /// assert_eq!(iter.next(), Some([2, 3]));
+ /// assert_eq!(iter.next(), Some([3, 4]));
+ /// assert_eq!(iter.next(), None);
+ ///
+ /// // it will always return `None` after the first time.
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "iter_map_windows", reason = "recently added", issue = "87155")]
+ #[rustc_do_not_const_check]
+ fn map_windows<F, R, const N: usize>(self, f: F) -> MapWindows<Self, F, N>
+ where
+ Self: Sized,
+ F: FnMut(&[Self::Item; N]) -> R,
+ {
+ MapWindows::new(self, f)
+ }
+
/// Creates an iterator which ends after the first [`None`].
///
/// After an iterator returns [`None`], future calls may or may not yield
@@ -4018,4 +4176,66 @@ impl<I: Iterator + ?Sized> Iterator for &mut I {
fn nth(&mut self, n: usize) -> Option<Self::Item> {
(**self).nth(n)
}
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.spec_fold(init, f)
+ }
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.spec_try_fold(init, f)
+ }
+}
+
+/// Helper trait to specialize `fold` and `try_fold` for `&mut I where I: Sized`
+trait IteratorRefSpec: Iterator {
+ fn spec_fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B;
+
+ fn spec_try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>;
+}
+
+impl<I: Iterator + ?Sized> IteratorRefSpec for &mut I {
+ default fn spec_fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+
+ default fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next() {
+ accum = f(accum, x)?;
+ }
+ try { accum }
+ }
+}
+
+impl<I: Iterator> IteratorRefSpec for &mut I {
+ impl_fold_via_try_fold! { spec_fold -> spec_try_fold }
+
+ fn spec_try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ (**self).try_fold(init, f)
+ }
}
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 05876f5fc..a2729b374 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -51,7 +51,7 @@
#![cfg(not(test))]
// To run core tests without x.py without ending up with two copies of core, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no affect there.
+// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
#![stable(feature = "core", since = "1.6.0")]
#![doc(
@@ -96,6 +96,9 @@
#![allow(explicit_outlives_requirements)]
#![allow(incomplete_features)]
#![warn(multiple_supertrait_upcastable)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
+// Do not check link redundancy during the bootstrapping phase
+#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
//
// Library features:
// tidy-alphabetical-start
@@ -165,6 +168,7 @@
#![feature(duration_consts_float)]
#![feature(internal_impls_macro)]
#![feature(ip)]
+#![feature(ip_bits)]
#![feature(is_ascii_octdigit)]
#![feature(maybe_uninit_uninit_array)]
#![feature(ptr_alignment_type)]
@@ -398,7 +402,8 @@ pub mod primitive;
missing_debug_implementations,
dead_code,
unused_imports,
- unsafe_op_in_unsafe_fn
+ unsafe_op_in_unsafe_fn,
+ ambiguous_glob_reexports
)]
#[allow(rustdoc::bare_urls)]
// FIXME: This annotation should be moved into rust-lang/stdarch after clashing_extern_declarations is
@@ -417,7 +422,7 @@ pub mod arch;
// set up in such a way that directly pulling it here works such that the
// crate uses this crate as its core.
#[path = "../../portable-simd/crates/core_simd/src/mod.rs"]
-#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
+#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn)]
#[allow(rustdoc::bare_urls)]
#[unstable(feature = "portable_simd", issue = "86656")]
mod core_simd;
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index 45e5b7627..14cc523b0 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -312,7 +312,6 @@ macro_rules! debug_assert_ne {
/// let c = Ok("abc".to_string());
/// debug_assert_matches!(c, Ok(x) | Err(x) if x.len() < 100);
/// ```
-#[macro_export]
#[unstable(feature = "assert_matches", issue = "82775")]
#[allow_internal_unstable(assert_matches)]
#[rustc_macro_transparency = "semitransparent"]
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index e251015dd..5ec751e51 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -140,8 +140,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {}
)]
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
#[rustc_coinductive]
pub trait Sized {
// Empty.
@@ -174,8 +173,7 @@ pub trait Sized {
/// [nomicon-coerce]: ../../nomicon/coercions.html
#[unstable(feature = "unsize", issue = "18598")]
#[lang = "unsize"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
pub trait Unsize<T: ?Sized> {
// Empty.
}
@@ -856,8 +854,7 @@ impl<T: ?Sized> StructuralEq for PhantomData<T> {}
reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
)]
#[lang = "discriminant_kind"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
pub trait DiscriminantKind {
/// The type of the discriminant, which must satisfy the trait
/// bounds required by `mem::Discriminant`.
@@ -962,8 +959,7 @@ marker_impls! {
#[unstable(feature = "const_trait_impl", issue = "67792")]
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
#[const_trait]
pub trait Destruct {}
@@ -974,8 +970,7 @@ pub trait Destruct {}
#[unstable(feature = "tuple_trait", issue = "none")]
#[lang = "tuple_trait"]
#[rustc_on_unimplemented(message = "`{Self}` is not a tuple")]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
pub trait Tuple {}
/// A marker for pointer-like types.
@@ -1020,7 +1015,6 @@ marker_impls! {
// FIXME(adt_const_params): Add to marker_impls call above once not in bootstrap
#[unstable(feature = "adt_const_params", issue = "95174")]
-#[cfg(not(bootstrap))]
impl ConstParamTy for () {}
/// A common trait implemented by all function pointers.
@@ -1030,8 +1024,7 @@ impl ConstParamTy for () {}
reason = "internal trait for implementing various traits for all function pointers"
)]
#[lang = "fn_ptr_trait"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
pub trait FnPtr: Copy + Clone {
/// Returns the address of the function pointer.
#[lang = "fn_ptr_addr"]
diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs
index 3805d149b..f5cc86e77 100644
--- a/library/core/src/mem/transmutability.rs
+++ b/library/core/src/mem/transmutability.rs
@@ -7,8 +7,8 @@ use crate::marker::ConstParamTy;
/// notwithstanding whatever safety checks you have asked the compiler to [`Assume`] are satisfied.
#[unstable(feature = "transmutability", issue = "99571")]
#[lang = "transmute_trait"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
+#[rustc_coinductive]
pub unsafe trait BikeshedIntrinsicFrom<Src, Context, const ASSUME: Assume = { Assume::NOTHING }>
where
Src: ?Sized,
diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs
index c51913fa8..56460c75e 100644
--- a/library/core/src/net/ip_addr.rs
+++ b/library/core/src/net/ip_addr.rs
@@ -450,6 +450,57 @@ impl Ipv4Addr {
Ipv4Addr { octets: [a, b, c, d] }
}
+ /// The size of an IPv4 address in bits.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// assert_eq!(Ipv4Addr::BITS, 32);
+ /// ```
+ #[unstable(feature = "ip_bits", issue = "113744")]
+ pub const BITS: u32 = 32;
+
+ /// Converts an IPv4 address into host byte order `u32`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
+ /// assert_eq!(0x12345678, addr.to_bits());
+ /// ```
+ #[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
+ #[unstable(feature = "ip_bits", issue = "113744")]
+ #[must_use]
+ #[inline]
+ pub const fn to_bits(self) -> u32 {
+ u32::from_be_bytes(self.octets)
+ }
+
+ /// Converts a host byte order `u32` into an IPv4 address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::from(0x12345678);
+ /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr);
+ /// ```
+ #[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
+ #[unstable(feature = "ip_bits", issue = "113744")]
+ #[must_use]
+ #[inline]
+ pub const fn from_bits(bits: u32) -> Ipv4Addr {
+ Ipv4Addr { octets: bits.to_be_bytes() }
+ }
+
/// An IPv4 address with the address pointing to localhost: `127.0.0.1`
///
/// # Examples
@@ -1069,37 +1120,19 @@ impl Ord for Ipv4Addr {
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<Ipv4Addr> for u32 {
- /// Converts an `Ipv4Addr` into a host byte order `u32`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
- /// assert_eq!(0x12345678, u32::from(addr));
- /// ```
+ /// Uses [`Ipv4Addr::to_bits`] to convert an IPv4 address to a host byte order `u32`.
#[inline]
fn from(ip: Ipv4Addr) -> u32 {
- u32::from_be_bytes(ip.octets)
+ ip.to_bits()
}
}
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<u32> for Ipv4Addr {
- /// Converts a host byte order `u32` into an `Ipv4Addr`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv4Addr;
- ///
- /// let addr = Ipv4Addr::from(0x12345678);
- /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr);
- /// ```
+ /// Uses [`Ipv4Addr::from_bits`] to convert a host byte order `u32` into an IPv4 address.
#[inline]
fn from(ip: u32) -> Ipv4Addr {
- Ipv4Addr { octets: ip.to_be_bytes() }
+ Ipv4Addr::from_bits(ip)
}
}
@@ -1173,6 +1206,65 @@ impl Ipv6Addr {
}
}
+ /// The size of an IPv6 address in bits.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv6Addr;
+ ///
+ /// assert_eq!(Ipv6Addr::BITS, 128);
+ /// ```
+ #[unstable(feature = "ip_bits", issue = "113744")]
+ pub const BITS: u32 = 128;
+
+ /// Converts an IPv6 address into host byte order `u128`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// );
+ /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, addr.to_bits());
+ /// ```
+ #[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
+ #[unstable(feature = "ip_bits", issue = "113744")]
+ #[must_use]
+ #[inline]
+ pub const fn to_bits(self) -> u128 {
+ u128::from_be_bytes(self.octets)
+ }
+
+ /// Converts a host byte order `u128` into an IPv6 address.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128);
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// ),
+ /// addr);
+ /// ```
+ #[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
+ #[unstable(feature = "ip_bits", issue = "113744")]
+ #[must_use]
+ #[inline]
+ pub const fn from_bits(bits: u128) -> Ipv6Addr {
+ Ipv6Addr { octets: bits.to_be_bytes() }
+ }
+
/// An IPv6 address representing localhost: `::1`.
///
/// This corresponds to constant `IN6ADDR_LOOPBACK_INIT` or `in6addr_loopback` in other
@@ -1905,44 +1997,18 @@ impl Ord for Ipv6Addr {
#[stable(feature = "i128", since = "1.26.0")]
impl From<Ipv6Addr> for u128 {
- /// Convert an `Ipv6Addr` into a host byte order `u128`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::new(
- /// 0x1020, 0x3040, 0x5060, 0x7080,
- /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
- /// );
- /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
- /// ```
+ /// Uses [`Ipv6Addr::to_bits`] to convert an IPv6 address to a host byte order `u128`.
#[inline]
fn from(ip: Ipv6Addr) -> u128 {
- u128::from_be_bytes(ip.octets)
+ ip.to_bits()
}
}
#[stable(feature = "i128", since = "1.26.0")]
impl From<u128> for Ipv6Addr {
- /// Convert a host byte order `u128` into an `Ipv6Addr`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::Ipv6Addr;
- ///
- /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128);
- /// assert_eq!(
- /// Ipv6Addr::new(
- /// 0x1020, 0x3040, 0x5060, 0x7080,
- /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
- /// ),
- /// addr);
- /// ```
+ /// Uses [`Ipv6Addr::from_bits`] to convert a host byte order `u128` to an IPv6 address.
#[inline]
fn from(ip: u128) -> Ipv6Addr {
- Ipv6Addr::from(ip.to_be_bytes())
+ Ipv6Addr::from_bits(ip)
}
}
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 1199d09b5..1f43520e1 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -2126,6 +2126,7 @@ macro_rules! int_impl {
/// assert_eq!(a.rem_euclid(-b), 3);
/// assert_eq!((-a).rem_euclid(-b), 1);
/// ```
+ #[doc(alias = "modulo", alias = "mod")]
#[stable(feature = "euclidean_division", since = "1.38.0")]
#[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
#[must_use = "this returns the result of the operation, \
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 6f6b6dbb8..23ca37817 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -2024,6 +2024,7 @@ macro_rules! uint_impl {
/// ```
#[doc = concat!("assert_eq!(7", stringify!($SelfT), ".rem_euclid(4), 3); // or any other integer type")]
/// ```
+ #[doc(alias = "modulo", alias = "mod")]
#[stable(feature = "euclidean_division", since = "1.38.0")]
#[rustc_const_stable(feature = "const_euclidean_int_methods", since = "1.52.0")]
#[must_use = "this returns the result of the operation, \
@@ -2074,10 +2075,10 @@ macro_rules! uint_impl {
/// Basic usage:
///
/// ```
- /// #![feature(int_roundings)]
#[doc = concat!("assert_eq!(7_", stringify!($SelfT), ".div_ceil(4), 2);")]
/// ```
- #[unstable(feature = "int_roundings", issue = "88581")]
+ #[stable(feature = "int_roundings1", since = "1.73.0")]
+ #[rustc_const_stable(feature = "int_roundings1", since = "1.73.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2109,11 +2110,11 @@ macro_rules! uint_impl {
/// Basic usage:
///
/// ```
- /// #![feature(int_roundings)]
#[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".next_multiple_of(8), 16);")]
#[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".next_multiple_of(8), 24);")]
/// ```
- #[unstable(feature = "int_roundings", issue = "88581")]
+ #[stable(feature = "int_roundings1", since = "1.73.0")]
+ #[rustc_const_stable(feature = "int_roundings1", since = "1.73.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2134,13 +2135,13 @@ macro_rules! uint_impl {
/// Basic usage:
///
/// ```
- /// #![feature(int_roundings)]
#[doc = concat!("assert_eq!(16_", stringify!($SelfT), ".checked_next_multiple_of(8), Some(16));")]
#[doc = concat!("assert_eq!(23_", stringify!($SelfT), ".checked_next_multiple_of(8), Some(24));")]
#[doc = concat!("assert_eq!(1_", stringify!($SelfT), ".checked_next_multiple_of(0), None);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.checked_next_multiple_of(2), None);")]
/// ```
- #[unstable(feature = "int_roundings", issue = "88581")]
+ #[stable(feature = "int_roundings1", since = "1.73.0")]
+ #[rustc_const_stable(feature = "int_roundings1", since = "1.73.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index 9b6ff76b2..becb63309 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -1125,6 +1125,7 @@ impl<T> Option<T> {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "if you don't need the returned value, use `if let` instead"]
pub fn map_or<U, F>(self, default: U, f: F) -> U
where
F: FnOnce(T) -> U,
@@ -1697,6 +1698,41 @@ impl<T> Option<T> {
mem::replace(self, None)
}
+ /// Takes the value out of the option, but only if the predicate evaluates to
+ /// `true` on a mutable reference to the value.
+ ///
+ /// In other words, replaces `self` with `None` if the predicate returns `true`.
+ /// This method operates similarly to [`Option::take`], but conditionally.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_take_if)]
+ ///
+ /// let mut x = Some(42);
+ ///
+ /// let prev = x.take_if(|v| if *v == 42 {
+ /// *v += 1;
+ /// false
+ /// } else {
+ /// false
+ /// });
+ /// assert_eq!(x, Some(43));
+ /// assert_eq!(prev, None);
+ ///
+ /// let prev = x.take_if(|v| *v == 43);
+ /// assert_eq!(x, None);
+ /// assert_eq!(prev, Some(43));
+ /// ```
+ #[inline]
+ #[unstable(feature = "option_take_if", issue = "98934")]
+ pub fn take_if<P>(&mut self, predicate: P) -> Option<T>
+ where
+ P: FnOnce(&mut T) -> bool,
+ {
+ if self.as_mut().map_or(false, predicate) { self.take() } else { None }
+ }
+
/// Replaces the actual value in the option by the value given in parameter,
/// returning the old value if present,
/// leaving a [`Some`] in its place without deinitializing either one.
diff --git a/library/core/src/panic/panic_info.rs b/library/core/src/panic/panic_info.rs
index 5576adde8..c7f04f11e 100644
--- a/library/core/src/panic/panic_info.rs
+++ b/library/core/src/panic/panic_info.rs
@@ -147,16 +147,18 @@ impl<'a> PanicInfo<'a> {
impl fmt::Display for PanicInfo<'_> {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("panicked at ")?;
+ self.location.fmt(formatter)?;
if let Some(message) = self.message {
- write!(formatter, "'{}', ", message)?
+ formatter.write_str(":\n")?;
+ formatter.write_fmt(*message)?;
} else if let Some(payload) = self.payload.downcast_ref::<&'static str>() {
- write!(formatter, "'{}', ", payload)?
+ formatter.write_str(":\n")?;
+ formatter.write_str(payload)?;
}
// NOTE: we cannot use downcast_ref::<String>() here
// since String is not available in core!
// The payload is a String when `std::panic!` is called with multiple arguments,
// but in that case the message is also available.
-
- self.location.fmt(formatter)
+ Ok(())
}
}
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index f0fcdab00..7b6249207 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -267,16 +267,14 @@ fn assert_failed_inner(
match args {
Some(args) => panic!(
- r#"assertion failed: `(left {} right)`
- left: `{:?}`,
- right: `{:?}`: {}"#,
- op, left, right, args
+ r#"assertion `left {op} right` failed: {args}
+ left: {left:?}
+ right: {right:?}"#
),
None => panic!(
- r#"assertion failed: `(left {} right)`
- left: `{:?}`,
- right: `{:?}`"#,
- op, left, right,
+ r#"assertion `left {op} right` failed
+ left: {left:?}
+ right: {right:?}"#
),
}
}
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 926189a17..ee69d89a4 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,7 +1,7 @@
use super::*;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::intrinsics::{self, const_eval_select};
-use crate::mem;
+use crate::mem::{self, SizedTypeProperties};
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *const T {
@@ -30,6 +30,7 @@ impl<T: ?Sized> *const T {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
+ #[rustc_diagnostic_item = "ptr_const_is_null"]
#[inline]
pub const fn is_null(self) -> bool {
#[inline]
@@ -54,6 +55,7 @@ impl<T: ?Sized> *const T {
/// Casts to a pointer of another type.
#[stable(feature = "ptr_cast", since = "1.38.0")]
#[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
+ #[rustc_diagnostic_item = "const_ptr_cast"]
#[inline(always)]
pub const fn cast<U>(self) -> *const U {
self as _
@@ -994,14 +996,23 @@ impl<T: ?Sized> *const T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ // We could always go back to wrapping if unchecked becomes unacceptable
+ #[rustc_allow_const_fn_unstable(const_int_unchecked_arith)]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn sub(self, count: usize) -> Self
where
T: Sized,
{
- // SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe { self.offset((count as isize).wrapping_neg()) }
+ if T::IS_ZST {
+ // Pointer arithmetic does nothing when the pointee is a ZST.
+ self
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Because the pointee is *not* a ZST, that means that `count` is
+ // at most `isize::MAX`, and thus the negation cannot overflow.
+ unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) }
+ }
}
/// Calculates the offset from a pointer in bytes (convenience for
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index daaa44b1d..040aa0697 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -50,8 +50,7 @@ use crate::hash::{Hash, Hasher};
///
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
-#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl(implement_via_object = false)]
pub trait Pointee {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index acc9ca29d..5f094ac4e 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -710,6 +710,7 @@ pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[rustc_diagnostic_item = "ptr_from_mut"]
pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
r
}
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index c6f438578..9dbb3f9d3 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1,6 +1,7 @@
use super::*;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::intrinsics::{self, const_eval_select};
+use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *mut T {
@@ -29,6 +30,7 @@ impl<T: ?Sized> *mut T {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
+ #[rustc_diagnostic_item = "ptr_is_null"]
#[inline]
pub const fn is_null(self) -> bool {
#[inline]
@@ -53,6 +55,7 @@ impl<T: ?Sized> *mut T {
/// Casts to a pointer of another type.
#[stable(feature = "ptr_cast", since = "1.38.0")]
#[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
+ #[rustc_diagnostic_item = "ptr_cast"]
#[inline(always)]
pub const fn cast<U>(self) -> *mut U {
self as _
@@ -109,6 +112,7 @@ impl<T: ?Sized> *mut T {
/// [`cast_mut`]: #method.cast_mut
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[rustc_diagnostic_item = "ptr_cast_const"]
#[inline(always)]
pub const fn cast_const(self) -> *const T {
self as _
@@ -1093,14 +1097,23 @@ impl<T: ?Sized> *mut T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ // We could always go back to wrapping if unchecked becomes unacceptable
+ #[rustc_allow_const_fn_unstable(const_int_unchecked_arith)]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn sub(self, count: usize) -> Self
where
T: Sized,
{
- // SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe { self.offset((count as isize).wrapping_neg()) }
+ if T::IS_ZST {
+ // Pointer arithmetic does nothing when the pointee is a ZST.
+ self
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Because the pointee is *not* a ZST, that means that `count` is
+ // at most `isize::MAX`, and thus the negation cannot overflow.
+ unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) }
+ }
}
/// Calculates the offset from a pointer in bytes (convenience for
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index b492d2f07..e0fd347a0 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -367,13 +367,14 @@ impl<T: ?Sized> NonNull<T> {
///
/// [the module documentation]: crate::ptr#safety
#[stable(feature = "nonnull", since = "1.25.0")]
- #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+ #[rustc_const_stable(feature = "const_nonnull_as_ref", since = "1.73.0")]
#[must_use]
#[inline(always)]
pub const unsafe fn as_ref<'a>(&self) -> &'a T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
- unsafe { &*self.as_ptr() }
+ // `cast_const` avoids a mutable raw pointer deref.
+ unsafe { &*self.as_ptr().cast_const() }
}
/// Returns a unique reference to the value. If the value may be uninitialized, [`as_uninit_mut`]
@@ -462,6 +463,30 @@ impl<T: ?Sized> NonNull<T> {
// And the caller promised the `delta` is sound to add.
unsafe { NonNull { pointer: self.pointer.add(delta) } }
}
+
+ /// See [`pointer::sub`] for semantics and safety requirements.
+ #[inline]
+ pub(crate) const unsafe fn sub(self, delta: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: We require that the delta stays in-bounds of the object, and
+ // thus it cannot become null, as no legal objects can be allocated
+ // in such a way that the null address is part of them.
+ // And the caller promised the `delta` is sound to subtract.
+ unsafe { NonNull { pointer: self.pointer.sub(delta) } }
+ }
+
+ /// See [`pointer::sub_ptr`] for semantics and safety requirements.
+ #[inline]
+ pub(crate) const unsafe fn sub_ptr(self, subtrahend: Self) -> usize
+ where
+ T: Sized,
+ {
+ // SAFETY: The caller promised that this is safe to do, and
+ // the non-nullness is irrelevant to the operation.
+ unsafe { self.pointer.sub_ptr(subtrahend.pointer) }
+ }
}
impl<T> NonNull<[T]> {
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index ff7e91d3e..bf8b86677 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -33,7 +33,7 @@ use crate::ptr::NonNull;
#[doc(hidden)]
#[repr(transparent)]
// Lang item used experimentally by Miri to define the semantics of `Unique`.
-#[cfg_attr(not(bootstrap), lang = "ptr_unique")]
+#[lang = "ptr_unique"]
pub struct Unique<T: ?Sized> {
pointer: NonNull<T>,
// NOTE: this marker has no consequences for variance, but is necessary
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 1ee270f4c..6981abc9b 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -749,7 +749,7 @@ impl<T, E> Result<T, E> {
}
/// Returns the provided default (if [`Err`]), or
- /// applies a function to the contained value (if [`Ok`]),
+ /// applies a function to the contained value (if [`Ok`]).
///
/// Arguments passed to `map_or` are eagerly evaluated; if you are passing
/// the result of a function call, it is recommended to use [`map_or_else`],
@@ -768,6 +768,7 @@ impl<T, E> Result<T, E> {
/// ```
#[inline]
#[stable(feature = "result_map_or", since = "1.41.0")]
+ #[must_use = "if you don't need the returned value, use `if let` instead"]
pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
match self {
Ok(t) => f(t),
diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs
index 7601dd3c7..075347b80 100644
--- a/library/core/src/slice/cmp.rs
+++ b/library/core/src/slice/cmp.rs
@@ -1,22 +1,12 @@
//! Comparison traits for `[T]`.
use crate::cmp::{self, BytewiseEq, Ordering};
-use crate::ffi;
+use crate::intrinsics::compare_bytes;
use crate::mem;
use super::from_raw_parts;
use super::memchr;
-extern "C" {
- /// Calls implementation provided memcmp.
- ///
- /// Interprets the data as u8.
- ///
- /// Returns 0 for equal, < 0 for less than and > 0 for greater
- /// than.
- fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> ffi::c_int;
-}
-
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> PartialEq<[B]> for [A]
where
@@ -74,7 +64,8 @@ where
}
}
-// Use memcmp for bytewise equality when the types allow
+// When each element can be compared byte-wise, we can compare all the bytes
+// from the whole size in one call to the intrinsics.
impl<A, B> SlicePartialEq<B> for [A]
where
A: BytewiseEq<B>,
@@ -88,7 +79,7 @@ where
// The two slices have been checked to have the same size above.
unsafe {
let size = mem::size_of_val(self);
- memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
+ compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
}
}
}
@@ -183,7 +174,7 @@ impl<A: Ord> SliceOrd for A {
}
}
-// memcmp compares a sequence of unsigned bytes lexicographically.
+// `compare_bytes` compares a sequence of unsigned bytes lexicographically.
// this matches the order we want for [u8], but no others (not even [i8]).
impl SliceOrd for u8 {
#[inline]
@@ -195,7 +186,7 @@ impl SliceOrd for u8 {
// SAFETY: `left` and `right` are references and are thus guaranteed to be valid.
// We use the minimum of both lengths which guarantees that both regions are
// valid for reads in that interval.
- let mut order = unsafe { memcmp(left.as_ptr(), right.as_ptr(), len) as isize };
+ let mut order = unsafe { compare_bytes(left.as_ptr(), right.as_ptr(), len) as isize };
if order == 0 {
order = diff;
}
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index e1e3bcc05..d313e8e01 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -727,7 +727,7 @@ where
}
/// Convert pair of `ops::Bound`s into `ops::Range` without performing any bounds checking and (in debug) overflow checking
-fn into_range_unchecked(
+pub(crate) fn into_range_unchecked(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
@@ -747,7 +747,7 @@ fn into_range_unchecked(
/// Convert pair of `ops::Bound`s into `ops::Range`.
/// Returns `None` on overflowing indices.
-fn into_range(
+pub(crate) fn into_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> Option<ops::Range<usize>> {
@@ -772,7 +772,7 @@ fn into_range(
/// Convert pair of `ops::Bound`s into `ops::Range`.
/// Panics on overflowing indices.
-fn into_slice_range(
+pub(crate) fn into_slice_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 5369fe0a9..cc9313553 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -13,7 +13,7 @@ use crate::iter::{
use crate::marker::{PhantomData, Send, Sized, Sync};
use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
-use crate::ptr::{invalid, invalid_mut, NonNull};
+use crate::ptr::{self, invalid, invalid_mut, NonNull};
use super::{from_raw_parts, from_raw_parts_mut};
@@ -68,7 +68,7 @@ pub struct Iter<'a, T: 'a> {
/// For non-ZSTs, the non-null pointer to the past-the-end element.
///
/// For ZSTs, this is `ptr::invalid(len)`.
- end: *const T,
+ end_or_len: *const T,
_marker: PhantomData<&'a T>,
}
@@ -90,9 +90,9 @@ impl<'a, T> Iter<'a, T> {
let ptr = slice.as_ptr();
// SAFETY: Similar to `IterMut::new`.
unsafe {
- let end = if T::IS_ZST { invalid(slice.len()) } else { ptr.add(slice.len()) };
+ let end_or_len = if T::IS_ZST { invalid(slice.len()) } else { ptr.add(slice.len()) };
- Self { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: PhantomData }
+ Self { ptr: NonNull::new_unchecked(ptr as *mut T), end_or_len, _marker: PhantomData }
}
}
@@ -128,7 +128,7 @@ impl<'a, T> Iter<'a, T> {
}
}
-iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
+iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, as_ref, {
fn is_sorted_by<F>(self, mut compare: F) -> bool
where
Self: Sized,
@@ -142,7 +142,7 @@ iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
impl<T> Clone for Iter<'_, T> {
#[inline]
fn clone(&self) -> Self {
- Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
+ Iter { ptr: self.ptr, end_or_len: self.end_or_len, _marker: self._marker }
}
}
@@ -189,7 +189,7 @@ pub struct IterMut<'a, T: 'a> {
/// For non-ZSTs, the non-null pointer to the past-the-end element.
///
/// For ZSTs, this is `ptr::invalid_mut(len)`.
- end: *mut T,
+ end_or_len: *mut T,
_marker: PhantomData<&'a mut T>,
}
@@ -220,15 +220,16 @@ impl<'a, T> IterMut<'a, T> {
// for direct pointer equality with `ptr` to check if the iterator is
// done.
//
- // In the case of a ZST, the end pointer is just the start pointer plus
- // the length, to also allows for the fast `ptr == end` check.
+ // In the case of a ZST, the end pointer is just the length. It's never
+ // used as a pointer at all, and thus it's fine to have no provenance.
//
// See the `next_unchecked!` and `is_empty!` macros as well as the
// `post_inc_start` method for more information.
unsafe {
- let end = if T::IS_ZST { invalid_mut(slice.len()) } else { ptr.add(slice.len()) };
+ let end_or_len =
+ if T::IS_ZST { invalid_mut(slice.len()) } else { ptr.add(slice.len()) };
- Self { ptr: NonNull::new_unchecked(ptr), end, _marker: PhantomData }
+ Self { ptr: NonNull::new_unchecked(ptr), end_or_len, _marker: PhantomData }
}
}
@@ -360,7 +361,7 @@ impl<T> AsRef<[T]> for IterMut<'_, T> {
// }
// }
-iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}}
+iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, as_mut, {}}
/// An internal abstraction over the splitting iterators, so that
/// splitn, splitn_mut etc can be implemented once.
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index 96a145e22..95bcd123b 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -1,45 +1,62 @@
//! Macros used by iterators of slice.
-// Shrinks the iterator when T is a ZST, setting the length to `new_len`.
-// `new_len` must not exceed `self.len()`.
-macro_rules! zst_set_len {
- ($self: ident, $new_len: expr) => {{
+/// Convenience & performance macro for consuming the `end_or_len` field, by
+/// giving a `(&mut) usize` or `(&mut) NonNull<T>` depending on whether `T` is
+/// or is not a ZST respectively.
+///
+/// Internally, this reads the `end` through a pointer-to-`NonNull` so that
+/// it'll get the appropriate non-null metadata in the backend without needing
+/// to call `assume` manually.
+macro_rules! if_zst {
+ (mut $this:ident, $len:ident => $zst_body:expr, $end:ident => $other_body:expr,) => {{
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
- // SAFETY: same as `invalid(_mut)`, but the macro doesn't know
- // which versions of that function to call, so open-code it.
- $self.end = unsafe { mem::transmute::<usize, _>($new_len) };
+ if T::IS_ZST {
+ // SAFETY: for ZSTs, the pointer is storing a provenance-free length,
+ // so consuming and updating it as a `usize` is fine.
+ let $len = unsafe { &mut *ptr::addr_of_mut!($this.end_or_len).cast::<usize>() };
+ $zst_body
+ } else {
+ // SAFETY: for non-ZSTs, the type invariant ensures it cannot be null
+ let $end = unsafe { &mut *ptr::addr_of_mut!($this.end_or_len).cast::<NonNull<T>>() };
+ $other_body
+ }
}};
-}
+ ($this:ident, $len:ident => $zst_body:expr, $end:ident => $other_body:expr,) => {{
+ #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
-// Shrinks the iterator when T is a ZST, reducing the length by `n`.
-// `n` must not exceed `self.len()`.
-macro_rules! zst_shrink {
- ($self: ident, $n: ident) => {
- let new_len = $self.end.addr() - $n;
- zst_set_len!($self, new_len);
- };
+ if T::IS_ZST {
+ let $len = $this.end_or_len.addr();
+ $zst_body
+ } else {
+ // SAFETY: for non-ZSTs, the type invariant ensures it cannot be null
+ let $end = unsafe { *ptr::addr_of!($this.end_or_len).cast::<NonNull<T>>() };
+ $other_body
+ }
+ }};
}
// Inlining is_empty and len makes a huge performance difference
macro_rules! is_empty {
($self: ident) => {
- if T::IS_ZST { $self.end.addr() == 0 } else { $self.ptr.as_ptr() as *const _ == $self.end }
+ if_zst!($self,
+ len => len == 0,
+ end => $self.ptr == end,
+ )
};
}
macro_rules! len {
($self: ident) => {{
- #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
-
- if T::IS_ZST {
- $self.end.addr()
- } else {
- // To get rid of some bounds checks (see `position`), we use ptr_sub instead of
- // offset_from (Tested by `codegen/slice-position-bounds-check`.)
- // SAFETY: by the type invariant pointers are aligned and `start <= end`
- unsafe { $self.end.sub_ptr($self.ptr.as_ptr()) }
- }
+ if_zst!($self,
+ len => len,
+ end => {
+ // To get rid of some bounds checks (see `position`), we use ptr_sub instead of
+ // offset_from (Tested by `codegen/slice-position-bounds-check`.)
+ // SAFETY: by the type invariant pointers are aligned and `start <= end`
+ unsafe { end.sub_ptr($self.ptr) }
+ },
+ )
}};
}
@@ -50,20 +67,21 @@ macro_rules! iterator {
$elem:ty,
$raw_mut:tt,
{$( $mut_:tt )?},
+ $into_ref:ident,
{$($extra:tt)*}
) => {
// Returns the first element and moves the start of the iterator forwards by 1.
// Greatly improves performance compared to an inlined function. The iterator
// must not be empty.
macro_rules! next_unchecked {
- ($self: ident) => {& $( $mut_ )? *$self.post_inc_start(1)}
+ ($self: ident) => { $self.post_inc_start(1).$into_ref() }
}
// Returns the last element and moves the end of the iterator backwards by 1.
// Greatly improves performance compared to an inlined function. The iterator
// must not be empty.
macro_rules! next_back_unchecked {
- ($self: ident) => {& $( $mut_ )? *$self.pre_dec_end(1)}
+ ($self: ident) => { $self.pre_dec_end(1).$into_ref() }
}
impl<'a, T> $name<'a, T> {
@@ -80,33 +98,40 @@ macro_rules! iterator {
// returning the old start.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
- unsafe fn post_inc_start(&mut self, offset: usize) -> * $raw_mut T {
+ unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
let old = self.ptr;
- if T::IS_ZST {
- zst_shrink!(self, offset);
- } else {
- // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
- // so this new pointer is inside `self` and thus guaranteed to be non-null.
- self.ptr = unsafe { self.ptr.add(offset) };
+
+ // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
+ // so this new pointer is inside `self` and thus guaranteed to be non-null.
+ unsafe {
+ if_zst!(mut self,
+ len => *len = len.unchecked_sub(offset),
+ _end => self.ptr = self.ptr.add(offset),
+ );
}
- old.as_ptr()
+ old
}
// Helper function for moving the end of the iterator backwards by `offset` elements,
// returning the new end.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
- unsafe fn pre_dec_end(&mut self, offset: usize) -> * $raw_mut T {
- if T::IS_ZST {
- zst_shrink!(self, offset);
- self.ptr.as_ptr()
- } else {
+ unsafe fn pre_dec_end(&mut self, offset: usize) -> NonNull<T> {
+ if_zst!(mut self,
+ // SAFETY: By our precondition, `offset` can be at most the
+ // current length, so the subtraction can never overflow.
+ len => unsafe {
+ *len = len.unchecked_sub(offset);
+ self.ptr
+ },
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// which is guaranteed to not overflow an `isize`. Also, the resulting pointer
// is in bounds of `slice`, which fulfills the other requirements for `offset`.
- self.end = unsafe { self.end.sub(offset) };
- self.end
- }
+ end => unsafe {
+ *end = end.sub(offset);
+ *end
+ },
+ )
}
}
@@ -131,13 +156,9 @@ macro_rules! iterator {
fn next(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
- // SAFETY: `assume` call is safe because slices over non-ZSTs must
- // have a non-null end pointer. The call to `next_unchecked!` is
+ // SAFETY: The call to `next_unchecked!` is
// safe since we check if the iterator is empty first.
unsafe {
- if !<T>::IS_ZST {
- assume(!self.end.is_null());
- }
if is_empty!(self) {
None
} else {
@@ -161,14 +182,10 @@ macro_rules! iterator {
fn nth(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
- if T::IS_ZST {
- zst_set_len!(self, 0);
- } else {
- // SAFETY: end can't be 0 if T isn't ZST because ptr isn't 0 and end >= ptr
- unsafe {
- self.ptr = NonNull::new_unchecked(self.end as *mut T);
- }
- }
+ if_zst!(mut self,
+ len => *len = 0,
+ end => self.ptr = *end,
+ );
return None;
}
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
@@ -375,13 +392,9 @@ macro_rules! iterator {
fn next_back(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
- // SAFETY: `assume` call is safe because slices over non-ZSTs must
- // have a non-null end pointer. The call to `next_back_unchecked!`
+ // SAFETY: The call to `next_back_unchecked!`
// is safe since we check if the iterator is empty first.
unsafe {
- if !<T>::IS_ZST {
- assume(!self.end.is_null());
- }
if is_empty!(self) {
None
} else {
@@ -394,11 +407,10 @@ macro_rules! iterator {
fn nth_back(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
- if T::IS_ZST {
- zst_set_len!(self, 0);
- } else {
- self.end = self.ptr.as_ptr();
- }
+ if_zst!(mut self,
+ len => *len = 0,
+ end => *end = self.ptr,
+ );
return None;
}
// SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index e2a2428fb..d95662afd 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -38,7 +38,7 @@ pub mod sort;
mod ascii;
mod cmp;
-mod index;
+pub(crate) mod index;
mod iter;
mod raw;
mod rotate;
@@ -2957,7 +2957,7 @@ impl<T> [T] {
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
- /// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
+ /// (i.e., does not allocate), and *O*(*m* \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index 772c36055..cd16810c4 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -1439,11 +1439,22 @@ impl<'a> Iterator for EncodeUtf16<'a> {
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let (low, high) = self.chars.size_hint();
- // every char gets either one u16 or two u16,
- // so this iterator is between 1 or 2 times as
- // long as the underlying iterator.
- (low, high.and_then(|n| n.checked_mul(2)))
+ let len = self.chars.iter.len();
+ // The highest bytes:code units ratio occurs for 3-byte sequences,
+ // since a 4-byte sequence results in 2 code units. The lower bound
+ // is therefore determined by assuming the remaining bytes contain as
+ // many 3-byte sequences as possible. The lowest bytes:code units
+ // ratio is for 1-byte sequences, so use this for the upper bound.
+ // `(len + 2)` can't overflow, because we know that the `slice::Iter`
+ // belongs to a slice in memory which has a maximum length of
+ // `isize::MAX` (that's well below `usize::MAX`)
+ if self.extra == 0 {
+ ((len + 2) / 3, Some(len))
+ } else {
+ // We're in the middle of a surrogate pair, so add the remaining
+ // surrogate to the bounds.
+ ((len + 2) / 3 + 1, Some(len + 1))
+ }
}
}
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index 9a93bb729..e5f34952c 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -267,14 +267,13 @@ impl str {
/// Finds the closest `x` not below `index` where `is_char_boundary(x)` is `true`.
///
+ /// If `index` is greater than the length of the string, this returns the length of the string.
+ ///
/// This method is the natural complement to [`floor_char_boundary`]. See that method
/// for more details.
///
/// [`floor_char_boundary`]: str::floor_char_boundary
///
- /// # Panics
- ///
- /// Panics if `index > self.len()`.
///
/// # Examples
///
@@ -292,7 +291,7 @@ impl str {
#[inline]
pub fn ceil_char_boundary(&self, index: usize) -> usize {
if index > self.len() {
- slice_error_fail(self, index, index)
+ self.len()
} else {
let upper_bound = Ord::min(index + 4, self.len());
self.as_bytes()[index..upper_bound]
@@ -952,6 +951,10 @@ impl str {
///
/// Line terminators are not included in the lines returned by the iterator.
///
+ /// Note that any carriage return (`\r`) not immediately followed by a
+ /// line feed (`\n`) does not split a line. These carriage returns are
+ /// thereby included in the produced lines.
+ ///
/// The final line ending is optional. A string that ends with a final line
/// ending will return the same lines as an otherwise identical string
/// without a final line ending.
@@ -961,18 +964,19 @@ impl str {
/// Basic usage:
///
/// ```
- /// let text = "foo\r\nbar\n\nbaz\n";
+ /// let text = "foo\r\nbar\n\nbaz\r";
/// let mut lines = text.lines();
///
/// assert_eq!(Some("foo"), lines.next());
/// assert_eq!(Some("bar"), lines.next());
/// assert_eq!(Some(""), lines.next());
- /// assert_eq!(Some("baz"), lines.next());
+ /// // Trailing carriage return is included in the last line
+ /// assert_eq!(Some("baz\r"), lines.next());
///
/// assert_eq!(None, lines.next());
/// ```
///
- /// The final line ending isn't required:
+ /// The final line does not require any ending:
///
/// ```
/// let text = "foo\nbar\n\r\nbaz";
@@ -1666,7 +1670,7 @@ impl str {
/// If the pattern allows a reverse search but its results might differ
/// from a forward search, the [`rmatches`] method can be used.
///
- /// [`rmatches`]: str::matches
+ /// [`rmatches`]: str::rmatches
///
/// # Examples
///
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index 91ee2903a..d5d6d60ac 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -1750,7 +1750,9 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
1
} else {
// try a few bytes in case first and last byte of the needle are the same
- let Some(second_probe_offset) = (needle.len().saturating_sub(4)..needle.len()).rfind(|&idx| needle[idx] != first_probe) else {
+ let Some(second_probe_offset) =
+ (needle.len().saturating_sub(4)..needle.len()).rfind(|&idx| needle[idx] != first_probe)
+ else {
// fall back to other search methods if we can't find any different bytes
// since we could otherwise hit some degenerate cases
return None;
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
index 1d52335f2..2b37af66b 100644
--- a/library/core/src/str/traits.rs
+++ b/library/core/src/str/traits.rs
@@ -252,6 +252,58 @@ unsafe impl SliceIndex<str> for ops::Range<usize> {
}
}
+/// Implements substring slicing for arbitrary bounds.
+///
+/// Returns a slice of the given string bounded by the byte indices
+/// provided by each bound.
+///
+/// This operation is *O*(1).
+///
+/// # Panics
+///
+/// Panics if `begin` or `end` (if it exists and once adjusted for
+/// inclusion/exclusion) does not point to the starting byte offset of
+/// a character (as defined by `is_char_boundary`), if `begin > end`, or if
+/// `end > len`.
+#[stable(feature = "slice_index_str_with_ops_bound_pair", since = "1.73.0")]
+unsafe impl SliceIndex<str> for (ops::Bound<usize>, ops::Bound<usize>) {
+ type Output = str;
+
+ #[inline]
+ fn get(self, slice: &str) -> Option<&str> {
+ crate::slice::index::into_range(slice.len(), self)?.get(slice)
+ }
+
+ #[inline]
+ fn get_mut(self, slice: &mut str) -> Option<&mut str> {
+ crate::slice::index::into_range(slice.len(), self)?.get_mut(slice)
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(self, slice: *const str) -> *const str {
+ let len = (slice as *const [u8]).len();
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { crate::slice::index::into_range_unchecked(len, self).get_unchecked(slice) }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut str {
+ let len = (slice as *mut [u8]).len();
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { crate::slice::index::into_range_unchecked(len, self).get_unchecked_mut(slice) }
+ }
+
+ #[inline]
+ fn index(self, slice: &str) -> &str {
+ crate::slice::index::into_slice_range(slice.len(), self).index(slice)
+ }
+
+ #[inline]
+ fn index_mut(self, slice: &mut str) -> &mut str {
+ crate::slice::index::into_slice_range(slice.len(), self).index_mut(slice)
+ }
+}
+
/// Implements substring slicing with syntax `&self[.. end]` or `&mut
/// self[.. end]`.
///
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 236b7f423..22a1c0978 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -131,6 +131,17 @@ use crate::intrinsics;
use crate::hint::spin_loop;
+// Some architectures don't have byte-sized atomics, which results in LLVM
+// emulating them using a LL/SC loop. However for AtomicBool we can take
+// advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND
+// instead, which LLVM can emulate using a larger atomic OR/AND operation.
+//
+// This list should only contain architectures which have word-sized atomic-or/
+// atomic-and instructions but don't natively support byte-sized atomics.
+#[cfg(target_has_atomic = "8")]
+const EMULATE_ATOMIC_BOOL: bool =
+ cfg!(any(target_arch = "riscv32", target_arch = "riscv64", target_arch = "loongarch64"));
+
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
@@ -553,8 +564,12 @@ impl AtomicBool {
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn swap(&self, val: bool, order: Ordering) -> bool {
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
+ if EMULATE_ATOMIC_BOOL {
+ if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) }
+ } else {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
+ }
}
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
@@ -664,12 +679,39 @@ impl AtomicBool {
success: Ordering,
failure: Ordering,
) -> Result<bool, bool> {
- // SAFETY: data races are prevented by atomic intrinsics.
- match unsafe {
- atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
- } {
- Ok(x) => Ok(x != 0),
- Err(x) => Err(x != 0),
+ if EMULATE_ATOMIC_BOOL {
+ // Pick the strongest ordering from success and failure.
+ let order = match (success, failure) {
+ (SeqCst, _) => SeqCst,
+ (_, SeqCst) => SeqCst,
+ (AcqRel, _) => AcqRel,
+ (_, AcqRel) => {
+ panic!("there is no such thing as an acquire-release failure ordering")
+ }
+ (Release, Acquire) => AcqRel,
+ (Acquire, _) => Acquire,
+ (_, Acquire) => Acquire,
+ (Release, Relaxed) => Release,
+ (_, Release) => panic!("there is no such thing as a release failure ordering"),
+ (Relaxed, Relaxed) => Relaxed,
+ };
+ let old = if current == new {
+ // This is a no-op, but we still need to perform the operation
+ // for memory ordering reasons.
+ self.fetch_or(false, order)
+ } else {
+ // This sets the value to the new one and returns the old one.
+ self.swap(new, order)
+ };
+ if old == current { Ok(old) } else { Err(old) }
+ } else {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ match unsafe {
+ atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
+ } {
+ Ok(x) => Ok(x != 0),
+ Err(x) => Err(x != 0),
+ }
}
}
@@ -719,6 +761,10 @@ impl AtomicBool {
success: Ordering,
failure: Ordering,
) -> Result<bool, bool> {
+ if EMULATE_ATOMIC_BOOL {
+ return self.compare_exchange(current, new, success, failure);
+ }
+
// SAFETY: data races are prevented by atomic intrinsics.
match unsafe {
atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
@@ -1958,14 +2004,12 @@ macro_rules! atomic_int {
$stable_from:meta,
$stable_nand:meta,
$const_stable:meta,
- $stable_init_const:meta,
$diagnostic_item:meta,
$s_int_type:literal,
$extra_feature:expr,
$min_fn:ident, $max_fn:ident,
$align:expr,
- $atomic_new:expr,
- $int_type:ident $atomic_type:ident $atomic_init:ident) => {
+ $int_type:ident $atomic_type:ident) => {
/// An integer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as the underlying
@@ -1988,15 +2032,6 @@ macro_rules! atomic_int {
v: UnsafeCell<$int_type>,
}
- /// An atomic integer initialized to `0`.
- #[$stable_init_const]
- #[deprecated(
- since = "1.34.0",
- note = "the `new` function is now preferred",
- suggestion = $atomic_new,
- )]
- pub const $atomic_init: $atomic_type = $atomic_type::new(0);
-
#[$stable]
impl Default for $atomic_type {
#[inline]
@@ -2874,14 +2909,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI8"),
"i8",
"",
atomic_min, atomic_max,
1,
- "AtomicI8::new(0)",
- i8 AtomicI8 ATOMIC_I8_INIT
+ i8 AtomicI8
}
#[cfg(target_has_atomic_load_store = "8")]
atomic_int! {
@@ -2894,14 +2927,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU8"),
"u8",
"",
atomic_umin, atomic_umax,
1,
- "AtomicU8::new(0)",
- u8 AtomicU8 ATOMIC_U8_INIT
+ u8 AtomicU8
}
#[cfg(target_has_atomic_load_store = "16")]
atomic_int! {
@@ -2914,14 +2945,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI16"),
"i16",
"",
atomic_min, atomic_max,
2,
- "AtomicI16::new(0)",
- i16 AtomicI16 ATOMIC_I16_INIT
+ i16 AtomicI16
}
#[cfg(target_has_atomic_load_store = "16")]
atomic_int! {
@@ -2934,14 +2963,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU16"),
"u16",
"",
atomic_umin, atomic_umax,
2,
- "AtomicU16::new(0)",
- u16 AtomicU16 ATOMIC_U16_INIT
+ u16 AtomicU16
}
#[cfg(target_has_atomic_load_store = "32")]
atomic_int! {
@@ -2954,14 +2981,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI32"),
"i32",
"",
atomic_min, atomic_max,
4,
- "AtomicI32::new(0)",
- i32 AtomicI32 ATOMIC_I32_INIT
+ i32 AtomicI32
}
#[cfg(target_has_atomic_load_store = "32")]
atomic_int! {
@@ -2974,14 +2999,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU32"),
"u32",
"",
atomic_umin, atomic_umax,
4,
- "AtomicU32::new(0)",
- u32 AtomicU32 ATOMIC_U32_INIT
+ u32 AtomicU32
}
#[cfg(target_has_atomic_load_store = "64")]
atomic_int! {
@@ -2994,14 +3017,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI64"),
"i64",
"",
atomic_min, atomic_max,
8,
- "AtomicI64::new(0)",
- i64 AtomicI64 ATOMIC_I64_INIT
+ i64 AtomicI64
}
#[cfg(target_has_atomic_load_store = "64")]
atomic_int! {
@@ -3014,14 +3035,12 @@ atomic_int! {
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU64"),
"u64",
"",
atomic_umin, atomic_umax,
8,
- "AtomicU64::new(0)",
- u64 AtomicU64 ATOMIC_U64_INIT
+ u64 AtomicU64
}
#[cfg(target_has_atomic_load_store = "128")]
atomic_int! {
@@ -3034,14 +3053,12 @@ atomic_int! {
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicI128"),
"i128",
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
16,
- "AtomicI128::new(0)",
- i128 AtomicI128 ATOMIC_I128_INIT
+ i128 AtomicI128
}
#[cfg(target_has_atomic_load_store = "128")]
atomic_int! {
@@ -3054,19 +3071,17 @@ atomic_int! {
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
- unstable(feature = "integer_atomics", issue = "99069"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicU128"),
"u128",
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
16,
- "AtomicU128::new(0)",
- u128 AtomicU128 ATOMIC_U128_INIT
+ u128 AtomicU128
}
+#[cfg(target_has_atomic_load_store = "ptr")]
macro_rules! atomic_int_ptr_sized {
( $($target_pointer_width:literal $align:literal)* ) => { $(
- #[cfg(target_has_atomic_load_store = "ptr")]
#[cfg(target_pointer_width = $target_pointer_width)]
atomic_int! {
cfg(target_has_atomic = "ptr"),
@@ -3078,16 +3093,13 @@ macro_rules! atomic_int_ptr_sized {
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
- stable(feature = "rust1", since = "1.0.0"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicIsize"),
"isize",
"",
atomic_min, atomic_max,
$align,
- "AtomicIsize::new(0)",
- isize AtomicIsize ATOMIC_ISIZE_INIT
+ isize AtomicIsize
}
- #[cfg(target_has_atomic_load_store = "ptr")]
#[cfg(target_pointer_width = $target_pointer_width)]
atomic_int! {
cfg(target_has_atomic = "ptr"),
@@ -3099,18 +3111,37 @@ macro_rules! atomic_int_ptr_sized {
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
- stable(feature = "rust1", since = "1.0.0"),
cfg_attr(not(test), rustc_diagnostic_item = "AtomicUsize"),
"usize",
"",
atomic_umin, atomic_umax,
$align,
- "AtomicUsize::new(0)",
- usize AtomicUsize ATOMIC_USIZE_INIT
+ usize AtomicUsize
}
+
+ /// An [`AtomicIsize`] initialized to `0`.
+ #[cfg(target_pointer_width = $target_pointer_width)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.34.0",
+ note = "the `new` function is now preferred",
+ suggestion = "AtomicIsize::new(0)",
+ )]
+ pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
+
+ /// An [`AtomicUsize`] initialized to `0`.
+ #[cfg(target_pointer_width = $target_pointer_width)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(
+ since = "1.34.0",
+ note = "the `new` function is now preferred",
+ suggestion = "AtomicUsize::new(0)",
+ )]
+ pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
)* };
}
+#[cfg(target_has_atomic_load_store = "ptr")]
atomic_int_ptr_sized! {
"16" 2
"32" 4
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index ac8d04a82..7782ace69 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -1,7 +1,6 @@
// See src/libstd/primitive_docs.rs for documentation.
use crate::cmp::Ordering::{self, *};
-#[cfg(not(bootstrap))]
use crate::marker::ConstParamTy;
use crate::marker::{StructuralEq, StructuralPartialEq};
@@ -51,7 +50,6 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[unstable(feature = "structural_match", issue = "31434")]
- #[cfg(not(bootstrap))]
impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+)
{}
}
diff --git a/library/core/tests/any.rs b/library/core/tests/any.rs
index a8f6b7ebb..8d2d31b64 100644
--- a/library/core/tests/any.rs
+++ b/library/core/tests/any.rs
@@ -147,65 +147,3 @@ fn dyn_type_name() {
std::any::type_name::<dyn Foo<Bar = i32> + Send + Sync>()
);
}
-
-// Test the `Provider` API.
-
-struct SomeConcreteType {
- some_string: String,
-}
-
-impl Provider for SomeConcreteType {
- fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- demand
- .provide_ref::<String>(&self.some_string)
- .provide_ref::<str>(&self.some_string)
- .provide_value_with::<String>(|| "bye".to_owned());
- }
-}
-
-// Test the provide and request mechanisms with a by-reference trait object.
-#[test]
-fn test_provider() {
- let obj: &dyn Provider = &SomeConcreteType { some_string: "hello".to_owned() };
-
- assert_eq!(&**request_ref::<String>(obj).unwrap(), "hello");
- assert_eq!(&*request_value::<String>(obj).unwrap(), "bye");
- assert_eq!(request_value::<u8>(obj), None);
-}
-
-// Test the provide and request mechanisms with a boxed trait object.
-#[test]
-fn test_provider_boxed() {
- let obj: Box<dyn Provider> = Box::new(SomeConcreteType { some_string: "hello".to_owned() });
-
- assert_eq!(&**request_ref::<String>(&*obj).unwrap(), "hello");
- assert_eq!(&*request_value::<String>(&*obj).unwrap(), "bye");
- assert_eq!(request_value::<u8>(&*obj), None);
-}
-
-// Test the provide and request mechanisms with a concrete object.
-#[test]
-fn test_provider_concrete() {
- let obj = SomeConcreteType { some_string: "hello".to_owned() };
-
- assert_eq!(&**request_ref::<String>(&obj).unwrap(), "hello");
- assert_eq!(&*request_value::<String>(&obj).unwrap(), "bye");
- assert_eq!(request_value::<u8>(&obj), None);
-}
-
-trait OtherTrait: Provider {}
-
-impl OtherTrait for SomeConcreteType {}
-
-impl dyn OtherTrait {
- fn get_ref<T: 'static + ?Sized>(&self) -> Option<&T> {
- request_ref::<T>(self)
- }
-}
-
-// Test the provide and request mechanisms via an intermediate trait.
-#[test]
-fn test_provider_intermediate() {
- let obj: &dyn OtherTrait = &SomeConcreteType { some_string: "hello".to_owned() };
- assert_eq!(obj.get_ref::<str>().unwrap(), "hello");
-}
diff --git a/library/core/tests/error.rs b/library/core/tests/error.rs
new file mode 100644
index 000000000..cb7cb5441
--- /dev/null
+++ b/library/core/tests/error.rs
@@ -0,0 +1,66 @@
+use core::error::{request_value, request_ref, Request};
+
+// Test the `Request` API.
+#[derive(Debug)]
+struct SomeConcreteType {
+ some_string: String,
+}
+
+impl std::fmt::Display for SomeConcreteType {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "A")
+ }
+}
+
+impl std::error::Error for SomeConcreteType {
+ fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ request
+ .provide_ref::<String>(&self.some_string)
+ .provide_ref::<str>(&self.some_string)
+ .provide_value_with::<String>(|| "bye".to_owned());
+ }
+}
+
+// Test the Error.provide and request mechanisms with a by-reference trait object.
+#[test]
+fn test_error_generic_member_access() {
+ let obj = &SomeConcreteType { some_string: "hello".to_owned() };
+
+ assert_eq!(request_ref::<String>(&*obj).unwrap(), "hello");
+ assert_eq!(request_value::<String>(&*obj).unwrap(), "bye");
+ assert_eq!(request_value::<u8>(&obj), None);
+}
+
+// Test the Error.provide and request mechanisms with a `&dyn Error` trait object.
+#[test]
+fn test_request_constructor() {
+ let obj: &dyn std::error::Error = &SomeConcreteType { some_string: "hello".to_owned() };
+
+ assert_eq!(request_ref::<String>(&*obj).unwrap(), "hello");
+ assert_eq!(request_value::<String>(&*obj).unwrap(), "bye");
+ assert_eq!(request_value::<u8>(&obj), None);
+}
+
+// Test the Error.provide and request mechanisms with a boxed trait object.
+#[test]
+fn test_error_generic_member_access_boxed() {
+ let obj: Box<dyn std::error::Error> =
+ Box::new(SomeConcreteType { some_string: "hello".to_owned() });
+
+ assert_eq!(request_ref::<String>(&*obj).unwrap(), "hello");
+ assert_eq!(request_value::<String>(&*obj).unwrap(), "bye");
+
+ // NOTE: Box<E> only implements Error when E: Error + Sized, which means we can't pass a
+ // Box<dyn Error> to request_value.
+ //assert_eq!(request_value::<String>(&obj).unwrap(), "bye");
+}
+
+// Test the Error.provide and request mechanisms with a concrete object.
+#[test]
+fn test_error_generic_member_access_concrete() {
+ let obj = SomeConcreteType { some_string: "hello".to_owned() };
+
+ assert_eq!(request_ref::<String>(&obj).unwrap(), "hello");
+ assert_eq!(request_value::<String>(&obj).unwrap(), "bye");
+ assert_eq!(request_value::<u8>(&obj), None);
+}
diff --git a/library/core/tests/iter/adapters/map_windows.rs b/library/core/tests/iter/adapters/map_windows.rs
new file mode 100644
index 000000000..7fb2408f8
--- /dev/null
+++ b/library/core/tests/iter/adapters/map_windows.rs
@@ -0,0 +1,283 @@
+use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+
+#[cfg(not(panic = "abort"))]
+mod drop_checks {
+ //! These tests mainly make sure the elements are correctly dropped.
+ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst};
+
+ #[derive(Debug)]
+ struct DropInfo {
+ dropped_twice: AtomicBool,
+ alive_count: AtomicUsize,
+ }
+
+ impl DropInfo {
+ const fn new() -> Self {
+ Self { dropped_twice: AtomicBool::new(false), alive_count: AtomicUsize::new(0) }
+ }
+
+ #[track_caller]
+ fn check(&self) {
+ assert!(!self.dropped_twice.load(SeqCst), "a value was dropped twice");
+ assert_eq!(self.alive_count.load(SeqCst), 0);
+ }
+ }
+
+ #[derive(Debug)]
+ struct DropCheck<'a> {
+ info: &'a DropInfo,
+ was_dropped: bool,
+ }
+
+ impl<'a> DropCheck<'a> {
+ fn new(info: &'a DropInfo) -> Self {
+ info.alive_count.fetch_add(1, SeqCst);
+
+ Self { info, was_dropped: false }
+ }
+ }
+
+ impl Drop for DropCheck<'_> {
+ fn drop(&mut self) {
+ if self.was_dropped {
+ self.info.dropped_twice.store(true, SeqCst);
+ }
+ self.was_dropped = true;
+
+ self.info.alive_count.fetch_sub(1, SeqCst);
+ }
+ }
+
+ fn iter(info: &DropInfo, len: usize, panic_at: usize) -> impl Iterator<Item = DropCheck<'_>> {
+ (0..len).map(move |i| {
+ if i == panic_at {
+ panic!("intended panic");
+ }
+ DropCheck::new(info)
+ })
+ }
+
+ #[track_caller]
+ fn check<const N: usize>(len: usize, panic_at: usize) {
+ check_drops(|info| {
+ iter(info, len, panic_at).map_windows(|_: &[_; N]| {}).last();
+ });
+ }
+
+ #[track_caller]
+ fn check_drops(f: impl FnOnce(&DropInfo)) {
+ let info = DropInfo::new();
+ let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+ f(&info);
+ }));
+ info.check();
+ }
+
+ #[test]
+ fn no_iter_panic_n1() {
+ check::<1>(0, 100);
+ check::<1>(1, 100);
+ check::<1>(2, 100);
+ check::<1>(13, 100);
+ }
+
+ #[test]
+ fn no_iter_panic_n2() {
+ check::<2>(0, 100);
+ check::<2>(1, 100);
+ check::<2>(2, 100);
+ check::<2>(3, 100);
+ check::<2>(13, 100);
+ }
+
+ #[test]
+ fn no_iter_panic_n5() {
+ check::<5>(0, 100);
+ check::<5>(1, 100);
+ check::<5>(2, 100);
+ check::<5>(13, 100);
+ check::<5>(30, 100);
+ }
+
+ #[test]
+ fn panic_in_first_batch() {
+ check::<1>(7, 0);
+
+ check::<2>(7, 0);
+ check::<2>(7, 1);
+
+ check::<3>(7, 0);
+ check::<3>(7, 1);
+ check::<3>(7, 2);
+ }
+
+ #[test]
+ fn panic_in_middle() {
+ check::<1>(7, 1);
+ check::<1>(7, 5);
+ check::<1>(7, 6);
+
+ check::<2>(7, 2);
+ check::<2>(7, 5);
+ check::<2>(7, 6);
+
+ check::<5>(13, 5);
+ check::<5>(13, 8);
+ check::<5>(13, 12);
+ }
+
+ #[test]
+ fn len_equals_n() {
+ check::<1>(1, 100);
+ check::<1>(1, 0);
+
+ check::<2>(2, 100);
+ check::<2>(2, 0);
+ check::<2>(2, 1);
+
+ check::<5>(5, 100);
+ check::<5>(5, 0);
+ check::<5>(5, 1);
+ check::<5>(5, 4);
+ }
+}
+
+#[test]
+fn output_n1() {
+ assert_eq!("".chars().map_windows(|[c]| *c).collect::<Vec<_>>(), vec![]);
+ assert_eq!("x".chars().map_windows(|[c]| *c).collect::<Vec<_>>(), vec!['x']);
+ assert_eq!("abcd".chars().map_windows(|[c]| *c).collect::<Vec<_>>(), vec!['a', 'b', 'c', 'd']);
+}
+
+#[test]
+fn output_n2() {
+ assert_eq!(
+ "".chars().map_windows(|a: &[_; 2]| *a).collect::<Vec<_>>(),
+ <Vec<[char; 2]>>::new(),
+ );
+ assert_eq!("ab".chars().map_windows(|a: &[_; 2]| *a).collect::<Vec<_>>(), vec![['a', 'b']]);
+ assert_eq!(
+ "abcd".chars().map_windows(|a: &[_; 2]| *a).collect::<Vec<_>>(),
+ vec![['a', 'b'], ['b', 'c'], ['c', 'd']],
+ );
+}
+
+#[test]
+fn test_case_from_pr_82413_comment() {
+ for () in std::iter::repeat("0".to_owned()).map_windows(|_: &[_; 3]| {}).take(4) {}
+}
+
+#[test]
+#[should_panic = "array in `Iterator::map_windows` must contain more than 0 elements"]
+fn check_zero_window() {
+ let _ = std::iter::repeat(0).map_windows(|_: &[_; 0]| ());
+}
+
+#[test]
+fn test_zero_sized_type() {
+ #[derive(Copy, Clone, Debug, Eq, PartialEq)]
+ struct Data;
+ let data: Vec<_> =
+ std::iter::repeat(Data).take(10).map_windows(|arr: &[Data; 5]| *arr).collect();
+ assert_eq!(data, [[Data; 5]; 6]);
+}
+
+#[test]
+#[should_panic = "array size of `Iterator::map_windows` is too large"]
+fn test_too_large_array_size() {
+ let _ = std::iter::repeat(()).map_windows(|arr: &[(); usize::MAX]| *arr);
+}
+
+#[test]
+fn test_laziness() {
+ let counter = AtomicUsize::new(0);
+ let mut iter = (0..5)
+ .inspect(|_| {
+ counter.fetch_add(1, SeqCst);
+ })
+ .map_windows(|arr: &[i32; 2]| *arr);
+ assert_eq!(counter.load(SeqCst), 0);
+
+ assert_eq!(iter.next(), Some([0, 1]));
+ // The first iteration consumes N items (N = 2).
+ assert_eq!(counter.load(SeqCst), 2);
+
+ assert_eq!(iter.next(), Some([1, 2]));
+ assert_eq!(counter.load(SeqCst), 3);
+
+ assert_eq!(iter.next(), Some([2, 3]));
+ assert_eq!(counter.load(SeqCst), 4);
+
+ assert_eq!(iter.next(), Some([3, 4]));
+ assert_eq!(counter.load(SeqCst), 5);
+
+ assert_eq!(iter.next(), None);
+ assert_eq!(counter.load(SeqCst), 5);
+}
+
+#[test]
+fn test_size_hint() {
+ struct SizeHintCheckHelper((usize, Option<usize>));
+
+ impl Iterator for SizeHintCheckHelper {
+ type Item = i32;
+
+ fn next(&mut self) -> Option<i32> {
+ let (ref mut lo, ref mut hi) = self.0;
+ let next = (*hi != Some(0)).then_some(0);
+ *lo = lo.saturating_sub(1);
+ if let Some(hi) = hi {
+ *hi = hi.saturating_sub(1);
+ }
+ next
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0
+ }
+ }
+
+ fn check_size_hint<const N: usize>(
+ size_hint: (usize, Option<usize>),
+ mut mapped_size_hint: (usize, Option<usize>),
+ ) {
+ let mut iter = SizeHintCheckHelper(size_hint);
+ let mut mapped_iter = iter.by_ref().map_windows(|_: &[_; N]| ());
+ while mapped_iter.size_hint().0 > 0 {
+ assert_eq!(mapped_iter.size_hint(), mapped_size_hint);
+ assert!(mapped_iter.next().is_some());
+ mapped_size_hint.0 -= 1;
+ mapped_size_hint.1 = mapped_size_hint.1.map(|hi| hi.saturating_sub(1));
+ }
+ }
+
+ check_size_hint::<1>((0, None), (0, None));
+ check_size_hint::<1>((0, Some(0)), (0, Some(0)));
+ check_size_hint::<1>((0, Some(2)), (0, Some(2)));
+ check_size_hint::<1>((1, None), (1, None));
+ check_size_hint::<1>((1, Some(1)), (1, Some(1)));
+ check_size_hint::<1>((1, Some(4)), (1, Some(4)));
+ check_size_hint::<1>((5, None), (5, None));
+ check_size_hint::<1>((5, Some(5)), (5, Some(5)));
+ check_size_hint::<1>((5, Some(10)), (5, Some(10)));
+
+ check_size_hint::<2>((0, None), (0, None));
+ check_size_hint::<2>((0, Some(0)), (0, Some(0)));
+ check_size_hint::<2>((0, Some(2)), (0, Some(1)));
+ check_size_hint::<2>((1, None), (0, None));
+ check_size_hint::<2>((1, Some(1)), (0, Some(0)));
+ check_size_hint::<2>((1, Some(4)), (0, Some(3)));
+ check_size_hint::<2>((5, None), (4, None));
+ check_size_hint::<2>((5, Some(5)), (4, Some(4)));
+ check_size_hint::<2>((5, Some(10)), (4, Some(9)));
+
+ check_size_hint::<5>((0, None), (0, None));
+ check_size_hint::<5>((0, Some(0)), (0, Some(0)));
+ check_size_hint::<5>((0, Some(2)), (0, Some(0)));
+ check_size_hint::<5>((1, None), (0, None));
+ check_size_hint::<5>((1, Some(1)), (0, Some(0)));
+ check_size_hint::<5>((1, Some(4)), (0, Some(0)));
+ check_size_hint::<5>((5, None), (1, None));
+ check_size_hint::<5>((5, Some(5)), (1, Some(1)));
+ check_size_hint::<5>((5, Some(10)), (1, Some(6)));
+}
diff --git a/library/core/tests/iter/adapters/mod.rs b/library/core/tests/iter/adapters/mod.rs
index ca3463aa7..dedb4c0a9 100644
--- a/library/core/tests/iter/adapters/mod.rs
+++ b/library/core/tests/iter/adapters/mod.rs
@@ -13,6 +13,7 @@ mod fuse;
mod inspect;
mod intersperse;
mod map;
+mod map_windows;
mod peekable;
mod scan;
mod skip;
diff --git a/library/core/tests/iter/traits/iterator.rs b/library/core/tests/iter/traits/iterator.rs
index 9eebfb1f1..995bbf0e2 100644
--- a/library/core/tests/iter/traits/iterator.rs
+++ b/library/core/tests/iter/traits/iterator.rs
@@ -1,3 +1,4 @@
+use core::cmp::Ordering;
use core::num::NonZeroUsize;
/// A wrapper struct that implements `Eq` and `Ord` based on the wrapped
@@ -371,11 +372,39 @@ fn test_by_ref() {
#[test]
fn test_is_sorted() {
+ // Tests on integers
assert!([1, 2, 2, 9].iter().is_sorted());
assert!(![1, 3, 2].iter().is_sorted());
assert!([0].iter().is_sorted());
- assert!(std::iter::empty::<i32>().is_sorted());
+ assert!([0, 0].iter().is_sorted());
+ assert!(core::iter::empty::<i32>().is_sorted());
+
+ // Tests on floats
+ assert!([1.0f32, 2.0, 2.0, 9.0].iter().is_sorted());
+ assert!(![1.0f32, 3.0f32, 2.0f32].iter().is_sorted());
+ assert!([0.0f32].iter().is_sorted());
+ assert!([0.0f32, 0.0f32].iter().is_sorted());
+ // Test cases with NaNs
+ assert!([f32::NAN].iter().is_sorted());
+ assert!(![f32::NAN, f32::NAN].iter().is_sorted());
assert!(![0.0, 1.0, f32::NAN].iter().is_sorted());
+ // Tests from <https://github.com/rust-lang/rust/pull/55045#discussion_r229689884>
+ assert!(![f32::NAN, f32::NAN, f32::NAN].iter().is_sorted());
+ assert!(![1.0, f32::NAN, 2.0].iter().is_sorted());
+ assert!(![2.0, f32::NAN, 1.0].iter().is_sorted());
+ assert!(![2.0, f32::NAN, 1.0, 7.0].iter().is_sorted());
+ assert!(![2.0, f32::NAN, 1.0, 0.0].iter().is_sorted());
+ assert!(![-f32::NAN, -1.0, 0.0, 1.0, f32::NAN].iter().is_sorted());
+ assert!(![f32::NAN, -f32::NAN, -1.0, 0.0, 1.0].iter().is_sorted());
+ assert!(![1.0, f32::NAN, -f32::NAN, -1.0, 0.0].iter().is_sorted());
+ assert!(![0.0, 1.0, f32::NAN, -f32::NAN, -1.0].iter().is_sorted());
+ assert!(![-1.0, 0.0, 1.0, f32::NAN, -f32::NAN].iter().is_sorted());
+
+ // Tests for is_sorted_by
+ assert!(![6, 2, 8, 5, 1, -60, 1337].iter().is_sorted());
+ assert!([6, 2, 8, 5, 1, -60, 1337].iter().is_sorted_by(|_, _| Some(Ordering::Less)));
+
+ // Tests for is_sorted_by_key
assert!([-2, -1, 0, 3].iter().is_sorted());
assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs()));
assert!(!["c", "bb", "aaa"].iter().is_sorted());
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 3e6d31fcd..7a6def37a 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -93,7 +93,7 @@
#![feature(const_option)]
#![feature(const_option_ext)]
#![feature(const_result)]
-#![feature(integer_atomics)]
+#![cfg_attr(target_has_atomic = "128", feature(integer_atomics))]
#![feature(int_roundings)]
#![feature(slice_group_by)]
#![feature(split_array)]
@@ -105,11 +105,14 @@
#![feature(const_slice_from_ref)]
#![feature(waker_getters)]
#![feature(slice_flatten)]
-#![feature(provide_any)]
+#![feature(error_generic_member_access)]
+#![feature(error_in_core)]
+#![feature(trait_upcasting)]
#![feature(utf8_chunks)]
#![feature(is_ascii_octdigit)]
#![feature(get_many_mut)]
#![feature(offset_of)]
+#![feature(iter_map_windows)]
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
diff --git a/library/core/tests/manually_drop.rs b/library/core/tests/manually_drop.rs
index 22d72d219..bbf444471 100644
--- a/library/core/tests/manually_drop.rs
+++ b/library/core/tests/manually_drop.rs
@@ -1,4 +1,4 @@
-#![cfg_attr(not(bootstrap), allow(undropped_manually_drops))]
+#![allow(undropped_manually_drops)]
use core::mem::ManuallyDrop;
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 88f54591b..865e702b5 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -2278,11 +2278,39 @@ fn test_copy_within_panics_src_out_of_bounds() {
fn test_is_sorted() {
let empty: [i32; 0] = [];
+ // Tests on integers
assert!([1, 2, 2, 9].is_sorted());
assert!(![1, 3, 2].is_sorted());
assert!([0].is_sorted());
+ assert!([0, 0].is_sorted());
assert!(empty.is_sorted());
+
+ // Tests on floats
+ assert!([1.0f32, 2.0, 2.0, 9.0].is_sorted());
+ assert!(![1.0f32, 3.0f32, 2.0f32].is_sorted());
+ assert!([0.0f32].is_sorted());
+ assert!([0.0f32, 0.0f32].is_sorted());
+ // Test cases with NaNs
+ assert!([f32::NAN].is_sorted());
+ assert!(![f32::NAN, f32::NAN].is_sorted());
assert!(![0.0, 1.0, f32::NAN].is_sorted());
+ // Tests from <https://github.com/rust-lang/rust/pull/55045#discussion_r229689884>
+ assert!(![f32::NAN, f32::NAN, f32::NAN].is_sorted());
+ assert!(![1.0, f32::NAN, 2.0].is_sorted());
+ assert!(![2.0, f32::NAN, 1.0].is_sorted());
+ assert!(![2.0, f32::NAN, 1.0, 7.0].is_sorted());
+ assert!(![2.0, f32::NAN, 1.0, 0.0].is_sorted());
+ assert!(![-f32::NAN, -1.0, 0.0, 1.0, f32::NAN].is_sorted());
+ assert!(![f32::NAN, -f32::NAN, -1.0, 0.0, 1.0].is_sorted());
+ assert!(![1.0, f32::NAN, -f32::NAN, -1.0, 0.0].is_sorted());
+ assert!(![0.0, 1.0, f32::NAN, -f32::NAN, -1.0].is_sorted());
+ assert!(![-1.0, 0.0, 1.0, f32::NAN, -f32::NAN].is_sorted());
+
+ // Tests for is_sorted_by
+ assert!(![6, 2, 8, 5, 1, -60, 1337].is_sorted());
+ assert!([6, 2, 8, 5, 1, -60, 1337].is_sorted_by(|_, _| Some(Ordering::Less)));
+
+ // Tests for is_sorted_by_key
assert!([-2, -1, 0, 3].is_sorted());
assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
assert!(!["c", "bb", "aaa"].is_sorted());
diff --git a/library/panic_abort/src/lib.rs b/library/panic_abort/src/lib.rs
index b193d79b0..76b359196 100644
--- a/library/panic_abort/src/lib.rs
+++ b/library/panic_abort/src/lib.rs
@@ -14,6 +14,7 @@
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![feature(c_unwind)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[cfg(target_os = "android")]
mod android;
diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs
index ce78ab82e..009014de5 100644
--- a/library/panic_unwind/src/lib.rs
+++ b/library/panic_unwind/src/lib.rs
@@ -19,13 +19,14 @@
#![feature(panic_unwind)]
#![feature(staged_api)]
#![feature(std_internals)]
-#![feature(abi_thiscall)]
+#![cfg_attr(bootstrap, feature(abi_thiscall))]
#![feature(rustc_attrs)]
#![panic_runtime]
#![feature(panic_runtime)]
#![feature(c_unwind)]
// `real_imp` is unused with Miri, so silence warnings.
#![cfg_attr(miri, allow(dead_code))]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
use alloc::boxed::Box;
use core::any::Any;
diff --git a/library/portable-simd/crates/core_simd/examples/dot_product.rs b/library/portable-simd/crates/core_simd/examples/dot_product.rs
index 391f08f55..a7973ec74 100644
--- a/library/portable-simd/crates/core_simd/examples/dot_product.rs
+++ b/library/portable-simd/crates/core_simd/examples/dot_product.rs
@@ -130,7 +130,7 @@ pub fn dot_prod_simd_4(a: &[f32], b: &[f32]) -> f32 {
}
// This version allocates a single `XMM` register for accumulation, and the folds don't allocate on top of that.
-// Notice the the use of `mul_add`, which can do a multiply and an add operation ber iteration.
+// Notice the use of `mul_add`, which can do a multiply and an add operation per iteration.
pub fn dot_prod_simd_5(a: &[f32], b: &[f32]) -> f32 {
a.array_chunks::<4>()
.map(|&a| f32x4::from_array(a))
diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs
index 7fb0d989c..83d637b68 100644
--- a/library/proc_macro/src/lib.rs
+++ b/library/proc_macro/src/lib.rs
@@ -24,7 +24,6 @@
#![feature(staged_api)]
#![feature(allow_internal_unstable)]
#![feature(decl_macro)]
-#![feature(local_key_cell_methods)]
#![feature(maybe_uninit_write_slice)]
#![feature(negative_impls)]
#![feature(new_uninit)]
@@ -33,6 +32,7 @@
#![feature(min_specialization)]
#![feature(strict_provenance)]
#![recursion_limit = "256"]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#[unstable(feature = "proc_macro_internals", issue = "27812")]
#[doc(hidden)]
@@ -177,6 +177,7 @@ impl FromStr for TokenStream {
// N.B., the bridge only provides `to_string`, implement `fmt::Display`
// based on it (the reverse of the usual relationship between the two).
+#[doc(hidden)]
#[stable(feature = "proc_macro_lib", since = "1.15.0")]
impl ToString for TokenStream {
fn to_string(&self) -> String {
@@ -738,6 +739,7 @@ impl From<Literal> for TokenTree {
// N.B., the bridge only provides `to_string`, implement `fmt::Display`
// based on it (the reverse of the usual relationship between the two).
+#[doc(hidden)]
#[stable(feature = "proc_macro_lib", since = "1.15.0")]
impl ToString for TokenTree {
fn to_string(&self) -> String {
@@ -872,6 +874,7 @@ impl Group {
// N.B., the bridge only provides `to_string`, implement `fmt::Display`
// based on it (the reverse of the usual relationship between the two).
+#[doc(hidden)]
#[stable(feature = "proc_macro_lib", since = "1.15.0")]
impl ToString for Group {
fn to_string(&self) -> String {
@@ -913,21 +916,34 @@ impl !Send for Punct {}
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
impl !Sync for Punct {}
-/// Describes whether a `Punct` is followed immediately by another `Punct` ([`Spacing::Joint`]) or
-/// by a different token or whitespace ([`Spacing::Alone`]).
+/// Indicates whether a `Punct` token can join with the following token
+/// to form a multi-character operator.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
pub enum Spacing {
- /// A `Punct` is not immediately followed by another `Punct`.
- /// E.g. `+` is `Alone` in `+ =`, `+ident` and `+()`.
- #[stable(feature = "proc_macro_lib2", since = "1.29.0")]
- Alone,
- /// A `Punct` is immediately followed by another `Punct`.
- /// E.g. `+` is `Joint` in `+=` and `++`.
+ /// A `Punct` token can join with the following token to form a multi-character operator.
+ ///
+ /// In token streams constructed using proc macro interfaces `Joint` punctuation tokens can be
+ /// followed by any other tokens. \
+    /// However, in token streams parsed from source code the compiler will only set spacing to `Joint`
+ /// in the following cases:
+ /// - A `Punct` is immediately followed by another `Punct` without a whitespace. \
+ /// E.g. `+` is `Joint` in `+=` and `++`.
+ /// - A single quote `'` is immediately followed by an identifier without a whitespace. \
+ /// E.g. `'` is `Joint` in `'lifetime`.
///
- /// Additionally, single quote `'` can join with identifiers to form lifetimes: `'ident`.
+ /// This list may be extended in the future to enable more token combinations.
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
Joint,
+ /// A `Punct` token cannot join with the following token to form a multi-character operator.
+ ///
+ /// `Alone` punctuation tokens can be followed by any other tokens. \
+    /// In token streams parsed from source code the compiler will set spacing to `Alone` in all cases
+ /// not covered by the conditions for `Joint` above. \
+ /// E.g. `+` is `Alone` in `+ =`, `+ident` and `+()`.
+    /// In particular, a token not followed by anything will also be marked as `Alone`.
+ #[stable(feature = "proc_macro_lib2", since = "1.29.0")]
+ Alone,
}
impl Punct {
@@ -959,10 +975,9 @@ impl Punct {
self.0.ch as char
}
- /// Returns the spacing of this punctuation character, indicating whether it's immediately
- /// followed by another `Punct` in the token stream, so they can potentially be combined into
- /// a multi-character operator (`Joint`), or it's followed by some other token or whitespace
- /// (`Alone`) so the operator has certainly ended.
+ /// Returns the spacing of this punctuation character, indicating whether it can be potentially
+ /// combined into a multi-character operator with the following token (`Joint`), or the operator
+ /// has certainly ended (`Alone`).
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
pub fn spacing(&self) -> Spacing {
if self.0.joint { Spacing::Joint } else { Spacing::Alone }
@@ -981,6 +996,7 @@ impl Punct {
}
}
+#[doc(hidden)]
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
impl ToString for Punct {
fn to_string(&self) -> String {
@@ -1083,8 +1099,7 @@ impl Ident {
}
}
-/// Converts the identifier to a string that should be losslessly convertible
-/// back into the same identifier.
+#[doc(hidden)]
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
impl ToString for Ident {
fn to_string(&self) -> String {
@@ -1423,6 +1438,7 @@ impl FromStr for Literal {
}
}
+#[doc(hidden)]
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
impl ToString for Literal {
fn to_string(&self) -> String {
diff --git a/library/profiler_builtins/build.rs b/library/profiler_builtins/build.rs
index 1b1f11798..d14d0b822 100644
--- a/library/profiler_builtins/build.rs
+++ b/library/profiler_builtins/build.rs
@@ -6,6 +6,12 @@ use std::env;
use std::path::Path;
fn main() {
+ println!("cargo:rerun-if-env-changed=LLVM_PROFILER_RT_LIB");
+ if let Ok(rt) = env::var("LLVM_PROFILER_RT_LIB") {
+ println!("cargo:rustc-link-lib=static:+verbatim={rt}");
+ return;
+ }
+
let target = env::var("TARGET").expect("TARGET was not set");
let cfg = &mut cc::Build::new();
diff --git a/library/profiler_builtins/src/lib.rs b/library/profiler_builtins/src/lib.rs
index 0c83bcee0..a81d0a635 100644
--- a/library/profiler_builtins/src/lib.rs
+++ b/library/profiler_builtins/src/lib.rs
@@ -7,4 +7,5 @@
issue = "none"
)]
#![allow(unused_features)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#![feature(staged_api)]
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index e022c2d14..33c9c6e63 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -18,21 +18,19 @@ panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core", public = true }
libc = { version = "0.2.146", default-features = false, features = ['rustc-dep-of-std'], public = true }
-compiler_builtins = { version = "0.1.95" }
+compiler_builtins = { version = "0.1.100" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
hashbrown = { version = "0.14", default-features = false, features = ['rustc-dep-of-std'] }
std_detect = { path = "../stdarch/crates/std_detect", default-features = false, features = ['rustc-dep-of-std'] }
# Dependencies of the `backtrace` crate
-addr2line = { version = "0.21.0", optional = true, default-features = false }
rustc-demangle = { version = "0.1.21", features = ['rustc-dep-of-std'] }
-miniz_oxide = { version = "0.7.0", optional = true, default-features = false, public = false }
-[dependencies.object]
-version = "0.32.0"
-optional = true
-default-features = false
-features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
+
+[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies]
+miniz_oxide = { version = "0.7.0", optional = true, default-features = false }
+addr2line = { version = "0.21.0", optional = true, default-features = false }
+object = { version = "0.32.0", default-features = false, optional = true, features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive'] }
[dev-dependencies]
rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
@@ -45,9 +43,9 @@ dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] }
fortanix-sgx-abi = { version = "0.5.0", features = ['rustc-dep-of-std'], public = true }
[target.'cfg(target_os = "hermit")'.dependencies]
-hermit-abi = { version = "0.3.0", features = ['rustc-dep-of-std'] }
+hermit-abi = { version = "0.3.2", features = ['rustc-dep-of-std'], public = true }
-[target.wasm32-wasi.dependencies]
+[target.'cfg(target_os = "wasi")'.dependencies]
wasi = { version = "0.11.0", features = ['rustc-dep-of-std'], default-features = false }
[features]
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index ec774e62d..1eae7fa6a 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -336,7 +336,6 @@ fn default_alloc_error_hook(layout: Layout) {
static __rust_alloc_error_handler_should_panic: u8;
}
- #[allow(unused_unsafe)]
if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
panic!("memory allocation of {} bytes failed", layout.size());
} else {
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index 7543ffadd..e7110aebd 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -89,12 +89,12 @@ mod tests;
// a backtrace or actually symbolizing it.
use crate::backtrace_rs::{self, BytesOrWideString};
-use crate::cell::UnsafeCell;
use crate::env;
use crate::ffi::c_void;
use crate::fmt;
+use crate::panic::UnwindSafe;
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-use crate::sync::Once;
+use crate::sync::LazyLock;
use crate::sys_common::backtrace::{lock, output_filename};
use crate::vec::Vec;
@@ -133,12 +133,11 @@ pub enum BacktraceStatus {
enum Inner {
Unsupported,
Disabled,
- Captured(LazilyResolvedCapture),
+ Captured(LazyLock<Capture, LazyResolve>),
}
struct Capture {
actual_start: usize,
- resolved: bool,
frames: Vec<BacktraceFrame>,
}
@@ -179,7 +178,7 @@ impl fmt::Debug for Backtrace {
let capture = match &self.inner {
Inner::Unsupported => return fmt.write_str("<unsupported>"),
Inner::Disabled => return fmt.write_str("<disabled>"),
- Inner::Captured(c) => c.force(),
+ Inner::Captured(c) => &**c,
};
let frames = &capture.frames[capture.actual_start..];
@@ -347,11 +346,10 @@ impl Backtrace {
let inner = if frames.is_empty() {
Inner::Unsupported
} else {
- Inner::Captured(LazilyResolvedCapture::new(Capture {
+ Inner::Captured(LazyLock::new(lazy_resolve(Capture {
actual_start: actual_start.unwrap_or(0),
frames,
- resolved: false,
- }))
+ })))
};
Backtrace { inner }
@@ -376,7 +374,7 @@ impl<'a> Backtrace {
#[must_use]
#[unstable(feature = "backtrace_frames", issue = "79676")]
pub fn frames(&'a self) -> &'a [BacktraceFrame] {
- if let Inner::Captured(c) = &self.inner { &c.force().frames } else { &[] }
+ if let Inner::Captured(c) = &self.inner { &c.frames } else { &[] }
}
}
@@ -386,7 +384,7 @@ impl fmt::Display for Backtrace {
let capture = match &self.inner {
Inner::Unsupported => return fmt.write_str("unsupported backtrace"),
Inner::Disabled => return fmt.write_str("disabled backtrace"),
- Inner::Captured(c) => c.force(),
+ Inner::Captured(c) => &**c,
};
let full = fmt.alternate();
@@ -430,46 +428,15 @@ impl fmt::Display for Backtrace {
}
}
-struct LazilyResolvedCapture {
- sync: Once,
- capture: UnsafeCell<Capture>,
-}
-
-impl LazilyResolvedCapture {
- fn new(capture: Capture) -> Self {
- LazilyResolvedCapture { sync: Once::new(), capture: UnsafeCell::new(capture) }
- }
-
- fn force(&self) -> &Capture {
- self.sync.call_once(|| {
- // SAFETY: This exclusive reference can't overlap with any others
- // `Once` guarantees callers will block until this closure returns
- // `Once` also guarantees only a single caller will enter this closure
- unsafe { &mut *self.capture.get() }.resolve();
- });
-
- // SAFETY: This shared reference can't overlap with the exclusive reference above
- unsafe { &*self.capture.get() }
- }
-}
-
-// SAFETY: Access to the inner value is synchronized using a thread-safe `Once`
-// So long as `Capture` is `Sync`, `LazilyResolvedCapture` is too
-unsafe impl Sync for LazilyResolvedCapture where Capture: Sync {}
-
-impl Capture {
- fn resolve(&mut self) {
- // If we're already resolved, nothing to do!
- if self.resolved {
- return;
- }
- self.resolved = true;
+type LazyResolve = impl (FnOnce() -> Capture) + Send + Sync + UnwindSafe;
+fn lazy_resolve(mut capture: Capture) -> LazyResolve {
+ move || {
// Use the global backtrace lock to synchronize this as it's a
// requirement of the `backtrace` crate, and then actually resolve
// everything.
let _lock = lock();
- for frame in self.frames.iter_mut() {
+ for frame in capture.frames.iter_mut() {
let symbols = &mut frame.symbols;
let frame = match &frame.frame {
RawFrame::Actual(frame) => frame,
@@ -490,6 +457,8 @@ impl Capture {
});
}
}
+
+ capture
}
}
diff --git a/library/std/src/backtrace/tests.rs b/library/std/src/backtrace/tests.rs
index 4dfbf88e8..73543a3af 100644
--- a/library/std/src/backtrace/tests.rs
+++ b/library/std/src/backtrace/tests.rs
@@ -1,4 +1,5 @@
use super::*;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
fn generate_fake_frames() -> Vec<BacktraceFrame> {
vec![
@@ -43,9 +44,8 @@ fn generate_fake_frames() -> Vec<BacktraceFrame> {
#[test]
fn test_debug() {
let backtrace = Backtrace {
- inner: Inner::Captured(LazilyResolvedCapture::new(Capture {
+ inner: Inner::Captured(LazyLock::preinit(Capture {
actual_start: 1,
- resolved: true,
frames: generate_fake_frames(),
})),
};
@@ -66,9 +66,8 @@ fn test_debug() {
#[test]
fn test_frames() {
let backtrace = Backtrace {
- inner: Inner::Captured(LazilyResolvedCapture::new(Capture {
+ inner: Inner::Captured(LazyLock::preinit(Capture {
actual_start: 1,
- resolved: true,
frames: generate_fake_frames(),
})),
};
@@ -93,3 +92,9 @@ fn test_frames() {
assert!(iter.all(|(f, e)| format!("{f:#?}") == *e));
}
+
+#[test]
+fn backtrace_unwind_safe() {
+ fn assert_unwind_safe<T: UnwindSafe + RefUnwindSafe>() {}
+ assert_unwind_safe::<Backtrace>();
+}
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index a083b6560..be173a7ac 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -49,12 +49,14 @@ use crate::sys;
/// ```
///
/// In other words, if two keys are equal, their hashes must be equal.
+/// Violating this property is a logic error.
///
-/// It is a logic error for a key to be modified in such a way that the key's
+/// It is also a logic error for a key to be modified in such a way that the key's
/// hash, as determined by the [`Hash`] trait, or its equality, as determined by
/// the [`Eq`] trait, changes while it is in the map. This is normally only
/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
-/// The behavior resulting from such a logic error is not specified, but will
+///
+/// The behavior resulting from either logic error is not specified, but will
/// be encapsulated to the `HashMap` that observed the logic error and not
/// result in undefined behavior. This could include panics, incorrect results,
/// aborts, memory leaks, and non-termination.
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index ec59634df..6d85b26af 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -24,13 +24,14 @@ use super::map::{map_try_reserve_error, RandomState};
/// ```
///
/// In other words, if two keys are equal, their hashes must be equal.
+/// Violating this property is a logic error.
///
-///
-/// It is a logic error for a key to be modified in such a way that the key's
+/// It is also a logic error for a key to be modified in such a way that the key's
/// hash, as determined by the [`Hash`] trait, or its equality, as determined by
/// the [`Eq`] trait, changes while it is in the map. This is normally only
/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
-/// The behavior resulting from such a logic error is not specified, but will
+///
+/// The behavior resulting from either logic error is not specified, but will
/// be encapsulated to the `HashSet` that observed the logic error and not
/// result in undefined behavior. This could include panics, incorrect results,
/// aborts, memory leaks, and non-termination.
@@ -65,8 +66,8 @@ use super::map::{map_try_reserve_error, RandomState};
/// ```
///
/// The easiest way to use `HashSet` with a custom type is to derive
-/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`], this will in the
-/// future be implied by [`Eq`].
+/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`],
+/// which is required if [`Eq`] is derived.
///
/// ```
/// use std::collections::HashSet;
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
index d372fa640..f67f6034d 100644
--- a/library/std/src/env.rs
+++ b/library/std/src/env.rs
@@ -178,7 +178,8 @@ impl Iterator for Vars {
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Vars {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Vars").finish_non_exhaustive()
+ let Self { inner: VarsOs { inner } } = self;
+ f.debug_struct("Vars").field("inner", &inner.str_debug()).finish()
}
}
@@ -196,7 +197,8 @@ impl Iterator for VarsOs {
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for VarsOs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("VarOs").finish_non_exhaustive()
+ let Self { inner } = self;
+ f.debug_struct("VarsOs").field("inner", inner).finish()
}
}
@@ -829,7 +831,8 @@ impl DoubleEndedIterator for Args {
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Args").field("inner", &self.inner.inner).finish()
+ let Self { inner: ArgsOs { inner } } = self;
+ f.debug_struct("Args").field("inner", inner).finish()
}
}
@@ -870,7 +873,8 @@ impl DoubleEndedIterator for ArgsOs {
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ArgsOs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("ArgsOs").field("inner", &self.inner).finish()
+ let Self { inner } = self;
+ f.debug_struct("ArgsOs").field("inner", inner).finish()
}
}
@@ -890,6 +894,7 @@ pub mod consts {
/// - aarch64
/// - loongarch64
/// - m68k
+ /// - csky
/// - mips
/// - mips64
/// - powerpc
diff --git a/library/std/src/env/tests.rs b/library/std/src/env/tests.rs
index 94cace03a..558692295 100644
--- a/library/std/src/env/tests.rs
+++ b/library/std/src/env/tests.rs
@@ -95,8 +95,28 @@ fn args_debug() {
format!("Args {{ inner: {:?} }}", args().collect::<Vec<_>>()),
format!("{:?}", args())
);
+}
+
+#[test]
+fn args_os_debug() {
assert_eq!(
format!("ArgsOs {{ inner: {:?} }}", args_os().collect::<Vec<_>>()),
format!("{:?}", args_os())
);
}
+
+#[test]
+fn vars_debug() {
+ assert_eq!(
+ format!("Vars {{ inner: {:?} }}", vars().collect::<Vec<_>>()),
+ format!("{:?}", vars())
+ );
+}
+
+#[test]
+fn vars_os_debug() {
+ assert_eq!(
+ format!("VarsOs {{ inner: {:?} }}", vars_os().collect::<Vec<_>>()),
+ format!("{:?}", vars_os())
+ );
+}
diff --git a/library/std/src/error.rs b/library/std/src/error.rs
index 05f8fd8de..375ff2d24 100644
--- a/library/std/src/error.rs
+++ b/library/std/src/error.rs
@@ -9,6 +9,8 @@ use crate::fmt::{self, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::error::Error;
+#[unstable(feature = "error_generic_member_access", issue = "99301")]
+pub use core::error::{request_ref, request_value, Request};
mod private {
// This is a hack to prevent `type_id` from being overridden by `Error`
@@ -121,7 +123,8 @@ mod private {
/// This example produces the following output:
///
/// ```console
-/// thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: SuperError is here!: SuperErrorSideKick is here!', src/error.rs:34:40
+/// thread 'main' panicked at src/error.rs:34:40:
+/// called `Result::unwrap()` on an `Err` value: SuperError is here!: SuperErrorSideKick is here!
/// note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
/// ```
///
@@ -370,11 +373,10 @@ impl<E> Report<E> {
///
/// ```rust
/// #![feature(error_reporter)]
- /// #![feature(provide_any)]
/// #![feature(error_generic_member_access)]
/// # use std::error::Error;
/// # use std::fmt;
- /// use std::any::Demand;
+ /// use std::error::Request;
/// use std::error::Report;
/// use std::backtrace::Backtrace;
///
@@ -404,8 +406,8 @@ impl<E> Report<E> {
/// }
///
/// impl Error for SuperErrorSideKick {
- /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) {
- /// demand.provide_ref::<Backtrace>(&self.backtrace);
+ /// fn provide<'a>(&'a self, request: &mut Request<'a>) {
+ /// request.provide_ref::<Backtrace>(&self.backtrace);
/// }
/// }
///
@@ -458,11 +460,11 @@ where
fn backtrace(&self) -> Option<&Backtrace> {
// have to grab the backtrace on the first error directly since that error may not be
// 'static
- let backtrace = (&self.error as &dyn Error).request_ref();
+ let backtrace = request_ref(&self.error);
let backtrace = backtrace.or_else(|| {
self.error
.source()
- .map(|source| source.sources().find_map(|source| source.request_ref()))
+ .map(|source| source.sources().find_map(|source| request_ref(source)))
.flatten()
});
backtrace
diff --git a/library/std/src/error/tests.rs b/library/std/src/error/tests.rs
index ee999bd65..ed070a26b 100644
--- a/library/std/src/error/tests.rs
+++ b/library/std/src/error/tests.rs
@@ -1,6 +1,6 @@
use super::Error;
use crate::fmt;
-use core::any::Demand;
+use core::error::Request;
#[derive(Debug, PartialEq)]
struct A;
@@ -199,7 +199,7 @@ where
self.source.as_deref()
}
- fn provide<'a>(&'a self, req: &mut Demand<'a>) {
+ fn provide<'a>(&'a self, req: &mut Request<'a>) {
self.backtrace.as_ref().map(|bt| req.provide_ref::<Backtrace>(bt));
}
}
diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs
index bed90418b..a659b552f 100644
--- a/library/std/src/f32.rs
+++ b/library/std/src/f32.rs
@@ -61,6 +61,7 @@ impl f32 {
/// assert_eq!(f.ceil(), 4.0);
/// assert_eq!(g.ceil(), 4.0);
/// ```
+ #[doc(alias = "ceiling")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -135,6 +136,7 @@ impl f32 {
/// assert_eq!(g.trunc(), 3.0);
/// assert_eq!(h.trunc(), -3.0);
/// ```
+ #[doc(alias = "truncate")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -321,6 +323,7 @@ impl f32 {
/// // limitation due to round-off error
/// assert!((-f32::EPSILON).rem_euclid(3.0) != 0.0);
/// ```
+ #[doc(alias = "modulo", alias = "mod")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[inline]
@@ -500,10 +503,7 @@ impl f32 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn log2(self) -> f32 {
- #[cfg(target_os = "android")]
- return crate::sys::android::log2f32(self);
- #[cfg(not(target_os = "android"))]
- return unsafe { intrinsics::log2f32(self) };
+ crate::sys::log2f32(self)
}
/// Returns the base 10 logarithm of the number.
@@ -675,6 +675,7 @@ impl f32 {
///
/// assert!(abs_difference <= f32::EPSILON);
/// ```
+ #[doc(alias = "arcsin")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -697,6 +698,7 @@ impl f32 {
///
/// assert!(abs_difference <= f32::EPSILON);
/// ```
+ #[doc(alias = "arccos")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -718,6 +720,7 @@ impl f32 {
///
/// assert!(abs_difference <= f32::EPSILON);
/// ```
+ #[doc(alias = "arctan")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -775,6 +778,7 @@ impl f32 {
/// assert!(abs_difference_0 <= f32::EPSILON);
/// assert!(abs_difference_1 <= f32::EPSILON);
/// ```
+ #[doc(alias = "sincos")]
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
@@ -907,6 +911,7 @@ impl f32 {
///
/// assert!(abs_difference <= f32::EPSILON);
/// ```
+ #[doc(alias = "arcsinh")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -929,6 +934,7 @@ impl f32 {
///
/// assert!(abs_difference <= f32::EPSILON);
/// ```
+ #[doc(alias = "arccosh")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -953,6 +959,7 @@ impl f32 {
///
/// assert!(abs_difference <= 1e-5);
/// ```
+ #[doc(alias = "arctanh")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -960,4 +967,46 @@ impl f32 {
pub fn atanh(self) -> f32 {
0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
}
+
+ /// Gamma function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(float_gamma)]
+ /// let x = 5.0f32;
+ ///
+ /// let abs_difference = (x.gamma() - 24.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[unstable(feature = "float_gamma", issue = "99842")]
+ #[inline]
+ pub fn gamma(self) -> f32 {
+ unsafe { cmath::tgammaf(self) }
+ }
+
+ /// Returns the natural logarithm of the gamma function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(float_gamma)]
+ /// let x = 2.0f32;
+ ///
+ /// let abs_difference = (x.ln_gamma().0 - 0.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[unstable(feature = "float_gamma", issue = "99842")]
+ #[inline]
+ pub fn ln_gamma(self) -> (f32, i32) {
+ let mut signgamp: i32 = 0;
+ let x = unsafe { cmath::lgammaf_r(self, &mut signgamp) };
+ (x, signgamp)
+ }
}
diff --git a/library/std/src/f32/tests.rs b/library/std/src/f32/tests.rs
index e949def00..9ca4e8f2f 100644
--- a/library/std/src/f32/tests.rs
+++ b/library/std/src/f32/tests.rs
@@ -653,6 +653,38 @@ fn test_atanh() {
}
#[test]
+fn test_gamma() {
+ // precision can differ between platforms
+ assert_approx_eq!(1.0f32.gamma(), 1.0f32);
+ assert_approx_eq!(2.0f32.gamma(), 1.0f32);
+ assert_approx_eq!(3.0f32.gamma(), 2.0f32);
+ assert_approx_eq!(4.0f32.gamma(), 6.0f32);
+ assert_approx_eq!(5.0f32.gamma(), 24.0f32);
+ assert_approx_eq!(0.5f32.gamma(), consts::PI.sqrt());
+ assert_approx_eq!((-0.5f32).gamma(), -2.0 * consts::PI.sqrt());
+ assert_eq!(0.0f32.gamma(), f32::INFINITY);
+ assert_eq!((-0.0f32).gamma(), f32::NEG_INFINITY);
+ assert!((-1.0f32).gamma().is_nan());
+ assert!((-2.0f32).gamma().is_nan());
+ assert!(f32::NAN.gamma().is_nan());
+ assert!(f32::NEG_INFINITY.gamma().is_nan());
+ assert_eq!(f32::INFINITY.gamma(), f32::INFINITY);
+ assert_eq!(171.71f32.gamma(), f32::INFINITY);
+}
+
+#[test]
+fn test_ln_gamma() {
+ assert_approx_eq!(1.0f32.ln_gamma().0, 0.0f32);
+ assert_eq!(1.0f32.ln_gamma().1, 1);
+ assert_approx_eq!(2.0f32.ln_gamma().0, 0.0f32);
+ assert_eq!(2.0f32.ln_gamma().1, 1);
+ assert_approx_eq!(3.0f32.ln_gamma().0, 2.0f32.ln());
+ assert_eq!(3.0f32.ln_gamma().1, 1);
+ assert_approx_eq!((-0.5f32).ln_gamma().0, (2.0 * consts::PI.sqrt()).ln());
+ assert_eq!((-0.5f32).ln_gamma().1, -1);
+}
+
+#[test]
fn test_real_consts() {
use super::consts;
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs
index e72de05ca..721e1fb75 100644
--- a/library/std/src/f64.rs
+++ b/library/std/src/f64.rs
@@ -61,6 +61,7 @@ impl f64 {
/// assert_eq!(f.ceil(), 4.0);
/// assert_eq!(g.ceil(), 4.0);
/// ```
+ #[doc(alias = "ceiling")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -135,6 +136,7 @@ impl f64 {
/// assert_eq!(g.trunc(), 3.0);
/// assert_eq!(h.trunc(), -3.0);
/// ```
+ #[doc(alias = "truncate")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -321,6 +323,7 @@ impl f64 {
/// // limitation due to round-off error
/// assert!((-f64::EPSILON).rem_euclid(3.0) != 0.0);
/// ```
+ #[doc(alias = "modulo", alias = "mod")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[inline]
@@ -456,7 +459,7 @@ impl f64 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn ln(self) -> f64 {
- self.log_wrapper(|n| unsafe { intrinsics::logf64(n) })
+ crate::sys::log_wrapper(self, |n| unsafe { intrinsics::logf64(n) })
}
/// Returns the logarithm of the number with respect to an arbitrary base.
@@ -500,12 +503,7 @@ impl f64 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn log2(self) -> f64 {
- self.log_wrapper(|n| {
- #[cfg(target_os = "android")]
- return crate::sys::android::log2f64(n);
- #[cfg(not(target_os = "android"))]
- return unsafe { intrinsics::log2f64(n) };
- })
+ crate::sys::log_wrapper(self, crate::sys::log2f64)
}
/// Returns the base 10 logarithm of the number.
@@ -525,7 +523,7 @@ impl f64 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn log10(self) -> f64 {
- self.log_wrapper(|n| unsafe { intrinsics::log10f64(n) })
+ crate::sys::log_wrapper(self, |n| unsafe { intrinsics::log10f64(n) })
}
/// The positive difference of two numbers.
@@ -677,6 +675,7 @@ impl f64 {
///
/// assert!(abs_difference < 1e-10);
/// ```
+ #[doc(alias = "arcsin")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -699,6 +698,7 @@ impl f64 {
///
/// assert!(abs_difference < 1e-10);
/// ```
+ #[doc(alias = "arccos")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -720,6 +720,7 @@ impl f64 {
///
/// assert!(abs_difference < 1e-10);
/// ```
+ #[doc(alias = "arctan")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -777,6 +778,7 @@ impl f64 {
/// assert!(abs_difference_0 < 1e-10);
/// assert!(abs_difference_1 < 1e-10);
/// ```
+ #[doc(alias = "sincos")]
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
@@ -909,6 +911,7 @@ impl f64 {
///
/// assert!(abs_difference < 1.0e-10);
/// ```
+ #[doc(alias = "arcsinh")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -931,6 +934,7 @@ impl f64 {
///
/// assert!(abs_difference < 1.0e-10);
/// ```
+ #[doc(alias = "arccosh")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -955,6 +959,7 @@ impl f64 {
///
/// assert!(abs_difference < 1.0e-10);
/// ```
+ #[doc(alias = "arctanh")]
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -963,27 +968,45 @@ impl f64 {
0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
}
- // Solaris/Illumos requires a wrapper around log, log2, and log10 functions
- // because of their non-standard behavior (e.g., log(-n) returns -Inf instead
- // of expected NaN).
- #[rustc_allow_incoherent_impl]
- fn log_wrapper<F: Fn(f64) -> f64>(self, log_fn: F) -> f64 {
- if !cfg!(any(target_os = "solaris", target_os = "illumos")) {
- log_fn(self)
- } else if self.is_finite() {
- if self > 0.0 {
- log_fn(self)
- } else if self == 0.0 {
- Self::NEG_INFINITY // log(0) = -Inf
- } else {
- Self::NAN // log(-n) = NaN
- }
- } else if self.is_nan() {
- self // log(NaN) = NaN
- } else if self > 0.0 {
- self // log(Inf) = Inf
- } else {
- Self::NAN // log(-Inf) = NaN
- }
+ /// Gamma function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(float_gamma)]
+ /// let x = 5.0f64;
+ ///
+ /// let abs_difference = (x.gamma() - 24.0).abs();
+ ///
+ /// assert!(abs_difference <= f64::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[unstable(feature = "float_gamma", issue = "99842")]
+ #[inline]
+ pub fn gamma(self) -> f64 {
+ unsafe { cmath::tgamma(self) }
+ }
+
+ /// Returns the natural logarithm of the gamma function.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(float_gamma)]
+ /// let x = 2.0f64;
+ ///
+ /// let abs_difference = (x.ln_gamma().0 - 0.0).abs();
+ ///
+ /// assert!(abs_difference <= f64::EPSILON);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[unstable(feature = "float_gamma", issue = "99842")]
+ #[inline]
+ pub fn ln_gamma(self) -> (f64, i32) {
+ let mut signgamp: i32 = 0;
+ let x = unsafe { cmath::lgamma_r(self, &mut signgamp) };
+ (x, signgamp)
}
}
diff --git a/library/std/src/f64/tests.rs b/library/std/src/f64/tests.rs
index 53d351cce..f88d01593 100644
--- a/library/std/src/f64/tests.rs
+++ b/library/std/src/f64/tests.rs
@@ -636,6 +636,38 @@ fn test_atanh() {
}
#[test]
+fn test_gamma() {
+ // precision can differ between platforms
+ assert_approx_eq!(1.0f64.gamma(), 1.0f64);
+ assert_approx_eq!(2.0f64.gamma(), 1.0f64);
+ assert_approx_eq!(3.0f64.gamma(), 2.0f64);
+ assert_approx_eq!(4.0f64.gamma(), 6.0f64);
+ assert_approx_eq!(5.0f64.gamma(), 24.0f64);
+ assert_approx_eq!(0.5f64.gamma(), consts::PI.sqrt());
+ assert_approx_eq!((-0.5f64).gamma(), -2.0 * consts::PI.sqrt());
+ assert_eq!(0.0f64.gamma(), f64::INFINITY);
+ assert_eq!((-0.0f64).gamma(), f64::NEG_INFINITY);
+ assert!((-1.0f64).gamma().is_nan());
+ assert!((-2.0f64).gamma().is_nan());
+ assert!(f64::NAN.gamma().is_nan());
+ assert!(f64::NEG_INFINITY.gamma().is_nan());
+ assert_eq!(f64::INFINITY.gamma(), f64::INFINITY);
+ assert_eq!(171.71f64.gamma(), f64::INFINITY);
+}
+
+#[test]
+fn test_ln_gamma() {
+ assert_approx_eq!(1.0f64.ln_gamma().0, 0.0f64);
+ assert_eq!(1.0f64.ln_gamma().1, 1);
+ assert_approx_eq!(2.0f64.ln_gamma().0, 0.0f64);
+ assert_eq!(2.0f64.ln_gamma().1, 1);
+ assert_approx_eq!(3.0f64.ln_gamma().0, 2.0f64.ln());
+ assert_eq!(3.0f64.ln_gamma().1, 1);
+ assert_approx_eq!((-0.5f64).ln_gamma().0, (2.0 * consts::PI.sqrt()).ln());
+ assert_eq!((-0.5f64).ln_gamma().1, -1);
+}
+
+#[test]
fn test_real_consts() {
use super::consts;
let pi: f64 = consts::PI;
diff --git a/library/std/src/ffi/mod.rs b/library/std/src/ffi/mod.rs
index 3ddb87487..ee9f6ed08 100644
--- a/library/std/src/ffi/mod.rs
+++ b/library/std/src/ffi/mod.rs
@@ -156,6 +156,8 @@
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub use alloc::ffi::{CString, FromVecWithNulError, IntoStringError, NulError};
+#[stable(feature = "cstr_from_bytes_until_nul", since = "1.73.0")]
+pub use core::ffi::FromBytesUntilNulError;
#[stable(feature = "core_c_str", since = "1.64.0")]
pub use core::ffi::{CStr, FromBytesWithNulError};
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index e7bad9d54..43cecb19b 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -110,12 +110,12 @@ impl crate::sealed::Sealed for OsString {}
/// [conversions]: super#conversions
#[cfg_attr(not(test), rustc_diagnostic_item = "OsStr")]
#[stable(feature = "rust1", since = "1.0.0")]
-// FIXME:
// `OsStr::from_inner` current implementation relies
// on `OsStr` being layout-compatible with `Slice`.
-// When attribute privacy is implemented, `OsStr` should be annotated as `#[repr(transparent)]`.
-// Anyway, `OsStr` representation and layout are considered implementation details, are
-// not documented and must not be relied upon.
+// However, `OsStr` layout is considered an implementation detail and must not be relied upon. We
+// want `repr(transparent)` but we don't want it to show up in rustdoc, so we hide it under
+// `cfg(doc)`. This is an ad-hoc implementation of attribute privacy.
+#[cfg_attr(not(doc), repr(transparent))]
pub struct OsStr {
inner: Slice,
}
@@ -141,6 +141,51 @@ impl OsString {
OsString { inner: Buf::from_string(String::new()) }
}
+ /// Converts bytes to an `OsString` without checking that the bytes contain
+ /// valid [`OsStr`]-encoded data.
+ ///
+ /// The byte encoding is an unspecified, platform-specific, self-synchronizing superset of UTF-8.
+ /// By being a self-synchronizing superset of UTF-8, this encoding is also a superset of 7-bit
+ /// ASCII.
+ ///
+ /// See the [module's toplevel documentation about conversions][conversions] for safe,
+ /// cross-platform [conversions] from/to native representations.
+ ///
+ /// # Safety
+ ///
+ /// As the encoding is unspecified, callers must pass in bytes that originated as a mixture of
+ /// validated UTF-8 and bytes from [`OsStr::as_os_str_bytes`] from within the same rust version
+ /// built for the same target platform. For example, reconstructing an `OsString` from bytes sent
+ /// over the network or stored in a file will likely violate these safety rules.
+ ///
+ /// Due to the encoding being self-synchronizing, the bytes from [`OsStr::as_os_str_bytes`] can be
+ /// split either immediately before or immediately after any valid non-empty UTF-8 substring.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(os_str_bytes)]
+ ///
+ /// use std::ffi::OsStr;
+ ///
+ /// let os_str = OsStr::new("Mary had a little lamb");
+ /// let bytes = os_str.as_os_str_bytes();
+ /// let words = bytes.split(|b| *b == b' ');
+ /// let words: Vec<&OsStr> = words.map(|word| {
+ /// // SAFETY:
+ /// // - Each `word` only contains content that originated from `OsStr::as_os_str_bytes`
+ /// // - Only split with ASCII whitespace which is a non-empty UTF-8 substring
+ /// unsafe { OsStr::from_os_str_bytes_unchecked(word) }
+ /// }).collect();
+ /// ```
+ ///
+ /// [conversions]: super#conversions
+ #[inline]
+ #[unstable(feature = "os_str_bytes", issue = "111544")]
+ pub unsafe fn from_os_str_bytes_unchecked(bytes: Vec<u8>) -> Self {
+ OsString { inner: Buf::from_os_str_bytes_unchecked(bytes) }
+ }
+
/// Converts to an [`OsStr`] slice.
///
/// # Examples
@@ -159,6 +204,26 @@ impl OsString {
self
}
+ /// Converts the `OsString` into a byte slice. To convert the byte slice back into an
+ /// `OsString`, use the [`OsString::from_os_str_bytes_unchecked`] function.
+ ///
+ /// The byte encoding is an unspecified, platform-specific, self-synchronizing superset of UTF-8.
+ /// By being a self-synchronizing superset of UTF-8, this encoding is also a superset of 7-bit
+ /// ASCII.
+ ///
+ /// Note: As the encoding is unspecified, any sub-slice of bytes that is not valid UTF-8 should
+ /// be treated as opaque and only comparable within the same rust version built for the same
+ /// target platform. For example, sending the bytes over the network or storing it in a file
+ /// will likely result in incompatible data. See [`OsString`] for more encoding details
+ /// and [`std::ffi`] for platform-specific, specified conversions.
+ ///
+ /// [`std::ffi`]: crate::ffi
+ #[inline]
+ #[unstable(feature = "os_str_bytes", issue = "111544")]
+ pub fn into_os_str_bytes(self) -> Vec<u8> {
+ self.inner.into_os_str_bytes()
+ }
+
/// Converts the `OsString` into a [`String`] if it contains valid Unicode data.
///
/// On failure, ownership of the original `OsString` is returned.
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index c2d82169d..4094e3780 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -16,6 +16,7 @@ use crate::fmt;
use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write};
use crate::path::{Path, PathBuf};
use crate::sealed::Sealed;
+use crate::sync::Arc;
use crate::sys::fs as fs_imp;
use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
use crate::time::SystemTime;
@@ -743,7 +744,7 @@ fn buffer_capacity_required(mut file: &File) -> Option<usize> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl Read for File {
+impl Read for &File {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
@@ -776,7 +777,7 @@ impl Read for File {
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl Write for File {
+impl Write for &File {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
@@ -790,72 +791,107 @@ impl Write for File {
self.inner.is_write_vectored()
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl Seek for File {
+impl Seek for &File {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.inner.seek(pos)
}
}
+
#[stable(feature = "rust1", since = "1.0.0")]
-impl Read for &File {
+impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- self.inner.read(buf)
+ (&*self).read(buf)
+ }
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ (&*self).read_vectored(bufs)
}
-
fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
- self.inner.read_buf(cursor)
+ (&*self).read_buf(cursor)
+ }
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ (&&*self).is_read_vectored()
}
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ (&*self).read_to_end(buf)
+ }
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ (&*self).read_to_string(buf)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Write for File {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (&*self).write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ (&*self).write_vectored(bufs)
+ }
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ (&&*self).is_write_vectored()
+ }
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ (&*self).flush()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Seek for File {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ (&*self).seek(pos)
+ }
+}
+#[stable(feature = "io_traits_arc", since = "1.73.0")]
+impl Read for Arc<File> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (&**self).read(buf)
+ }
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
- self.inner.read_vectored(bufs)
+ (&**self).read_vectored(bufs)
+ }
+ fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ (&**self).read_buf(cursor)
}
-
#[inline]
fn is_read_vectored(&self) -> bool {
- self.inner.is_read_vectored()
+ (&**self).is_read_vectored()
}
-
- // Reserves space in the buffer based on the file size when available.
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
- let size = buffer_capacity_required(self);
- buf.reserve(size.unwrap_or(0));
- io::default_read_to_end(self, buf, size)
+ (&**self).read_to_end(buf)
}
-
- // Reserves space in the buffer based on the file size when available.
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
- let size = buffer_capacity_required(self);
- buf.reserve(size.unwrap_or(0));
- io::default_read_to_string(self, buf, size)
+ (&**self).read_to_string(buf)
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Write for &File {
+#[stable(feature = "io_traits_arc", since = "1.73.0")]
+impl Write for Arc<File> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- self.inner.write(buf)
+ (&**self).write(buf)
}
-
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
- self.inner.write_vectored(bufs)
+ (&**self).write_vectored(bufs)
}
-
#[inline]
fn is_write_vectored(&self) -> bool {
- self.inner.is_write_vectored()
+ (&**self).is_write_vectored()
}
-
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
- self.inner.flush()
+ (&**self).flush()
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Seek for &File {
+#[stable(feature = "io_traits_arc", since = "1.73.0")]
+impl Seek for Arc<File> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
- self.inner.seek(pos)
+ (&**self).seek(pos)
}
}
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index 9ff01b9c3..d74f0f00e 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -957,6 +957,7 @@ fn readlink_not_symlink() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating hardlinks
fn links_work() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
@@ -1453,6 +1454,7 @@ fn metadata_access_times() {
/// Test creating hard links to symlinks.
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating hardlinks
fn symlink_hard_link() {
let tmpdir = tmpdir();
if !got_symlink_permission(&tmpdir) {
diff --git a/library/std/src/io/copy.rs b/library/std/src/io/copy.rs
index ef1f4031e..3322940d2 100644
--- a/library/std/src/io/copy.rs
+++ b/library/std/src/io/copy.rs
@@ -1,4 +1,8 @@
use super::{BorrowedBuf, BufReader, BufWriter, ErrorKind, Read, Result, Write, DEFAULT_BUF_SIZE};
+use crate::alloc::Allocator;
+use crate::cmp;
+use crate::collections::VecDeque;
+use crate::io::IoSlice;
use crate::mem::MaybeUninit;
#[cfg(test)]
@@ -86,7 +90,7 @@ where
/// Specialization of the read-write loop that reuses the internal
/// buffer of a BufReader. If there's no buffer then the writer side
-/// should be used intead.
+/// should be used instead.
trait BufferedReaderSpec {
fn buffer_size(&self) -> usize;
@@ -104,7 +108,39 @@ where
}
default fn copy_to(&mut self, _to: &mut (impl Write + ?Sized)) -> Result<u64> {
- unimplemented!("only called from specializations");
+ unreachable!("only called from specializations")
+ }
+}
+
+impl BufferedReaderSpec for &[u8] {
+ fn buffer_size(&self) -> usize {
+ // prefer this specialization since the source "buffer" is all we'll ever need,
+ // even if it's small
+ usize::MAX
+ }
+
+ fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
+ let len = self.len();
+ to.write_all(self)?;
+ *self = &self[len..];
+ Ok(len as u64)
+ }
+}
+
+impl<A: Allocator> BufferedReaderSpec for VecDeque<u8, A> {
+ fn buffer_size(&self) -> usize {
+ // prefer this specialization since the source "buffer" is all we'll ever need,
+ // even if it's small
+ usize::MAX
+ }
+
+ fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
+ let len = self.len();
+ let (front, back) = self.as_slices();
+ let bufs = &mut [IoSlice::new(front), IoSlice::new(back)];
+ to.write_all_vectored(bufs)?;
+ self.clear();
+ Ok(len as u64)
}
}
@@ -218,6 +254,47 @@ impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> {
}
}
+impl<A: Allocator> BufferedWriterSpec for Vec<u8, A> {
+ fn buffer_size(&self) -> usize {
+ cmp::max(DEFAULT_BUF_SIZE, self.capacity() - self.len())
+ }
+
+ fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
+ let mut bytes = 0;
+
+ // avoid allocating before we have determined that there's anything to read
+ if self.capacity() == 0 {
+ bytes = stack_buffer_copy(&mut reader.take(DEFAULT_BUF_SIZE as u64), self)?;
+ if bytes == 0 {
+ return Ok(0);
+ }
+ }
+
+ loop {
+ self.reserve(DEFAULT_BUF_SIZE);
+ let mut buf: BorrowedBuf<'_> = self.spare_capacity_mut().into();
+ match reader.read_buf(buf.unfilled()) {
+ Ok(()) => {}
+ Err(e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ };
+
+ let read = buf.filled().len();
+ if read == 0 {
+ break;
+ }
+
+ // SAFETY: BorrowedBuf guarantees all of its filled bytes are init
+ // and the number of read bytes can't exceed the spare capacity since
+ // that's what the buffer is borrowing from.
+ unsafe { self.set_len(self.len() + read) };
+ bytes += read as u64;
+ }
+
+ Ok(bytes)
+ }
+}
+
fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
reader: &mut R,
writer: &mut W,
diff --git a/library/std/src/io/copy/tests.rs b/library/std/src/io/copy/tests.rs
index 8c816af15..af137eaf8 100644
--- a/library/std/src/io/copy/tests.rs
+++ b/library/std/src/io/copy/tests.rs
@@ -1,4 +1,6 @@
use crate::cmp::{max, min};
+use crate::collections::VecDeque;
+use crate::io;
use crate::io::*;
#[test]
@@ -19,7 +21,7 @@ struct ShortReader {
impl Read for ShortReader {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
- let bytes = min(self.cap, self.read_size);
+ let bytes = min(self.cap, self.read_size).min(buf.len());
self.cap -= bytes;
self.observed_buffer = max(self.observed_buffer, buf.len());
Ok(bytes)
@@ -78,6 +80,40 @@ fn copy_specializes_bufreader() {
);
}
+#[test]
+fn copy_specializes_to_vec() {
+ let cap = 123456;
+ let mut source = ShortReader { cap, observed_buffer: 0, read_size: 1337 };
+ let mut sink = Vec::new();
+ assert_eq!(cap as u64, io::copy(&mut source, &mut sink).unwrap());
+ assert!(
+ source.observed_buffer > DEFAULT_BUF_SIZE,
+ "expected a large buffer to be provided to the reader"
+ );
+}
+
+#[test]
+fn copy_specializes_from_vecdeque() {
+ let mut source = VecDeque::with_capacity(100 * 1024);
+ for _ in 0..20 * 1024 {
+ source.push_front(0);
+ }
+ for _ in 0..20 * 1024 {
+ source.push_back(0);
+ }
+ let mut sink = WriteObserver { observed_buffer: 0 };
+ assert_eq!(40 * 1024u64, io::copy(&mut source, &mut sink).unwrap());
+ assert_eq!(20 * 1024, sink.observed_buffer);
+}
+
+#[test]
+fn copy_specializes_from_slice() {
+ let mut source = [1; 60 * 1024].as_slice();
+ let mut sink = WriteObserver { observed_buffer: 0 };
+ assert_eq!(60 * 1024u64, io::copy(&mut source, &mut sink).unwrap());
+ assert_eq!(60 * 1024, sink.observed_buffer);
+}
+
#[cfg(unix)]
mod io_benches {
use crate::fs::File;
diff --git a/library/std/src/io/readbuf.rs b/library/std/src/io/readbuf.rs
index 4800eeda0..034ddd8df 100644
--- a/library/std/src/io/readbuf.rs
+++ b/library/std/src/io/readbuf.rs
@@ -99,6 +99,13 @@ impl<'data> BorrowedBuf<'data> {
unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) }
}
+ /// Returns a mutable reference to the filled portion of the buffer.
+ #[inline]
+ pub fn filled_mut(&mut self) -> &mut [u8] {
+ // SAFETY: We only slice the filled part of the buffer, which is always valid
+ unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[0..self.filled]) }
+ }
+
/// Returns a cursor over the unfilled part of the buffer.
#[inline]
pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> {
@@ -303,6 +310,7 @@ impl<'a> Write for BorrowedCursor<'a> {
Ok(buf.len())
}
+ #[inline]
fn flush(&mut self) -> Result<()> {
Ok(())
}
diff --git a/library/std/src/io/util.rs b/library/std/src/io/util.rs
index f076ee092..6bc8f181c 100644
--- a/library/std/src/io/util.rs
+++ b/library/std/src/io/util.rs
@@ -8,24 +8,41 @@ use crate::io::{
self, BorrowedCursor, BufRead, IoSlice, IoSliceMut, Read, Seek, SeekFrom, SizeHint, Write,
};
-/// A reader which is always at EOF.
+/// `Empty` ignores any data written via [`Write`], and will always be empty
+/// (returning zero bytes) when read via [`Read`].
///
-/// This struct is generally created by calling [`empty()`]. Please see
-/// the documentation of [`empty()`] for more details.
+/// This struct is generally created by calling [`empty()`]. Please
+/// see the documentation of [`empty()`] for more details.
#[stable(feature = "rust1", since = "1.0.0")]
#[non_exhaustive]
-#[derive(Copy, Clone, Default)]
+#[derive(Copy, Clone, Debug, Default)]
pub struct Empty;
-/// Constructs a new handle to an empty reader.
+/// Creates a value that is always at EOF for reads, and ignores all data written.
///
-/// All reads from the returned reader will return <code>[Ok]\(0)</code>.
+/// All calls to [`write`] on the returned instance will return [`Ok(buf.len())`]
+/// and the contents of the buffer will not be inspected.
+///
+/// All calls to [`read`] from the returned reader will return [`Ok(0)`].
+///
+/// [`Ok(buf.len())`]: Ok
+/// [`Ok(0)`]: Ok
+///
+/// [`write`]: Write::write
+/// [`read`]: Read::read
///
/// # Examples
///
-/// A slightly sad example of not reading anything into a buffer:
+/// ```rust
+/// use std::io::{self, Write};
///
+/// let buffer = vec![1, 2, 3, 5, 8];
+/// let num_bytes = io::empty().write(&buffer).unwrap();
+/// assert_eq!(num_bytes, 5);
/// ```
+///
+///
+/// ```rust
/// use std::io::{self, Read};
///
/// let mut buffer = String::new();
@@ -76,13 +93,6 @@ impl Seek for Empty {
}
}
-#[stable(feature = "std_debug", since = "1.16.0")]
-impl fmt::Debug for Empty {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Empty").finish_non_exhaustive()
- }
-}
-
impl SizeHint for Empty {
#[inline]
fn upper_bound(&self) -> Option<usize> {
@@ -90,6 +100,54 @@ impl SizeHint for Empty {
}
}
+#[stable(feature = "empty_write", since = "1.73.0")]
+impl Write for Empty {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let total_len = bufs.iter().map(|b| b.len()).sum();
+ Ok(total_len)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "empty_write", since = "1.73.0")]
+impl Write for &Empty {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ let total_len = bufs.iter().map(|b| b.len()).sum();
+ Ok(total_len)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
/// A reader which yields one byte over and over and over and over and over and...
///
/// This struct is generally created by calling [`repeat()`]. Please
@@ -182,19 +240,20 @@ impl fmt::Debug for Repeat {
/// A writer which will move data into the void.
///
-/// This struct is generally created by calling [`sink`]. Please
+/// This struct is generally created by calling [`sink()`]. Please
/// see the documentation of [`sink()`] for more details.
#[stable(feature = "rust1", since = "1.0.0")]
#[non_exhaustive]
-#[derive(Copy, Clone, Default)]
+#[derive(Copy, Clone, Debug, Default)]
pub struct Sink;
/// Creates an instance of a writer which will successfully consume all data.
///
-/// All calls to [`write`] on the returned instance will return `Ok(buf.len())`
+/// All calls to [`write`] on the returned instance will return [`Ok(buf.len())`]
/// and the contents of the buffer will not be inspected.
///
/// [`write`]: Write::write
+/// [`Ok(buf.len())`]: Ok
///
/// # Examples
///
@@ -259,10 +318,3 @@ impl Write for &Sink {
Ok(())
}
}
-
-#[stable(feature = "std_debug", since = "1.16.0")]
-impl fmt::Debug for Sink {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Sink").finish_non_exhaustive()
- }
-}
diff --git a/library/std/src/io/util/tests.rs b/library/std/src/io/util/tests.rs
index 1baa94e64..6de91e29c 100644
--- a/library/std/src/io/util/tests.rs
+++ b/library/std/src/io/util/tests.rs
@@ -18,7 +18,7 @@ fn empty_reads() {
assert_eq!(e.read(&mut []).unwrap(), 0);
assert_eq!(e.read(&mut [0]).unwrap(), 0);
assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0);
- assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0);
+ assert_eq!(Read::by_ref(&mut e).read(&mut [0; 1024]).unwrap(), 0);
let buf: &mut [MaybeUninit<_>] = &mut [];
let mut buf: BorrowedBuf<'_> = buf.into();
@@ -40,7 +40,7 @@ fn empty_reads() {
let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1024];
let mut buf: BorrowedBuf<'_> = buf.into();
- e.by_ref().read_buf(buf.unfilled()).unwrap();
+ Read::by_ref(&mut e).read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.len(), 0);
assert_eq!(buf.init_len(), 0);
}
@@ -66,6 +66,15 @@ fn empty_seeks() {
}
#[test]
+fn empty_sinks() {
+ let mut e = empty();
+ assert_eq!(e.write(&[]).unwrap(), 0);
+ assert_eq!(e.write(&[0]).unwrap(), 1);
+ assert_eq!(e.write(&[0; 1024]).unwrap(), 1024);
+ assert_eq!(Write::by_ref(&mut e).write(&[0; 1024]).unwrap(), 1024);
+}
+
+#[test]
fn repeat_repeats() {
let mut r = repeat(4);
let mut b = [0; 1024];
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 72b9ad348..0ccbb16b1 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -190,7 +190,7 @@
// To run std tests without x.py without ending up with two copies of std, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no affect there.
+// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
// miri-test-libstd also prefers to make std use the sysroot versions of the dependencies.
#![cfg_attr(feature = "miri-test-libstd", feature(rustc_private))]
@@ -220,8 +220,10 @@
#![warn(missing_debug_implementations)]
#![allow(explicit_outlives_requirements)]
#![allow(unused_lifetimes)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
#![deny(rustc::existing_doc_keyword)]
#![deny(fuzzy_provenance_casts)]
+#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
// Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind`
#![deny(ffi_unwind_calls)]
// std may use features in a platform-specific way
@@ -272,6 +274,7 @@
#![feature(staged_api)]
#![feature(thread_local)]
#![feature(try_blocks)]
+#![feature(type_alias_impl_trait)]
#![feature(utf8_chunks)]
// tidy-alphabetical-end
//
@@ -286,16 +289,17 @@
#![feature(exact_size_is_empty)]
#![feature(exclusive_wrapper)]
#![feature(extend_one)]
+#![feature(float_gamma)]
#![feature(float_minimum_maximum)]
#![feature(float_next_up_down)]
#![feature(hasher_prefixfree_extras)]
#![feature(hashmap_internals)]
-#![feature(int_roundings)]
#![feature(ip)]
#![feature(ip_in_core)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_write_slice)]
+#![feature(offset_of)]
#![feature(panic_can_unwind)]
#![feature(panic_info_message)]
#![feature(panic_internals)]
@@ -303,7 +307,6 @@
#![feature(pointer_is_aligned)]
#![feature(portable_simd)]
#![feature(prelude_2024)]
-#![feature(provide_any)]
#![feature(ptr_as_uninit)]
#![feature(raw_os_nonzero)]
#![feature(round_ties_even)]
@@ -393,9 +396,15 @@ extern crate libc;
#[allow(unused_extern_crates)]
extern crate unwind;
+// FIXME: #94122 this extern crate definition only exists here to stop
+// miniz_oxide docs leaking into std docs. Find a better way to do it.
+// Remove the exclusion from the tidy platform check when this is removed.
#[doc(masked)]
#[allow(unused_extern_crates)]
-#[cfg(feature = "miniz_oxide")]
+#[cfg(all(
+ not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))),
+ feature = "miniz_oxide"
+))]
extern crate miniz_oxide;
// During testing, this crate is not actually the "real" std library, but rather
@@ -542,7 +551,7 @@ pub mod time;
// Pull in `std_float` crate into std. The contents of
// `std_float` are in a different repository: rust-lang/portable-simd.
#[path = "../../portable-simd/crates/std_float/src/lib.rs"]
-#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
+#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn)]
#[allow(rustdoc::bare_urls)]
#[unstable(feature = "portable_simd", issue = "86656")]
mod std_float;
@@ -603,7 +612,6 @@ pub mod alloc;
// Private support modules
mod panicking;
-mod personality;
#[path = "../../backtrace/src/lib.rs"]
#[allow(dead_code, unused_attributes, fuzzy_provenance_casts)]
diff --git a/library/std/src/net/tcp.rs b/library/std/src/net/tcp.rs
index 141a18a42..32fd54c8e 100644
--- a/library/std/src/net/tcp.rs
+++ b/library/std/src/net/tcp.rs
@@ -647,6 +647,7 @@ impl Write for TcpStream {
self.0.is_write_vectored()
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
@@ -685,6 +686,7 @@ impl Write for &TcpStream {
self.0.is_write_vectored()
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
diff --git a/library/std/src/os/android/raw.rs b/library/std/src/os/android/raw.rs
index a255d0320..175f8eac9 100644
--- a/library/std/src/os/android/raw.rs
+++ b/library/std/src/os/android/raw.rs
@@ -21,26 +21,26 @@ pub use self::arch::{blkcnt_t, blksize_t, dev_t, ino_t, mode_t, nlink_t, off_t,
#[cfg(any(target_arch = "arm", target_arch = "x86"))]
mod arch {
- use crate::os::raw::{c_longlong, c_uchar, c_uint, c_ulong, c_ulonglong};
+ use crate::os::raw::{c_long, c_longlong, c_uchar, c_uint, c_ulong, c_ulonglong};
use crate::os::unix::raw::{gid_t, uid_t};
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type dev_t = u64;
+ pub type dev_t = u32;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type mode_t = u32;
+ pub type mode_t = c_uint;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type blkcnt_t = u64;
+ pub type blkcnt_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type blksize_t = u64;
+ pub type blksize_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type ino_t = u64;
+ pub type ino_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type nlink_t = u64;
+ pub type nlink_t = u32;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type off_t = u64;
+ pub type off_t = i32;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type time_t = i64;
+ pub type time_t = c_long;
#[repr(C)]
#[derive(Clone)]
@@ -70,45 +70,47 @@ mod arch {
pub st_blksize: u32,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_blocks: c_ulonglong,
+
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime: c_ulong,
+ pub st_atime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime_nsec: c_ulong,
+ pub st_atime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime: c_ulong,
+ pub st_mtime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime_nsec: c_ulong,
+ pub st_mtime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime: c_ulong,
+ pub st_ctime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime_nsec: c_ulong,
+ pub st_ctime_nsec: c_long,
+
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_ino: c_ulonglong,
}
}
-#[cfg(target_arch = "aarch64")]
+#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
mod arch {
- use crate::os::raw::{c_uchar, c_ulong};
+ use crate::os::raw::{c_int, c_long, c_uint, c_ulong};
use crate::os::unix::raw::{gid_t, uid_t};
#[stable(feature = "raw_ext", since = "1.1.0")]
pub type dev_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type mode_t = u32;
+ pub type mode_t = c_uint;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type blkcnt_t = u64;
+ pub type blkcnt_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type blksize_t = u64;
+ pub type blksize_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type ino_t = u64;
+ pub type ino_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type nlink_t = u64;
+ pub type nlink_t = u32;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type off_t = u64;
+ pub type off_t = i64;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type time_t = i64;
+ pub type time_t = c_long;
#[repr(C)]
#[derive(Clone)]
@@ -117,9 +119,7 @@ mod arch {
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_dev: dev_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub __pad0: [c_uchar; 4],
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub __st_ino: ino_t,
+ pub st_ino: ino_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_mode: mode_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
@@ -131,27 +131,33 @@ mod arch {
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_rdev: dev_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub __pad3: [c_uchar; 4],
+ pub __pad1: c_ulong,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_size: off_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blksize: blksize_t,
+ pub st_blksize: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_int,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blocks: blkcnt_t,
+ pub st_blocks: c_long,
+
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_atime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime_nsec: c_ulong,
+ pub st_atime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_mtime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime_nsec: c_ulong,
+ pub st_mtime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_ctime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime_nsec: c_ulong,
+ pub st_ctime_nsec: c_long,
+
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ino: ino_t,
+ pub __unused4: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused5: c_uint,
}
}
@@ -163,20 +169,20 @@ mod arch {
#[stable(feature = "raw_ext", since = "1.1.0")]
pub type dev_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type mode_t = u32;
+ pub type mode_t = c_uint;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type blkcnt_t = u64;
+ pub type blkcnt_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type blksize_t = u64;
+ pub type blksize_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type ino_t = u64;
+ pub type ino_t = c_ulong;
#[stable(feature = "raw_ext", since = "1.1.0")]
pub type nlink_t = u32;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type off_t = u64;
+ pub type off_t = i64;
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub type time_t = i64;
+ pub type time_t = c_long;
#[repr(C)]
#[derive(Clone)]
@@ -195,25 +201,30 @@ mod arch {
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_gid: gid_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad0: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
pub st_rdev: dev_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_size: i64,
+ pub st_size: off_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_blksize: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_blocks: c_long,
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime: c_ulong,
+ pub st_atime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime_nsec: c_ulong,
+ pub st_mtime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime: c_ulong,
+ pub st_mtime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime_nsec: c_ulong,
+ pub st_ctime: time_t,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime: c_ulong,
+ pub st_ctime_nsec: c_long,
+
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime_nsec: c_ulong,
- __unused: [c_long; 3],
+ pub __pad3: [c_long; 3],
}
}
diff --git a/library/std/src/os/l4re/raw.rs b/library/std/src/os/l4re/raw.rs
index b3f7439f8..12c029328 100644
--- a/library/std/src/os/l4re/raw.rs
+++ b/library/std/src/os/l4re/raw.rs
@@ -27,6 +27,7 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
#[cfg(any(
target_arch = "x86",
target_arch = "m68k",
+ target_arch = "csky",
target_arch = "powerpc",
target_arch = "sparc",
target_arch = "arm",
diff --git a/library/std/src/os/linux/raw.rs b/library/std/src/os/linux/raw.rs
index c55ca8ba2..a568f9b26 100644
--- a/library/std/src/os/linux/raw.rs
+++ b/library/std/src/os/linux/raw.rs
@@ -27,6 +27,7 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
#[cfg(any(
target_arch = "x86",
target_arch = "m68k",
+ target_arch = "csky",
target_arch = "powerpc",
target_arch = "sparc",
target_arch = "arm",
@@ -94,7 +95,7 @@ mod arch {
}
}
-#[cfg(target_arch = "mips")]
+#[cfg(any(target_arch = "mips", target_arch = "mips32r6"))]
mod arch {
use crate::os::raw::{c_long, c_ulong};
@@ -233,6 +234,7 @@ mod arch {
#[cfg(any(
target_arch = "loongarch64",
target_arch = "mips64",
+ target_arch = "mips64r6",
target_arch = "s390x",
target_arch = "sparc64",
target_arch = "riscv64",
diff --git a/library/std/src/os/raw/mod.rs b/library/std/src/os/raw/mod.rs
index 19d0ffb2e..5b302e3c2 100644
--- a/library/std/src/os/raw/mod.rs
+++ b/library/std/src/os/raw/mod.rs
@@ -9,11 +9,6 @@ macro_rules! alias_core_ffi {
($($t:ident)*) => {$(
#[stable(feature = "raw_os", since = "1.1.0")]
#[doc = include_str!(concat!("../../../../core/src/ffi/", stringify!($t), ".md"))]
- // Make this type alias appear cfg-dependent so that Clippy does not suggest
- // replacing expressions like `0 as c_char` with `0_i8`/`0_u8`. This #[cfg(all())] can be
- // removed after the false positive in https://github.com/rust-lang/rust-clippy/issues/8093
- // is fixed.
- #[cfg(all())]
#[doc(cfg(all()))]
pub type $t = core::ffi::$t;
)*}
diff --git a/library/std/src/os/unix/fs.rs b/library/std/src/os/unix/fs.rs
index 1e1c36931..029de8fbf 100644
--- a/library/std/src/os/unix/fs.rs
+++ b/library/std/src/os/unix/fs.rs
@@ -149,7 +149,36 @@ pub trait FileExt {
/// Note that similar to [`File::write`], it is not an error to return a
/// short write.
///
+ /// # Bug
+ /// On some systems, `write_at` utilises [`pwrite64`] to write to files.
+ /// However, this syscall has a [bug] where files opened with the `O_APPEND`
+ /// flag fail to respect the offset parameter, always appending to the end
+ /// of the file instead.
+ ///
+ /// It is possible to inadvertently set this flag, like in the example below.
+ /// Therefore, it is important to be vigilant while changing options to mitigate
+ /// unexpected behaviour.
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io;
+ /// use std::os::unix::prelude::FileExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// // Open a file with the append option (sets the `O_APPEND` flag)
+ /// let file = File::options().append(true).open("foo.txt")?;
+ ///
+ /// // We attempt to write at offset 10; instead appended to EOF
+ /// file.write_at(b"sushi", 10)?;
+ ///
+ /// // foo.txt is 5 bytes long instead of 15
+ /// Ok(())
+ /// }
+ /// ```
+ ///
/// [`File::write`]: fs::File::write
+ /// [`pwrite64`]: https://man7.org/linux/man-pages/man2/pwrite.2.html
+ /// [bug]: https://man7.org/linux/man-pages/man2/pwrite.2.html#BUGS
///
/// # Examples
///
@@ -159,7 +188,7 @@ pub trait FileExt {
/// use std::os::unix::prelude::FileExt;
///
/// fn main() -> io::Result<()> {
- /// let file = File::open("foo.txt")?;
+ /// let file = File::create("foo.txt")?;
///
/// // We now write at the offset 10.
/// file.write_at(b"sushi", 10)?;
@@ -971,7 +1000,6 @@ impl DirBuilderExt for fs::DirBuilder {
/// # Examples
///
/// ```no_run
-/// #![feature(unix_chown)]
/// use std::os::unix::fs;
///
/// fn main() -> std::io::Result<()> {
@@ -979,7 +1007,7 @@ impl DirBuilderExt for fs::DirBuilder {
/// Ok(())
/// }
/// ```
-#[unstable(feature = "unix_chown", issue = "88989")]
+#[stable(feature = "unix_chown", since = "1.73.0")]
pub fn chown<P: AsRef<Path>>(dir: P, uid: Option<u32>, gid: Option<u32>) -> io::Result<()> {
sys::fs::chown(dir.as_ref(), uid.unwrap_or(u32::MAX), gid.unwrap_or(u32::MAX))
}
@@ -991,7 +1019,6 @@ pub fn chown<P: AsRef<Path>>(dir: P, uid: Option<u32>, gid: Option<u32>) -> io::
/// # Examples
///
/// ```no_run
-/// #![feature(unix_chown)]
/// use std::os::unix::fs;
///
/// fn main() -> std::io::Result<()> {
@@ -1000,7 +1027,7 @@ pub fn chown<P: AsRef<Path>>(dir: P, uid: Option<u32>, gid: Option<u32>) -> io::
/// Ok(())
/// }
/// ```
-#[unstable(feature = "unix_chown", issue = "88989")]
+#[stable(feature = "unix_chown", since = "1.73.0")]
pub fn fchown<F: AsFd>(fd: F, uid: Option<u32>, gid: Option<u32>) -> io::Result<()> {
sys::fs::fchown(fd.as_fd().as_raw_fd(), uid.unwrap_or(u32::MAX), gid.unwrap_or(u32::MAX))
}
@@ -1013,7 +1040,6 @@ pub fn fchown<F: AsFd>(fd: F, uid: Option<u32>, gid: Option<u32>) -> io::Result<
/// # Examples
///
/// ```no_run
-/// #![feature(unix_chown)]
/// use std::os::unix::fs;
///
/// fn main() -> std::io::Result<()> {
@@ -1021,7 +1047,7 @@ pub fn fchown<F: AsFd>(fd: F, uid: Option<u32>, gid: Option<u32>) -> io::Result<
/// Ok(())
/// }
/// ```
-#[unstable(feature = "unix_chown", issue = "88989")]
+#[stable(feature = "unix_chown", since = "1.73.0")]
pub fn lchown<P: AsRef<Path>>(dir: P, uid: Option<u32>, gid: Option<u32>) -> io::Result<()> {
sys::fs::lchown(dir.as_ref(), uid.unwrap_or(u32::MAX), gid.unwrap_or(u32::MAX))
}
diff --git a/library/std/src/os/unix/net/stream.rs b/library/std/src/os/unix/net/stream.rs
index e20170873..41290e001 100644
--- a/library/std/src/os/unix/net/stream.rs
+++ b/library/std/src/os/unix/net/stream.rs
@@ -712,6 +712,7 @@ impl<'a> io::Write for &'a UnixStream {
self.0.is_write_vectored()
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
diff --git a/library/std/src/os/unix/net/tests.rs b/library/std/src/os/unix/net/tests.rs
index 39f10c50d..3d4302e66 100644
--- a/library/std/src/os/unix/net/tests.rs
+++ b/library/std/src/os/unix/net/tests.rs
@@ -23,6 +23,7 @@ macro_rules! or_panic {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn basic() {
let dir = tmpdir();
let socket_path = dir.path().join("sock");
@@ -93,6 +94,7 @@ fn pair() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn try_clone() {
let dir = tmpdir();
let socket_path = dir.path().join("sock");
@@ -119,6 +121,7 @@ fn try_clone() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn iter() {
let dir = tmpdir();
let socket_path = dir.path().join("sock");
@@ -168,6 +171,7 @@ fn long_path() {
#[test]
#[cfg(not(target_os = "nto"))]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn timeouts() {
let dir = tmpdir();
let socket_path = dir.path().join("sock");
@@ -195,6 +199,7 @@ fn timeouts() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_read_timeout() {
let dir = tmpdir();
let socket_path = dir.path().join("sock");
@@ -214,6 +219,7 @@ fn test_read_timeout() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_read_with_timeout() {
let dir = tmpdir();
let socket_path = dir.path().join("sock");
@@ -241,6 +247,7 @@ fn test_read_with_timeout() {
// Ensure the `set_read_timeout` and `set_write_timeout` calls return errors
// when passed zero Durations
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_stream_timeout_zero_duration() {
let dir = tmpdir();
let socket_path = dir.path().join("sock");
@@ -260,6 +267,7 @@ fn test_unix_stream_timeout_zero_duration() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_datagram() {
let dir = tmpdir();
let path1 = dir.path().join("sock1");
@@ -276,6 +284,7 @@ fn test_unix_datagram() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unnamed_unix_datagram() {
let dir = tmpdir();
let path1 = dir.path().join("sock1");
@@ -293,6 +302,7 @@ fn test_unnamed_unix_datagram() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_datagram_connect_to_recv_addr() {
let dir = tmpdir();
let path1 = dir.path().join("sock1");
@@ -317,6 +327,7 @@ fn test_unix_datagram_connect_to_recv_addr() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_connect_unix_datagram() {
let dir = tmpdir();
let path1 = dir.path().join("sock1");
@@ -343,6 +354,7 @@ fn test_connect_unix_datagram() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_datagram_recv() {
let dir = tmpdir();
let path1 = dir.path().join("sock1");
@@ -385,6 +397,7 @@ fn datagram_pair() {
// Ensure the `set_read_timeout` and `set_write_timeout` calls return errors
// when passed zero Durations
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_datagram_timeout_zero_duration() {
let dir = tmpdir();
let path = dir.path().join("sock");
@@ -529,6 +542,7 @@ fn test_abstract_no_pathname_and_not_unnamed() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_stream_peek() {
let (txdone, rxdone) = crate::sync::mpsc::channel();
@@ -561,6 +575,7 @@ fn test_unix_stream_peek() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_datagram_peek() {
let dir = tmpdir();
let path1 = dir.path().join("sock");
@@ -585,6 +600,7 @@ fn test_unix_datagram_peek() {
}
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_unix_datagram_peek_from() {
let dir = tmpdir();
let path1 = dir.path().join("sock");
@@ -648,6 +664,7 @@ fn test_send_vectored_fds_unix_stream() {
#[cfg(any(target_os = "android", target_os = "linux", target_os = "freebsd"))]
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_send_vectored_with_ancillary_to_unix_datagram() {
fn getpid() -> libc::pid_t {
unsafe { libc::getpid() }
@@ -715,6 +732,7 @@ fn test_send_vectored_with_ancillary_to_unix_datagram() {
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
+#[cfg_attr(target_os = "android", ignore)] // Android SELinux rules prevent creating Unix sockets
fn test_send_vectored_with_ancillary_unix_datagram() {
let dir = tmpdir();
let path1 = dir.path().join("sock1");
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index a6a370409..a0c21f704 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -234,6 +234,7 @@ where
*hook = Hook::Custom(Box::new(move |info| hook_fn(&prev, info)));
}
+/// The default panic handler.
fn default_hook(info: &PanicInfo<'_>) {
// If this is a double panic, make sure that we print a backtrace
// for this panic. Otherwise only print it if logging is enabled.
@@ -257,7 +258,7 @@ fn default_hook(info: &PanicInfo<'_>) {
let name = thread.as_ref().and_then(|t| t.name()).unwrap_or("<unnamed>");
let write = |err: &mut dyn crate::io::Write| {
- let _ = writeln!(err, "thread '{name}' panicked at '{msg}', {location}");
+ let _ = writeln!(err, "thread '{name}' panicked at {location}:\n{msg}");
static FIRST_PANIC: AtomicBool = AtomicBool::new(true);
@@ -272,7 +273,8 @@ fn default_hook(info: &PanicInfo<'_>) {
if FIRST_PANIC.swap(false, Ordering::SeqCst) {
let _ = writeln!(
err,
- "note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace"
+ "note: run with `RUST_BACKTRACE=1` environment variable to display a \
+ backtrace"
);
}
}
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index 28cd3c4e4..5842c096f 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -1158,12 +1158,12 @@ impl FusedIterator for Ancestors<'_> {}
/// Which method works best depends on what kind of situation you're in.
#[cfg_attr(not(test), rustc_diagnostic_item = "PathBuf")]
#[stable(feature = "rust1", since = "1.0.0")]
-// FIXME:
// `PathBuf::as_mut_vec` current implementation relies
// on `PathBuf` being layout-compatible with `Vec<u8>`.
-// When attribute privacy is implemented, `PathBuf` should be annotated as `#[repr(transparent)]`.
-// Anyway, `PathBuf` representation and layout are considered implementation detail, are
-// not documented and must not be relied upon.
+// However, `PathBuf` layout is considered an implementation detail and must not be relied upon. We
+// want `repr(transparent)` but we don't want it to show up in rustdoc, so we hide it under
+// `cfg(doc)`. This is an ad-hoc implementation of attribute privacy.
+#[cfg_attr(not(doc), repr(transparent))]
pub struct PathBuf {
inner: OsString,
}
@@ -1983,12 +1983,12 @@ impl AsRef<OsStr> for PathBuf {
/// ```
#[cfg_attr(not(test), rustc_diagnostic_item = "Path")]
#[stable(feature = "rust1", since = "1.0.0")]
-// FIXME:
// `Path::new` current implementation relies
// on `Path` being layout-compatible with `OsStr`.
-// When attribute privacy is implemented, `Path` should be annotated as `#[repr(transparent)]`.
-// Anyway, `Path` representation and layout are considered implementation detail, are
-// not documented and must not be relied upon.
+// However, `Path` layout is considered an implementation detail and must not be relied upon. We
+// want `repr(transparent)` but we don't want it to show up in rustdoc, so we hide it under
+// `cfg(doc)`. This is an ad-hoc implementation of attribute privacy.
+#[cfg_attr(not(doc), repr(transparent))]
pub struct Path {
inner: OsStr,
}
@@ -2608,9 +2608,27 @@ impl Path {
}
fn _with_extension(&self, extension: &OsStr) -> PathBuf {
- let mut buf = self.to_path_buf();
- buf.set_extension(extension);
- buf
+ let self_len = self.as_os_str().len();
+ let self_bytes = self.as_os_str().as_os_str_bytes();
+
+ let (new_capacity, slice_to_copy) = match self.extension() {
+ None => {
+ // Enough capacity for the extension and the dot
+ let capacity = self_len + extension.len() + 1;
+ let whole_path = self_bytes.iter();
+ (capacity, whole_path)
+ }
+ Some(previous_extension) => {
+ let capacity = self_len + extension.len() - previous_extension.len();
+ let path_till_dot = self_bytes[..self_len - previous_extension.len()].iter();
+ (capacity, path_till_dot)
+ }
+ };
+
+ let mut new_path = PathBuf::with_capacity(new_capacity);
+ new_path.as_mut_vec().extend(slice_to_copy);
+ new_path.set_extension(extension);
+ new_path
}
/// Produces an iterator over the [`Component`]s of the path.
diff --git a/library/std/src/path/tests.rs b/library/std/src/path/tests.rs
index dd307022c..f12ffbf2e 100644
--- a/library/std/src/path/tests.rs
+++ b/library/std/src/path/tests.rs
@@ -1183,7 +1183,7 @@ pub fn test_prefix_ext() {
#[test]
pub fn test_push() {
macro_rules! tp (
- ($path:expr, $push:expr, $expected:expr) => ( {
+ ($path:expr, $push:expr, $expected:expr) => ({
let mut actual = PathBuf::from($path);
actual.push($push);
assert!(actual.to_str() == Some($expected),
@@ -1281,7 +1281,7 @@ pub fn test_push() {
#[test]
pub fn test_pop() {
macro_rules! tp (
- ($path:expr, $expected:expr, $output:expr) => ( {
+ ($path:expr, $expected:expr, $output:expr) => ({
let mut actual = PathBuf::from($path);
let output = actual.pop();
assert!(actual.to_str() == Some($expected) && output == $output,
@@ -1335,7 +1335,7 @@ pub fn test_pop() {
#[test]
pub fn test_set_file_name() {
macro_rules! tfn (
- ($path:expr, $file:expr, $expected:expr) => ( {
+ ($path:expr, $file:expr, $expected:expr) => ({
let mut p = PathBuf::from($path);
p.set_file_name($file);
assert!(p.to_str() == Some($expected),
@@ -1369,7 +1369,7 @@ pub fn test_set_file_name() {
#[test]
pub fn test_set_extension() {
macro_rules! tfe (
- ($path:expr, $ext:expr, $expected:expr, $output:expr) => ( {
+ ($path:expr, $ext:expr, $expected:expr, $output:expr) => ({
let mut p = PathBuf::from($path);
let output = p.set_extension($ext);
assert!(p.to_str() == Some($expected) && output == $output,
@@ -1395,6 +1395,46 @@ pub fn test_set_extension() {
}
#[test]
+pub fn test_with_extension() {
+ macro_rules! twe (
+ ($input:expr, $extension:expr, $expected:expr) => ({
+ let input = Path::new($input);
+ let output = input.with_extension($extension);
+
+ assert!(
+ output.to_str() == Some($expected),
+ "calling Path::new({:?}).with_extension({:?}): Expected {:?}, got {:?}",
+ $input, $extension, $expected, output,
+ );
+ });
+ );
+
+ twe!("foo", "txt", "foo.txt");
+ twe!("foo.bar", "txt", "foo.txt");
+ twe!("foo.bar.baz", "txt", "foo.bar.txt");
+ twe!(".test", "txt", ".test.txt");
+ twe!("foo.txt", "", "foo");
+ twe!("foo", "", "foo");
+ twe!("", "foo", "");
+ twe!(".", "foo", ".");
+ twe!("foo/", "bar", "foo.bar");
+ twe!("foo/.", "bar", "foo.bar");
+ twe!("..", "foo", "..");
+ twe!("foo/..", "bar", "foo/..");
+ twe!("/", "foo", "/");
+
+ // New extension is smaller than file name
+ twe!("aaa_aaa_aaa", "bbb_bbb", "aaa_aaa_aaa.bbb_bbb");
+ // New extension is greater than file name
+ twe!("bbb_bbb", "aaa_aaa_aaa", "bbb_bbb.aaa_aaa_aaa");
+
+ // New extension is smaller than previous extension
+ twe!("ccc.aaa_aaa_aaa", "bbb_bbb", "ccc.bbb_bbb");
+ // New extension is greater than previous extension
+ twe!("ccc.bbb_bbb", "aaa_aaa_aaa", "ccc.aaa_aaa_aaa");
+}
+
+#[test]
fn test_eq_receivers() {
use crate::borrow::Cow;
@@ -1669,7 +1709,7 @@ fn into_rc() {
#[test]
fn test_ord() {
macro_rules! ord(
- ($ord:ident, $left:expr, $right:expr) => ( {
+ ($ord:ident, $left:expr, $right:expr) => ({
use core::cmp::Ordering;
let left = Path::new($left);
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index 8f3201b00..7380b45b0 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -280,6 +280,7 @@ impl Write for ChildStdin {
io::Write::is_write_vectored(&&*self)
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
(&*self).flush()
}
@@ -299,6 +300,7 @@ impl Write for &ChildStdin {
self.inner.is_write_vectored()
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
@@ -558,6 +560,14 @@ impl Command {
/// but this has some implementation limitations on Windows
/// (see issue #37519).
///
+ /// # Platform-specific behavior
+ ///
+ /// Note on Windows: For executable files with the .exe extension,
+ /// it can be omitted when specifying the program for this Command.
+ /// However, if the file has a different extension,
+ /// a filename including the extension needs to be provided,
+ /// otherwise the file won't be found.
+ ///
/// # Examples
///
/// Basic usage:
@@ -1524,6 +1534,15 @@ impl From<fs::File> for Stdio {
#[stable(feature = "process", since = "1.0.0")]
pub struct ExitStatus(imp::ExitStatus);
+/// The default value is one which indicates successful completion.
+#[stable(feature = "process-exitcode-default", since = "1.73.0")]
+impl Default for ExitStatus {
+ fn default() -> Self {
+ // Ideally this would be done by ExitCode::default().into() but that is complicated.
+ ExitStatus::from_inner(imp::ExitStatus::default())
+ }
+}
+
/// Allows extension traits within `std`.
#[unstable(feature = "sealed", issue = "none")]
impl crate::sealed::Sealed for ExitStatus {}
diff --git a/library/std/src/sync/barrier.rs b/library/std/src/sync/barrier.rs
index e39254aa4..ed3c55120 100644
--- a/library/std/src/sync/barrier.rs
+++ b/library/std/src/sync/barrier.rs
@@ -130,11 +130,8 @@ impl Barrier {
let local_gen = lock.generation_id;
lock.count += 1;
if lock.count < self.num_threads {
- // We need a while loop to guard against spurious wakeups.
- // https://en.wikipedia.org/wiki/Spurious_wakeup
- while local_gen == lock.generation_id {
- lock = self.cvar.wait(lock).unwrap();
- }
+ let _guard =
+ self.cvar.wait_while(lock, |state| local_gen == state.generation_id).unwrap();
BarrierWaitResult(false)
} else {
lock.count = 0;
diff --git a/library/std/src/sync/condvar.rs b/library/std/src/sync/condvar.rs
index 76a1b4a2a..9c4b926b7 100644
--- a/library/std/src/sync/condvar.rs
+++ b/library/std/src/sync/condvar.rs
@@ -21,11 +21,11 @@ impl WaitTimeoutResult {
///
/// # Examples
///
- /// This example spawns a thread which will update the boolean value and
- /// then wait 100 milliseconds before notifying the condvar.
+ /// This example spawns a thread which will sleep 20 milliseconds before
+ /// updating a boolean value and then notifying the condvar.
///
- /// The main thread will wait with a timeout on the condvar and then leave
- /// once the boolean has been updated and notified.
+ /// The main thread will wait with a 10 millisecond timeout on the condvar
+ /// and will leave the loop upon timeout.
///
/// ```
/// use std::sync::{Arc, Condvar, Mutex};
@@ -49,14 +49,12 @@ impl WaitTimeoutResult {
///
/// // Wait for the thread to start up.
/// let (lock, cvar) = &*pair;
- /// let mut started = lock.lock().unwrap();
/// loop {
/// // Let's put a timeout on the condvar's wait.
- /// let result = cvar.wait_timeout(started, Duration::from_millis(10)).unwrap();
- /// // 10 milliseconds have passed, or maybe the value changed!
- /// started = result.0;
- /// if *started == true {
- /// // We received the notification and the value has been updated, we can leave.
+ /// let result = cvar.wait_timeout(lock.lock().unwrap(), Duration::from_millis(10)).unwrap();
+ /// // 10 milliseconds have passed.
+ /// if result.1.timed_out() {
+ /// // We timed out and can leave now.
/// break
/// }
/// }
diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs
index a6bc468b0..3598598cf 100644
--- a/library/std/src/sync/lazy_lock.rs
+++ b/library/std/src/sync/lazy_lock.rs
@@ -25,6 +25,8 @@ union Data<T, F> {
///
/// # Examples
///
+/// Initialize static variables with `LazyLock`.
+///
/// ```
/// #![feature(lazy_cell)]
///
@@ -54,6 +56,24 @@ union Data<T, F> {
/// // Some("Hoyten")
/// }
/// ```
+/// Initialize fields with `LazyLock`.
+/// ```
+/// #![feature(lazy_cell)]
+///
+/// use std::sync::LazyLock;
+///
+/// #[derive(Debug)]
+/// struct UseCellLock {
+/// number: LazyLock<u32>,
+/// }
+/// fn main() {
+/// let lock: LazyLock<u32> = LazyLock::new(|| 0u32);
+///
+/// let data = UseCellLock { number: lock };
+/// println!("{}", *data.number);
+/// }
+/// ```
+
#[unstable(feature = "lazy_cell", issue = "109736")]
pub struct LazyLock<T, F = fn() -> T> {
once: Once,
@@ -69,6 +89,15 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
LazyLock { once: Once::new(), data: UnsafeCell::new(Data { f: ManuallyDrop::new(f) }) }
}
+ /// Creates a new lazy value that is already initialized.
+ #[inline]
+ #[cfg(test)]
+ pub(crate) fn preinit(value: T) -> LazyLock<T, F> {
+ let once = Once::new();
+ once.call_once(|| {});
+ LazyLock { once, data: UnsafeCell::new(Data { value: ManuallyDrop::new(value) }) }
+ }
+
/// Consumes this `LazyLock` returning the stored value.
///
/// Returns `Ok(value)` if `Lazy` is initialized and `Err(f)` otherwise.
@@ -193,10 +222,12 @@ impl<T: Default> Default for LazyLock<T> {
#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut d = f.debug_tuple("LazyLock");
match self.get() {
- Some(v) => f.debug_tuple("LazyLock").field(v).finish(),
- None => f.write_str("LazyLock(Uninit)"),
- }
+ Some(v) => d.field(v),
+ None => d.field(&format_args!("<uninit>")),
+ };
+ d.finish()
}
}
diff --git a/library/std/src/sync/mpmc/utils.rs b/library/std/src/sync/mpmc/utils.rs
index d053d69e2..0cbc61160 100644
--- a/library/std/src/sync/mpmc/utils.rs
+++ b/library/std/src/sync/mpmc/utils.rs
@@ -35,7 +35,9 @@ use crate::ops::{Deref, DerefMut};
any(
target_arch = "arm",
target_arch = "mips",
+ target_arch = "mips32r6",
target_arch = "mips64",
+ target_arch = "mips64r6",
target_arch = "riscv64",
),
repr(align(32))
@@ -59,7 +61,9 @@ use crate::ops::{Deref, DerefMut};
target_arch = "powerpc64",
target_arch = "arm",
target_arch = "mips",
+ target_arch = "mips32r6",
target_arch = "mips64",
+ target_arch = "mips64r6",
target_arch = "riscv64",
target_arch = "s390x",
)),
diff --git a/library/std/src/sync/mpsc/mod.rs b/library/std/src/sync/mpsc/mod.rs
index c00134c8b..f92bb1a4b 100644
--- a/library/std/src/sync/mpsc/mod.rs
+++ b/library/std/src/sync/mpsc/mod.rs
@@ -303,12 +303,11 @@ pub struct IntoIter<T> {
rx: Receiver<T>,
}
-/// The sending-half of Rust's asynchronous [`channel`] type. This half can only be
-/// owned by one thread, but it can be cloned to send to other threads.
+/// The sending-half of Rust's asynchronous [`channel`] type.
///
/// Messages can be sent through this channel with [`send`].
///
-/// Note: all senders (the original and the clones) need to be dropped for the receiver
+/// Note: all senders (the original and its clones) need to be dropped for the receiver
/// to stop blocking to receive messages with [`Receiver::recv`].
///
/// [`send`]: Sender::send
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index b8fec6902..b4ae6b7e0 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -490,13 +490,7 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
d.field("data", &&**err.get_ref());
}
Err(TryLockError::WouldBlock) => {
- struct LockedPlaceholder;
- impl fmt::Debug for LockedPlaceholder {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str("<locked>")
- }
- }
- d.field("data", &LockedPlaceholder);
+ d.field("data", &format_args!("<locked>"));
}
}
d.field("poisoned", &self.poison.get());
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index e83bc35ee..e2b7b893c 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -365,10 +365,12 @@ impl<T> Default for OnceLock<T> {
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut d = f.debug_tuple("OnceLock");
match self.get() {
- Some(v) => f.debug_tuple("Once").field(v).finish(),
- None => f.write_str("Once(Uninit)"),
- }
+ Some(v) => d.field(v),
+ None => d.field(&format_args!("<uninit>")),
+ };
+ d.finish()
}
}
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 7c409cb3e..26aaa2414 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -485,13 +485,7 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
d.field("data", &&**err.get_ref());
}
Err(TryLockError::WouldBlock) => {
- struct LockedPlaceholder;
- impl fmt::Debug for LockedPlaceholder {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str("<locked>")
- }
- }
- d.field("data", &LockedPlaceholder);
+ d.field("data", &format_args!("<locked>"));
}
}
d.field("poisoned", &self.poison.get());
diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs
index a5fcbdf39..d58aa6c27 100644
--- a/library/std/src/sys/common/alloc.rs
+++ b/library/std/src/sys/common/alloc.rs
@@ -8,7 +8,9 @@ use crate::ptr;
target_arch = "x86",
target_arch = "arm",
target_arch = "m68k",
+ target_arch = "csky",
target_arch = "mips",
+ target_arch = "mips32r6",
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "sparc",
@@ -24,6 +26,7 @@ pub const MIN_ALIGN: usize = 8;
target_arch = "aarch64",
target_arch = "loongarch64",
target_arch = "mips64",
+ target_arch = "mips64r6",
target_arch = "s390x",
target_arch = "sparc64",
target_arch = "riscv64",
diff --git a/library/std/src/sys/common/thread_local/fast_local.rs b/library/std/src/sys/common/thread_local/fast_local.rs
index bc5da1a18..c0a9619bf 100644
--- a/library/std/src/sys/common/thread_local/fast_local.rs
+++ b/library/std/src/sys/common/thread_local/fast_local.rs
@@ -87,10 +87,6 @@ pub macro thread_local_inner {
static __KEY: $crate::thread::local_impl::Key<$t> =
$crate::thread::local_impl::Key::<$t>::new();
- // FIXME: remove the #[allow(...)] marker when macros don't
- // raise warning for missing/extraneous unsafe blocks anymore.
- // See https://github.com/rust-lang/rust/issues/74838.
- #[allow(unused_unsafe)]
unsafe {
__KEY.get(move || {
if let $crate::option::Option::Some(init) = init {
diff --git a/library/std/src/sys/common/thread_local/os_local.rs b/library/std/src/sys/common/thread_local/os_local.rs
index 5d48ce1e0..7cf291921 100644
--- a/library/std/src/sys/common/thread_local/os_local.rs
+++ b/library/std/src/sys/common/thread_local/os_local.rs
@@ -24,7 +24,6 @@ pub macro thread_local_inner {
const fn __init() -> $t { INIT_EXPR }
static __KEY: $crate::thread::local_impl::Key<$t> =
$crate::thread::local_impl::Key::new();
- #[allow(unused_unsafe)]
unsafe {
__KEY.get(move || {
if let $crate::option::Option::Some(init) = _init {
@@ -59,10 +58,6 @@ pub macro thread_local_inner {
static __KEY: $crate::thread::local_impl::Key<$t> =
$crate::thread::local_impl::Key::new();
- // FIXME: remove the #[allow(...)] marker when macros don't
- // raise warning for missing/extraneous unsafe blocks anymore.
- // See https://github.com/rust-lang/rust/issues/74838.
- #[allow(unused_unsafe)]
unsafe {
__KEY.get(move || {
if let $crate::option::Option::Some(init) = init {
diff --git a/library/std/src/sys/common/thread_local/static_local.rs b/library/std/src/sys/common/thread_local/static_local.rs
index 80322a978..5cb6c541a 100644
--- a/library/std/src/sys/common/thread_local/static_local.rs
+++ b/library/std/src/sys/common/thread_local/static_local.rs
@@ -43,10 +43,6 @@ pub macro thread_local_inner {
static __KEY: $crate::thread::local_impl::Key<$t> =
$crate::thread::local_impl::Key::new();
- // FIXME: remove the #[allow(...)] marker when macros don't
- // raise warning for missing/extraneous unsafe blocks anymore.
- // See https://github.com/rust-lang/rust/issues/74838.
- #[allow(unused_unsafe)]
unsafe {
__KEY.get(move || {
if let $crate::option::Option::Some(init) = init {
diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs
index 4bb735668..6aa4ea7f5 100644
--- a/library/std/src/sys/hermit/fs.rs
+++ b/library/std/src/sys/hermit/fs.rs
@@ -335,6 +335,7 @@ impl File {
false
}
+ #[inline]
pub fn flush(&self) -> io::Result<()> {
Ok(())
}
diff --git a/library/std/src/sys/hermit/os.rs b/library/std/src/sys/hermit/os.rs
index e53dbae61..c79197a9a 100644
--- a/library/std/src/sys/hermit/os.rs
+++ b/library/std/src/sys/hermit/os.rs
@@ -112,6 +112,34 @@ pub struct Env {
iter: vec::IntoIter<(OsString, OsString)>,
}
+// FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+pub struct EnvStrDebug<'a> {
+ slice: &'a [(OsString, OsString)],
+}
+
+impl fmt::Debug for EnvStrDebug<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { slice } = self;
+ f.debug_list()
+ .entries(slice.iter().map(|(a, b)| (a.to_str().unwrap(), b.to_str().unwrap())))
+ .finish()
+ }
+}
+
+impl Env {
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self { iter } = self;
+ EnvStrDebug { slice: iter.as_slice() }
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { iter } = self;
+ f.debug_list().entries(iter.as_slice()).finish()
+ }
+}
+
impl !Send for Env {}
impl !Sync for Env {}
diff --git a/library/std/src/sys/hermit/thread.rs b/library/std/src/sys/hermit/thread.rs
index 2507f7069..332151e40 100644
--- a/library/std/src/sys/hermit/thread.rs
+++ b/library/std/src/sys/hermit/thread.rs
@@ -1,6 +1,5 @@
#![allow(dead_code)]
-use super::unsupported;
use crate::ffi::CStr;
use crate::io;
use crate::mem;
@@ -99,7 +98,7 @@ impl Thread {
}
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
- unsupported()
+ unsafe { Ok(NonZeroUsize::new_unchecked(abi::get_processor_count())) }
}
pub mod guard {
diff --git a/library/std/src/sys/hermit/time.rs b/library/std/src/sys/hermit/time.rs
index 5440d85df..7d91460ab 100644
--- a/library/std/src/sys/hermit/time.rs
+++ b/library/std/src/sys/hermit/time.rs
@@ -40,7 +40,7 @@ impl Timespec {
}
fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> {
- let mut secs = self.tv_sec.checked_add_unsigned(other.as_secs())?;
+ let mut secs = self.t.tv_sec.checked_add_unsigned(other.as_secs())?;
// Nano calculations can't overflow because nanos are <1B which fit
// in a u32.
@@ -53,7 +53,7 @@ impl Timespec {
}
fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> {
- let mut secs = self.tv_sec.checked_sub_unsigned(other.as_secs())?;
+ let mut secs = self.t.tv_sec.checked_sub_unsigned(other.as_secs())?;
// Similar to above, nanos can't overflow.
let mut nsec = self.t.tv_nsec as i32 - other.subsec_nanos() as i32;
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
index c72be1380..beea3f23c 100644
--- a/library/std/src/sys/mod.rs
+++ b/library/std/src/sys/mod.rs
@@ -23,6 +23,7 @@
#![allow(missing_debug_implementations)]
pub mod common;
+mod personality;
cfg_if::cfg_if! {
if #[cfg(unix)] {
@@ -60,3 +61,52 @@ cfg_if::cfg_if! {
pub const FULL_BACKTRACE_DEFAULT: bool = false;
}
}
+
+#[cfg(not(test))]
+cfg_if::cfg_if! {
+ if #[cfg(target_os = "android")] {
+ pub use self::android::log2f32;
+ pub use self::android::log2f64;
+ } else {
+ #[inline]
+ pub fn log2f32(n: f32) -> f32 {
+ unsafe { crate::intrinsics::log2f32(n) }
+ }
+
+ #[inline]
+ pub fn log2f64(n: f64) -> f64 {
+ unsafe { crate::intrinsics::log2f64(n) }
+ }
+ }
+}
+
+// Solaris/Illumos requires a wrapper around log, log2, and log10 functions
+// because of their non-standard behavior (e.g., log(-n) returns -Inf instead
+// of expected NaN).
+#[cfg(not(test))]
+#[cfg(any(target_os = "solaris", target_os = "illumos"))]
+#[inline]
+pub fn log_wrapper<F: Fn(f64) -> f64>(n: f64, log_fn: F) -> f64 {
+ if n.is_finite() {
+ if n > 0.0 {
+ log_fn(n)
+ } else if n == 0.0 {
+ f64::NEG_INFINITY // log(0) = -Inf
+ } else {
+ f64::NAN // log(-n) = NaN
+ }
+ } else if n.is_nan() {
+ n // log(NaN) = NaN
+ } else if n > 0.0 {
+ n // log(Inf) = Inf
+ } else {
+ f64::NAN // log(-Inf) = NaN
+ }
+}
+
+#[cfg(not(test))]
+#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
+#[inline]
+pub fn log_wrapper<F: Fn(f64) -> f64>(n: f64, log_fn: F) -> f64 {
+ log_fn(n)
+}
diff --git a/library/std/src/personality/dwarf/eh.rs b/library/std/src/sys/personality/dwarf/eh.rs
index 79624703a..79624703a 100644
--- a/library/std/src/personality/dwarf/eh.rs
+++ b/library/std/src/sys/personality/dwarf/eh.rs
diff --git a/library/std/src/personality/dwarf/mod.rs b/library/std/src/sys/personality/dwarf/mod.rs
index 652fbe95a..652fbe95a 100644
--- a/library/std/src/personality/dwarf/mod.rs
+++ b/library/std/src/sys/personality/dwarf/mod.rs
diff --git a/library/std/src/personality/dwarf/tests.rs b/library/std/src/sys/personality/dwarf/tests.rs
index 1644f3708..1644f3708 100644
--- a/library/std/src/personality/dwarf/tests.rs
+++ b/library/std/src/sys/personality/dwarf/tests.rs
diff --git a/library/std/src/personality/emcc.rs b/library/std/src/sys/personality/emcc.rs
index cb52ae89b..cb52ae89b 100644
--- a/library/std/src/personality/emcc.rs
+++ b/library/std/src/sys/personality/emcc.rs
diff --git a/library/std/src/personality/gcc.rs b/library/std/src/sys/personality/gcc.rs
index 6552d96ca..e477a0cd7 100644
--- a/library/std/src/personality/gcc.rs
+++ b/library/std/src/sys/personality/gcc.rs
@@ -59,9 +59,17 @@ const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1 / X0, X1
#[cfg(target_arch = "m68k")]
const UNWIND_DATA_REG: (i32, i32) = (0, 1); // D0, D1
-#[cfg(any(target_arch = "mips", target_arch = "mips64"))]
+#[cfg(any(
+ target_arch = "mips",
+ target_arch = "mips32r6",
+ target_arch = "mips64",
+ target_arch = "mips64r6"
+))]
const UNWIND_DATA_REG: (i32, i32) = (4, 5); // A0, A1
+#[cfg(target_arch = "csky")]
+const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1
+
#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]
const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 / X3, X4
diff --git a/library/std/src/personality.rs b/library/std/src/sys/personality/mod.rs
index 386a399f5..386a399f5 100644
--- a/library/std/src/personality.rs
+++ b/library/std/src/sys/personality/mod.rs
diff --git a/library/std/src/sys/sgx/os.rs b/library/std/src/sys/sgx/os.rs
index 5da0257f3..86f4c7d3d 100644
--- a/library/std/src/sys/sgx/os.rs
+++ b/library/std/src/sys/sgx/os.rs
@@ -96,14 +96,61 @@ fn create_env_store() -> &'static EnvStore {
unsafe { &*(ENV.load(Ordering::Relaxed) as *const EnvStore) }
}
-pub type Env = vec::IntoIter<(OsString, OsString)>;
+pub struct Env {
+ iter: vec::IntoIter<(OsString, OsString)>,
+}
+
+// FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+pub struct EnvStrDebug<'a> {
+ slice: &'a [(OsString, OsString)],
+}
+
+impl fmt::Debug for EnvStrDebug<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { slice } = self;
+ f.debug_list()
+ .entries(slice.iter().map(|(a, b)| (a.to_str().unwrap(), b.to_str().unwrap())))
+ .finish()
+ }
+}
+
+impl Env {
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self { iter } = self;
+ EnvStrDebug { slice: iter.as_slice() }
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { iter } = self;
+ f.debug_list().entries(iter.as_slice()).finish()
+ }
+}
+
+impl !Send for Env {}
+impl !Sync for Env {}
+
+impl Iterator for Env {
+ type Item = (OsString, OsString);
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ self.iter.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
pub fn env() -> Env {
let clone_to_vec = |map: &HashMap<OsString, OsString>| -> Vec<_> {
map.iter().map(|(k, v)| (k.clone(), v.clone())).collect()
};
- get_env_store().map(|env| clone_to_vec(&env.lock().unwrap())).unwrap_or_default().into_iter()
+ let iter = get_env_store()
+ .map(|env| clone_to_vec(&env.lock().unwrap()))
+ .unwrap_or_default()
+ .into_iter();
+ Env { iter }
}
pub fn getenv(k: &OsStr) -> Option<OsString> {
diff --git a/library/std/src/sys/sgx/thread.rs b/library/std/src/sys/sgx/thread.rs
index 1608b8cb6..7ac9d1d64 100644
--- a/library/std/src/sys/sgx/thread.rs
+++ b/library/std/src/sys/sgx/thread.rs
@@ -121,8 +121,16 @@ impl Thread {
rtassert!(wait_error.kind() == io::ErrorKind::WouldBlock);
}
+ /// SGX should protect in-enclave data from the outside (attacker),
+ /// so there should be no data leakage to the OS,
+ /// and therefore also no 1-1 mapping between SGX thread names and OS thread names.
+ ///
+ /// This is why the method is intentionally a no-op.
pub fn set_name(_name: &CStr) {
- // FIXME: could store this pointer in TLS somewhere
+ // Note that the internally visible SGX thread name is already provided
+ // by the platform-agnostic (target-agnostic) Rust thread code.
+ // This can be observed in the [`std::thread::tests::test_named_thread`] test,
+ // which succeeds as-is with the SGX target.
}
pub fn sleep(dur: Duration) {
diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs
index 6135921f0..9f4e66d62 100644
--- a/library/std/src/sys/solid/os.rs
+++ b/library/std/src/sys/solid/os.rs
@@ -81,10 +81,42 @@ pub fn current_exe() -> io::Result<PathBuf> {
static ENV_LOCK: RwLock<()> = RwLock::new(());
+pub fn env_read_lock() -> impl Drop {
+ ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
+}
+
pub struct Env {
iter: vec::IntoIter<(OsString, OsString)>,
}
+// FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+pub struct EnvStrDebug<'a> {
+ slice: &'a [(OsString, OsString)],
+}
+
+impl fmt::Debug for EnvStrDebug<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { slice } = self;
+ f.debug_list()
+ .entries(slice.iter().map(|(a, b)| (a.to_str().unwrap(), b.to_str().unwrap())))
+ .finish()
+ }
+}
+
+impl Env {
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self { iter } = self;
+ EnvStrDebug { slice: iter.as_slice() }
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { iter } = self;
+ f.debug_list().entries(iter.as_slice()).finish()
+ }
+}
+
impl !Send for Env {}
impl !Sync for Env {}
@@ -106,7 +138,7 @@ pub fn env() -> Env {
}
unsafe {
- let _guard = ENV_LOCK.read();
+ let _guard = env_read_lock();
let mut result = Vec::new();
if !environ.is_null() {
while !(*environ).is_null() {
@@ -140,17 +172,21 @@ pub fn env() -> Env {
pub fn getenv(k: &OsStr) -> Option<OsString> {
// environment variables with a nul byte can't be set, so their value is
// always None as well
- let s = run_with_cstr(k.as_bytes(), |k| {
- let _guard = ENV_LOCK.read();
- Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char)
- })
- .ok()?;
+ run_with_cstr(k.as_bytes(), |k| {
+ let _guard = env_read_lock();
+ let v = unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
- }
+ if v.is_null() {
+ Ok(None)
+ } else {
+ // SAFETY: `v` cannot be mutated while executing this line since we hold a read lock
+ let bytes = unsafe { CStr::from_ptr(v) }.to_bytes().to_vec();
+
+ Ok(Some(OsStringExt::from_vec(bytes)))
+ }
+ })
+ .ok()
+ .flatten()
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
diff --git a/library/std/src/sys/unix/cmath.rs b/library/std/src/sys/unix/cmath.rs
index 2bf80d7a4..5346d2291 100644
--- a/library/std/src/sys/unix/cmath.rs
+++ b/library/std/src/sys/unix/cmath.rs
@@ -30,4 +30,8 @@ extern "C" {
pub fn tanf(n: f32) -> f32;
pub fn tanh(n: f64) -> f64;
pub fn tanhf(n: f32) -> f32;
+ pub fn tgamma(n: f64) -> f64;
+ pub fn tgammaf(n: f32) -> f32;
+ pub fn lgamma_r(n: f64, s: &mut i32) -> f64;
+ pub fn lgammaf_r(n: f32, s: &mut i32) -> f32;
}
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index fbc7f04ce..a5604c92a 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -7,17 +7,6 @@ use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom};
use crate::mem;
-#[cfg(any(
- target_os = "android",
- target_os = "linux",
- target_os = "solaris",
- target_os = "fuchsia",
- target_os = "redox",
- target_os = "illumos",
- target_os = "nto",
- target_os = "vita",
-))]
-use crate::mem::MaybeUninit;
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd};
use crate::path::{Path, PathBuf};
use crate::ptr;
@@ -712,22 +701,10 @@ impl Iterator for ReadDir {
// requires the full extent of *entry_ptr to be in bounds of the same
// allocation, which is not necessarily the case here.
//
- // Absent any other way to obtain a pointer to `(*entry_ptr).d_name`
- // legally in Rust analogously to how it would be done in C, we instead
- // need to make our own non-libc allocation that conforms to the weird
- // imaginary definition of dirent64, and use that for a field offset
- // computation.
+ // Instead we must access fields individually through their offsets.
macro_rules! offset_ptr {
($entry_ptr:expr, $field:ident) => {{
- const OFFSET: isize = {
- let delusion = MaybeUninit::<dirent64>::uninit();
- let entry_ptr = delusion.as_ptr();
- unsafe {
- ptr::addr_of!((*entry_ptr).$field)
- .cast::<u8>()
- .offset_from(entry_ptr.cast::<u8>())
- }
- };
+ const OFFSET: isize = mem::offset_of!(dirent64, $field) as isize;
if true {
// Cast to the same type determined by the else branch.
$entry_ptr.byte_offset(OFFSET).cast::<_>()
@@ -1227,6 +1204,7 @@ impl File {
self.0.write_vectored_at(bufs, offset)
}
+ #[inline]
pub fn flush(&self) -> io::Result<()> {
Ok(())
}
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 7d49bbdcb..4d17a1b00 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -89,6 +89,12 @@ enum FdMeta {
NoneObtained,
}
+#[derive(PartialEq)]
+enum FdHandle {
+ Input,
+ Output,
+}
+
impl FdMeta {
fn maybe_fifo(&self) -> bool {
match self {
@@ -114,12 +120,14 @@ impl FdMeta {
}
}
- fn copy_file_range_candidate(&self) -> bool {
+ fn copy_file_range_candidate(&self, f: FdHandle) -> bool {
match self {
// copy_file_range will fail on empty procfs files. `read` can determine whether EOF has been reached
// without extra cost and skip the write, thus there is no benefit in attempting copy_file_range
- FdMeta::Metadata(meta) if meta.is_file() && meta.len() > 0 => true,
- FdMeta::NoneObtained => true,
+ FdMeta::Metadata(meta) if f == FdHandle::Input && meta.is_file() && meta.len() > 0 => {
+ true
+ }
+ FdMeta::Metadata(meta) if f == FdHandle::Output && meta.is_file() => true,
_ => false,
}
}
@@ -197,7 +205,9 @@ impl<R: CopyRead, W: CopyWrite> SpecCopy for Copier<'_, '_, R, W> {
written += flush()?;
let max_write = reader.min_limit();
- if input_meta.copy_file_range_candidate() && output_meta.copy_file_range_candidate() {
+ if input_meta.copy_file_range_candidate(FdHandle::Input)
+ && output_meta.copy_file_range_candidate(FdHandle::Output)
+ {
let result = copy_regular_files(readfd, writefd, max_write);
result.update_take(reader);
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 326f1481e..77ef086f2 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -110,6 +110,11 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
while libc::poll(pfds.as_mut_ptr(), 3, 0) == -1 {
match errno() {
libc::EINTR => continue,
+ #[cfg(target_vendor = "unikraft")]
+ libc::ENOSYS => {
+ // Not all configurations of Unikraft enable `LIBPOSIX_EVENT`.
+ break 'poll;
+ }
libc::EINVAL | libc::EAGAIN | libc::ENOMEM => {
// RLIMIT_NOFILE or temporary allocation failures
// may be preventing use of poll(), fall back to fcntl
@@ -165,7 +170,14 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
}
unsafe fn reset_sigpipe(#[allow(unused_variables)] sigpipe: u8) {
- #[cfg(not(any(target_os = "emscripten", target_os = "fuchsia", target_os = "horizon")))]
+ #[cfg(not(any(
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "horizon",
+ // Unikraft's `signal` implementation is currently broken:
+ // https://github.com/unikraft/lib-musl/issues/57
+ target_vendor = "unikraft",
+ )))]
{
// We don't want to add this as a public type to std, nor do we
// want to `include!` a file from the compiler (which would break
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index a68c14758..57e1a36da 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -495,6 +495,34 @@ pub struct Env {
iter: vec::IntoIter<(OsString, OsString)>,
}
+// FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+pub struct EnvStrDebug<'a> {
+ slice: &'a [(OsString, OsString)],
+}
+
+impl fmt::Debug for EnvStrDebug<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { slice } = self;
+ f.debug_list()
+ .entries(slice.iter().map(|(a, b)| (a.to_str().unwrap(), b.to_str().unwrap())))
+ .finish()
+ }
+}
+
+impl Env {
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self { iter } = self;
+ EnvStrDebug { slice: iter.as_slice() }
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { iter } = self;
+ f.debug_list().entries(iter.as_slice()).finish()
+ }
+}
+
impl !Send for Env {}
impl !Sync for Env {}
@@ -566,16 +594,21 @@ pub fn env() -> Env {
pub fn getenv(k: &OsStr) -> Option<OsString> {
// environment variables with a nul byte can't be set, so their value is
// always None as well
- let s = run_with_cstr(k.as_bytes(), |k| {
+ run_with_cstr(k.as_bytes(), |k| {
let _guard = env_read_lock();
- Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char)
+ let v = unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char;
+
+ if v.is_null() {
+ Ok(None)
+ } else {
+ // SAFETY: `v` cannot be mutated while executing this line since we hold a read lock
+ let bytes = unsafe { CStr::from_ptr(v) }.to_bytes().to_vec();
+
+ Ok(Some(OsStringExt::from_vec(bytes)))
+ }
})
- .ok()?;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
- }
+ .ok()
+ .flatten()
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
diff --git a/library/std/src/sys/unix/os_str.rs b/library/std/src/sys/unix/os_str.rs
index f7333fd5a..463b0a275 100644
--- a/library/std/src/sys/unix/os_str.rs
+++ b/library/std/src/sys/unix/os_str.rs
@@ -96,6 +96,16 @@ impl AsInner<[u8]> for Buf {
}
impl Buf {
+ #[inline]
+ pub fn into_os_str_bytes(self) -> Vec<u8> {
+ self.inner
+ }
+
+ #[inline]
+ pub unsafe fn from_os_str_bytes_unchecked(s: Vec<u8>) -> Self {
+ Self { inner: s }
+ }
+
pub fn from_string(s: String) -> Buf {
Buf { inner: s.into_bytes() }
}
diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs
index e45c380a0..9931c2af2 100644
--- a/library/std/src/sys/unix/process/process_fuchsia.rs
+++ b/library/std/src/sys/unix/process/process_fuchsia.rs
@@ -235,7 +235,7 @@ impl Process {
}
}
-#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub struct ExitStatus(i64);
impl ExitStatus {
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
index 0ce93af66..3963e7f52 100644
--- a/library/std/src/sys/unix/process/process_unix.rs
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -10,9 +10,6 @@ use core::ffi::NonZero_c_int;
#[cfg(target_os = "linux")]
use crate::os::linux::process::PidFd;
-#[cfg(target_os = "linux")]
-use crate::sys::weak::raw_syscall;
-
#[cfg(any(
target_os = "macos",
target_os = "watchos",
@@ -91,6 +88,11 @@ impl Command {
if let Some(ret) = self.posix_spawn(&theirs, envp.as_ref())? {
return Ok((ret, ours));
}
+
+ #[cfg(target_os = "linux")]
+ let (input, output) = sys::net::Socket::new_pair(libc::AF_UNIX, libc::SOCK_SEQPACKET)?;
+
+ #[cfg(not(target_os = "linux"))]
let (input, output) = sys::pipe::anon_pipe()?;
// Whatever happens after the fork is almost for sure going to touch or
@@ -104,12 +106,16 @@ impl Command {
// The child calls `mem::forget` to leak the lock, which is crucial because
// releasing a lock is not async-signal-safe.
let env_lock = sys::os::env_read_lock();
- let (pid, pidfd) = unsafe { self.do_fork()? };
+ let pid = unsafe { self.do_fork()? };
if pid == 0 {
crate::panic::always_abort();
mem::forget(env_lock); // avoid non-async-signal-safe unlocking
drop(input);
+ #[cfg(target_os = "linux")]
+ if self.get_create_pidfd() {
+ self.send_pidfd(&output);
+ }
let Err(err) = unsafe { self.do_exec(theirs, envp.as_ref()) };
let errno = err.raw_os_error().unwrap_or(libc::EINVAL) as u32;
let errno = errno.to_be_bytes();
@@ -133,6 +139,12 @@ impl Command {
drop(env_lock);
drop(output);
+ #[cfg(target_os = "linux")]
+ let pidfd = if self.get_create_pidfd() { self.recv_pidfd(&input) } else { -1 };
+
+ #[cfg(not(target_os = "linux"))]
+ let pidfd = -1;
+
// Safety: We obtained the pidfd from calling `clone3` with
// `CLONE_PIDFD` so it's valid an otherwise unowned.
let mut p = unsafe { Process::new(pid, pidfd) };
@@ -160,6 +172,7 @@ impl Command {
}
Ok(..) => {
// pipe I/O up to PIPE_BUF bytes should be atomic
+ // similarly SOCK_SEQPACKET messages should arrive whole
assert!(p.wait().is_ok(), "wait() should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
@@ -185,20 +198,19 @@ impl Command {
);
#[cfg(any(target_os = "tvos", target_os = "watchos"))]
- unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> {
+ unsafe fn do_fork(&mut self) -> Result<pid_t, io::Error> {
return Err(Self::ERR_APPLE_TV_WATCH_NO_FORK_EXEC);
}
// Attempts to fork the process. If successful, returns Ok((0, -1))
// in the child, and Ok((child_pid, -1)) in the parent.
#[cfg(not(any(
- target_os = "linux",
target_os = "watchos",
target_os = "tvos",
all(target_os = "nto", target_env = "nto71"),
)))]
- unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> {
- cvt(libc::fork()).map(|res| (res, -1))
+ unsafe fn do_fork(&mut self) -> Result<pid_t, io::Error> {
+ cvt(libc::fork())
}
// On QNX Neutrino, fork can fail with EBADF in case "another thread might have opened
@@ -206,7 +218,7 @@ impl Command {
// Documentation says "... or try calling fork() again". This is what we do here.
// See also https://www.qnx.com/developers/docs/7.1/#com.qnx.doc.neutrino.lib_ref/topic/f/fork.html
#[cfg(all(target_os = "nto", target_env = "nto71"))]
- unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> {
+ unsafe fn do_fork(&mut self) -> Result<pid_t, io::Error> {
use crate::sys::os::errno;
let mut delay = MIN_FORKSPAWN_SLEEP;
@@ -229,91 +241,11 @@ impl Command {
delay *= 2;
continue;
} else {
- return cvt(r).map(|res| (res, -1));
+ return cvt(r);
}
}
}
- // Attempts to fork the process. If successful, returns Ok((0, -1))
- // in the child, and Ok((child_pid, child_pidfd)) in the parent.
- #[cfg(target_os = "linux")]
- unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> {
- use crate::sync::atomic::{AtomicBool, Ordering};
-
- static HAS_CLONE3: AtomicBool = AtomicBool::new(true);
- const CLONE_PIDFD: u64 = 0x00001000;
-
- #[repr(C)]
- struct clone_args {
- flags: u64,
- pidfd: u64,
- child_tid: u64,
- parent_tid: u64,
- exit_signal: u64,
- stack: u64,
- stack_size: u64,
- tls: u64,
- set_tid: u64,
- set_tid_size: u64,
- cgroup: u64,
- }
-
- raw_syscall! {
- fn clone3(cl_args: *mut clone_args, len: libc::size_t) -> libc::c_long
- }
-
- // Bypassing libc for `clone3` can make further libc calls unsafe,
- // so we use it sparingly for now. See #89522 for details.
- // Some tools (e.g. sandboxing tools) may also expect `fork`
- // rather than `clone3`.
- let want_clone3_pidfd = self.get_create_pidfd();
-
- // If we fail to create a pidfd for any reason, this will
- // stay as -1, which indicates an error.
- let mut pidfd: pid_t = -1;
-
- // Attempt to use the `clone3` syscall, which supports more arguments
- // (in particular, the ability to create a pidfd). If this fails,
- // we will fall through this block to a call to `fork()`
- if want_clone3_pidfd && HAS_CLONE3.load(Ordering::Relaxed) {
- let mut args = clone_args {
- flags: CLONE_PIDFD,
- pidfd: &mut pidfd as *mut pid_t as u64,
- child_tid: 0,
- parent_tid: 0,
- exit_signal: libc::SIGCHLD as u64,
- stack: 0,
- stack_size: 0,
- tls: 0,
- set_tid: 0,
- set_tid_size: 0,
- cgroup: 0,
- };
-
- let args_ptr = &mut args as *mut clone_args;
- let args_size = crate::mem::size_of::<clone_args>();
-
- let res = cvt(clone3(args_ptr, args_size));
- match res {
- Ok(n) => return Ok((n as pid_t, pidfd)),
- Err(e) => match e.raw_os_error() {
- // Multiple threads can race to execute this store,
- // but that's fine - that just means that multiple threads
- // will have tried and failed to execute the same syscall,
- // with no other side effects.
- Some(libc::ENOSYS) => HAS_CLONE3.store(false, Ordering::Relaxed),
- // Fallback to fork if `EPERM` is returned. (e.g. blocked by seccomp)
- Some(libc::EPERM) => {}
- _ => return Err(e),
- },
- }
- }
-
- // Generally, we just call `fork`. If we get here after wanting `clone3`,
- // then the syscall does not exist or we do not have permission to call it.
- cvt(libc::fork()).map(|res| (res, pidfd))
- }
-
pub fn exec(&mut self, default: Stdio) -> io::Error {
let envp = self.capture_env();
@@ -722,6 +654,115 @@ impl Command {
Ok(Some(p))
}
}
+
+ #[cfg(target_os = "linux")]
+ fn send_pidfd(&self, sock: &crate::sys::net::Socket) {
+ use crate::io::IoSlice;
+ use crate::os::fd::RawFd;
+ use crate::sys::cvt_r;
+ use libc::{CMSG_DATA, CMSG_FIRSTHDR, CMSG_LEN, CMSG_SPACE, SCM_RIGHTS, SOL_SOCKET};
+
+ unsafe {
+ let child_pid = libc::getpid();
+ // pidfd_open sets CLOEXEC by default
+ let pidfd = libc::syscall(libc::SYS_pidfd_open, child_pid, 0);
+
+ let fds: [c_int; 1] = [pidfd as RawFd];
+
+ const SCM_MSG_LEN: usize = mem::size_of::<[c_int; 1]>();
+
+ #[repr(C)]
+ union Cmsg {
+ buf: [u8; unsafe { CMSG_SPACE(SCM_MSG_LEN as u32) as usize }],
+ _align: libc::cmsghdr,
+ }
+
+ let mut cmsg: Cmsg = mem::zeroed();
+
+ // 0-length message to send through the socket so we can pass along the fd
+ let mut iov = [IoSlice::new(b"")];
+ let mut msg: libc::msghdr = mem::zeroed();
+
+ msg.msg_iov = &mut iov as *mut _ as *mut _;
+ msg.msg_iovlen = 1;
+ msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _;
+ msg.msg_control = &mut cmsg.buf as *mut _ as *mut _;
+
+ // only attach cmsg if we successfully acquired the pidfd
+ if pidfd >= 0 {
+ let hdr = CMSG_FIRSTHDR(&mut msg as *mut _ as *mut _);
+ (*hdr).cmsg_level = SOL_SOCKET;
+ (*hdr).cmsg_type = SCM_RIGHTS;
+ (*hdr).cmsg_len = CMSG_LEN(SCM_MSG_LEN as _) as _;
+ let data = CMSG_DATA(hdr);
+ crate::ptr::copy_nonoverlapping(
+ fds.as_ptr().cast::<u8>(),
+ data as *mut _,
+ SCM_MSG_LEN,
+ );
+ }
+
+ // we send the 0-length message even if we failed to acquire the pidfd
+ // so we get a consistent SEQPACKET order
+ match cvt_r(|| libc::sendmsg(sock.as_raw(), &msg, 0)) {
+ Ok(0) => {}
+ _ => rtabort!("failed to communicate with parent process"),
+ }
+ }
+ }
+
+ #[cfg(target_os = "linux")]
+ fn recv_pidfd(&self, sock: &crate::sys::net::Socket) -> pid_t {
+ use crate::io::IoSliceMut;
+ use crate::sys::cvt_r;
+
+ use libc::{CMSG_DATA, CMSG_FIRSTHDR, CMSG_LEN, CMSG_SPACE, SCM_RIGHTS, SOL_SOCKET};
+
+ unsafe {
+ const SCM_MSG_LEN: usize = mem::size_of::<[c_int; 1]>();
+
+ #[repr(C)]
+ union Cmsg {
+ _buf: [u8; unsafe { CMSG_SPACE(SCM_MSG_LEN as u32) as usize }],
+ _align: libc::cmsghdr,
+ }
+ let mut cmsg: Cmsg = mem::zeroed();
+ // 0-length read to get the fd
+ let mut iov = [IoSliceMut::new(&mut [])];
+
+ let mut msg: libc::msghdr = mem::zeroed();
+
+ msg.msg_iov = &mut iov as *mut _ as *mut _;
+ msg.msg_iovlen = 1;
+ msg.msg_controllen = mem::size_of::<Cmsg>() as _;
+ msg.msg_control = &mut cmsg as *mut _ as *mut _;
+
+ match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, 0)) {
+ Err(_) => return -1,
+ Ok(_) => {}
+ }
+
+ let hdr = CMSG_FIRSTHDR(&mut msg as *mut _ as *mut _);
+ if hdr.is_null()
+ || (*hdr).cmsg_level != SOL_SOCKET
+ || (*hdr).cmsg_type != SCM_RIGHTS
+ || (*hdr).cmsg_len != CMSG_LEN(SCM_MSG_LEN as _) as _
+ {
+ return -1;
+ }
+ let data = CMSG_DATA(hdr);
+
+ let mut fds = [-1 as c_int];
+
+ crate::ptr::copy_nonoverlapping(
+ data as *const _,
+ fds.as_mut_ptr().cast::<u8>(),
+ SCM_MSG_LEN,
+ );
+
+ fds[0]
+ }
+ }
}
////////////////////////////////////////////////////////////////////////////////
@@ -800,7 +841,7 @@ impl Process {
//
// This is not actually an "exit status" in Unix terminology. Rather, it is a "wait status".
// See the discussion in comments and doc comments for `std::process::ExitStatus`.
-#[derive(PartialEq, Eq, Clone, Copy)]
+#[derive(PartialEq, Eq, Clone, Copy, Default)]
pub struct ExitStatus(c_int);
impl fmt::Debug for ExitStatus {
diff --git a/library/std/src/sys/unix/process/process_unix/tests.rs b/library/std/src/sys/unix/process/process_unix/tests.rs
index e5e1f956b..6aa79e7f9 100644
--- a/library/std/src/sys/unix/process/process_unix/tests.rs
+++ b/library/std/src/sys/unix/process/process_unix/tests.rs
@@ -60,3 +60,28 @@ fn test_command_fork_no_unwind() {
|| signal == libc::SIGSEGV
);
}
+
+#[test]
+#[cfg(target_os = "linux")]
+fn test_command_pidfd() {
+ use crate::os::fd::RawFd;
+ use crate::os::linux::process::{ChildExt, CommandExt};
+ use crate::process::Command;
+
+ let our_pid = crate::process::id();
+ let pidfd = unsafe { libc::syscall(libc::SYS_pidfd_open, our_pid, 0) };
+ let pidfd_open_available = if pidfd >= 0 {
+ unsafe { libc::close(pidfd as RawFd) };
+ true
+ } else {
+ false
+ };
+
+ // always exercise creation attempts
+ let child = Command::new("echo").create_pidfd(true).spawn().unwrap();
+
+ // but only check if we know that the kernel supports pidfds
+ if pidfd_open_available {
+ assert!(child.pidfd().is_ok())
+ }
+}
diff --git a/library/std/src/sys/unix/process/process_unsupported.rs b/library/std/src/sys/unix/process/process_unsupported.rs
index f28ca58d0..8e0b971af 100644
--- a/library/std/src/sys/unix/process/process_unsupported.rs
+++ b/library/std/src/sys/unix/process/process_unsupported.rs
@@ -55,7 +55,7 @@ impl Process {
}
}
-#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub struct ExitStatus(c_int);
impl ExitStatus {
diff --git a/library/std/src/sys/unix/process/process_vxworks.rs b/library/std/src/sys/unix/process/process_vxworks.rs
index f70d3cb39..1ff2b2fb3 100644
--- a/library/std/src/sys/unix/process/process_vxworks.rs
+++ b/library/std/src/sys/unix/process/process_vxworks.rs
@@ -179,7 +179,7 @@ impl Process {
}
/// Unix exit statuses
-#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub struct ExitStatus(c_int);
impl ExitStatus {
diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs
index d471be33e..fbf158f56 100644
--- a/library/std/src/sys/unix/rand.rs
+++ b/library/std/src/sys/unix/rand.rs
@@ -17,7 +17,6 @@ pub fn hashmap_random_keys() -> (u64, u64) {
not(target_os = "tvos"),
not(target_os = "watchos"),
not(target_os = "openbsd"),
- not(target_os = "freebsd"),
not(target_os = "netbsd"),
not(target_os = "fuchsia"),
not(target_os = "redox"),
@@ -68,11 +67,25 @@ mod imp {
unsafe { libc::getrandom(buf.as_mut_ptr().cast(), buf.len(), 0) }
}
+ #[cfg(target_os = "freebsd")]
+ fn getrandom(buf: &mut [u8]) -> libc::ssize_t {
+        // FIXME: use the `getrandom` above once library std's libc is updated
+ extern "C" {
+ fn getrandom(
+ buffer: *mut libc::c_void,
+ length: libc::size_t,
+ flags: libc::c_uint,
+ ) -> libc::ssize_t;
+ }
+ unsafe { getrandom(buf.as_mut_ptr().cast(), buf.len(), 0) }
+ }
+
#[cfg(not(any(
target_os = "linux",
target_os = "android",
target_os = "espidf",
- target_os = "horizon"
+ target_os = "horizon",
+ target_os = "freebsd"
)))]
fn getrandom_fill_bytes(_buf: &mut [u8]) -> bool {
false
@@ -82,7 +95,8 @@ mod imp {
target_os = "linux",
target_os = "android",
target_os = "espidf",
- target_os = "horizon"
+ target_os = "horizon",
+ target_os = "freebsd"
))]
fn getrandom_fill_bytes(v: &mut [u8]) -> bool {
use crate::sync::atomic::{AtomicBool, Ordering};
@@ -222,7 +236,7 @@ mod imp {
}
}
-#[cfg(any(target_os = "freebsd", target_os = "netbsd"))]
+#[cfg(target_os = "netbsd")]
mod imp {
use crate::ptr;
diff --git a/library/std/src/sys/unix/stdio.rs b/library/std/src/sys/unix/stdio.rs
index a26f20795..97e75f1b5 100644
--- a/library/std/src/sys/unix/stdio.rs
+++ b/library/std/src/sys/unix/stdio.rs
@@ -54,6 +54,7 @@ impl io::Write for Stdout {
true
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
@@ -81,6 +82,7 @@ impl io::Write for Stderr {
true
}
+ #[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
diff --git a/library/std/src/sys/unsupported/os.rs b/library/std/src/sys/unsupported/os.rs
index e150ae143..248b34829 100644
--- a/library/std/src/sys/unsupported/os.rs
+++ b/library/std/src/sys/unsupported/os.rs
@@ -65,10 +65,26 @@ pub fn current_exe() -> io::Result<PathBuf> {
pub struct Env(!);
+impl Env {
+ // FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self(inner) = self;
+ match *inner {}
+ }
+}
+
impl Iterator for Env {
type Item = (OsString, OsString);
fn next(&mut self) -> Option<(OsString, OsString)> {
- self.0
+ let Self(inner) = self;
+ match *inner {}
}
}
diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs
index a494f2d6b..77b675aaa 100644
--- a/library/std/src/sys/unsupported/process.rs
+++ b/library/std/src/sys/unsupported/process.rs
@@ -99,58 +99,59 @@ impl fmt::Debug for Command {
}
}
-pub struct ExitStatus(!);
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
+#[non_exhaustive]
+pub struct ExitStatus();
impl ExitStatus {
pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
- self.0
+ Ok(())
}
pub fn code(&self) -> Option<i32> {
- self.0
+ Some(0)
}
}
-impl Clone for ExitStatus {
- fn clone(&self) -> ExitStatus {
- self.0
+impl fmt::Display for ExitStatus {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "<dummy exit status>")
}
}
-impl Copy for ExitStatus {}
+pub struct ExitStatusError(!);
-impl PartialEq for ExitStatus {
- fn eq(&self, _other: &ExitStatus) -> bool {
+impl Clone for ExitStatusError {
+ fn clone(&self) -> ExitStatusError {
self.0
}
}
-impl Eq for ExitStatus {}
+impl Copy for ExitStatusError {}
-impl fmt::Debug for ExitStatus {
- fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl PartialEq for ExitStatusError {
+ fn eq(&self, _other: &ExitStatusError) -> bool {
self.0
}
}
-impl fmt::Display for ExitStatus {
+impl Eq for ExitStatusError {}
+
+impl fmt::Debug for ExitStatusError {
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0
}
}
-#[derive(PartialEq, Eq, Clone, Copy, Debug)]
-pub struct ExitStatusError(ExitStatus);
-
impl Into<ExitStatus> for ExitStatusError {
fn into(self) -> ExitStatus {
- self.0.0
+ self.0
}
}
impl ExitStatusError {
pub fn code(self) -> Option<NonZeroI32> {
- self.0.0
+ self.0
}
}
diff --git a/library/std/src/sys/wasi/fd.rs b/library/std/src/sys/wasi/fd.rs
index 1b50c2ea6..d7295a799 100644
--- a/library/std/src/sys/wasi/fd.rs
+++ b/library/std/src/sys/wasi/fd.rs
@@ -16,14 +16,20 @@ pub struct WasiFd {
fn iovec<'a>(a: &'a mut [IoSliceMut<'_>]) -> &'a [wasi::Iovec] {
assert_eq!(mem::size_of::<IoSliceMut<'_>>(), mem::size_of::<wasi::Iovec>());
assert_eq!(mem::align_of::<IoSliceMut<'_>>(), mem::align_of::<wasi::Iovec>());
- // SAFETY: `IoSliceMut` and `IoVec` have exactly the same memory layout
+ // SAFETY: `IoSliceMut` and `IoVec` have exactly the same memory layout.
+ // We decorate our `IoSliceMut` with `repr(transparent)` (see `io.rs`), and
+ // `crate::io::IoSliceMut` is a `repr(transparent)` wrapper around our type, so this is
+ // guaranteed.
unsafe { mem::transmute(a) }
}
fn ciovec<'a>(a: &'a [IoSlice<'_>]) -> &'a [wasi::Ciovec] {
assert_eq!(mem::size_of::<IoSlice<'_>>(), mem::size_of::<wasi::Ciovec>());
assert_eq!(mem::align_of::<IoSlice<'_>>(), mem::align_of::<wasi::Ciovec>());
- // SAFETY: `IoSlice` and `CIoVec` have exactly the same memory layout
+ // SAFETY: `IoSlice` and `CIoVec` have exactly the same memory layout.
+ // We decorate our `IoSlice` with `repr(transparent)` (see `io.rs`), and
+ // `crate::io::IoSlice` is a `repr(transparent)` wrapper around our type, so this is
+ // guaranteed.
unsafe { mem::transmute(a) }
}
diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs
index a22237080..98517da1d 100644
--- a/library/std/src/sys/wasi/mod.rs
+++ b/library/std/src/sys/wasi/mod.rs
@@ -29,8 +29,7 @@ pub mod fs;
#[path = "../wasm/atomics/futex.rs"]
pub mod futex;
pub mod io;
-#[path = "../unsupported/locks/mod.rs"]
-pub mod locks;
+
pub mod net;
pub mod os;
#[path = "../unix/os_str.rs"]
@@ -47,14 +46,27 @@ pub mod thread;
pub mod thread_local_dtor;
#[path = "../unsupported/thread_local_key.rs"]
pub mod thread_local_key;
-#[path = "../unsupported/thread_parking.rs"]
-pub mod thread_parking;
pub mod time;
cfg_if::cfg_if! {
- if #[cfg(not(target_feature = "atomics"))] {
+ if #[cfg(target_feature = "atomics")] {
+ #[path = "../unix/locks"]
+ pub mod locks {
+ #![allow(unsafe_op_in_unsafe_fn)]
+ mod futex_condvar;
+ mod futex_mutex;
+ mod futex_rwlock;
+ pub(crate) use futex_condvar::Condvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
+ }
+ } else {
+ #[path = "../unsupported/locks/mod.rs"]
+ pub mod locks;
#[path = "../unsupported/once.rs"]
pub mod once;
+ #[path = "../unsupported/thread_parking.rs"]
+ pub mod thread_parking;
}
}
diff --git a/library/std/src/sys/wasi/os.rs b/library/std/src/sys/wasi/os.rs
index 9919dc708..d53bddd8e 100644
--- a/library/std/src/sys/wasi/os.rs
+++ b/library/std/src/sys/wasi/os.rs
@@ -142,10 +142,39 @@ impl StdError for JoinPathsError {
pub fn current_exe() -> io::Result<PathBuf> {
unsupported()
}
+
pub struct Env {
iter: vec::IntoIter<(OsString, OsString)>,
}
+// FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+pub struct EnvStrDebug<'a> {
+ slice: &'a [(OsString, OsString)],
+}
+
+impl fmt::Debug for EnvStrDebug<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { slice } = self;
+ f.debug_list()
+ .entries(slice.iter().map(|(a, b)| (a.to_str().unwrap(), b.to_str().unwrap())))
+ .finish()
+ }
+}
+
+impl Env {
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self { iter } = self;
+ EnvStrDebug { slice: iter.as_slice() }
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { iter } = self;
+ f.debug_list().entries(iter.as_slice()).finish()
+ }
+}
+
impl !Send for Env {}
impl !Sync for Env {}
@@ -196,16 +225,23 @@ pub fn env() -> Env {
}
pub fn getenv(k: &OsStr) -> Option<OsString> {
- let s = run_with_cstr(k.as_bytes(), |k| unsafe {
+ // environment variables with a nul byte can't be set, so their value is
+ // always None as well
+ run_with_cstr(k.as_bytes(), |k| {
let _guard = env_read_lock();
- Ok(libc::getenv(k.as_ptr()) as *const libc::c_char)
+ let v = unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char;
+
+ if v.is_null() {
+ Ok(None)
+ } else {
+            // SAFETY: `v` cannot be mutated while executing this line since we hold a read lock
+ let bytes = unsafe { CStr::from_ptr(v) }.to_bytes().to_vec();
+
+ Ok(Some(OsStringExt::from_vec(bytes)))
+ }
})
- .ok()?;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
- }
+ .ok()
+ .flatten()
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
@@ -224,6 +260,11 @@ pub fn unsetenv(n: &OsStr) -> io::Result<()> {
})
}
+#[allow(dead_code)]
+pub fn page_size() -> usize {
+ unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
+}
+
pub fn temp_dir() -> PathBuf {
panic!("no filesystem on wasm")
}
diff --git a/library/std/src/sys/wasi/thread.rs b/library/std/src/sys/wasi/thread.rs
index e7a6ab4be..a0eefa881 100644
--- a/library/std/src/sys/wasi/thread.rs
+++ b/library/std/src/sys/wasi/thread.rs
@@ -1,5 +1,3 @@
-#![deny(unsafe_op_in_unsafe_fn)]
-
use crate::ffi::CStr;
use crate::io;
use crate::mem;
@@ -7,14 +5,124 @@ use crate::num::NonZeroUsize;
use crate::sys::unsupported;
use crate::time::Duration;
-pub struct Thread(!);
+cfg_if::cfg_if! {
+ if #[cfg(target_feature = "atomics")] {
+ use crate::cmp;
+ use crate::ptr;
+ use crate::sys::os;
+ // Add a few symbols not in upstream `libc` just yet.
+ mod libc {
+ pub use crate::ffi;
+ pub use crate::mem;
+ pub use libc::*;
+
+ // defined in wasi-libc
+ // https://github.com/WebAssembly/wasi-libc/blob/a6f871343313220b76009827ed0153586361c0d5/libc-top-half/musl/include/alltypes.h.in#L108
+ #[repr(C)]
+ union pthread_attr_union {
+ __i: [ffi::c_int; if mem::size_of::<ffi::c_long>() == 8 { 14 } else { 9 }],
+ __vi: [ffi::c_int; if mem::size_of::<ffi::c_long>() == 8 { 14 } else { 9 }],
+ __s: [ffi::c_ulong; if mem::size_of::<ffi::c_long>() == 8 { 7 } else { 9 }],
+ }
+
+ #[repr(C)]
+ pub struct pthread_attr_t {
+ __u: pthread_attr_union,
+ }
+
+ #[allow(non_camel_case_types)]
+ pub type pthread_t = *mut ffi::c_void;
+
+ extern "C" {
+ pub fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(*mut ffi::c_void) -> *mut ffi::c_void,
+ value: *mut ffi::c_void,
+ ) -> ffi::c_int;
+ pub fn pthread_join(native: pthread_t, value: *mut *mut ffi::c_void) -> ffi::c_int;
+ pub fn pthread_attr_init(attrp: *mut pthread_attr_t) -> ffi::c_int;
+ pub fn pthread_attr_setstacksize(
+ attr: *mut pthread_attr_t,
+ stack_size: libc::size_t,
+ ) -> ffi::c_int;
+ pub fn pthread_attr_destroy(attr: *mut pthread_attr_t) -> ffi::c_int;
+ pub fn pthread_detach(thread: pthread_t) -> ffi::c_int;
+ }
+ }
+
+ pub struct Thread {
+ id: libc::pthread_t,
+ }
+
+ impl Drop for Thread {
+ fn drop(&mut self) {
+ let ret = unsafe { libc::pthread_detach(self.id) };
+ debug_assert_eq!(ret, 0);
+ }
+ }
+ } else {
+ pub struct Thread(!);
+ }
+}
pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
impl Thread {
// unsafe: see thread::Builder::spawn_unchecked for safety requirements
- pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
- unsupported()
+ cfg_if::cfg_if! {
+ if #[cfg(target_feature = "atomics")] {
+ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ let p = Box::into_raw(Box::new(p));
+ let mut native: libc::pthread_t = mem::zeroed();
+ let mut attr: libc::pthread_attr_t = mem::zeroed();
+ assert_eq!(libc::pthread_attr_init(&mut attr), 0);
+
+ let stack_size = cmp::max(stack, DEFAULT_MIN_STACK_SIZE);
+
+ match libc::pthread_attr_setstacksize(&mut attr, stack_size) {
+ 0 => {}
+ n => {
+ assert_eq!(n, libc::EINVAL);
+ // EINVAL means |stack_size| is either too small or not a
+ // multiple of the system page size. Because it's definitely
+ // >= PTHREAD_STACK_MIN, it must be an alignment issue.
+ // Round up to the nearest page and try again.
+ let page_size = os::page_size();
+ let stack_size =
+ (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);
+ assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0);
+ }
+ };
+
+ let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _);
+ // Note: if the thread creation fails and this assert fails, then p will
+ // be leaked. However, an alternative design could cause double-free
+ // which is clearly worse.
+ assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
+
+ return if ret != 0 {
+ // The thread failed to start and as a result p was not consumed. Therefore, it is
+ // safe to reconstruct the box so that it gets deallocated.
+ drop(Box::from_raw(p));
+ Err(io::Error::from_raw_os_error(ret))
+ } else {
+ Ok(Thread { id: native })
+ };
+
+ extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
+ unsafe {
+ // Finally, let's run some code.
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+ }
+ ptr::null_mut()
+ }
+ }
+ } else {
+ pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ unsupported()
+ }
+ }
}
pub fn yield_now() {
@@ -62,7 +170,19 @@ impl Thread {
}
pub fn join(self) {
- self.0
+ cfg_if::cfg_if! {
+ if #[cfg(target_feature = "atomics")] {
+ unsafe {
+ let ret = libc::pthread_join(self.id, ptr::null_mut());
+ mem::forget(self);
+ if ret != 0 {
+ rtabort!("failed to join thread: {}", io::Error::from_raw_os_error(ret));
+ }
+ }
+ } else {
+ self.0
+ }
+ }
}
}
diff --git a/library/std/src/sys/windows/cmath.rs b/library/std/src/sys/windows/cmath.rs
index 43ab8c7ee..1b2a86f3c 100644
--- a/library/std/src/sys/windows/cmath.rs
+++ b/library/std/src/sys/windows/cmath.rs
@@ -1,6 +1,6 @@
#![cfg(not(test))]
-use libc::{c_double, c_float};
+use libc::{c_double, c_float, c_int};
extern "C" {
pub fn acos(n: c_double) -> c_double;
@@ -23,6 +23,10 @@ extern "C" {
pub fn sinh(n: c_double) -> c_double;
pub fn tan(n: c_double) -> c_double;
pub fn tanh(n: c_double) -> c_double;
+ pub fn tgamma(n: c_double) -> c_double;
+ pub fn tgammaf(n: c_float) -> c_float;
+ pub fn lgamma_r(n: c_double, s: &mut c_int) -> c_double;
+ pub fn lgammaf_r(n: c_float, s: &mut c_int) -> c_float;
}
pub use self::shims::*;
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index 4fe95d411..e28dd4935 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -69,10 +69,7 @@ unsafe extern "C" fn init() {
/// Helper macro for creating CStrs from literals and symbol names.
macro_rules! ansi_str {
- (sym $ident:ident) => {{
- #[allow(unused_unsafe)]
- crate::sys::compat::const_cstr_from_bytes(concat!(stringify!($ident), "\0").as_bytes())
- }};
+ (sym $ident:ident) => {{ crate::sys::compat::const_cstr_from_bytes(concat!(stringify!($ident), "\0").as_bytes()) }};
($lit:literal) => {{ crate::sys::compat::const_cstr_from_bytes(concat!($lit, "\0").as_bytes()) }};
}
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index d7adeb266..58afca088 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -25,10 +25,6 @@ pub fn errno() -> i32 {
/// Gets a detailed string description for the given error number.
pub fn error_string(mut errnum: i32) -> String {
- // This value is calculated from the macro
- // MAKELANGID(LANG_SYSTEM_DEFAULT, SUBLANG_SYS_DEFAULT)
- let langId = 0x0800 as c::DWORD;
-
let mut buf = [0 as c::WCHAR; 2048];
unsafe {
@@ -56,13 +52,13 @@ pub fn error_string(mut errnum: i32) -> String {
flags | c::FORMAT_MESSAGE_FROM_SYSTEM | c::FORMAT_MESSAGE_IGNORE_INSERTS,
module,
errnum as c::DWORD,
- langId,
+ 0,
buf.as_mut_ptr(),
buf.len() as c::DWORD,
ptr::null(),
) as usize;
if res == 0 {
- // Sometimes FormatMessageW can fail e.g., system doesn't like langId,
+            // Sometimes FormatMessageW can fail, e.g. if the system doesn't like 0 as langId.
let fm_err = errno();
return format!("OS Error {errnum} (FormatMessageW() returned error {fm_err})");
}
@@ -85,25 +81,69 @@ pub fn error_string(mut errnum: i32) -> String {
pub struct Env {
base: c::LPWCH,
- cur: c::LPWCH,
+ iter: EnvIterator,
+}
+
+// FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when <OsStr as Debug>::fmt matches <str as Debug>::fmt.
+pub struct EnvStrDebug<'a> {
+ iter: &'a EnvIterator,
+}
+
+impl fmt::Debug for EnvStrDebug<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { iter } = self;
+ let iter: EnvIterator = (*iter).clone();
+ let mut list = f.debug_list();
+ for (a, b) in iter {
+ list.entry(&(a.to_str().unwrap(), b.to_str().unwrap()));
+ }
+ list.finish()
+ }
+}
+
+impl Env {
+ pub fn str_debug(&self) -> impl fmt::Debug + '_ {
+ let Self { base: _, iter } = self;
+ EnvStrDebug { iter }
+ }
+}
+
+impl fmt::Debug for Env {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { base: _, iter } = self;
+ f.debug_list().entries(iter.clone()).finish()
+ }
}
impl Iterator for Env {
type Item = (OsString, OsString);
fn next(&mut self) -> Option<(OsString, OsString)> {
+ let Self { base: _, iter } = self;
+ iter.next()
+ }
+}
+
+#[derive(Clone)]
+struct EnvIterator(c::LPWCH);
+
+impl Iterator for EnvIterator {
+ type Item = (OsString, OsString);
+
+ fn next(&mut self) -> Option<(OsString, OsString)> {
+ let Self(cur) = self;
loop {
unsafe {
- if *self.cur == 0 {
+ if **cur == 0 {
return None;
}
- let p = self.cur as *const u16;
+ let p = *cur as *const u16;
let mut len = 0;
while *p.add(len) != 0 {
len += 1;
}
let s = slice::from_raw_parts(p, len);
- self.cur = self.cur.add(len + 1);
+ *cur = cur.add(len + 1);
// Windows allows environment variables to start with an equals
// symbol (in any other position, this is the separator between
@@ -137,7 +177,7 @@ pub fn env() -> Env {
if ch.is_null() {
panic!("failure getting env string from OS: {}", io::Error::last_os_error());
}
- Env { base: ch, cur: ch }
+ Env { base: ch, iter: EnvIterator(ch) }
}
}
diff --git a/library/std/src/sys/windows/os_str.rs b/library/std/src/sys/windows/os_str.rs
index 16c4f55c6..4708657a9 100644
--- a/library/std/src/sys/windows/os_str.rs
+++ b/library/std/src/sys/windows/os_str.rs
@@ -63,6 +63,16 @@ impl fmt::Display for Slice {
}
impl Buf {
+ #[inline]
+ pub fn into_os_str_bytes(self) -> Vec<u8> {
+ self.inner.into_bytes()
+ }
+
+ #[inline]
+ pub unsafe fn from_os_str_bytes_unchecked(s: Vec<u8>) -> Self {
+ Self { inner: Wtf8Buf::from_bytes_unchecked(s) }
+ }
+
pub fn with_capacity(capacity: usize) -> Buf {
Buf { inner: Wtf8Buf::with_capacity(capacity) }
}
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index e3493cbb8..2dd0c67ac 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -652,7 +652,7 @@ impl Process {
}
}
-#[derive(PartialEq, Eq, Clone, Copy, Debug)]
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
pub struct ExitStatus(c::DWORD);
impl ExitStatus {
diff --git a/library/std/src/sys/windows/thread_local_dtor.rs b/library/std/src/sys/windows/thread_local_dtor.rs
index 9707a95df..cf542d2bf 100644
--- a/library/std/src/sys/windows/thread_local_dtor.rs
+++ b/library/std/src/sys/windows/thread_local_dtor.rs
@@ -4,29 +4,4 @@
#![unstable(feature = "thread_local_internals", issue = "none")]
#![cfg(target_thread_local)]
-// Using a per-thread list avoids the problems in synchronizing global state.
-#[thread_local]
-static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
-
-// Ensure this can never be inlined because otherwise this may break in dylibs.
-// See #44391.
-#[inline(never)]
-pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
- DESTRUCTORS.push((t, dtor));
-}
-
-#[inline(never)] // See comment above
-/// Runs destructors. This should not be called until thread exit.
-pub unsafe fn run_keyless_dtors() {
- // Drop all the destructors.
- //
- // Note: While this is potentially an infinite loop, it *should* be
- // the case that this loop always terminates because we provide the
- // guarantee that a TLS key cannot be set after it is flagged for
- // destruction.
- while let Some((ptr, dtor)) = DESTRUCTORS.pop() {
- (dtor)(ptr);
- }
- // We're done so free the memory.
- DESTRUCTORS = Vec::new();
-}
+pub use super::thread_local_key::register_keyless_dtor as register_dtor;
diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs
index 17628b757..036d96596 100644
--- a/library/std/src/sys/windows/thread_local_key.rs
+++ b/library/std/src/sys/windows/thread_local_key.rs
@@ -1,7 +1,7 @@
use crate::cell::UnsafeCell;
use crate::ptr;
use crate::sync::atomic::{
- AtomicPtr, AtomicU32,
+ AtomicBool, AtomicPtr, AtomicU32,
Ordering::{AcqRel, Acquire, Relaxed, Release},
};
use crate::sys::c;
@@ -9,6 +9,41 @@ use crate::sys::c;
#[cfg(test)]
mod tests;
+/// An optimization hint. The compiler is often smart enough to know if an atomic
+/// is never set and can remove dead code based on that fact.
+static HAS_DTORS: AtomicBool = AtomicBool::new(false);
+
+// Using a per-thread list avoids the problems in synchronizing global state.
+#[thread_local]
+#[cfg(target_thread_local)]
+static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+
+// Ensure this can never be inlined because otherwise this may break in dylibs.
+// See #44391.
+#[inline(never)]
+#[cfg(target_thread_local)]
+pub unsafe fn register_keyless_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ DESTRUCTORS.push((t, dtor));
+ HAS_DTORS.store(true, Relaxed);
+}
+
+#[inline(never)] // See comment above
+#[cfg(target_thread_local)]
+/// Runs destructors. This should not be called until thread exit.
+unsafe fn run_keyless_dtors() {
+ // Drop all the destructors.
+ //
+ // Note: While this is potentially an infinite loop, it *should* be
+ // the case that this loop always terminates because we provide the
+ // guarantee that a TLS key cannot be set after it is flagged for
+ // destruction.
+ while let Some((ptr, dtor)) = DESTRUCTORS.pop() {
+ (dtor)(ptr);
+ }
+ // We're done so free the memory.
+ DESTRUCTORS = Vec::new();
+}
+
type Key = c::DWORD;
type Dtor = unsafe extern "C" fn(*mut u8);
@@ -156,6 +191,8 @@ static DTORS: AtomicPtr<StaticKey> = AtomicPtr::new(ptr::null_mut());
/// Should only be called once per key, otherwise loops or breaks may occur in
/// the linked list.
unsafe fn register_dtor(key: &'static StaticKey) {
+ // Ensure this is never run when native thread locals are available.
+ assert_eq!(false, cfg!(target_thread_local));
let this = <*const StaticKey>::cast_mut(key);
// Use acquire ordering to pass along the changes done by the previously
// registered keys when we store the new head with release ordering.
@@ -167,6 +204,7 @@ unsafe fn register_dtor(key: &'static StaticKey) {
Err(new) => head = new,
}
}
+ HAS_DTORS.store(true, Release);
}
// -------------------------------------------------------------------------
@@ -240,10 +278,14 @@ pub static p_thread_callback: unsafe extern "system" fn(c::LPVOID, c::DWORD, c::
#[allow(dead_code, unused_variables)]
unsafe extern "system" fn on_tls_callback(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID) {
+ if !HAS_DTORS.load(Acquire) {
+ return;
+ }
if dwReason == c::DLL_THREAD_DETACH || dwReason == c::DLL_PROCESS_DETACH {
+ #[cfg(not(target_thread_local))]
run_dtors();
#[cfg(target_thread_local)]
- super::thread_local_dtor::run_keyless_dtors();
+ run_keyless_dtors();
}
// See comments above for what this is doing. Note that we don't need this
diff --git a/library/std/src/sys/windows/thread_local_key/tests.rs b/library/std/src/sys/windows/thread_local_key/tests.rs
index c95f383fb..c739f0caf 100644
--- a/library/std/src/sys/windows/thread_local_key/tests.rs
+++ b/library/std/src/sys/windows/thread_local_key/tests.rs
@@ -1,3 +1,7 @@
+// This file only tests the thread local key fallback.
+// Windows targets with native thread local support do not use this.
+#![cfg(not(target_thread_local))]
+
use super::StaticKey;
use crate::ptr;
diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs
index 6f020940d..84e2c5d8d 100644
--- a/library/std/src/sys_common/backtrace.rs
+++ b/library/std/src/sys_common/backtrace.rs
@@ -60,6 +60,8 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::
bt_fmt.add_context()?;
let mut idx = 0;
let mut res = Ok(());
+ let mut omitted_count: usize = 0;
+ let mut first_omit = true;
// Start immediately if we're not using a short backtrace.
let mut start = print_fmt != PrintFmt::Short;
backtrace_rs::trace_unsynchronized(|frame| {
@@ -85,10 +87,27 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::
start = true;
return;
}
+ if !start {
+ omitted_count += 1;
+ }
}
}
if start {
+ if omitted_count > 0 {
+ debug_assert!(print_fmt == PrintFmt::Short);
+ // Only print the omission message when frames were skipped between printed frames.
+ if !first_omit {
+ let _ = writeln!(
+ bt_fmt.formatter(),
+ " [... omitted {} frame{} ...]",
+ omitted_count,
+ if omitted_count > 1 { "s" } else { "" }
+ );
+ }
+ first_omit = false;
+ omitted_count = 0;
+ }
res = bt_fmt.frame().symbol(frame, symbol);
}
});
diff --git a/library/std/src/sys_common/thread_info.rs b/library/std/src/sys_common/thread_info.rs
index 38c9e5000..88d937a7d 100644
--- a/library/std/src/sys_common/thread_info.rs
+++ b/library/std/src/sys_common/thread_info.rs
@@ -1,5 +1,4 @@
#![allow(dead_code)] // stack_guard isn't used right now on all platforms
-#![allow(unused_unsafe)] // thread_local with `const {}` triggers this liny
use crate::cell::RefCell;
use crate::sys::thread::guard::Guard;
diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs
index 15042fc3b..046674396 100644
--- a/library/std/src/sys_common/thread_parking/id.rs
+++ b/library/std/src/sys_common/thread_parking/id.rs
@@ -56,18 +56,14 @@ impl Parker {
self.init_tid();
// Changes NOTIFIED to EMPTY and EMPTY to PARKED.
- let mut state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
- if state == PARKED {
+ let state = self.state.fetch_sub(1, Acquire);
+ if state == EMPTY {
// Loop to guard against spurious wakeups.
- while state == PARKED {
+ // The state must be reset with acquire ordering to ensure that all
+ // calls to `unpark` synchronize with this thread.
+ while self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_err() {
park(self.state.as_ptr().addr());
- state = self.state.load(Acquire);
}
-
- // Since the state change has already been observed with acquire
- // ordering, the state can be reset with a relaxed store instead
- // of a swap.
- self.state.store(EMPTY, Relaxed);
}
}
@@ -78,8 +74,7 @@ impl Parker {
if state == PARKED {
park_timeout(dur, self.state.as_ptr().addr());
// Swap to ensure that we observe all state changes with acquire
- // ordering, even if the state has been changed after the timeout
- // occurred.
+ // ordering.
self.state.swap(EMPTY, Acquire);
}
}
diff --git a/library/std/src/sys_common/wtf8.rs b/library/std/src/sys_common/wtf8.rs
index c9d3e13cf..67db5ebd8 100644
--- a/library/std/src/sys_common/wtf8.rs
+++ b/library/std/src/sys_common/wtf8.rs
@@ -182,6 +182,15 @@ impl Wtf8Buf {
Wtf8Buf { bytes: Vec::with_capacity(capacity), is_known_utf8: true }
}
+ /// Creates a WTF-8 string from a WTF-8 byte vec.
+ ///
+/// Since the byte vec is not checked for valid WTF-8, this function is
+/// marked unsafe.
+ #[inline]
+ pub unsafe fn from_bytes_unchecked(value: Vec<u8>) -> Wtf8Buf {
+ Wtf8Buf { bytes: value, is_known_utf8: false }
+ }
+
/// Creates a WTF-8 string from a UTF-8 `String`.
///
/// This takes ownership of the `String` and does not copy.
@@ -402,6 +411,12 @@ impl Wtf8Buf {
self.bytes.truncate(new_len)
}
+ /// Consumes the WTF-8 string and converts it into a byte vec.
+ #[inline]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.bytes
+ }
+
/// Consumes the WTF-8 string and tries to convert it to UTF-8.
///
/// This does not copy the data.
@@ -444,6 +459,7 @@ impl Wtf8Buf {
/// Converts this `Wtf8Buf` into a boxed `Wtf8`.
#[inline]
pub fn into_box(self) -> Box<Wtf8> {
+ // SAFETY: relies on `Wtf8` being `repr(transparent)`.
unsafe { mem::transmute(self.bytes.into_boxed_slice()) }
}
@@ -496,6 +512,7 @@ impl Extend<CodePoint> for Wtf8Buf {
/// Similar to `&str`, but can additionally contain surrogate code points
/// if they’re not in a surrogate pair.
#[derive(Eq, Ord, PartialEq, PartialOrd)]
+#[repr(transparent)]
pub struct Wtf8 {
bytes: [u8],
}
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index 1b86d898c..09994e47f 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -313,7 +313,6 @@ impl<T: 'static> LocalKey<Cell<T>> {
/// # Examples
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::Cell;
///
/// thread_local! {
@@ -326,7 +325,7 @@ impl<T: 'static> LocalKey<Cell<T>> {
///
/// assert_eq!(X.get(), 123);
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn set(&'static self, value: T) {
self.initialize_with(Cell::new(value), |value, cell| {
if let Some(value) = value {
@@ -351,7 +350,6 @@ impl<T: 'static> LocalKey<Cell<T>> {
/// # Examples
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::Cell;
///
/// thread_local! {
@@ -360,7 +358,7 @@ impl<T: 'static> LocalKey<Cell<T>> {
///
/// assert_eq!(X.get(), 1);
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn get(&'static self) -> T
where
T: Copy,
@@ -381,7 +379,6 @@ impl<T: 'static> LocalKey<Cell<T>> {
/// # Examples
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::Cell;
///
/// thread_local! {
@@ -391,7 +388,7 @@ impl<T: 'static> LocalKey<Cell<T>> {
/// assert_eq!(X.take(), Some(1));
/// assert_eq!(X.take(), None);
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn take(&'static self) -> T
where
T: Default,
@@ -412,7 +409,6 @@ impl<T: 'static> LocalKey<Cell<T>> {
/// # Examples
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::Cell;
///
/// thread_local! {
@@ -422,7 +418,7 @@ impl<T: 'static> LocalKey<Cell<T>> {
/// assert_eq!(X.replace(2), 1);
/// assert_eq!(X.replace(3), 2);
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn replace(&'static self, value: T) -> T {
self.with(|cell| cell.replace(value))
}
@@ -444,7 +440,6 @@ impl<T: 'static> LocalKey<RefCell<T>> {
/// # Example
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::RefCell;
///
/// thread_local! {
@@ -453,7 +448,7 @@ impl<T: 'static> LocalKey<RefCell<T>> {
///
/// X.with_borrow(|v| assert!(v.is_empty()));
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn with_borrow<F, R>(&'static self, f: F) -> R
where
F: FnOnce(&T) -> R,
@@ -476,7 +471,6 @@ impl<T: 'static> LocalKey<RefCell<T>> {
/// # Example
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::RefCell;
///
/// thread_local! {
@@ -487,7 +481,7 @@ impl<T: 'static> LocalKey<RefCell<T>> {
///
/// X.with_borrow(|v| assert_eq!(*v, vec![1]));
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn with_borrow_mut<F, R>(&'static self, f: F) -> R
where
F: FnOnce(&mut T) -> R,
@@ -511,7 +505,6 @@ impl<T: 'static> LocalKey<RefCell<T>> {
/// # Examples
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::RefCell;
///
/// thread_local! {
@@ -524,7 +517,7 @@ impl<T: 'static> LocalKey<RefCell<T>> {
///
/// X.with_borrow(|v| assert_eq!(*v, vec![1, 2, 3]));
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn set(&'static self, value: T) {
self.initialize_with(RefCell::new(value), |value, cell| {
if let Some(value) = value {
@@ -551,7 +544,6 @@ impl<T: 'static> LocalKey<RefCell<T>> {
/// # Examples
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::RefCell;
///
/// thread_local! {
@@ -566,7 +558,7 @@ impl<T: 'static> LocalKey<RefCell<T>> {
///
/// X.with_borrow(|v| assert!(v.is_empty()));
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn take(&'static self) -> T
where
T: Default,
@@ -586,7 +578,6 @@ impl<T: 'static> LocalKey<RefCell<T>> {
/// # Examples
///
/// ```
- /// #![feature(local_key_cell_methods)]
/// use std::cell::RefCell;
///
/// thread_local! {
@@ -598,7 +589,7 @@ impl<T: 'static> LocalKey<RefCell<T>> {
///
/// X.with_borrow(|v| assert_eq!(*v, vec![1, 2, 3]));
/// ```
- #[unstable(feature = "local_key_cell_methods", issue = "92122")]
+ #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
pub fn replace(&'static self, value: T) -> T {
self.with(|cell| cell.replace(value))
}
diff --git a/library/std/tests/process_spawning.rs b/library/std/tests/process_spawning.rs
new file mode 100644
index 000000000..46dc9ff00
--- /dev/null
+++ b/library/std/tests/process_spawning.rs
@@ -0,0 +1,38 @@
+#![cfg(not(target_env="sgx"))]
+
+use std::env;
+use std::fs;
+use std::process;
+use std::str;
+
+mod common;
+
+#[test]
+fn issue_15149() {
+ // If we're the parent, copy our own binary to a new directory.
+ let my_path = env::current_exe().unwrap();
+
+ let temp = common::tmpdir();
+ let child_dir = temp.join("issue-15140-child");
+ fs::create_dir_all(&child_dir).unwrap();
+
+ let child_path = child_dir.join(&format!("mytest{}", env::consts::EXE_SUFFIX));
+ fs::copy(&my_path, &child_path).unwrap();
+
+ // Append the new directory to our own PATH.
+ let path = {
+ let mut paths: Vec<_> = env::split_paths(&env::var_os("PATH").unwrap()).collect();
+ paths.push(child_dir.to_path_buf());
+ env::join_paths(paths).unwrap()
+ };
+
+ let child_output =
+ process::Command::new("mytest").env("PATH", &path).arg("child").output().unwrap();
+
+ assert!(
+ child_output.status.success(),
+ "child assertion failed\n child stdout:\n {}\n child stderr:\n {}",
+ str::from_utf8(&child_output.stdout).unwrap(),
+ str::from_utf8(&child_output.stderr).unwrap()
+ );
+}
diff --git a/library/std/tests/switch-stdout.rs b/library/std/tests/switch-stdout.rs
new file mode 100644
index 000000000..2605664d2
--- /dev/null
+++ b/library/std/tests/switch-stdout.rs
@@ -0,0 +1,53 @@
+#![cfg(any(target_family = "unix", target_family = "windows"))]
+
+use std::fs::File;
+use std::io::{Read, Write};
+
+mod common;
+
+#[cfg(unix)]
+fn switch_stdout_to(file: File) {
+ use std::os::unix::prelude::*;
+
+ extern "C" {
+ fn dup2(old: i32, new: i32) -> i32;
+ }
+
+ unsafe {
+ assert_eq!(dup2(file.as_raw_fd(), 1), 1);
+ }
+}
+
+#[cfg(windows)]
+fn switch_stdout_to(file: File) {
+ use std::os::windows::prelude::*;
+
+ extern "system" {
+ fn SetStdHandle(nStdHandle: u32, handle: *mut u8) -> i32;
+ }
+
+ const STD_OUTPUT_HANDLE: u32 = (-11i32) as u32;
+
+ unsafe {
+ let rc = SetStdHandle(STD_OUTPUT_HANDLE, file.into_raw_handle() as *mut _);
+ assert!(rc != 0);
+ }
+}
+
+#[test]
+fn switch_stdout() {
+ let temp = common::tmpdir();
+ let path = temp.join("switch-stdout-output");
+ let f = File::create(&path).unwrap();
+
+ let mut stdout = std::io::stdout();
+ stdout.write(b"foo\n").unwrap();
+ stdout.flush().unwrap();
+ switch_stdout_to(f);
+ stdout.write(b"bar\n").unwrap();
+ stdout.flush().unwrap();
+
+ let mut contents = String::new();
+ File::open(&path).unwrap().read_to_string(&mut contents).unwrap();
+ assert_eq!(contents, "bar\n");
+}
diff --git a/library/test/src/formatters/junit.rs b/library/test/src/formatters/junit.rs
index 9f5bf2436..a211ebf1d 100644
--- a/library/test/src/formatters/junit.rs
+++ b/library/test/src/formatters/junit.rs
@@ -32,7 +32,7 @@ fn str_to_cdata(s: &str) -> String {
let escaped_output = s.replace("]]>", "]]]]><![CDATA[>");
let escaped_output = escaped_output.replace("<?", "<]]><![CDATA[?");
// We also smuggle newlines as &#xa so as to keep all the output on one line
- let escaped_output = escaped_output.replace("\n", "]]>&#xA;<![CDATA[");
+ let escaped_output = escaped_output.replace('\n', "]]>&#xA;<![CDATA[");
// Prune empty CDATA blocks resulting from any escaping
let escaped_output = escaped_output.replace("<![CDATA[]]>", "");
format!("<![CDATA[{}]]>", escaped_output)
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index b40b6009e..64d10dd57 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -21,6 +21,7 @@
#![feature(process_exitcode_internals)]
#![feature(panic_can_unwind)]
#![feature(test)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
// Public reexports
pub use self::bench::{black_box, Bencher};
@@ -183,8 +184,7 @@ pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
let test = tests
.into_iter()
- .filter(|test| test.desc.name.as_slice() == name)
- .next()
+ .find(|test| test.desc.name.as_slice() == name)
.unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
let TestDescAndFn { desc, testfn } = test;
match testfn.into_runnable() {
diff --git a/library/test/src/term/terminfo/searcher/tests.rs b/library/test/src/term/terminfo/searcher/tests.rs
index 4227a585e..e1edd3b25 100644
--- a/library/test/src/term/terminfo/searcher/tests.rs
+++ b/library/test/src/term/terminfo/searcher/tests.rs
@@ -6,14 +6,12 @@ fn test_get_dbpath_for_term() {
// woefully inadequate test coverage
// note: current tests won't work with non-standard terminfo hierarchies (e.g., macOS's)
use std::env;
- // FIXME (#9639): This needs to handle non-utf8 paths
- fn x(t: &str) -> String {
- let p = get_dbpath_for_term(t).expect("no terminfo entry found");
- p.to_str().unwrap().to_string()
+ fn x(t: &str) -> PathBuf {
+ get_dbpath_for_term(t).expect(&format!("no terminfo entry found for {t:?}"))
}
- assert!(x("screen") == "/usr/share/terminfo/s/screen");
- assert!(get_dbpath_for_term("") == None);
+ assert_eq!(x("screen"), PathBuf::from("/usr/share/terminfo/s/screen"));
+ assert_eq!(get_dbpath_for_term(""), None);
env::set_var("TERMINFO_DIRS", ":");
- assert!(x("screen") == "/usr/share/terminfo/s/screen");
+ assert_eq!(x("screen"), PathBuf::from("/usr/share/terminfo/s/screen"));
env::remove_var("TERMINFO_DIRS");
}
diff --git a/library/test/src/types.rs b/library/test/src/types.rs
index 504ceee7f..1a8ae889c 100644
--- a/library/test/src/types.rs
+++ b/library/test/src/types.rs
@@ -224,7 +224,7 @@ impl TestDesc {
}
}
- /// Returns None for ignored test or that that are just run, otherwise give a description of the type of test.
+ /// Returns None for ignored test or tests that are just run, otherwise returns a description of the type of test.
/// Descriptions include "should panic", "compile fail" and "compile".
pub fn test_mode(&self) -> Option<&'static str> {
if self.ignore {
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs
index b655bae96..0b4daeafe 100644
--- a/library/unwind/src/lib.rs
+++ b/library/unwind/src/lib.rs
@@ -5,6 +5,7 @@
#![feature(c_unwind)]
#![feature(cfg_target_abi)]
#![cfg_attr(not(target_env = "msvc"), feature(libc))]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
cfg_if::cfg_if! {
if #[cfg(target_env = "msvc")] {
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs
index ec24e1375..a2bfa8e96 100644
--- a/library/unwind/src/libunwind.rs
+++ b/library/unwind/src/libunwind.rs
@@ -51,10 +51,13 @@ pub const unwinder_private_data_size: usize = 5;
#[cfg(target_arch = "m68k")]
pub const unwinder_private_data_size: usize = 2;
-#[cfg(target_arch = "mips")]
+#[cfg(any(target_arch = "mips", target_arch = "mips32r6"))]
pub const unwinder_private_data_size: usize = 2;
-#[cfg(target_arch = "mips64")]
+#[cfg(target_arch = "csky")]
+pub const unwinder_private_data_size: usize = 2;
+
+#[cfg(any(target_arch = "mips64", target_arch = "mips64r6"))]
pub const unwinder_private_data_size: usize = 2;
#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))]