author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:32 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:32 +0000
commit    4547b622d8d29df964fa2914213088b148c498fc (patch)
tree      9fc6b25f3c3add6b745be9a2400a6e96140046e9 /library/core
parent    Releasing progress-linux version 1.66.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core')
 library/core/benches/iter.rs                     |  23
 library/core/benches/lib.rs                      |   2
 library/core/src/alloc/global.rs                 |   8
 library/core/src/alloc/layout.rs                 |  60
 library/core/src/alloc/mod.rs                    |  10
 library/core/src/arch.rs                         |  30
 library/core/src/array/mod.rs                    |  88
 library/core/src/async_iter/async_iter.rs        |   2
 library/core/src/cell.rs                         |   6
 library/core/src/cell/lazy.rs                    |   4
 library/core/src/cell/once.rs                    |  10
 library/core/src/char/convert.rs                 |   1
 library/core/src/char/methods.rs                 |   8
 library/core/src/char/mod.rs                     |   6
 library/core/src/clone.rs                        |   9
 library/core/src/cmp.rs                          |  11
 library/core/src/const_closure.rs                |  30
 library/core/src/convert/mod.rs                  |   8
 library/core/src/convert/num.rs                  |   2
 library/core/src/default.rs                      |   2
 library/core/src/error.md                        |   2
 library/core/src/error.rs                        |   5
 library/core/src/ffi/c_str.rs                    |   7
 library/core/src/fmt/mod.rs                      |   3
 library/core/src/future/mod.rs                   |  28
 library/core/src/hash/mod.rs                     |  83
 library/core/src/hash/sip.rs                     |  36
 library/core/src/hint.rs                         |   3
 library/core/src/intrinsics.rs                   | 146
 library/core/src/intrinsics/mir.rs               | 289
 library/core/src/iter/adapters/array_chunks.rs   |  75
 library/core/src/iter/adapters/take.rs           |  21
 library/core/src/iter/mod.rs                     |   2
 library/core/src/iter/sources.rs                 |   4
 library/core/src/iter/sources/repeat_n.rs        | 195
 library/core/src/iter/sources/repeat_with.rs     |  17
 library/core/src/iter/traits/iterator.rs         |   2
 library/core/src/lib.rs                          |  45
 library/core/src/macros/mod.rs                   |  47
 library/core/src/marker.rs                       |  14
 library/core/src/mem/maybe_uninit.rs             |   4
 library/core/src/mem/mod.rs                      |  73
 library/core/src/num/flt2dec/strategy/dragon.rs  |   2
 library/core/src/num/int_macros.rs               |  63
 library/core/src/num/mod.rs                      |   5
 library/core/src/num/nonzero.rs                  |  39
 library/core/src/num/uint_macros.rs              |  49
 library/core/src/ops/control_flow.rs             |   4
 library/core/src/ops/deref.rs                    |   2
 library/core/src/ops/function.rs                 | 332
 library/core/src/ops/index.rs                    |   4
 library/core/src/option.rs                       |  68
 library/core/src/panic.rs                        |   1
 library/core/src/panicking.rs                    |  31
 library/core/src/pin.rs                          |  59
 library/core/src/prelude/v1.rs                   |  15
 library/core/src/primitive_docs.rs               |   2
 library/core/src/ptr/alignment.rs                |  24
 library/core/src/ptr/const_ptr.rs                | 338
 library/core/src/ptr/metadata.rs                 |   1
 library/core/src/ptr/mod.rs                      | 111
 library/core/src/ptr/mut_ptr.rs                  | 350
 library/core/src/ptr/non_null.rs                 |   8
 library/core/src/slice/index.rs                  |  21
 library/core/src/slice/iter.rs                   |  32
 library/core/src/slice/mod.rs                    | 173
 library/core/src/str/converts.rs                 |   2
 library/core/src/str/mod.rs                      |  16
 library/core/src/str/pattern.rs                  | 234
 library/core/src/task/poll.rs                    |   1
 library/core/src/tuple.rs                        |  11
 library/core/tests/any.rs                        |  18
 library/core/tests/fmt/float.rs                  | 124
 library/core/tests/hash/mod.rs                   |  38
 library/core/tests/hash/sip.rs                   |  15
 library/core/tests/iter/adapters/array_chunks.rs |   3
 library/core/tests/iter/adapters/take.rs         |  20
 library/core/tests/iter/sources.rs               |  49
 library/core/tests/lib.rs                        |  10
 library/core/tests/mem.rs                        |  20
 library/core/tests/num/flt2dec/mod.rs            |   4
 library/core/tests/option.rs                     |   2
 library/core/tests/ptr.rs                        | 291
 library/core/tests/slice.rs                      |  60
84 files changed, 3529 insertions(+), 544 deletions(-)
diff --git a/library/core/benches/iter.rs b/library/core/benches/iter.rs
index 38887f29a..9193c79be 100644
--- a/library/core/benches/iter.rs
+++ b/library/core/benches/iter.rs
@@ -1,3 +1,4 @@
+use core::borrow::Borrow;
use core::iter::*;
use core::mem;
use core::num::Wrapping;
@@ -403,13 +404,31 @@ fn bench_trusted_random_access_adapters(b: &mut Bencher) {
/// Exercises the iter::Copied specialization for slice::Iter
#[bench]
-fn bench_copied_array_chunks(b: &mut Bencher) {
+fn bench_copied_chunks(b: &mut Bencher) {
+ let v = vec![1u8; 1024];
+
+ b.iter(|| {
+ let mut iter = black_box(&v).iter().copied();
+ let mut acc = Wrapping(0);
+ // This uses a while-let loop to side-step the TRA specialization in ArrayChunks
+ while let Ok(chunk) = iter.next_chunk::<{ mem::size_of::<u64>() }>() {
+ let d = u64::from_ne_bytes(chunk);
+ acc += Wrapping(d.rotate_left(7).wrapping_add(1));
+ }
+ acc
+ })
+}
+
+/// Exercises the TrustedRandomAccess specialization in ArrayChunks
+#[bench]
+fn bench_trusted_random_access_chunks(b: &mut Bencher) {
let v = vec![1u8; 1024];
b.iter(|| {
black_box(&v)
.iter()
- .copied()
+ // this shows that we're not relying on the slice::Iter specialization in Copied
+ .map(|b| *b.borrow())
.array_chunks::<{ mem::size_of::<u64>() }>()
.map(|ary| {
let d = u64::from_ne_bytes(ary);
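For context, a standalone sketch of the two iteration styles these benches compare, assuming a nightly toolchain with the `iter_array_chunks` and `iter_next_chunk` features enabled (the same gates the bench crate turns on below):

```rust
#![feature(iter_array_chunks)]
#![feature(iter_next_chunk)]

fn main() {
    let v = [1u8; 16];

    // Adapter style: ArrayChunks can take the TrustedRandomAccess fast path.
    let total: u64 = v
        .iter()
        .copied()
        .array_chunks::<8>()
        .map(|chunk| u64::from_ne_bytes(chunk))
        .sum();

    // Manual style: a while-let over next_chunk side-steps that fast path.
    let mut iter = v.iter().copied();
    let mut manual = 0u64;
    while let Ok(chunk) = iter.next_chunk::<8>() {
        manual += u64::from_ne_bytes(chunk);
    }

    assert_eq!(total, manual);
}
```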
diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs
index 1e462e3fc..f1244d932 100644
--- a/library/core/benches/lib.rs
+++ b/library/core/benches/lib.rs
@@ -1,10 +1,10 @@
// wasm32 does not support benches (no time).
#![cfg(not(target_arch = "wasm32"))]
#![feature(flt2dec)]
-#![feature(int_log)]
#![feature(test)]
#![feature(trusted_random_access)]
#![feature(iter_array_chunks)]
+#![feature(iter_next_chunk)]
extern crate test;
diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs
index 6756eecd0..1d80b8bf9 100644
--- a/library/core/src/alloc/global.rs
+++ b/library/core/src/alloc/global.rs
@@ -208,9 +208,11 @@ pub unsafe trait GlobalAlloc {
///
/// If this returns a non-null pointer, then ownership of the memory block
/// referenced by `ptr` has been transferred to this allocator.
- /// The memory may or may not have been deallocated, and should be
- /// considered unusable. The new memory block is allocated with `layout`,
- /// but with the `size` updated to `new_size`. This new layout should be
+ /// Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation remained in-place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
+ /// The new memory block is allocated with `layout`,
+ /// but with the `size` updated to `new_size`. This new layout must be
/// used when deallocating the new memory block with `dealloc`. The range
/// `0..min(layout.size(), new_size)` of the new memory block is
/// guaranteed to have the same values as the original block.
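A hedged sketch of the tightened `realloc` contract, through the stable `std::alloc` front end: after a successful call, only the returned pointer may be used, even when the block did not move.

```rust
use std::alloc::{alloc, dealloc, realloc, Layout};

fn main() {
    unsafe {
        let layout = Layout::array::<u8>(16).unwrap();
        let old = alloc(layout);
        assert!(!old.is_null());

        let new = realloc(old, layout, 32);
        assert!(!new.is_null());
        // From here on, touching `old` is UB even if `new == old`;
        // the block is reachable only through `new`.
        new.write(42);

        // Deallocate with the original alignment and the updated size.
        dealloc(new, Layout::from_size_align(32, layout.align()).unwrap());
    }
}
```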
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 920e559cc..ac3d84718 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -7,8 +7,8 @@
use crate::cmp;
use crate::error::Error;
use crate::fmt;
-use crate::mem::{self, ValidAlign};
-use crate::ptr::NonNull;
+use crate::mem;
+use crate::ptr::{Alignment, NonNull};
// While this function is used in one place and its implementation
// could be inlined, the previous attempts to do so made rustc
@@ -46,7 +46,7 @@ pub struct Layout {
//
// (However, we do not analogously require `align >= sizeof(void*)`,
// even though that is *also* a requirement of `posix_memalign`.)
- align: ValidAlign,
+ align: Alignment,
}
impl Layout {
@@ -71,11 +71,11 @@ impl Layout {
}
// SAFETY: just checked that align is a power of two.
- Layout::from_size_valid_align(size, unsafe { ValidAlign::new_unchecked(align) })
+ Layout::from_size_alignment(size, unsafe { Alignment::new_unchecked(align) })
}
#[inline(always)]
- const fn max_size_for_align(align: ValidAlign) -> usize {
+ const fn max_size_for_align(align: Alignment) -> usize {
// (power-of-two implies align != 0.)
// Rounded up size is:
@@ -95,7 +95,7 @@ impl Layout {
/// Internal helper constructor to skip revalidating alignment validity.
#[inline]
- const fn from_size_valid_align(size: usize, align: ValidAlign) -> Result<Self, LayoutError> {
+ const fn from_size_alignment(size: usize, align: Alignment) -> Result<Self, LayoutError> {
if size > Self::max_size_for_align(align) {
return Err(LayoutError);
}
@@ -117,7 +117,7 @@ impl Layout {
#[rustc_allow_const_fn_unstable(ptr_alignment_type)]
pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
// SAFETY: the caller is required to uphold the preconditions.
- unsafe { Layout { size, align: ValidAlign::new_unchecked(align) } }
+ unsafe { Layout { size, align: Alignment::new_unchecked(align) } }
}
/// The minimum size in bytes for a memory block of this layout.
@@ -157,9 +157,10 @@ impl Layout {
/// allocate backing structure for `T` (which could be a trait
/// or other unsized type like a slice).
#[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[must_use]
#[inline]
- pub fn for_value<T: ?Sized>(t: &T) -> Self {
+ pub const fn for_value<T: ?Sized>(t: &T) -> Self {
let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
// SAFETY: see rationale in `new` for why this is using the unsafe variant
unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -191,8 +192,9 @@ impl Layout {
/// [trait object]: ../../book/ch17-02-trait-objects.html
/// [extern type]: ../../unstable-book/language-features/extern-types.html
#[unstable(feature = "layout_for_ptr", issue = "69835")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[must_use]
- pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
+ pub const unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
// SAFETY: we pass along the prerequisites of these functions to the caller
let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
// SAFETY: see rationale in `new` for why this is using the unsafe variant
@@ -229,8 +231,9 @@ impl Layout {
/// Returns an error if the combination of `self.size()` and the given
/// `align` violates the conditions listed in [`Layout::from_size_align`].
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn align_to(&self, align: usize) -> Result<Self, LayoutError> {
+ pub const fn align_to(&self, align: usize) -> Result<Self, LayoutError> {
Layout::from_size_align(self.size(), cmp::max(self.align(), align))
}
@@ -287,10 +290,11 @@ impl Layout {
/// This is equivalent to adding the result of `padding_needed_for`
/// to the layout's current size.
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[must_use = "this returns a new `Layout`, \
without modifying the original"]
#[inline]
- pub fn pad_to_align(&self) -> Layout {
+ pub const fn pad_to_align(&self) -> Layout {
let pad = self.padding_needed_for(self.align());
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
@@ -311,8 +315,9 @@ impl Layout {
///
/// On arithmetic overflow, returns `LayoutError`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> {
+ pub const fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> {
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
// > must not overflow isize (i.e., the rounded value must be
@@ -321,7 +326,8 @@ impl Layout {
let alloc_size = padded_size.checked_mul(n).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- Layout::from_size_valid_align(alloc_size, self.align).map(|layout| (layout, padded_size))
+ let layout = Layout::from_size_alignment(alloc_size, self.align)?;
+ Ok((layout, padded_size))
}
/// Creates a layout describing the record for `self` followed by
@@ -370,8 +376,9 @@ impl Layout {
/// # assert_eq!(repr_c(&[u64, u32, u16, u32]), Ok((s, vec![0, 8, 12, 16])));
/// ```
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> {
+ pub const fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> {
let new_align = cmp::max(self.align, next.align);
let pad = self.padding_needed_for(next.align());
@@ -379,7 +386,7 @@ impl Layout {
let new_size = offset.checked_add(next.size()).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- let layout = Layout::from_size_valid_align(new_size, new_align)?;
+ let layout = Layout::from_size_alignment(new_size, new_align)?;
Ok((layout, offset))
}
@@ -396,11 +403,12 @@ impl Layout {
///
/// On arithmetic overflow, returns `LayoutError`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> {
+ pub const fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> {
let size = self.size().checked_mul(n).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- Layout::from_size_valid_align(size, self.align)
+ Layout::from_size_alignment(size, self.align)
}
/// Creates a layout describing the record for `self` followed by
@@ -410,11 +418,12 @@ impl Layout {
///
/// On arithmetic overflow, returns `LayoutError`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> {
+ pub const fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> {
let new_size = self.size().checked_add(next.size()).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- Layout::from_size_valid_align(new_size, self.align)
+ Layout::from_size_alignment(new_size, self.align)
}
/// Creates a layout describing the record for a `[T; n]`.
@@ -422,13 +431,18 @@ impl Layout {
/// On arithmetic overflow or when the total size would exceed
/// `isize::MAX`, returns `LayoutError`.
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
+ pub const fn array<T>(n: usize) -> Result<Self, LayoutError> {
// Reduce the amount of code we need to monomorphize per `T`.
- return inner(mem::size_of::<T>(), ValidAlign::of::<T>(), n);
+ return inner(mem::size_of::<T>(), Alignment::of::<T>(), n);
#[inline]
- fn inner(element_size: usize, align: ValidAlign, n: usize) -> Result<Layout, LayoutError> {
+ const fn inner(
+ element_size: usize,
+ align: Alignment,
+ n: usize,
+ ) -> Result<Layout, LayoutError> {
// We need to check two things about the size:
// - That the total size won't overflow a `usize`, and
// - That the total size still fits in an `isize`.
@@ -443,7 +457,7 @@ impl Layout {
// SAFETY: We just checked above that the `array_size` will not
// exceed `isize::MAX` even when rounded up to the alignment.
- // And `ValidAlign` guarantees it's a power of two.
+ // And `Alignment` guarantees it's a power of two.
unsafe { Ok(Layout::from_size_align_unchecked(array_size, align.as_usize())) }
}
}
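Most of these hunks only add `const` to existing `Layout` methods (behind the unstable `const_alloc_layout` feature) and rename the internal `ValidAlign` type to `Alignment`; the public behavior is unchanged. A small sketch of the stable, non-const API for reference:

```rust
use std::alloc::Layout;

fn main() {
    let header = Layout::new::<u64>();            // size 8, align 8
    let body = Layout::array::<u16>(3).unwrap();  // size 6, align 2

    // `extend` appends `body` after `header`, inserting padding as needed,
    // and returns the field offset of `body` within the combined record.
    let (record, offset) = header.extend(body).unwrap();
    assert_eq!(offset, 8);
    assert_eq!(record.align(), 8);
    assert_eq!(record.pad_to_align().size(), 16); // 8 + 6, rounded up to align 8
}
```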
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
index a4bf6a853..a6082455f 100644
--- a/library/core/src/alloc/mod.rs
+++ b/library/core/src/alloc/mod.rs
@@ -169,8 +169,9 @@ pub unsafe trait Allocator {
/// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
- /// transferred to this allocator. The memory may or may not have been freed, and should be
- /// considered unusable.
+ /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation was grown in-place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
@@ -295,8 +296,9 @@ pub unsafe trait Allocator {
/// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
- /// transferred to this allocator. The memory may or may not have been freed, and should be
- /// considered unusable.
+ /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
diff --git a/library/core/src/arch.rs b/library/core/src/arch.rs
new file mode 100644
index 000000000..fc2a5b89c
--- /dev/null
+++ b/library/core/src/arch.rs
@@ -0,0 +1,30 @@
+#![doc = include_str!("../../stdarch/crates/core_arch/src/core_arch_docs.md")]
+
+#[stable(feature = "simd_arch", since = "1.27.0")]
+pub use crate::core_arch::arch::*;
+
+/// Inline assembly.
+///
+/// Refer to [rust by example] for a usage guide and the [reference] for
+/// detailed information about the syntax and available options.
+///
+/// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
+/// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
+#[stable(feature = "asm", since = "1.59.0")]
+#[rustc_builtin_macro]
+pub macro asm("assembly template", $(operands,)* $(options($(option),*))?) {
+ /* compiler built-in */
+}
+
+/// Module-level inline assembly.
+///
+/// Refer to [rust by example] for a usage guide and the [reference] for
+/// detailed information about the syntax and available options.
+///
+/// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
+/// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
+#[stable(feature = "global_asm", since = "1.59.0")]
+#[rustc_builtin_macro]
+pub macro global_asm("assembly template", $(operands,)* $(options($(option),*))?) {
+ /* compiler built-in */
+}
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index eae0e1c76..94a1a1d32 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -23,7 +23,8 @@ mod iter;
#[stable(feature = "array_value_iter", since = "1.51.0")]
pub use iter::IntoIter;
-/// Creates an array `[T; N]` where each array element `T` is returned by the `cb` call.
+/// Creates an array of type [T; N], where each element `T` is the returned value from `cb`
+/// using that element's index.
///
/// # Arguments
///
@@ -36,8 +37,18 @@ pub use iter::IntoIter;
/// // elements to produce is the length of array down there: only arrays of
/// // equal lengths can be compared, so the const generic parameter `N` is
/// // inferred to be 5, thus creating array of 5 elements.
+///
/// let array = core::array::from_fn(|i| i);
+/// // indexes are: 0 1 2 3 4
/// assert_eq!(array, [0, 1, 2, 3, 4]);
+///
+/// let array2: [usize; 8] = core::array::from_fn(|i| i * 2);
+/// // indexes are: 0 1 2 3 4 5 6 7
+/// assert_eq!(array2, [0, 2, 4, 6, 8, 10, 12, 14]);
+///
+/// let bool_arr = core::array::from_fn::<_, 5, _>(|i| i % 2 == 0);
+/// // indexes are: 0 1 2 3 4
+/// assert_eq!(bool_arr, [true, false, true, false, true]);
/// ```
#[inline]
#[stable(feature = "array_from_fn", since = "1.63.0")]
@@ -865,24 +876,6 @@ where
return Ok(Try::from_output(unsafe { mem::zeroed() }));
}
- struct Guard<'a, T, const N: usize> {
- array_mut: &'a mut [MaybeUninit<T>; N],
- initialized: usize,
- }
-
- impl<T, const N: usize> Drop for Guard<'_, T, N> {
- fn drop(&mut self) {
- debug_assert!(self.initialized <= N);
-
- // SAFETY: this slice will contain only initialized objects.
- unsafe {
- crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(
- &mut self.array_mut.get_unchecked_mut(..self.initialized),
- ));
- }
- }
- }
-
let mut array = MaybeUninit::uninit_array::<N>();
let mut guard = Guard { array_mut: &mut array, initialized: 0 };
@@ -896,13 +889,11 @@ where
ControlFlow::Continue(elem) => elem,
};
- // SAFETY: `guard.initialized` starts at 0, is increased by one in the
- // loop and the loop is aborted once it reaches N (which is
- // `array.len()`).
+ // SAFETY: `guard.initialized` starts at 0, which means push can be called
+ // at most N times, which this loop does.
unsafe {
- guard.array_mut.get_unchecked_mut(guard.initialized).write(item);
+ guard.push_unchecked(item);
}
- guard.initialized += 1;
}
None => {
let alive = 0..guard.initialized;
@@ -920,6 +911,55 @@ where
Ok(Try::from_output(output))
}
+/// Panic guard for incremental initialization of arrays.
+///
+/// Disarm the guard with `mem::forget` once the array has been initialized.
+///
+/// # Safety
+///
+/// All write accesses to this structure are unsafe and must maintain a correct
+/// count of `initialized` elements.
+///
+/// To minimize indirection fields are still pub but callers should at least use
+/// `push_unchecked` to signal that something unsafe is going on.
+pub(crate) struct Guard<'a, T, const N: usize> {
+ /// The array to be initialized.
+ pub array_mut: &'a mut [MaybeUninit<T>; N],
+ /// The number of items that have been initialized so far.
+ pub initialized: usize,
+}
+
+impl<T, const N: usize> Guard<'_, T, N> {
+ /// Adds an item to the array and updates the initialized item counter.
+ ///
+ /// # Safety
+ ///
+ /// No more than N elements must be initialized.
+ #[inline]
+ pub unsafe fn push_unchecked(&mut self, item: T) {
+ // SAFETY: If `initialized` was correct before and the caller does not
+ // invoke this method more than N times then writes will be in-bounds
+ // and slots will not be initialized more than once.
+ unsafe {
+ self.array_mut.get_unchecked_mut(self.initialized).write(item);
+ self.initialized = self.initialized.unchecked_add(1);
+ }
+ }
+}
+
+impl<T, const N: usize> Drop for Guard<'_, T, N> {
+ fn drop(&mut self) {
+ debug_assert!(self.initialized <= N);
+
+ // SAFETY: this slice will contain only initialized objects.
+ unsafe {
+ crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(
+ &mut self.array_mut.get_unchecked_mut(..self.initialized),
+ ));
+ }
+ }
+}
+
/// Returns the next chunk of `N` items from the iterator or errors with an
/// iterator over the remainder. Used for `Iterator::next_chunk`.
#[inline]
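The deleted local `Guard` above reappears as a shared `pub(crate)` drop guard with a `push_unchecked` helper. A standalone sketch of the same panic-guard pattern using only stable APIs (all names here are illustrative, not the ones in the diff):

```rust
use std::mem::{self, MaybeUninit};

struct PartialInit<'a, T, const N: usize> {
    slots: &'a mut [MaybeUninit<T>; N],
    initialized: usize,
}

impl<T, const N: usize> Drop for PartialInit<'_, T, N> {
    fn drop(&mut self) {
        for slot in &mut self.slots[..self.initialized] {
            // SAFETY: exactly the first `initialized` slots were written.
            unsafe { slot.assume_init_drop() };
        }
    }
}

fn init_array<T, const N: usize>(mut f: impl FnMut(usize) -> T) -> [T; N] {
    // An array of MaybeUninit needs no initialization itself.
    let mut slots: [MaybeUninit<T>; N] = unsafe { MaybeUninit::uninit().assume_init() };
    let mut guard = PartialInit { slots: &mut slots, initialized: 0 };
    for i in 0..N {
        guard.slots[i].write(f(i)); // if f panics, the guard drops the prefix
        guard.initialized += 1;
    }
    mem::forget(guard); // fully initialized: disarm the guard
    // SAFETY: all N slots were initialized in the loop above.
    unsafe { slots.as_ptr().cast::<[T; N]>().read() }
}

fn main() {
    let a: [String; 4] = init_array(|i| i.to_string());
    assert_eq!(a, ["0", "1", "2", "3"]);
}
```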
diff --git a/library/core/src/async_iter/async_iter.rs b/library/core/src/async_iter/async_iter.rs
index 016a3685e..12a47f9fc 100644
--- a/library/core/src/async_iter/async_iter.rs
+++ b/library/core/src/async_iter/async_iter.rs
@@ -2,7 +2,7 @@ use crate::ops::DerefMut;
use crate::pin::Pin;
use crate::task::{Context, Poll};
-/// An interface for dealing with asynchronous iterators.
+/// A trait for dealing with asynchronous iterators.
///
/// This is the main async iterator trait. For more about the concept of async iterators
/// generally, please see the [module-level documentation]. In particular, you
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 7bf32cb0d..47cce2aa3 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -1025,7 +1025,7 @@ impl<T: ?Sized> RefCell<T> {
///
/// Since this method borrows `RefCell` mutably, it is statically guaranteed
/// that no borrows to the underlying data exist. The dynamic checks inherent
- /// in [`borrow_mut`] and most other methods of `RefCell` are therefor
+ /// in [`borrow_mut`] and most other methods of `RefCell` are therefore
/// unnecessary.
///
/// This method can only be called if `RefCell` can be mutably borrowed,
@@ -1856,7 +1856,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
/// }
/// ```
///
-/// Coverting in the other direction from a `&mut T`
+/// Converting in the other direction from a `&mut T`
/// to an `&UnsafeCell<T>` is allowed:
///
/// ```rust
@@ -1936,7 +1936,7 @@ impl<T> UnsafeCell<T> {
/// Constructs a new instance of `UnsafeCell` which will wrap the specified
/// value.
///
- /// All access to the inner value through methods is `unsafe`.
+ /// All access to the inner value through `&UnsafeCell<T>` requires `unsafe` code.
///
/// # Examples
///
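The reworded `UnsafeCell::new` doc separates construction (safe) from access through a shared reference (unsafe). A minimal sketch:

```rust
use std::cell::UnsafeCell;

fn main() {
    let cell = UnsafeCell::new(5); // constructing is safe
    // SAFETY: no other reference to the contents exists here.
    unsafe { *cell.get() = 6 };    // access via &UnsafeCell<T> needs unsafe
    assert_eq!(cell.into_inner(), 6);
}
```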
diff --git a/library/core/src/cell/lazy.rs b/library/core/src/cell/lazy.rs
index 7844be5f7..b355d94ce 100644
--- a/library/core/src/cell/lazy.rs
+++ b/library/core/src/cell/lazy.rs
@@ -4,6 +4,10 @@ use crate::ops::Deref;
/// A value which is initialized on the first access.
///
+/// For a thread-safe version of this struct, see [`std::sync::LazyLock`].
+///
+/// [`std::sync::LazyLock`]: ../../std/sync/struct.LazyLock.html
+///
/// # Examples
///
/// ```
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
index 3c39394dd..8c01643c7 100644
--- a/library/core/src/cell/once.rs
+++ b/library/core/src/cell/once.rs
@@ -4,8 +4,14 @@ use crate::mem;
/// A cell which can be written to only once.
///
-/// Unlike `RefCell`, a `OnceCell` only provides shared `&T` references to its value.
-/// Unlike `Cell`, a `OnceCell` doesn't require copying or replacing the value to access it.
+/// Unlike [`RefCell`], a `OnceCell` only provides shared `&T` references to its value.
+/// Unlike [`Cell`], a `OnceCell` doesn't require copying or replacing the value to access it.
+///
+/// For a thread-safe version of this struct, see [`std::sync::OnceLock`].
+///
+/// [`RefCell`]: crate::cell::RefCell
+/// [`Cell`]: crate::cell::Cell
+/// [`std::sync::OnceLock`]: ../../std/sync/struct.OnceLock.html
///
/// # Examples
///
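As a usage sketch of the `OnceCell` semantics described above (behind the unstable `once_cell` feature at this version; stabilized in a later release):

```rust
// Needs #![feature(once_cell)] on a 1.67 nightly; OnceCell was
// stabilized in core::cell in a later release.
use core::cell::OnceCell;

fn main() {
    let cell: OnceCell<String> = OnceCell::new();
    assert!(cell.get().is_none());

    // The first write wins; later writes are rejected and the value kept.
    assert!(cell.set("hello".to_string()).is_ok());
    assert!(cell.set("world".to_string()).is_err());
    assert_eq!(cell.get().map(String::as_str), Some("hello"));
}
```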
diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs
index 7c5f82f5e..f1a51a550 100644
--- a/library/core/src/char/convert.rs
+++ b/library/core/src/char/convert.rs
@@ -18,7 +18,6 @@ pub(super) const fn from_u32(i: u32) -> Option<char> {
}
/// Converts a `u32` to a `char`, ignoring validity. See [`char::from_u32_unchecked`].
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
#[inline]
#[must_use]
pub(super) const unsafe fn from_u32_unchecked(i: u32) -> char {
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index bb8359936..3e7383b4c 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -140,7 +140,7 @@ impl char {
/// assert_eq!(None, c);
/// ```
#[stable(feature = "assoc_char_funcs", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_u32(i: u32) -> Option<char> {
@@ -183,7 +183,7 @@ impl char {
/// assert_eq!('❤', c);
/// ```
#[stable(feature = "assoc_char_funcs", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_unstable(feature = "const_char_from_u32_unchecked", issue = "89259")]
#[must_use]
#[inline]
pub const unsafe fn from_u32_unchecked(i: u32) -> char {
@@ -241,7 +241,7 @@ impl char {
/// let _c = char::from_digit(1, 37);
/// ```
#[stable(feature = "assoc_char_funcs", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_digit(num: u32, radix: u32) -> Option<char> {
@@ -338,7 +338,7 @@ impl char {
/// let _ = '1'.to_digit(37);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs
index b34a71216..af98059cf 100644
--- a/library/core/src/char/mod.rs
+++ b/library/core/src/char/mod.rs
@@ -110,7 +110,7 @@ pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::Into
/// Converts a `u32` to a `char`. Use [`char::from_u32`] instead.
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_u32(i: u32) -> Option<char> {
@@ -120,7 +120,7 @@ pub const fn from_u32(i: u32) -> Option<char> {
/// Converts a `u32` to a `char`, ignoring validity. Use [`char::from_u32_unchecked`].
/// instead.
#[stable(feature = "char_from_unchecked", since = "1.5.0")]
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[rustc_const_unstable(feature = "const_char_from_u32_unchecked", issue = "89259")]
#[must_use]
#[inline]
pub const unsafe fn from_u32_unchecked(i: u32) -> char {
@@ -130,7 +130,7 @@ pub const unsafe fn from_u32_unchecked(i: u32) -> char {
/// Converts a digit in the given radix to a `char`. Use [`char::from_digit`] instead.
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_digit(num: u32, radix: u32) -> Option<char> {
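These attribute flips record the 1.67 const-stabilization of the checked char conversions, while `from_u32_unchecked` stays unstable under its own `const_char_from_u32_unchecked` gate. A sketch of what becomes possible in const contexts:

```rust
// All three calls are const-stable as of 1.67 per the hunks above.
const HEART: Option<char> = char::from_u32(0x2764);
const SEVEN: Option<char> = char::from_digit(7, 10);
const ONE: Option<u32> = '1'.to_digit(10);

fn main() {
    assert_eq!(HEART, Some('❤'));
    assert_eq!(SEVEN, Some('7'));
    assert_eq!(ONE, Some(1));
}
```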
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index 06dca7e59..398437d9a 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -176,7 +176,6 @@ pub struct AssertParamIsCopy<T: Copy + ?Sized> {
/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
/// in `rustc_trait_selection`.
mod impls {
-
use super::Clone;
macro_rules! impl_clone {
@@ -185,7 +184,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl const Clone for $t {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
@@ -213,7 +212,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for *const T {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
@@ -222,7 +221,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for *mut T {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
@@ -232,7 +231,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for &T {
- #[inline]
+ #[inline(always)]
#[rustc_diagnostic_item = "noop_method_clone"]
fn clone(&self) -> Self {
*self
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index f0fa2e1d2..949896e57 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -24,12 +24,12 @@
use crate::const_closure::ConstFnMutClosure;
use crate::marker::Destruct;
+#[cfg(bootstrap)]
use crate::marker::StructuralPartialEq;
use self::Ordering::*;
-/// Trait for equality comparisons which are [partial equivalence
-/// relations](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
+/// Trait for equality comparisons.
///
/// `x.eq(y)` can also be written `x == y`, and `x.ne(y)` can be written `x != y`.
/// We use the easier-to-read infix notation in the remainder of this documentation.
@@ -37,6 +37,8 @@ use self::Ordering::*;
/// This trait allows for partial equality, for types that do not have a full
/// equivalence relation. For example, in floating point numbers `NaN != NaN`,
/// so floating point types implement `PartialEq` but not [`trait@Eq`].
+/// Formally speaking, when `Rhs == Self`, this trait corresponds to a [partial equivalence
+/// relation](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
///
/// Implementations must ensure that `eq` and `ne` are consistent with each other:
///
@@ -331,6 +333,7 @@ pub struct AssertParamIsEq<T: Eq + ?Sized> {
/// assert_eq!(Ordering::Greater, result);
/// ```
#[derive(Clone, Copy, Eq, Debug, Hash)]
+#[cfg_attr(not(bootstrap), derive_const(PartialOrd, Ord, PartialEq))]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(i8)]
pub enum Ordering {
@@ -877,10 +880,12 @@ pub macro Ord($item:item) {
}
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg(bootstrap)]
impl StructuralPartialEq for Ordering {}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg(bootstrap)]
impl const PartialEq for Ordering {
#[inline]
fn eq(&self, other: &Self) -> bool {
@@ -890,6 +895,7 @@ impl const PartialEq for Ordering {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg(bootstrap)]
impl const Ord for Ordering {
#[inline]
fn cmp(&self, other: &Ordering) -> Ordering {
@@ -899,6 +905,7 @@ impl const Ord for Ordering {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg(bootstrap)]
impl const PartialOrd for Ordering {
#[inline]
fn partial_cmp(&self, other: &Ordering) -> Option<Ordering> {
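Among the cmp.rs changes, the `PartialEq` docs now state the partial-equivalence property formally only for `Rhs == Self`. The canonical float illustration, as a sketch:

```rust
fn main() {
    let nan = f64::NAN;
    assert!(nan != nan); // reflexivity fails for floats, so f64 is PartialEq but not Eq
    assert!(1.0_f64 == 1.0);
}
```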
diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs
index 9e9c02093..151c8e6d8 100644
--- a/library/core/src/const_closure.rs
+++ b/library/core/src/const_closure.rs
@@ -1,4 +1,6 @@
use crate::marker::Destruct;
+#[cfg(not(bootstrap))]
+use crate::marker::Tuple;
/// Struct representing a closure with mutably borrowed data.
///
@@ -44,6 +46,7 @@ impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData,
macro_rules! impl_fn_mut_tuple {
($($var:ident)*) => {
+ #[cfg(bootstrap)]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
@@ -56,6 +59,7 @@ macro_rules! impl_fn_mut_tuple {
self.call_mut(args)
}
}
+ #[cfg(bootstrap)]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
@@ -68,6 +72,32 @@ macro_rules! impl_fn_mut_tuple {
(self.func)(($($var),*), args)
}
}
+ #[cfg(not(bootstrap))]
+ #[allow(unused_parens)]
+ impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
+ FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
+ where
+ Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct,
+ {
+ type Output = ClosureReturnValue;
+
+ extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output {
+ self.call_mut(args)
+ }
+ }
+ #[cfg(not(bootstrap))]
+ #[allow(unused_parens)]
+ impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
+ FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
+ where
+ Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output {
+ #[allow(non_snake_case)]
+ let ($($var),*) = &mut self.data;
+ (self.func)(($($var),*), args)
+ }
+ }
};
}
impl_fn_mut_tuple!(A);
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index 33493964b..f95b880df 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -99,7 +99,7 @@ pub use num::FloatToInt;
/// ```
#[stable(feature = "convert_id", since = "1.33.0")]
#[rustc_const_stable(feature = "const_identity", since = "1.33.0")]
-#[inline]
+#[inline(always)]
pub const fn identity<T>(x: T) -> T {
x
}
@@ -789,6 +789,7 @@ where
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> AsRef<[T]> for [T] {
+ #[inline(always)]
fn as_ref(&self) -> &[T] {
self
}
@@ -796,6 +797,7 @@ impl<T> AsRef<[T]> for [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> AsMut<[T]> for [T] {
+ #[inline(always)]
fn as_mut(&mut self) -> &mut [T] {
self
}
@@ -803,7 +805,7 @@ impl<T> AsMut<[T]> for [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<str> for str {
- #[inline]
+ #[inline(always)]
fn as_ref(&self) -> &str {
self
}
@@ -811,7 +813,7 @@ impl AsRef<str> for str {
#[stable(feature = "as_mut_str_for_str", since = "1.51.0")]
impl AsMut<str> for str {
- #[inline]
+ #[inline(always)]
fn as_mut(&mut self) -> &mut str {
self
}
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index 4fa5d129b..9c0d7e9a1 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -49,7 +49,7 @@ macro_rules! impl_from {
// Rustdocs on the impl block show a "[+] show undocumented items" toggle.
// Rustdocs on functions do not.
#[doc = $doc]
- #[inline]
+ #[inline(always)]
fn from(small: $Small) -> Self {
small as Self
}
diff --git a/library/core/src/default.rs b/library/core/src/default.rs
index a5b4e9655..d96b53de0 100644
--- a/library/core/src/default.rs
+++ b/library/core/src/default.rs
@@ -99,7 +99,7 @@
/// ```
#[cfg_attr(not(test), rustc_diagnostic_item = "Default")]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Default: Sized {
/// Returns the "default value" for a type.
///
diff --git a/library/core/src/error.md b/library/core/src/error.md
index 891abebbf..78808d489 100644
--- a/library/core/src/error.md
+++ b/library/core/src/error.md
@@ -46,7 +46,7 @@ These functions are equivalent, they either return the inner value if the
`Result` is `Ok` or panic if the `Result` is `Err` printing the inner error
as the source. The only difference between them is that with `expect` you
provide a panic error message to be printed alongside the source, whereas
-`unwrap` has a default message indicating only that you unwraped an `Err`.
+`unwrap` has a default message indicating only that you unwrapped an `Err`.
Of the two, `expect` is generally preferred since its `msg` field allows you
to convey your intent and assumptions which makes tracking down the source
diff --git a/library/core/src/error.rs b/library/core/src/error.rs
index 2738b4994..7152300ab 100644
--- a/library/core/src/error.rs
+++ b/library/core/src/error.rs
@@ -1,5 +1,5 @@
#![doc = include_str!("error.md")]
-#![unstable(feature = "error_in_core", issue = "none")]
+#![unstable(feature = "error_in_core", issue = "103765")]
#[cfg(test)]
mod tests;
@@ -506,3 +506,6 @@ impl Error for crate::ffi::FromBytesWithNulError {
#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
impl Error for crate::ffi::FromBytesUntilNulError {}
+
+#[unstable(feature = "get_many_mut", issue = "104642")]
+impl<const N: usize> Error for crate::slice::GetManyMutError<N> {}
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index 8923f548a..15dd9ea7e 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -13,9 +13,9 @@ use crate::str;
/// array of bytes. It can be constructed safely from a <code>&[[u8]]</code>
/// slice, or unsafely from a raw `*const c_char`. It can then be
/// converted to a Rust <code>&[str]</code> by performing UTF-8 validation, or
-/// into an owned `CString`.
+/// into an owned [`CString`].
///
-/// `&CStr` is to `CString` as <code>&[str]</code> is to `String`: the former
+/// `&CStr` is to [`CString`] as <code>&[str]</code> is to [`String`]: the former
/// in each pair are borrowed references; the latter are owned
/// strings.
///
@@ -24,6 +24,9 @@ use crate::str;
/// functions may leverage the unsafe [`CStr::from_ptr`] constructor to provide
/// a safe interface to other consumers.
///
+/// [`CString`]: ../../std/ffi/struct.CString.html
+/// [`String`]: ../../std/string/struct.String.html
+///
/// # Examples
///
/// Inspecting a foreign C string:
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index c8d285505..48b617743 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -510,7 +510,7 @@ impl<'a> Arguments<'a> {
/// assert_eq!(format_args!("{}", 1).as_str(), None);
/// ```
#[stable(feature = "fmt_as_str", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_arguments_as_str", issue = "none")]
+ #[rustc_const_unstable(feature = "const_arguments_as_str", issue = "103900")]
#[must_use]
#[inline]
pub const fn as_str(&self) -> Option<&'static str> {
@@ -1054,7 +1054,6 @@ pub trait UpperHex {
pub trait Pointer {
/// Formats the value using the given formatter.
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_diagnostic_item = "pointer_trait_fmt"]
fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs
index 6487aa088..f2b961d62 100644
--- a/library/core/src/future/mod.rs
+++ b/library/core/src/future/mod.rs
@@ -9,12 +9,8 @@
//! [`await`]: ../../std/keyword.await.html
//! [async book]: https://rust-lang.github.io/async-book/
-use crate::{
- ops::{Generator, GeneratorState},
- pin::Pin,
- ptr::NonNull,
- task::{Context, Poll},
-};
+use crate::ptr::NonNull;
+use crate::task::Context;
mod future;
mod into_future;
@@ -48,6 +44,7 @@ pub use poll_fn::{poll_fn, PollFn};
/// non-Send/Sync as well, and we don't want that.
///
/// It also simplifies the HIR lowering of `.await`.
+#[cfg_attr(not(bootstrap), lang = "ResumeTy")]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[derive(Debug, Copy, Clone)]
@@ -64,15 +61,21 @@ unsafe impl Sync for ResumeTy {}
/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give
/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`).
// This is `const` to avoid extra errors after we recover from `const async fn`
-#[lang = "from_generator"]
+#[cfg_attr(bootstrap, lang = "from_generator")]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[rustc_const_unstable(feature = "gen_future", issue = "50547")]
#[inline]
pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return>
where
- T: Generator<ResumeTy, Yield = ()>,
+ T: crate::ops::Generator<ResumeTy, Yield = ()>,
{
+ use crate::{
+ ops::{Generator, GeneratorState},
+ pin::Pin,
+ task::Poll,
+ };
+
#[rustc_diagnostic_item = "gen_future"]
struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T);
@@ -82,6 +85,7 @@ where
impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> {
type Output = T::Return;
+ #[track_caller]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// SAFETY: Safe because we're !Unpin + !Drop, and this is just a field projection.
let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) };
@@ -108,3 +112,11 @@ pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
// that fulfills all the requirements for a mutable reference.
unsafe { &mut *cx.0.as_ptr().cast() }
}
+
+#[cfg_attr(not(bootstrap), lang = "identity_future")]
+#[doc(hidden)]
+#[unstable(feature = "gen_future", issue = "50547")]
+#[inline]
+pub const fn identity_future<O, Fut: Future<Output = O>>(f: Fut) -> Fut {
+ f
+}
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index aa13435e6..c755afa39 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -86,7 +86,8 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::fmt;
-use crate::marker;
+use crate::intrinsics::const_eval_select;
+use crate::marker::{self, Destruct};
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(deprecated)]
@@ -183,6 +184,7 @@ mod sip;
/// [impl]: ../../std/primitive.str.html#impl-Hash-for-str
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "Hash"]
+#[const_trait]
pub trait Hash {
/// Feeds this value into the given [`Hasher`].
///
@@ -234,13 +236,25 @@ pub trait Hash {
/// [`hash`]: Hash::hash
/// [`hash_slice`]: Hash::hash_slice
#[stable(feature = "hash_slice", since = "1.3.0")]
- fn hash_slice<H: Hasher>(data: &[Self], state: &mut H)
+ fn hash_slice<H: ~const Hasher>(data: &[Self], state: &mut H)
where
Self: Sized,
{
- for piece in data {
- piece.hash(state);
+ //FIXME(const_trait_impl): revert to only a for loop
+ fn rt<T: Hash, H: Hasher>(data: &[T], state: &mut H) {
+ for piece in data {
+ piece.hash(state)
+ }
+ }
+ const fn ct<T: ~const Hash, H: ~const Hasher>(data: &[T], state: &mut H) {
+ let mut i = 0;
+ while i < data.len() {
+ data[i].hash(state);
+ i += 1;
+ }
}
+ // SAFETY: same behavior, CT just uses while instead of for
+ unsafe { const_eval_select((data, state), ct, rt) };
}
}
@@ -313,6 +327,7 @@ pub use macros::Hash;
/// [`write_u8`]: Hasher::write_u8
/// [`write_u32`]: Hasher::write_u32
#[stable(feature = "rust1", since = "1.0.0")]
+#[const_trait]
pub trait Hasher {
/// Returns the hash value for the values written so far.
///
@@ -558,7 +573,8 @@ pub trait Hasher {
}
#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
-impl<H: Hasher + ?Sized> Hasher for &mut H {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl<H: ~const Hasher + ?Sized> const Hasher for &mut H {
fn finish(&self) -> u64 {
(**self).finish()
}
@@ -638,6 +654,7 @@ impl<H: Hasher + ?Sized> Hasher for &mut H {
/// [`build_hasher`]: BuildHasher::build_hasher
/// [`HashMap`]: ../../std/collections/struct.HashMap.html
#[stable(since = "1.7.0", feature = "build_hasher")]
+#[const_trait]
pub trait BuildHasher {
/// Type of the hasher that will be created.
#[stable(since = "1.7.0", feature = "build_hasher")]
@@ -698,9 +715,10 @@ pub trait BuildHasher {
/// );
/// ```
#[unstable(feature = "build_hasher_simple_hash_one", issue = "86161")]
- fn hash_one<T: Hash>(&self, x: T) -> u64
+ fn hash_one<T: ~const Hash + ~const Destruct>(&self, x: T) -> u64
where
Self: Sized,
+ Self::Hasher: ~const Hasher + ~const Destruct,
{
let mut hasher = self.build_hasher();
x.hash(&mut hasher);
@@ -764,7 +782,8 @@ impl<H> fmt::Debug for BuildHasherDefault<H> {
}
#[stable(since = "1.7.0", feature = "build_hasher")]
-impl<H: Default + Hasher> BuildHasher for BuildHasherDefault<H> {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl<H: ~const Default + Hasher> const BuildHasher for BuildHasherDefault<H> {
type Hasher = H;
fn build_hasher(&self) -> H {
@@ -806,14 +825,15 @@ mod impls {
macro_rules! impl_write {
($(($ty:ident, $meth:ident),)*) => {$(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for $ty {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for $ty {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
#[inline]
- fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
+ fn hash_slice<H: ~const Hasher>(data: &[$ty], state: &mut H) {
let newlen = data.len() * mem::size_of::<$ty>();
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
@@ -842,33 +862,37 @@ mod impls {
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for bool {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for bool {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_u8(*self as u8)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for char {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for char {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_u32(*self as u32)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for str {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for str {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_str(self);
}
}
#[stable(feature = "never_hash", since = "1.29.0")]
- impl Hash for ! {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for ! {
#[inline]
- fn hash<H: Hasher>(&self, _: &mut H) {
+ fn hash<H: ~const Hasher>(&self, _: &mut H) {
*self
}
}
@@ -876,9 +900,10 @@ mod impls {
macro_rules! impl_hash_tuple {
() => (
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for () {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for () {
#[inline]
- fn hash<H: Hasher>(&self, _state: &mut H) {}
+ fn hash<H: ~const Hasher>(&self, _state: &mut H) {}
}
);
@@ -886,10 +911,11 @@ mod impls {
maybe_tuple_doc! {
$($name)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($name: Hash),+> Hash for ($($name,)+) where last_type!($($name,)+): ?Sized {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<$($name: ~const Hash),+> const Hash for ($($name,)+) where last_type!($($name,)+): ?Sized {
#[allow(non_snake_case)]
#[inline]
- fn hash<S: Hasher>(&self, state: &mut S) {
+ fn hash<S: ~const Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
@@ -932,24 +958,27 @@ mod impls {
impl_hash_tuple! { T B C D E F G H I J K L }
#[stable(feature = "rust1", since = "1.0.0")]
- impl<T: Hash> Hash for [T] {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<T: ~const Hash> const Hash for [T] {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_length_prefix(self.len());
Hash::hash_slice(self, state)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized + Hash> Hash for &T {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<T: ?Sized + ~const Hash> const Hash for &T {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized + Hash> Hash for &mut T {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<T: ?Sized + ~const Hash> const Hash for &mut T {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
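These hunks thread `~const` bounds through `Hash`, `Hasher`, and `BuildHasher` so hashing can run in const contexts behind the unstable `const_hash` feature; the runtime contract is unchanged. A stable-API sketch mirroring the default `hash_one` body shown above:

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

// Same shape as BuildHasher::hash_one's default body in the hunk above.
fn hash_one<T: Hash, S: BuildHasher>(build: &S, value: T) -> u64 {
    let mut hasher = build.build_hasher();
    value.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let s = RandomState::new();
    // Equal values must hash equally under the same hasher state.
    assert_eq!(hash_one(&s, [1u8, 2, 3]), hash_one(&s, [1u8, 2, 3]));
}
```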
diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs
index 81bf1dfdf..7f8287bf5 100644
--- a/library/core/src/hash/sip.rs
+++ b/library/core/src/hash/sip.rs
@@ -118,7 +118,7 @@ macro_rules! load_int_le {
/// Safety: this performs unchecked indexing of `buf` at `start..start+len`, so
/// that must be in-bounds.
#[inline]
-unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+const unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
debug_assert!(len < 8);
let mut i = 0; // current byte index (from LSB) in the output u64
let mut out = 0;
@@ -138,7 +138,8 @@ unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
out |= (unsafe { *buf.get_unchecked(start + i) } as u64) << (i * 8);
i += 1;
}
- debug_assert_eq!(i, len);
+ //FIXME(fee1-dead): use debug_assert_eq
+ debug_assert!(i == len);
out
}
@@ -150,8 +151,9 @@ impl SipHasher {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
- pub fn new() -> SipHasher {
+ pub const fn new() -> SipHasher {
SipHasher::new_with_keys(0, 0)
}
@@ -162,8 +164,9 @@ impl SipHasher {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
- pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
+ pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
SipHasher(SipHasher24 { hasher: Hasher::new_with_keys(key0, key1) })
}
}
@@ -176,7 +179,8 @@ impl SipHasher13 {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
- pub fn new() -> SipHasher13 {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ pub const fn new() -> SipHasher13 {
SipHasher13::new_with_keys(0, 0)
}
@@ -187,14 +191,15 @@ impl SipHasher13 {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
- pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
SipHasher13 { hasher: Hasher::new_with_keys(key0, key1) }
}
}
impl<S: Sip> Hasher<S> {
#[inline]
- fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
+ const fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
let mut state = Hasher {
k0: key0,
k1: key1,
@@ -209,7 +214,7 @@ impl<S: Sip> Hasher<S> {
}
#[inline]
- fn reset(&mut self) {
+ const fn reset(&mut self) {
self.length = 0;
self.state.v0 = self.k0 ^ 0x736f6d6570736575;
self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
@@ -220,7 +225,8 @@ impl<S: Sip> Hasher<S> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl super::Hasher for SipHasher {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl const super::Hasher for SipHasher {
#[inline]
fn write(&mut self, msg: &[u8]) {
self.0.hasher.write(msg)
@@ -238,7 +244,8 @@ impl super::Hasher for SipHasher {
}
#[unstable(feature = "hashmap_internals", issue = "none")]
-impl super::Hasher for SipHasher13 {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl const super::Hasher for SipHasher13 {
#[inline]
fn write(&mut self, msg: &[u8]) {
self.hasher.write(msg)
@@ -255,7 +262,7 @@ impl super::Hasher for SipHasher13 {
}
}
-impl<S: Sip> super::Hasher for Hasher<S> {
+impl<S: ~const Sip> const super::Hasher for Hasher<S> {
// Note: no integer hashing methods (`write_u*`, `write_i*`) are defined
// for this type. We could add them, copy the `short_write` implementation
// in librustc_data_structures/sip128.rs, and add `write_u*`/`write_i*`
@@ -335,7 +342,7 @@ impl<S: Sip> super::Hasher for Hasher<S> {
}
}
-impl<S: Sip> Clone for Hasher<S> {
+impl<S: Sip> const Clone for Hasher<S> {
#[inline]
fn clone(&self) -> Hasher<S> {
Hasher {
@@ -359,6 +366,7 @@ impl<S: Sip> Default for Hasher<S> {
}
#[doc(hidden)]
+#[const_trait]
trait Sip {
fn c_rounds(_: &mut State);
fn d_rounds(_: &mut State);
@@ -367,7 +375,7 @@ trait Sip {
#[derive(Debug, Clone, Default)]
struct Sip13Rounds;
-impl Sip for Sip13Rounds {
+impl const Sip for Sip13Rounds {
#[inline]
fn c_rounds(state: &mut State) {
compress!(state);
@@ -384,7 +392,7 @@ impl Sip for Sip13Rounds {
#[derive(Debug, Clone, Default)]
struct Sip24Rounds;
-impl Sip for Sip24Rounds {
+impl const Sip for Sip24Rounds {
#[inline]
fn c_rounds(state: &mut State) {
compress!(state);
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index c53175ba4..e8d724ab1 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -160,7 +160,7 @@ pub const unsafe fn unreachable_unchecked() -> ! {
/// ```
///
/// [`thread::yield_now`]: ../../std/thread/fn.yield_now.html
-#[inline]
+#[inline(always)]
#[stable(feature = "renamed_spin_loop", since = "1.49.0")]
pub fn spin_loop() {
#[cfg(target_arch = "x86")]
@@ -345,6 +345,7 @@ pub const fn black_box<T>(dummy: T) -> T {
#[unstable(feature = "hint_must_use", issue = "94745")]
#[rustc_const_unstable(feature = "hint_must_use", issue = "94745")]
#[must_use] // <-- :)
+#[inline(always)]
pub const fn must_use<T>(value: T) -> T {
value
}
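Both `spin_loop` and `must_use` become `#[inline(always)]` here, which matters in tight spin-wait loops. A typical stable-API sketch:

```rust
use std::hint;
use std::sync::atomic::{AtomicBool, Ordering};

fn spin_until_set(flag: &AtomicBool) {
    while !flag.load(Ordering::Acquire) {
        hint::spin_loop(); // e.g. PAUSE on x86, easing power use and contention
    }
}

fn main() {
    let flag = AtomicBool::new(true); // already set, so this returns at once
    spin_until_set(&flag);
}
```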
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 1dc79afe8..7ed7d767f 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -55,8 +55,13 @@
#![allow(missing_docs)]
use crate::marker::DiscriminantKind;
+#[cfg(not(bootstrap))]
+use crate::marker::Tuple;
use crate::mem;
+#[cfg(not(bootstrap))]
+pub mod mir;
+
// These imports are used for simplifying intra-doc links
#[allow(unused_imports)]
#[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
@@ -788,7 +793,7 @@ extern "rust-intrinsic" {
/// uninitialized at that point in the control flow.
///
/// This intrinsic should not be used outside of the compiler.
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn rustc_peek<T>(_: T) -> T;
/// Aborts the execution of the process.
@@ -806,7 +811,7 @@ extern "rust-intrinsic" {
/// On Unix, the
/// process will probably terminate with a signal like `SIGABRT`, `SIGILL`, `SIGTRAP`, `SIGSEGV` or
/// `SIGBUS`. The precise behaviour is not guaranteed and not stable.
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn abort() -> !;
/// Informs the optimizer that this point in the code is not reachable,
@@ -845,7 +850,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn likely(b: bool) -> bool;
/// Hints to the compiler that branch condition is likely to be false.
@@ -860,7 +865,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn unlikely(b: bool) -> bool;
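The repeated attribute swap in this file is mechanical: with bootstrap support dropped, `#[rustc_safe_intrinsic]` can be applied unconditionally, and a so-marked intrinsic is callable without an `unsafe` block. A small sketch of what that permits (nightly-only `core_intrinsics`; the wrapper function is illustrative):

```rust
#![feature(core_intrinsics)]
use core::intrinsics::likely;

fn mostly_ok(x: Option<u32>) -> u32 {
    // Hint that `Some` is the common case; no `unsafe` block needed
    // because `likely` is marked `#[rustc_safe_intrinsic]`.
    if likely(x.is_some()) { x.unwrap_or(0) } else { 0 }
}
```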
/// Executes a breakpoint trap, for inspection by a debugger.
@@ -880,7 +885,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::size_of`].
#[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn size_of<T>() -> usize;
/// The minimum alignment of a type.
@@ -892,7 +897,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::align_of`].
#[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn min_align_of<T>() -> usize;
/// The preferred alignment of a type.
///
@@ -921,7 +926,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::any::type_name`].
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn type_name<T: ?Sized>() -> &'static str;
/// Gets an identifier which is globally unique to the specified type. This
@@ -935,7 +940,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn type_id<T: ?Sized + 'static>() -> u64;
/// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
@@ -943,7 +948,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_stable(feature = "const_assert_type", since = "1.59.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn assert_inhabited<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` does not permit
@@ -951,7 +956,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn assert_zero_valid<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` has invalid
@@ -959,7 +964,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn assert_uninit_valid<T>();
/// Gets a reference to a static `Location` indicating where it was called.
@@ -971,7 +976,7 @@ extern "rust-intrinsic" {
///
/// Consider using [`core::panic::Location::caller`] instead.
#[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn caller_location() -> &'static crate::panic::Location<'static>;
/// Moves a value out of scope without running drop glue.
@@ -984,7 +989,7 @@ extern "rust-intrinsic" {
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
#[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn forget<T: ?Sized>(_: T);
/// Reinterprets the bits of a value of one type as another type.
@@ -1264,7 +1269,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
#[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn needs_drop<T: ?Sized>() -> bool;
/// Calculates the offset from a pointer.
@@ -1309,7 +1314,7 @@ extern "rust-intrinsic" {
/// any safety invariants.
///
/// Consider using [`pointer::mask`] instead.
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ptr_mask<T>(ptr: *const T, mask: usize) -> *const T;
/// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
@@ -1501,7 +1506,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f32::min`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn minnumf32(x: f32, y: f32) -> f32;
/// Returns the minimum of two `f64` values.
///
@@ -1512,7 +1517,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f64::min`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn minnumf64(x: f64, y: f64) -> f64;
/// Returns the maximum of two `f32` values.
///
@@ -1523,7 +1528,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f32::max`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn maxnumf32(x: f32, y: f32) -> f32;
/// Returns the maximum of two `f64` values.
///
@@ -1534,7 +1539,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f64::max`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn maxnumf64(x: f64, y: f64) -> f64;
/// Copies the sign from `y` to `x` for `f32` values.
@@ -1655,7 +1660,7 @@ extern "rust-intrinsic" {
/// primitives via the `count_ones` method. For example,
/// [`u32::count_ones`]
#[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ctpop<T: Copy>(x: T) -> T;
/// Returns the number of leading unset bits (zeroes) in an integer type `T`.
@@ -1693,7 +1698,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_leading, 16);
/// ```
#[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ctlz<T: Copy>(x: T) -> T;
/// Like `ctlz`, but extra-unsafe as it returns `undef` when
@@ -1750,7 +1755,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_trailing, 16);
/// ```
#[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn cttz<T: Copy>(x: T) -> T;
/// Like `cttz`, but extra-unsafe as it returns `undef` when
@@ -1783,7 +1788,7 @@ extern "rust-intrinsic" {
/// primitives via the `swap_bytes` method. For example,
/// [`u32::swap_bytes`]
#[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn bswap<T: Copy>(x: T) -> T;
/// Reverses the bits in an integer type `T`.
@@ -1797,7 +1802,7 @@ extern "rust-intrinsic" {
/// primitives via the `reverse_bits` method. For example,
/// [`u32::reverse_bits`]
#[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn bitreverse<T: Copy>(x: T) -> T;
/// Performs checked integer addition.
@@ -1811,7 +1816,7 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_add` method. For example,
/// [`u32::overflowing_add`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs checked integer subtraction
@@ -1825,7 +1830,7 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_sub` method. For example,
/// [`u32::overflowing_sub`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs checked integer multiplication
@@ -1839,13 +1844,14 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_mul` method. For example,
/// [`u32::overflowing_mul`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs an exact division, resulting in undefined behavior where
/// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_exact_div", issue = "none")]
pub fn exact_div<T: Copy>(x: T, y: T) -> T;
/// Performs an unchecked division, resulting in undefined behavior
@@ -1914,7 +1920,7 @@ extern "rust-intrinsic" {
/// primitives via the `rotate_left` method. For example,
/// [`u32::rotate_left`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn rotate_left<T: Copy>(x: T, y: T) -> T;
/// Performs rotate right.
@@ -1928,7 +1934,7 @@ extern "rust-intrinsic" {
/// primitives via the `rotate_right` method. For example,
/// [`u32::rotate_right`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
/// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.
@@ -1942,7 +1948,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_add` method. For example,
/// [`u32::wrapping_add`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
/// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -1955,7 +1961,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_sub` method. For example,
/// [`u32::wrapping_sub`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
/// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -1968,7 +1974,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_mul` method. For example,
/// [`u32::wrapping_mul`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
/// Computes `a + b`, saturating at numeric bounds.
@@ -1982,7 +1988,7 @@ extern "rust-intrinsic" {
/// primitives via the `saturating_add` method. For example,
/// [`u32::saturating_add`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
/// Computes `a - b`, saturating at numeric bounds.
///
@@ -1995,7 +2001,7 @@ extern "rust-intrinsic" {
/// primitives via the `saturating_sub` method. For example,
/// [`u32::saturating_sub`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;
/// Returns the value of the discriminant for the variant in 'v';
@@ -2008,7 +2014,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::discriminant`].
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
/// Returns the number of variants of the type `T` cast to a `usize`;
@@ -2021,7 +2027,7 @@ extern "rust-intrinsic" {
///
/// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
#[rustc_const_unstable(feature = "variant_count", issue = "73662")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn variant_count<T>() -> usize;
/// Rust's "try catch" construct which invokes the function pointer `try_fn`
@@ -2055,7 +2061,7 @@ extern "rust-intrinsic" {
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;
/// Allocates a block of memory at compile time.
@@ -2106,7 +2112,7 @@ extern "rust-intrinsic" {
///
/// [`std::hint::black_box`]: crate::hint::black_box
#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn black_box<T>(dummy: T) -> T;
/// `ptr` must point to a vtable.
@@ -2169,11 +2175,75 @@ extern "rust-intrinsic" {
/// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
/// which violates the principle that a `const fn` must behave the same at
/// compile-time and at run-time. The unsafe code in crate B is fine.
+ #[cfg(bootstrap)]
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET
where
G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>;
+
+ /// Selects which function to call depending on the context.
+ ///
+ /// If this function is evaluated at compile-time, then a call to this
+ /// intrinsic will be replaced with a call to `called_in_const`. It gets
+ /// replaced with a call to `called_at_rt` otherwise.
+ ///
+ /// # Type Requirements
+ ///
+ /// The two functions must both be function items. They cannot be function
+ /// pointers or closures. The first function must be a `const fn`.
+ ///
+ /// `arg` will be the tupled arguments that will be passed to either one of
+ /// the two functions; therefore, both functions must accept the same type of
+ /// arguments. Both functions must return `RET`.
+ ///
+ /// # Safety
+ ///
+ /// The two functions must be observably equivalent. Safe code in other
+ /// crates may assume that calling a `const fn` at compile-time and at run-time
+ /// produces the same result. A function that produces a different result when
+ /// evaluated at run-time, or has any other observable side-effects, is
+ /// *unsound*.
+ ///
+ /// Here is an example of how this could cause a problem:
+ /// ```no_run
+ /// #![feature(const_eval_select)]
+ /// #![feature(core_intrinsics)]
+ /// use std::hint::unreachable_unchecked;
+ /// use std::intrinsics::const_eval_select;
+ ///
+ /// // Crate A
+ /// pub const fn inconsistent() -> i32 {
+ /// fn runtime() -> i32 { 1 }
+ /// const fn compiletime() -> i32 { 2 }
+ ///
+ /// unsafe {
+ /// // ⚠ This code violates the required equivalence of `compiletime`
+ /// // and `runtime`.
+ /// const_eval_select((), compiletime, runtime)
+ /// }
+ /// }
+ ///
+ /// // Crate B
+ /// const X: i32 = inconsistent();
+ /// let x = inconsistent();
+ /// if x != X { unsafe { unreachable_unchecked(); }}
+ /// ```
+ ///
+ /// This code causes Undefined Behavior when being run, since the
+ /// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
+ /// which violates the principle that a `const fn` must behave the same at
+ /// compile-time and at run-time. The unsafe code in crate B is fine.
+ #[cfg(not(bootstrap))]
+ #[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
+ pub fn const_eval_select<ARG: Tuple, F, G, RET>(
+ arg: ARG,
+ called_in_const: F,
+ called_at_rt: G,
+ ) -> RET
+ where
+ G: FnOnce<ARG, Output = RET>,
+ F: FnOnce<ARG, Output = RET>;
}
// Some functions are defined here because they accidentally got made
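For contrast with the unsound example in the docs above, here is a sketch of a sound use under the documented contract: both paths compute the same value for every input, and only the spelling differs (the function names are illustrative):

```rust
#![feature(core_intrinsics, const_eval_select)]
use core::intrinsics::const_eval_select;

const fn times_four(x: u32) -> u32 {
    const fn ct(x: u32) -> u32 { x.wrapping_mul(4) }
    fn rt(x: u32) -> u32 { x.wrapping_shl(2) }
    // SAFETY: `ct` and `rt` are observably equivalent: they return the
    // same value for every `u32` input and have no side effects.
    unsafe { const_eval_select((x,), ct, rt) }
}

const AT_COMPILE_TIME: u32 = times_four(3); // evaluates `ct`

fn main() {
    assert_eq!(AT_COMPILE_TIME, times_four(3)); // the runtime call evaluates `rt`
}
```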
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
new file mode 100644
index 000000000..8ba1c1228
--- /dev/null
+++ b/library/core/src/intrinsics/mir.rs
@@ -0,0 +1,289 @@
+//! Rustc internal tooling for hand-writing MIR.
+//!
+//! If for some reason you are not writing rustc tests and have found yourself considering using
+//! this feature, turn back. This is *exceptionally* unstable. There is no attempt at all to make
+//! anything work besides those things which the rustc test suite happened to need. If you make a
+//! typo you'll probably ICE. Really, this is not the solution to your problems. Consider instead
+//! supporting the [stable MIR project group](https://github.com/rust-lang/project-stable-mir).
+//!
+//! The documentation for this module describes how to use this feature. If you are interested in
+//! hacking on the implementation, most of that documentation lives at
+//! `rustc_mir_build/src/build/custom/mod.rs`.
+//!
+//! Typical usage will look like this:
+//!
+//! ```rust
+//! #![feature(core_intrinsics, custom_mir)]
+//!
+//! extern crate core;
+//! use core::intrinsics::mir::*;
+//!
+//! #[custom_mir(dialect = "built")]
+//! pub fn simple(x: i32) -> i32 {
+//! mir!(
+//! let temp1: i32;
+//! let temp2: _;
+//!
+//! {
+//! temp1 = x;
+//! Goto(exit)
+//! }
+//!
+//! exit = {
+//! temp2 = Move(temp1);
+//! RET = temp2;
+//! Return()
+//! }
+//! )
+//! }
+//! ```
+//!
+//! Hopefully most of this is fairly self-explanatory. Expanding on some notable details:
+//!
+//! - The `custom_mir` attribute tells the compiler to treat the function as being custom MIR. This
+//! attribute only works on functions - there is no way to insert custom MIR into the middle of
+//! another function.
+//! - The `dialect` and `phase` parameters indicate which version of MIR you are inserting here.
+//! This will normally be the phase that corresponds to the thing you are trying to test. The
+//! phase can be omitted for dialects that have just one.
+//! - You should define your function signature like you normally would. Externally, this function
+//! can be called like any other function.
+//! - Type inference works - you don't have to spell out the type of all of your locals.
+//!
+//! For now, all statements and terminators are parsed from nested invocations of the special
+//! functions provided in this module. We additionally want to (but do not yet) support more
+//! "normal" Rust syntax in places where it makes sense. Also, most kinds of instructions are not
+//! supported yet.
+//!
+
+#![unstable(
+ feature = "custom_mir",
+ reason = "MIR is an implementation detail and extremely unstable",
+ issue = "none"
+)]
+#![allow(unused_variables, non_snake_case, missing_debug_implementations)]
+
+/// Type representing basic blocks.
+///
+/// All terminators will have this type as a return type. It helps achieve some type safety.
+pub struct BasicBlock;
+
+macro_rules! define {
+ ($name:literal, $($sig:tt)*) => {
+ #[rustc_diagnostic_item = $name]
+ pub $($sig)* { panic!() }
+ }
+}
+
+define!("mir_return", fn Return() -> BasicBlock);
+define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
+define!("mir_retag", fn Retag<T>(place: T));
+define!("mir_retag_raw", fn RetagRaw<T>(place: T));
+define!("mir_move", fn Move<T>(place: T) -> T);
+define!("mir_static", fn Static<T>(s: T) -> &'static T);
+define!("mir_static_mut", fn StaticMut<T>(s: T) -> *mut T);
+
+/// Convenience macro for generating custom MIR.
+///
+/// See the module documentation for syntax details. This macro is not magic - it only transforms
+/// your MIR into something that is easier to parse in the compiler.
+#[rustc_macro_transparency = "transparent"]
+pub macro mir {
+ (
+ $(let $local_decl:ident $(: $local_decl_ty:ty)? ;)*
+
+ {
+ $($entry:tt)*
+ }
+
+ $(
+ $block_name:ident = {
+ $($block:tt)*
+ }
+ )*
+ ) => {{
+ // First, we declare all basic blocks.
+ $(
+ let $block_name: ::core::intrinsics::mir::BasicBlock;
+ )*
+
+ {
+ // Now all locals
+ #[allow(non_snake_case)]
+ let RET;
+ $(
+ let $local_decl $(: $local_decl_ty)? ;
+ )*
+
+ ::core::intrinsics::mir::__internal_extract_let!($($entry)*);
+ $(
+ ::core::intrinsics::mir::__internal_extract_let!($($block)*);
+ )*
+
+ {
+ // Finally, the contents of the basic blocks
+ ::core::intrinsics::mir::__internal_remove_let!({
+ {}
+ { $($entry)* }
+ });
+ $(
+ ::core::intrinsics::mir::__internal_remove_let!({
+ {}
+ { $($block)* }
+ });
+ )*
+
+ RET
+ }
+ }
+ }}
+}
+
+/// Helper macro that extracts the `let` declarations out of a bunch of statements.
+///
+/// This macro is written using the "statement muncher" strategy. Each invocation parses the first
+/// statement out of the input, does the appropriate thing with it, and then recursively calls the
+/// same macro on the remainder of the input.
+#[doc(hidden)]
+pub macro __internal_extract_let {
+ // If it's a `let` like statement, keep the `let`
+ (
+ let $var:ident $(: $ty:ty)? = $expr:expr; $($rest:tt)*
+ ) => {
+ let $var $(: $ty)?;
+ ::core::intrinsics::mir::__internal_extract_let!($($rest)*);
+ },
+ // Due to #86730, we have to handle const blocks separately
+ (
+ let $var:ident $(: $ty:ty)? = const $block:block; $($rest:tt)*
+ ) => {
+ let $var $(: $ty)?;
+ ::core::intrinsics::mir::__internal_extract_let!($($rest)*);
+ },
+ // Otherwise, output nothing
+ (
+ $stmt:stmt; $($rest:tt)*
+ ) => {
+ ::core::intrinsics::mir::__internal_extract_let!($($rest)*);
+ },
+ (
+ $expr:expr
+ ) => {}
+}
+
+/// Helper macro that removes the `let` declarations from a bunch of statements.
+///
+/// Because expression position macros cannot expand to statements + expressions, we need to be
+/// slightly creative here. The general strategy is also statement munching as above, but the output
+/// of the macro is "stored" in the subsequent macro invocation. Easiest understood via example:
+/// ```text
+/// invoke!(
+/// {
+/// {
+/// x = 5;
+/// }
+/// {
+/// let d = e;
+/// Call()
+/// }
+/// }
+/// )
+/// ```
+/// becomes
+/// ```text
+/// invoke!(
+/// {
+/// {
+/// x = 5;
+/// d = e;
+/// }
+/// {
+/// Call()
+/// }
+/// }
+/// )
+/// ```
+#[doc(hidden)]
+pub macro __internal_remove_let {
+ // If it's a `let` like statement, remove the `let`
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ let $var:ident $(: $ty:ty)? = $expr:expr;
+ $($rest:tt)*
+ }
+ }
+ ) => { ::core::intrinsics::mir::__internal_remove_let!(
+ {
+ {
+ $($already_parsed)*
+ $var = $expr;
+ }
+ {
+ $($rest)*
+ }
+ }
+ )},
+ // Due to #86730, we have to handle const blocks separately
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ let $var:ident $(: $ty:ty)? = const $block:block;
+ $($rest:tt)*
+ }
+ }
+ ) => { ::core::intrinsics::mir::__internal_remove_let!(
+ {
+ {
+ $($already_parsed)*
+ $var = const $block;
+ }
+ {
+ $($rest)*
+ }
+ }
+ )},
+ // Otherwise, keep going
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ $stmt:stmt;
+ $($rest:tt)*
+ }
+ }
+ ) => { ::core::intrinsics::mir::__internal_remove_let!(
+ {
+ {
+ $($already_parsed)*
+ $stmt;
+ }
+ {
+ $($rest)*
+ }
+ }
+ )},
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ $expr:expr
+ }
+ }
+ ) => {
+ {
+ $($already_parsed)*
+ $expr
+ }
+ },
+}
diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs
index d4fb88610..5e4211058 100644
--- a/library/core/src/iter/adapters/array_chunks.rs
+++ b/library/core/src/iter/adapters/array_chunks.rs
@@ -1,6 +1,8 @@
use crate::array;
-use crate::iter::{ByRefSized, FusedIterator, Iterator};
-use crate::ops::{ControlFlow, Try};
+use crate::const_closure::ConstFnMutClosure;
+use crate::iter::{ByRefSized, FusedIterator, Iterator, TrustedRandomAccessNoCoerce};
+use crate::mem::{self, MaybeUninit};
+use crate::ops::{ControlFlow, NeverShortCircuit, Try};
/// An iterator over `N` elements of the iterator at a time.
///
@@ -82,7 +84,13 @@ where
}
}
- impl_fold_via_try_fold! { fold -> try_fold }
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ <Self as SpecFold>::fold(self, init, f)
+ }
}
#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
@@ -168,3 +176,64 @@ where
self.iter.len() < N
}
}
+
+trait SpecFold: Iterator {
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B;
+}
+
+impl<I, const N: usize> SpecFold for ArrayChunks<I, N>
+where
+ I: Iterator,
+{
+ #[inline]
+ default fn fold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let fold = ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp);
+ self.try_fold(init, fold).0
+ }
+}
+
+impl<I, const N: usize> SpecFold for ArrayChunks<I, N>
+where
+ I: Iterator + TrustedRandomAccessNoCoerce,
+{
+ #[inline]
+ fn fold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let mut accum = init;
+ let inner_len = self.iter.size();
+ let mut i = 0;
+ // Use a while loop because (0..len).step_by(N) doesn't optimize well.
+ while inner_len - i >= N {
+ let mut chunk = MaybeUninit::uninit_array();
+ let mut guard = array::Guard { array_mut: &mut chunk, initialized: 0 };
+ while guard.initialized < N {
+ // SAFETY: The method consumes the iterator and the loop condition ensures that
+ // all accesses are in bounds and only happen once.
+ unsafe {
+ let idx = i + guard.initialized;
+ guard.push_unchecked(self.iter.__iterator_get_unchecked(idx));
+ }
+ }
+ mem::forget(guard);
+ // SAFETY: The loop above initialized all elements
+ let chunk = unsafe { MaybeUninit::array_assume_init(chunk) };
+ accum = f(accum, chunk);
+ i += N;
+ }
+
+ // unlike try_fold this method does not need to take care of the remainder
+ // since `self` will be dropped
+
+ accum
+ }
+}
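The specialization above only changes how `fold` is driven internally; observable behavior stays as in this usage sketch (nightly, `iter_array_chunks`):

```rust
#![feature(iter_array_chunks)]

fn main() {
    // `fold` sees whole `[i32; 2]` chunks; the trailing remainder (the 7
    // here) never reaches the closure and is dropped with the iterator.
    let sums = (1..=7).array_chunks::<2>().fold(Vec::new(), |mut acc, [a, b]| {
        acc.push(a + b);
        acc
    });
    assert_eq!(sums, [3, 7, 11]);
}
```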
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
index 58a0b9d7b..d947c7b0e 100644
--- a/library/core/src/iter/adapters/take.rs
+++ b/library/core/src/iter/adapters/take.rs
@@ -75,7 +75,6 @@ where
#[inline]
fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
- Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
@@ -101,6 +100,26 @@ where
impl_fold_via_try_fold! { fold -> try_fold }
#[inline]
+ fn for_each<F: FnMut(Self::Item)>(mut self, f: F) {
+ // The default implementation would use a unit accumulator, so we can
+ // avoid a stateful closure by folding over the remaining number
+ // of items we wish to return instead.
+ fn check<'a, Item>(
+ mut action: impl FnMut(Item) + 'a,
+ ) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
+ move |more, x| {
+ action(x);
+ more.checked_sub(1)
+ }
+ }
+
+ let remaining = self.n;
+ if remaining > 0 {
+ self.iter.try_fold(remaining - 1, check(f));
+ }
+ }
+
+ #[inline]
#[rustc_inherit_overflow_checks]
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
let min = self.n.min(n);
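The observable effect of the `for_each` override, from the caller's side: folding over the remaining count stops polling the source once `n` items have been seen, so even an infinite iterator terminates:

```rust
fn main() {
    let mut seen = Vec::new();
    // The fold short-circuits when the remaining count hits zero, so the
    // infinite `0..` source is polled exactly three times.
    (0..).take(3).for_each(|x| seen.push(x));
    assert_eq!(seen, [0, 1, 2]);
}
```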
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index ef0f39782..bb35d50b4 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -401,6 +401,8 @@ pub use self::sources::{once, Once};
pub use self::sources::{once_with, OnceWith};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::sources::{repeat, Repeat};
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+pub use self::sources::{repeat_n, RepeatN};
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
pub use self::sources::{repeat_with, RepeatWith};
#[stable(feature = "iter_successors", since = "1.34.0")]
diff --git a/library/core/src/iter/sources.rs b/library/core/src/iter/sources.rs
index d34772cd3..3ec426a3a 100644
--- a/library/core/src/iter/sources.rs
+++ b/library/core/src/iter/sources.rs
@@ -4,6 +4,7 @@ mod from_generator;
mod once;
mod once_with;
mod repeat;
+mod repeat_n;
mod repeat_with;
mod successors;
@@ -16,6 +17,9 @@ pub use self::empty::{empty, Empty};
#[stable(feature = "iter_once", since = "1.2.0")]
pub use self::once::{once, Once};
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+pub use self::repeat_n::{repeat_n, RepeatN};
+
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
pub use self::repeat_with::{repeat_with, RepeatWith};
diff --git a/library/core/src/iter/sources/repeat_n.rs b/library/core/src/iter/sources/repeat_n.rs
new file mode 100644
index 000000000..fd8d25ce1
--- /dev/null
+++ b/library/core/src/iter/sources/repeat_n.rs
@@ -0,0 +1,195 @@
+use crate::iter::{FusedIterator, TrustedLen};
+use crate::mem::ManuallyDrop;
+
+/// Creates a new iterator that repeats a single element a given number of times.
+///
+/// The `repeat_n()` function repeats a single value exactly `n` times.
+///
+/// This is very similar to using [`repeat()`] with [`Iterator::take()`],
+/// but there are two differences:
+/// - `repeat_n()` can return the original value, rather than always cloning.
+/// - `repeat_n()` produces an [`ExactSizeIterator`].
+///
+/// [`repeat()`]: crate::iter::repeat
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// #![feature(iter_repeat_n)]
+/// use std::iter;
+///
+/// // four of the number four:
+/// let mut four_fours = iter::repeat_n(4, 4);
+///
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+///
+/// // no more fours
+/// assert_eq!(None, four_fours.next());
+/// ```
+///
+/// For non-`Copy` types,
+///
+/// ```
+/// #![feature(iter_repeat_n)]
+/// use std::iter;
+///
+/// let v: Vec<i32> = Vec::with_capacity(123);
+/// let mut it = iter::repeat_n(v, 5);
+///
+/// for i in 0..4 {
+/// // It starts by cloning things
+/// let cloned = it.next().unwrap();
+/// assert_eq!(cloned.len(), 0);
+/// assert_eq!(cloned.capacity(), 0);
+/// }
+///
+/// // ... but the last item is the original one
+/// let last = it.next().unwrap();
+/// assert_eq!(last.len(), 0);
+/// assert_eq!(last.capacity(), 123);
+///
+/// // ... and now we're done
+/// assert_eq!(None, it.next());
+/// ```
+#[inline]
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+#[doc(hidden)] // waiting on ACP#120 to decide whether to expose publicly
+pub fn repeat_n<T: Clone>(element: T, count: usize) -> RepeatN<T> {
+ let mut element = ManuallyDrop::new(element);
+
+ if count == 0 {
+ // SAFETY: we definitely haven't dropped it yet, since we only just got
+ // passed it in, and because the count is zero the instance we're about
+ // to create won't drop it, so to avoid leaking we need to drop it now.
+ unsafe { ManuallyDrop::drop(&mut element) };
+ }
+
+ RepeatN { element, count }
+}
+
+/// An iterator that repeats an element an exact number of times.
+///
+/// This `struct` is created by the [`repeat_n()`] function.
+/// See its documentation for more.
+#[derive(Clone, Debug)]
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+#[doc(hidden)] // waiting on ACP#120 to decide whether to expose publicly
+pub struct RepeatN<A> {
+ count: usize,
+ // Invariant: has been dropped iff count == 0.
+ element: ManuallyDrop<A>,
+}
+
+impl<A> RepeatN<A> {
+ /// If we haven't already dropped the element, return it in an option.
+ ///
+ /// Clears the count so it won't be dropped again later.
+ #[inline]
+ fn take_element(&mut self) -> Option<A> {
+ if self.count > 0 {
+ self.count = 0;
+ // SAFETY: We just set count to zero so it won't be dropped again,
+ // and it used to be non-zero so it hasn't already been dropped.
+ unsafe { Some(ManuallyDrop::take(&mut self.element)) }
+ } else {
+ None
+ }
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A> Drop for RepeatN<A> {
+ fn drop(&mut self) {
+ self.take_element();
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> Iterator for RepeatN<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ if self.count == 0 {
+ return None;
+ }
+
+ self.count -= 1;
+ Some(if self.count == 0 {
+ // SAFETY: the check above ensured that the count used to be non-zero,
+ // so element hasn't been dropped yet, and we just lowered the count to
+ // zero so it won't be dropped later, and thus it's okay to take it here.
+ unsafe { ManuallyDrop::take(&mut self.element) }
+ } else {
+ A::clone(&self.element)
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn advance_by(&mut self, skip: usize) -> Result<(), usize> {
+ let len = self.count;
+
+ if skip >= len {
+ self.take_element();
+ }
+
+ if skip > len {
+ Err(len)
+ } else {
+ self.count = len - skip;
+ Ok(())
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<A> {
+ self.take_element()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> ExactSizeIterator for RepeatN<A> {
+ fn len(&self) -> usize {
+ self.count
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> DoubleEndedIterator for RepeatN<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.next()
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.advance_by(n)
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ self.nth(n)
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> FusedIterator for RepeatN<A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Clone> TrustedLen for RepeatN<A> {}
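One consequence of the `ManuallyDrop` invariant above is worth spelling out: the element is dropped exactly once, whether it is yielded as the last item or the iterator is dropped early. A small check (nightly, `iter_repeat_n`):

```rust
#![feature(iter_repeat_n)]
use std::iter;
use std::rc::Rc;

fn main() {
    let tracked = Rc::new(());
    let mut it = iter::repeat_n(Rc::clone(&tracked), 3);
    let _first = it.next(); // count 3 -> 2: yields a clone
    drop(it);               // count still > 0: `Drop` takes and drops the original
    assert_eq!(Rc::strong_count(&tracked), 2); // `tracked` itself + `_first`
}
```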
diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs
index 6f62662d8..ab2d0472b 100644
--- a/library/core/src/iter/sources/repeat_with.rs
+++ b/library/core/src/iter/sources/repeat_with.rs
@@ -1,4 +1,5 @@
use crate::iter::{FusedIterator, TrustedLen};
+use crate::ops::Try;
/// Creates a new iterator that repeats elements of type `A` endlessly by
/// applying the provided closure, the repeater, `F: FnMut() -> A`.
@@ -89,6 +90,22 @@ impl<A, F: FnMut() -> A> Iterator for RepeatWith<F> {
fn size_hint(&self) -> (usize, Option<usize>) {
(usize::MAX, None)
}
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+ where
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ // This override isn't strictly needed, but it avoids having to optimize
+ // away the `next`-always-returns-`Some` check and emphasizes that the `?`
+ // is the only way to exit the loop.
+
+ loop {
+ let item = (self.repeater)();
+ init = fold(init, item)?;
+ }
+ }
}
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 789a87968..83c7e8977 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -14,7 +14,7 @@ use super::super::{
fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
-/// An interface for dealing with iterators.
+/// A trait for dealing with iterators.
///
/// This is the main iterator trait. For more about the concept of iterators
/// generally, please see the [module-level documentation]. In particular, you
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 659409557..1823fd300 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -89,6 +89,7 @@
// Lints:
#![deny(rust_2021_incompatible_or_patterns)]
#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
@@ -98,20 +99,24 @@
// Library features:
#![feature(const_align_offset)]
#![feature(const_align_of_val)]
+#![feature(const_align_of_val_raw)]
+#![feature(const_alloc_layout)]
#![feature(const_arguments_as_str)]
#![feature(const_array_into_iter_constructors)]
#![feature(const_bigint_helper_methods)]
#![feature(const_black_box)]
#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
-#![feature(const_char_convert)]
+#![feature(const_char_from_u32_unchecked)]
#![feature(const_clone)]
#![feature(const_cmp)]
#![feature(const_discriminant)]
#![feature(const_eval_select)]
+#![feature(const_exact_div)]
#![feature(const_float_bits_conv)]
#![feature(const_float_classify)]
#![feature(const_fmt_arguments_new)]
+#![feature(const_hash)]
#![feature(const_heap)]
#![feature(const_convert)]
#![feature(const_index_range_slice_index)]
@@ -128,14 +133,17 @@
#![feature(const_option)]
#![feature(const_option_ext)]
#![feature(const_pin)]
+#![feature(const_pointer_is_aligned)]
#![feature(const_ptr_sub_ptr)]
#![feature(const_replace)]
+#![feature(const_result_drop)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_is_null)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
#![feature(const_raw_ptr_comparison)]
#![feature(const_size_of_val)]
+#![feature(const_size_of_val_raw)]
#![feature(const_slice_from_raw_parts_mut)]
#![feature(const_slice_ptr_len)]
#![feature(const_slice_split_at_mut)]
@@ -154,9 +162,11 @@
#![feature(maybe_uninit_uninit_array)]
#![feature(ptr_alignment_type)]
#![feature(ptr_metadata)]
+#![feature(set_ptr_value)]
#![feature(slice_ptr_get)]
#![feature(slice_split_at_unchecked)]
#![feature(str_internals)]
+#![feature(strict_provenance)]
#![feature(utf16_extra)]
#![feature(utf16_extra_const)]
#![feature(variant_count)]
@@ -184,6 +194,7 @@
#![feature(const_refs_to_cell)]
#![feature(decl_macro)]
#![feature(deprecated_suggestion)]
+#![cfg_attr(not(bootstrap), feature(derive_const))]
#![feature(doc_cfg)]
#![feature(doc_notable_trait)]
#![feature(rustdoc_internals)]
@@ -386,38 +397,8 @@ pub mod primitive;
#[unstable(feature = "stdsimd", issue = "48556")]
mod core_arch;
-#[doc = include_str!("../../stdarch/crates/core_arch/src/core_arch_docs.md")]
#[stable(feature = "simd_arch", since = "1.27.0")]
-pub mod arch {
- #[stable(feature = "simd_arch", since = "1.27.0")]
- pub use crate::core_arch::arch::*;
-
- /// Inline assembly.
- ///
- /// Refer to [rust by example] for a usage guide and the [reference] for
- /// detailed information about the syntax and available options.
- ///
- /// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
- /// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
- #[stable(feature = "asm", since = "1.59.0")]
- #[rustc_builtin_macro]
- pub macro asm("assembly template", $(operands,)* $(options($(option),*))?) {
- /* compiler built-in */
- }
-
- /// Module-level inline assembly.
- ///
- /// Refer to [rust by example] for a usage guide and the [reference] for
- /// detailed information about the syntax and available options.
- ///
- /// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
- /// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
- #[stable(feature = "global_asm", since = "1.59.0")]
- #[rustc_builtin_macro]
- pub macro global_asm("assembly template", $(operands,)* $(options($(option),*))?) {
- /* compiler built-in */
- }
-}
+pub mod arch;
// Pull in the `core_simd` crate directly into libcore. The contents of
// `core_simd` are in a different repository: rust-lang/portable-simd.
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index fd96e1ff7..f29cd357d 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -820,7 +820,6 @@ pub(crate) mod builtin {
#[stable(feature = "compile_error_macro", since = "1.20.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "compile_error_macro")]
macro_rules! compile_error {
($msg:expr $(,)?) => {{ /* compiler built-in */ }};
}
@@ -944,7 +943,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "env_macro")]
macro_rules! env {
($name:expr $(,)?) => {{ /* compiler built-in */ }};
($name:expr, $error_msg:expr $(,)?) => {{ /* compiler built-in */ }};
@@ -973,7 +971,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "option_env_macro")]
macro_rules! option_env {
($name:expr $(,)?) => {{ /* compiler built-in */ }};
}
@@ -1058,7 +1055,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "concat_macro")]
macro_rules! concat {
($($e:expr),* $(,)?) => {{ /* compiler built-in */ }};
}
@@ -1084,7 +1080,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "line_macro")]
macro_rules! line {
() => {
/* compiler built-in */
@@ -1124,7 +1119,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "column_macro")]
macro_rules! column {
() => {
/* compiler built-in */
@@ -1150,7 +1144,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "file_macro")]
macro_rules! file {
() => {
/* compiler built-in */
@@ -1175,7 +1168,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "stringify_macro")]
macro_rules! stringify {
($($t:tt)*) => {
/* compiler built-in */
@@ -1282,7 +1274,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "module_path_macro")]
macro_rules! module_path {
() => {
/* compiler built-in */
@@ -1316,7 +1307,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "cfg_macro")]
macro_rules! cfg {
($($cfg:tt)*) => {
/* compiler built-in */
@@ -1367,7 +1357,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "include_macro")]
macro_rules! include {
($file:expr $(,)?) => {{ /* compiler built-in */ }};
}
@@ -1464,6 +1453,19 @@ pub(crate) mod builtin {
/* compiler built-in */
}
+ /// Attribute macro used to apply derive macros for implementing traits
+ /// in a const context.
+ ///
+ /// See [the reference] for more info.
+ ///
+ /// [the reference]: ../../../reference/attributes/derive.html
+ #[unstable(feature = "derive_const", issue = "none")]
+ #[rustc_builtin_macro]
+ #[cfg(not(bootstrap))]
+ pub macro derive_const($item:item) {
+ /* compiler built-in */
+ }
+
/// Attribute macro applied to a function to turn it into a unit test.
///
/// See [the reference] for more info.
@@ -1511,6 +1513,17 @@ pub(crate) mod builtin {
/* compiler built-in */
}
+ /// Attribute macro applied to a function to register it as a handler for allocation failure.
+ ///
+ /// See also [`std::alloc::handle_alloc_error`](../../../std/alloc/fn.handle_alloc_error.html).
+ #[cfg(not(bootstrap))]
+ #[unstable(feature = "alloc_error_handler", issue = "51540")]
+ #[allow_internal_unstable(rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro alloc_error_handler($item:item) {
+ /* compiler built-in */
+ }
+
/// Keeps the item it's applied to if the passed path is accessible, and removes it otherwise.
#[unstable(
feature = "cfg_accessible",
@@ -1533,6 +1546,18 @@ pub(crate) mod builtin {
/* compiler built-in */
}
+ /// Unstable placeholder for type ascription.
+ #[rustc_builtin_macro]
+ #[unstable(
+ feature = "type_ascription",
+ issue = "23416",
+ reason = "placeholder syntax for type ascription"
+ )]
+ #[cfg(not(bootstrap))]
+ pub macro type_ascribe($expr:expr, $ty:ty) {
+ /* compiler built-in */
+ }
+
/// Unstable implementation detail of the `rustc` compiler, do not use.
#[rustc_builtin_macro]
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index ae4ebf444..42c342801 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -96,6 +96,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {}
)]
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Sized {
// Empty.
}
@@ -127,6 +128,7 @@ pub trait Sized {
/// [nomicon-coerce]: ../../nomicon/coercions.html
#[unstable(feature = "unsize", issue = "27732")]
#[lang = "unsize"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Unsize<T: ?Sized> {
// Empty.
}
@@ -693,6 +695,7 @@ impl<T: ?Sized> StructuralEq for PhantomData<T> {}
reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
)]
#[lang = "discriminant_kind"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait DiscriminantKind {
/// The type of the discriminant, which must satisfy the trait
/// bounds required by `mem::Discriminant`.
@@ -793,6 +796,7 @@ impl<T: ?Sized> Unpin for *mut T {}
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
#[const_trait]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Destruct {}
/// A marker for tuple types.
@@ -802,8 +806,18 @@ pub trait Destruct {}
#[unstable(feature = "tuple_trait", issue = "none")]
#[lang = "tuple_trait"]
#[rustc_on_unimplemented(message = "`{Self}` is not a tuple")]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Tuple {}
+/// A marker for pointer-sized types.
+#[unstable(feature = "pointer_sized_trait", issue = "none")]
+#[cfg_attr(not(bootstrap), lang = "pointer_sized")]
+#[rustc_on_unimplemented(
+ message = "`{Self}` needs to be a pointer-sized type",
+ label = "`{Self}` needs to be a pointer-sized type"
+)]
+pub trait PointerSized {}
+
/// Implementations of `Copy` for primitive types.
///
/// Implementations that cannot be described in Rust
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index 7757c95de..3f4918365 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -1172,7 +1172,7 @@ impl<T> MaybeUninit<T> {
/// #![feature(maybe_uninit_as_bytes, maybe_uninit_slice)]
/// use std::mem::MaybeUninit;
///
- /// let val = 0x12345678i32;
+ /// let val = 0x12345678_i32;
/// let uninit = MaybeUninit::new(val);
/// let uninit_bytes = uninit.as_bytes();
/// let bytes = unsafe { MaybeUninit::slice_assume_init_ref(uninit_bytes) };
@@ -1198,7 +1198,7 @@ impl<T> MaybeUninit<T> {
/// #![feature(maybe_uninit_as_bytes)]
/// use std::mem::MaybeUninit;
///
- /// let val = 0x12345678i32;
+ /// let val = 0x12345678_i32;
/// let mut uninit = MaybeUninit::new(val);
/// let uninit_bytes = uninit.as_bytes_mut();
/// if cfg!(target_endian = "little") {
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 9195da5a4..383bdc7b6 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -21,11 +21,6 @@ mod maybe_uninit;
#[stable(feature = "maybe_uninit", since = "1.36.0")]
pub use maybe_uninit::MaybeUninit;
-// FIXME: This is left here for now to avoid complications around pending reverts.
-// Once <https://github.com/rust-lang/rust/issues/101899> is fully resolved,
-// this should be removed and the references in `alloc::Layout` updated.
-pub(crate) use ptr::Alignment as ValidAlign;
-
mod transmutability;
#[unstable(feature = "transmutability", issue = "99571")]
pub use transmutability::{Assume, BikeshedIntrinsicFrom};
@@ -730,10 +725,7 @@ pub const fn swap<T>(x: &mut T, y: &mut T) {
// understanding `mem::replace`, `Option::take`, etc. - a better overall
// solution might be to make `ptr::swap_nonoverlapping` into an intrinsic, which
// a backend can choose to implement using the block optimization, or not.
- // NOTE(scottmcm) MIRI is disabled here as reading in smaller units is a
- // pessimization for it. Also, if the type contains any unaligned pointers,
- // copying those over multiple reads is difficult to support.
- #[cfg(not(any(target_arch = "spirv", miri)))]
+ #[cfg(not(any(target_arch = "spirv")))]
{
// For types that are larger multiples of their alignment, the simple way
// tends to copy the whole thing to stack rather than doing it one part
@@ -1004,7 +996,7 @@ pub fn drop<T>(_x: T) {}
/// ```
#[inline]
#[unstable(feature = "mem_copy_fn", issue = "98262")]
-pub fn copy<T: Copy>(x: &T) -> T {
+pub const fn copy<T: Copy>(x: &T) -> T {
*x
}
@@ -1121,7 +1113,10 @@ impl<T> fmt::Debug for Discriminant<T> {
/// # Stability
///
/// The discriminant of an enum variant may change if the enum definition changes. A discriminant
-/// of some variant will not change between compilations with the same compiler.
+/// of some variant will not change between compilations with the same compiler. See the [Reference]
+/// for more information.
+///
+/// [Reference]: ../../reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
///
/// # Examples
///
@@ -1137,6 +1132,62 @@ impl<T> fmt::Debug for Discriminant<T> {
/// assert_eq!(mem::discriminant(&Foo::B(1)), mem::discriminant(&Foo::B(2)));
/// assert_ne!(mem::discriminant(&Foo::B(3)), mem::discriminant(&Foo::C(3)));
/// ```
+///
+/// ## Accessing the numeric value of the discriminant
+///
+/// Note that it is *undefined behavior* to [`transmute`] from [`Discriminant`] to a primitive!
+///
+/// If an enum has only unit variants, then the numeric value of the discriminant can be accessed
+/// with an [`as`] cast:
+///
+/// ```
+/// enum Enum {
+/// Foo,
+/// Bar,
+/// Baz,
+/// }
+///
+/// assert_eq!(0, Enum::Foo as isize);
+/// assert_eq!(1, Enum::Bar as isize);
+/// assert_eq!(2, Enum::Baz as isize);
+/// ```
+///
+/// If an enum has opted-in to having a [primitive representation] for its discriminant,
+/// then it's possible to use pointers to read the memory location storing the discriminant.
+/// That **cannot** be done for enums using the [default representation], however, as it's
+/// undefined what layout the discriminant has and where it's stored — it might not even be
+/// stored at all!
+///
+/// [`as`]: ../../std/keyword.as.html
+/// [primitive representation]: ../../reference/type-layout.html#primitive-representations
+/// [default representation]: ../../reference/type-layout.html#the-default-representation
+/// ```
+/// #[repr(u8)]
+/// enum Enum {
+/// Unit,
+/// Tuple(bool),
+/// Struct { a: bool },
+/// }
+///
+/// impl Enum {
+/// fn discriminant(&self) -> u8 {
+/// // SAFETY: Because `Self` is marked `repr(u8)`, its layout is a `repr(C)` `union`
+/// // between `repr(C)` structs, each of which has the `u8` discriminant as its first
+/// // field, so we can read the discriminant without offsetting the pointer.
+/// unsafe { *<*const _>::from(self).cast::<u8>() }
+/// }
+/// }
+///
+/// let unit_like = Enum::Unit;
+/// let tuple_like = Enum::Tuple(true);
+/// let struct_like = Enum::Struct { a: false };
+/// assert_eq!(0, unit_like.discriminant());
+/// assert_eq!(1, tuple_like.discriminant());
+/// assert_eq!(2, struct_like.discriminant());
+///
+/// // ⚠️ This is undefined behavior. Don't do this. ⚠️
+/// // assert_eq!(0, unsafe { std::mem::transmute::<_, u8>(std::mem::discriminant(&unit_like)) });
+/// ```
#[stable(feature = "discriminant_value", since = "1.21.0")]
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
#[cfg_attr(not(test), rustc_diagnostic_item = "mem_discriminant")]
diff --git a/library/core/src/num/flt2dec/strategy/dragon.rs b/library/core/src/num/flt2dec/strategy/dragon.rs
index 8ced5971e..71b14d0ae 100644
--- a/library/core/src/num/flt2dec/strategy/dragon.rs
+++ b/library/core/src/num/flt2dec/strategy/dragon.rs
@@ -366,7 +366,7 @@ pub fn format_exact<'a>(
if order == Ordering::Greater
|| (order == Ordering::Equal
// SAFETY: `buf[len-1]` is initialized.
- && (len == 0 || unsafe { buf[len - 1].assume_init() } & 1 == 1))
+ && len > 0 && unsafe { buf[len - 1].assume_init() } & 1 == 1)
{
// if rounding up changes the length, the exponent should also change.
// but we've been requested a fixed number of digits, so do not alter the buffer...
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 914dca61b..57096f439 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -107,6 +107,9 @@ macro_rules! int_impl {
/// Returns the number of leading zeros in the binary representation of `self`.
///
+ /// Depending on what you're doing with the value, you might also be interested in the
+ /// [`ilog2`] function, which returns a consistent number even if the type widens.
+ ///
/// # Examples
///
/// Basic usage:
@@ -116,6 +119,7 @@ macro_rules! int_impl {
///
/// assert_eq!(n.leading_zeros(), 0);
/// ```
+ #[doc = concat!("[`ilog2`]: ", stringify!($SelfT), "::ilog2")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
#[must_use = "this returns the result of the operation, \
@@ -757,10 +761,11 @@ macro_rules! int_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
- unsafe { intrinsics::unchecked_shl(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
@@ -804,10 +809,11 @@ macro_rules! int_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
- unsafe { intrinsics::unchecked_shr(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked absolute value. Computes `self.abs()`, returning `None` if
@@ -1354,11 +1360,12 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shl(rhs & ($BITS - 1))
}
}
@@ -1383,11 +1390,12 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shr(rhs & ($BITS - 1))
}
}
@@ -2068,11 +2076,15 @@ macro_rules! int_impl {
pub const fn rem_euclid(self, rhs: Self) -> Self {
let r = self % rhs;
if r < 0 {
- if rhs < 0 {
- r - rhs
- } else {
- r + rhs
- }
+ // Semantically equivalent to `if rhs < 0 { r - rhs } else { r + rhs }`.
+ // If `rhs` is not `Self::MIN`, then `r + abs(rhs)` will not overflow
+ // and is clearly equivalent, because `r` is negative.
            + // Otherwise, `rhs` is `Self::MIN`, and we have
+ // `r.wrapping_add(Self::MIN.wrapping_abs())`, which evaluates
+ // to `r.wrapping_add(Self::MIN)`, which is equivalent to
+ // `r - Self::MIN`, which is what we wanted (and will not overflow
+ // for negative `r`).
+ r.wrapping_add(rhs.wrapping_abs())
} else {
r
}
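
As the new comment argues, the single `wrapping_add` is semantically identical to the old branch, including at the `Self::MIN` boundary. A few concrete checks:

    // Euclidean remainder is non-negative even for a negative dividend.
    assert_eq!((-7i32).rem_euclid(4), 1);
    // A negative divisor yields the same remainder.
    assert_eq!((-7i32).rem_euclid(-4), 1);
    // The boundary case the comment reasons about: rhs == Self::MIN.
    assert_eq!((-1i32).rem_euclid(i32::MIN), i32::MAX);
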
@@ -2271,15 +2283,16 @@ macro_rules! int_impl {
/// # Panics
///
/// This function will panic if `self` is less than or equal to zero,
- /// or if `base` is less then 2.
+ /// or if `base` is less than 2.
///
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".ilog(5), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2298,10 +2311,11 @@ macro_rules! int_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".ilog2(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2319,10 +2333,11 @@ macro_rules! int_impl {
/// # Example
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".ilog10(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2343,10 +2358,10 @@ macro_rules! int_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_ilog(5), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2379,10 +2394,10 @@ macro_rules! int_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_ilog2(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2403,10 +2418,10 @@ macro_rules! int_impl {
/// # Example
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_ilog10(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
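
With `int_log` stabilized in 1.67, the `#![feature(int_log)]` line drops out of every doctest above and the methods work on stable, including in const context:

    assert_eq!(1000i32.ilog10(), 3);
    assert_eq!(64i32.ilog2(), 6);
    // The checked variants return `None` instead of panicking:
    assert_eq!(0i32.checked_ilog2(), None);
    assert_eq!(5i32.checked_ilog(1), None); // base must be at least 2
    const LOG: u32 = 81i32.ilog(3);
    assert_eq!(LOG, 4);
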
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 311c5fa5b..ac7f579eb 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -3,12 +3,15 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::ascii;
-use crate::error::Error;
+use crate::convert::TryInto;
use crate::intrinsics;
use crate::mem;
use crate::ops::{Add, Mul, Sub};
use crate::str::FromStr;
+#[cfg(not(no_fp_fmt_parse))]
+use crate::error::Error;
+
// Used because the `?` operator is not allowed in a const context.
macro_rules! try_opt {
($e:expr) => {
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 6b6f3417f..fbda8f82b 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -321,7 +321,6 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
@@ -356,7 +355,6 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
@@ -391,8 +389,8 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
/// #![feature(nonzero_ops)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
@@ -420,7 +418,6 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -460,14 +457,13 @@ macro_rules! nonzero_unsigned_operations {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(7).unwrap().ilog2(), 2);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(8).unwrap().ilog2(), 3);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(9).unwrap().ilog2(), 3);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -485,14 +481,13 @@ macro_rules! nonzero_unsigned_operations {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(99).unwrap().ilog10(), 1);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(100).unwrap().ilog10(), 2);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(101).unwrap().ilog10(), 2);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -526,7 +521,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -556,7 +550,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -591,7 +584,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -626,7 +618,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -662,14 +653,13 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
#[doc = concat!("let min = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN)?;")]
- #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ #[doc = concat!("# let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(pos, pos.wrapping_abs());
@@ -905,7 +895,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -941,7 +930,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -986,8 +974,8 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
/// #![feature(nonzero_ops)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -1014,7 +1002,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
@@ -1058,7 +1045,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
@@ -1162,8 +1148,8 @@ macro_rules! nonzero_min_max_unsigned {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), 1", stringify!($Int), ");")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1177,8 +1163,8 @@ macro_rules! nonzero_min_max_unsigned {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1204,8 +1190,8 @@ macro_rules! nonzero_min_max_signed {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), ", stringify!($Int), "::MIN);")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1223,8 +1209,8 @@ macro_rules! nonzero_min_max_signed {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1263,12 +1249,11 @@ macro_rules! nonzero_bits {
/// # Examples
///
/// ```
- /// #![feature(nonzero_bits)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
#[doc = concat!("assert_eq!(", stringify!($Ty), "::BITS, ", stringify!($Int), "::BITS);")]
/// ```
- #[unstable(feature = "nonzero_bits", issue = "94881")]
+ #[stable(feature = "nonzero_bits", since = "1.67.0")]
pub const BITS: u32 = <$Int>::BITS;
}
)+
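
`BITS` on the `NonZero*` types forwards to the underlying primitive, so after stabilization it is interchangeable with the primitive constant, and the newly stable `ilog2`/`ilog10` behave exactly like their primitive counterparts:

    use std::num::{NonZeroI64, NonZeroU8};

    assert_eq!(NonZeroU8::BITS, u8::BITS);   // 8
    assert_eq!(NonZeroI64::BITS, i64::BITS); // 64
    assert_eq!(NonZeroU8::new(32).unwrap().ilog2(), 5);
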
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 335cc5124..1c97c4686 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -109,6 +109,9 @@ macro_rules! uint_impl {
/// Returns the number of leading zeros in the binary representation of `self`.
///
+ /// Depending on what you're doing with the value, you might also be interested in the
+ /// [`ilog2`] function, which returns a consistent number, even if the type widens.
+ ///
/// # Examples
///
/// Basic usage:
@@ -118,6 +121,7 @@ macro_rules! uint_impl {
///
/// assert_eq!(n.leading_zeros(), 2);
/// ```
+ #[doc = concat!("[`ilog2`]: ", stringify!($SelfT), "::ilog2")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_math", since = "1.32.0")]
#[must_use = "this returns the result of the operation, \
@@ -692,15 +696,16 @@ macro_rules! uint_impl {
///
/// # Panics
///
- /// This function will panic if `self` is zero, or if `base` is less then 2.
+ /// This function will panic if `self` is zero, or if `base` is less than 2.
///
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".ilog(5), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -719,10 +724,11 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".ilog2(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -740,10 +746,11 @@ macro_rules! uint_impl {
/// # Example
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".ilog10(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -764,10 +771,10 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_ilog(5), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -800,10 +807,10 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_ilog2(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -822,10 +829,10 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_ilog10(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -901,10 +908,11 @@ macro_rules! uint_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
- unsafe { intrinsics::unchecked_shl(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked shift right. Computes `self >> rhs`, returning `None`
@@ -948,10 +956,11 @@ macro_rules! uint_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
- unsafe { intrinsics::unchecked_shr(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
@@ -1367,11 +1376,12 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shl(rhs & ($BITS - 1))
}
}
@@ -1399,11 +1409,12 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shr(rhs & ($BITS - 1))
}
}
diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs
index 72ebe653c..cd183540c 100644
--- a/library/core/src/ops/control_flow.rs
+++ b/library/core/src/ops/control_flow.rs
@@ -79,7 +79,9 @@ use crate::{convert, ops};
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
#[stable(feature = "control_flow_enum_type", since = "1.55.0")]
-#[derive(Debug, Clone, Copy, PartialEq)]
+// ControlFlow should not implement PartialOrd or Ord, per RFC 3058:
+// https://rust-lang.github.io/rfcs/3058-try-trait-v2.html#traits-for-controlflow
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ControlFlow<B, C = ()> {
/// Move on to the next phase of the operation as normal.
#[stable(feature = "control_flow_enum_type", since = "1.55.0")]
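
Adding `Eq` and `Hash` while deliberately omitting `PartialOrd`/`Ord` (per the RFC note above) lets `ControlFlow` values serve as hash keys while staying unordered:

    use std::collections::HashSet;
    use std::ops::ControlFlow;

    let mut outcomes: HashSet<ControlFlow<&str, u32>> = HashSet::new();
    outcomes.insert(ControlFlow::Continue(1));
    outcomes.insert(ControlFlow::Break("done"));
    assert!(outcomes.contains(&ControlFlow::Break("done")));
    // Ordering comparisons such as `a < b` intentionally do not compile:
    // ControlFlow implements neither PartialOrd nor Ord.
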
diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs
index 4f4c99c4a..c67867f44 100644
--- a/library/core/src/ops/deref.rs
+++ b/library/core/src/ops/deref.rs
@@ -61,7 +61,7 @@
#[doc(alias = "&*")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "Deref"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Deref {
/// The resulting type after dereferencing.
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 2e0a752c8..127b047db 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -1,3 +1,6 @@
+#[cfg(not(bootstrap))]
+use crate::marker::Tuple;
+
/// The version of the call operator that takes an immutable receiver.
///
/// Instances of `Fn` can be called repeatedly without mutating state.
@@ -51,9 +54,9 @@
/// let double = |x| x * 2;
/// assert_eq!(call_with_one(double), 2);
/// ```
+#[cfg(bootstrap)]
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_diagnostic_item = "Fn"]
#[rustc_paren_sugar]
#[rustc_on_unimplemented(
on(
@@ -71,13 +74,93 @@
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Fn<Args>: FnMut<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
extern "rust-call" fn call(&self, args: Args) -> Self::Output;
}
+/// The version of the call operator that takes an immutable receiver.
+///
+/// Instances of `Fn` can be called repeatedly without mutating state.
+///
+/// *This trait (`Fn`) is not to be confused with [function pointers]
+/// (`fn`).*
+///
+/// `Fn` is implemented automatically by closures which only take immutable
+/// references to captured variables or don't capture anything at all, as well
+/// as (safe) [function pointers] (with some caveats, see their documentation
+/// for more details). Additionally, for any type `F` that implements `Fn`, `&F`
+/// implements `Fn`, too.
+///
+/// Since both [`FnMut`] and [`FnOnce`] are supertraits of `Fn`, any
+/// instance of `Fn` can be used as a parameter where a [`FnMut`] or [`FnOnce`]
+/// is expected.
+///
+/// Use `Fn` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly and without mutating state (e.g., when
+/// calling it concurrently). If you do not need such strict requirements, use
+/// [`FnMut`] or [`FnOnce`] as bounds.
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a closure
+///
+/// ```
+/// let square = |x| x * x;
+/// assert_eq!(square(5), 25);
+/// ```
+///
+/// ## Using a `Fn` parameter
+///
+/// ```
+/// fn call_with_one<F>(func: F) -> usize
+/// where F: Fn(usize) -> usize {
+/// func(1)
+/// }
+///
+/// let double = |x| x * 2;
+/// assert_eq!(call_with_one(double), 2);
+/// ```
+#[cfg(not(bootstrap))]
+#[lang = "fn"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{Fn}<{Args}>` closure, found `{Self}`",
+ label = "expected an `Fn<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+#[const_trait]
+pub trait Fn<Args: Tuple>: FnMut<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output;
+}
+
/// The version of the call operator that takes a mutable receiver.
///
/// Instances of `FnMut` can be called repeatedly and may mutate state.
@@ -139,9 +222,9 @@ pub trait Fn<Args>: FnMut<Args> {
///
/// assert_eq!(x, 5);
/// ```
+#[cfg(bootstrap)]
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_diagnostic_item = "FnMut"]
#[rustc_paren_sugar]
#[rustc_on_unimplemented(
on(
@@ -159,13 +242,101 @@ pub trait Fn<Args>: FnMut<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait FnMut<Args>: FnOnce<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
}
+/// The version of the call operator that takes a mutable receiver.
+///
+/// Instances of `FnMut` can be called repeatedly and may mutate state.
+///
+/// `FnMut` is implemented automatically by closures which take mutable
+/// references to captured variables, as well as all types that implement
+/// [`Fn`], e.g., (safe) [function pointers] (since `FnMut` is a supertrait of
+/// [`Fn`]). Additionally, for any type `F` that implements `FnMut`, `&mut F`
+/// implements `FnMut`, too.
+///
+/// Since [`FnOnce`] is a supertrait of `FnMut`, any instance of `FnMut` can be
+/// used where a [`FnOnce`] is expected, and since [`Fn`] is a subtrait of
+/// `FnMut`, any instance of [`Fn`] can be used where `FnMut` is expected.
+///
+/// Use `FnMut` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly, while allowing it to mutate state.
+/// If you don't want the parameter to mutate state, use [`Fn`] as a
+/// bound; if you don't need to call it repeatedly, use [`FnOnce`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a mutably capturing closure
+///
+/// ```
+/// let mut x = 5;
+/// {
+/// let mut square_x = || x *= x;
+/// square_x();
+/// }
+/// assert_eq!(x, 25);
+/// ```
+///
+/// ## Using a `FnMut` parameter
+///
+/// ```
+/// fn do_twice<F>(mut func: F)
+/// where F: FnMut()
+/// {
+/// func();
+/// func();
+/// }
+///
+/// let mut x: usize = 1;
+/// {
+/// let add_two_to_x = || x += 2;
+/// do_twice(add_two_to_x);
+/// }
+///
+/// assert_eq!(x, 5);
+/// ```
+#[cfg(not(bootstrap))]
+#[lang = "fn_mut"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{FnMut}<{Args}>` closure, found `{Self}`",
+ label = "expected an `FnMut<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+#[const_trait]
+pub trait FnMut<Args: Tuple>: FnOnce<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
/// The version of the call operator that takes a by-value receiver.
///
/// Instances of `FnOnce` can be called, but might not be callable multiple
@@ -219,9 +390,9 @@ pub trait FnMut<Args>: FnOnce<Args> {
///
/// // `consume_and_return_x` can no longer be invoked at this point
/// ```
+#[cfg(bootstrap)]
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_diagnostic_item = "FnOnce"]
#[rustc_paren_sugar]
#[rustc_on_unimplemented(
on(
@@ -239,7 +410,7 @@ pub trait FnMut<Args>: FnOnce<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait FnOnce<Args> {
/// The returned type after the call operator is used.
#[lang = "fn_once_output"]
@@ -251,6 +422,92 @@ pub trait FnOnce<Args> {
extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
}
+/// The version of the call operator that takes a by-value receiver.
+///
+/// Instances of `FnOnce` can be called, but might not be callable multiple
+/// times. Because of this, if the only thing known about a type is that it
+/// implements `FnOnce`, it can only be called once.
+///
+/// `FnOnce` is implemented automatically by closures that might consume captured
+/// variables, as well as all types that implement [`FnMut`], e.g., (safe)
+/// [function pointers] (since `FnOnce` is a supertrait of [`FnMut`]).
+///
+/// Since both [`Fn`] and [`FnMut`] are subtraits of `FnOnce`, any instance of
+/// [`Fn`] or [`FnMut`] can be used where a `FnOnce` is expected.
+///
+/// Use `FnOnce` as a bound when you want to accept a parameter of function-like
+/// type and only need to call it once. If you need to call the parameter
+/// repeatedly, use [`FnMut`] as a bound; if you also need it to not mutate
+/// state, use [`Fn`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Using a `FnOnce` parameter
+///
+/// ```
+/// fn consume_with_relish<F>(func: F)
+/// where F: FnOnce() -> String
+/// {
+/// // `func` consumes its captured variables, so it cannot be run more
+/// // than once.
+/// println!("Consumed: {}", func());
+///
+/// println!("Delicious!");
+///
+/// // Attempting to invoke `func()` again will throw a `use of moved
+/// // value` error for `func`.
+/// }
+///
+/// let x = String::from("x");
+/// let consume_and_return_x = move || x;
+/// consume_with_relish(consume_and_return_x);
+///
+/// // `consume_and_return_x` can no longer be invoked at this point
+/// ```
+#[cfg(not(bootstrap))]
+#[lang = "fn_once"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
+ label = "expected an `FnOnce<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+#[const_trait]
+pub trait FnOnce<Args: Tuple> {
+ /// The returned type after the call operator is used.
+ #[lang = "fn_once_output"]
+ #[stable(feature = "fn_once_output", since = "1.12.0")]
+ type Output;
+
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
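
The duplicated trait definitions exist only so the bootstrap compiler, which predates the `Tuple` marker, can still build core; the `cfg(not(bootstrap))` versions add the `Args: Tuple` bound, ruling out non-tuple argument lists. A sketch of how the bound surfaces on a nightly of this vintage (assuming the unstable `fn_traits`, `unboxed_closures`, and `tuple_trait` features):

    #![feature(fn_traits, unboxed_closures, tuple_trait)]
    use std::marker::Tuple;

    // Generic over any callable and any *tuple* of arguments.
    fn apply<F, Args: Tuple>(f: F, args: Args) -> F::Output
    where
        F: Fn<Args>,
    {
        f.call(args)
    }

    fn main() {
        assert_eq!(apply(|a: i32, b: i32| a + b, (1, 2)), 3);
        // `apply(|x: i32| x, 1)` would not compile: `i32` is not a tuple.
    }
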
+
+#[cfg(bootstrap)]
mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
@@ -310,3 +567,66 @@ mod impls {
}
}
}
+
+#[cfg(not(bootstrap))]
+mod impls {
+ use crate::marker::Tuple;
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const Fn<A> for &F
+ where
+ F: ~const Fn<A>,
+ {
+ extern "rust-call" fn call(&self, args: A) -> F::Output {
+ (**self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnMut<A> for &F
+ where
+ F: ~const Fn<A>,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
+ (**self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnOnce<A> for &F
+ where
+ F: ~const Fn<A>,
+ {
+ type Output = F::Output;
+
+ extern "rust-call" fn call_once(self, args: A) -> F::Output {
+ (*self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnMut<A> for &mut F
+ where
+ F: ~const FnMut<A>,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
+ (*self).call_mut(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnOnce<A> for &mut F
+ where
+ F: ~const FnMut<A>,
+ {
+ type Output = F::Output;
+ extern "rust-call" fn call_once(self, args: A) -> F::Output {
+ (*self).call_mut(args)
+ }
+ }
+}
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index dd4e3ac1c..5e3dc48b6 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -55,7 +55,7 @@
#[doc(alias = "]")]
#[doc(alias = "[")]
#[doc(alias = "[]")]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Index<Idx: ?Sized> {
/// The returned type after indexing.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -164,7 +164,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
#[doc(alias = "[")]
#[doc(alias = "]")]
#[doc(alias = "[]")]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
/// Performs the mutable indexing (`container[index]`) operation.
///
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index f284b4359..505d964e5 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -512,7 +512,7 @@ use crate::{
};
/// The `Option` type. See [the module level documentation](self) for more.
-#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[derive(Copy, PartialOrd, Eq, Ord, Debug, Hash)]
#[rustc_diagnostic_item = "Option"]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Option<T> {
@@ -2035,6 +2035,72 @@ impl<'a, T> const From<&'a mut Option<T>> for Option<&'a mut T> {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> crate::marker::StructuralPartialEq for Option<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq> PartialEq for Option<T> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ SpecOptionPartialEq::eq(self, other)
+ }
+}
+
+#[unstable(feature = "spec_option_partial_eq", issue = "none", reason = "exposed only for rustc")]
+#[doc(hidden)]
+pub trait SpecOptionPartialEq: Sized {
+ fn eq(l: &Option<Self>, other: &Option<Self>) -> bool;
+}
+
+#[unstable(feature = "spec_option_partial_eq", issue = "none", reason = "exposed only for rustc")]
+impl<T: PartialEq> SpecOptionPartialEq for T {
+ #[inline]
+ default fn eq(l: &Option<T>, r: &Option<T>) -> bool {
+ match (l, r) {
+ (Some(l), Some(r)) => *l == *r,
+ (None, None) => true,
+ _ => false,
+ }
+ }
+}
+
+macro_rules! non_zero_option {
+ ( $( #[$stability: meta] $NZ:ty; )+ ) => {
+ $(
+ #[$stability]
+ impl SpecOptionPartialEq for $NZ {
+ #[inline]
+ fn eq(l: &Option<Self>, r: &Option<Self>) -> bool {
+ l.map(Self::get).unwrap_or(0) == r.map(Self::get).unwrap_or(0)
+ }
+ }
+ )+
+ };
+}
+
+non_zero_option! {
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU8;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU16;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU32;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU64;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU128;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroUsize;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI8;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI16;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI32;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI64;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI128;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroIsize;
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T> SpecOptionPartialEq for crate::ptr::NonNull<T> {
+ #[inline]
+ fn eq(l: &Option<Self>, r: &Option<Self>) -> bool {
+ l.map(Self::as_ptr).unwrap_or_else(|| crate::ptr::null_mut())
+ == r.map(Self::as_ptr).unwrap_or_else(|| crate::ptr::null_mut())
+ }
+}
+
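
The specialization relies on the `NonZero*` niche: `None` maps to `0`, a value the payload can never hold, so equality of the whole `Option` collapses to one integer comparison. The same trick written out by hand (the `SpecOptionPartialEq` trait itself is `doc(hidden)` and rustc-internal):

    use std::num::NonZeroU32;

    // None -> 0 is injective because no NonZeroU32 is ever 0, so a single
    // integer compare decides equality of the whole Option.
    fn opt_eq(l: Option<NonZeroU32>, r: Option<NonZeroU32>) -> bool {
        l.map(NonZeroU32::get).unwrap_or(0) == r.map(NonZeroU32::get).unwrap_or(0)
    }

    fn main() {
        assert!(opt_eq(NonZeroU32::new(5), NonZeroU32::new(5)));
        assert!(opt_eq(None, None));
        assert!(!opt_eq(None, NonZeroU32::new(5)));
    }
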
/////////////////////////////////////////////////////////////////////////////
// The Option Iterators
/////////////////////////////////////////////////////////////////////////////
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index 00b63dfbd..461b70c32 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -80,7 +80,6 @@ pub macro unreachable_2015 {
#[doc(hidden)]
#[unstable(feature = "edition_panic", issue = "none", reason = "use unreachable!() instead")]
#[allow_internal_unstable(core_panic)]
-#[rustc_diagnostic_item = "unreachable_2021_macro"]
#[rustc_macro_transparency = "semitransparent"]
pub macro unreachable_2021 {
() => (
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index a9de7c94e..a704a00fa 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -38,10 +38,9 @@ use crate::panic::{Location, PanicInfo};
/// site as much as possible (so that `panic!()` has as low an impact
/// on (e.g.) the inlining of other functions as possible), by moving
/// the actual formatting into this shared place.
-#[cold]
// If panic_immediate_abort, inline the abort call,
 // otherwise avoid inlining because it is a cold path.
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic_fmt"] // needed for const-evaluated panics
@@ -67,11 +66,9 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
/// Like panic_fmt, but without unwinding and track_caller to reduce the impact on codesize.
/// Also just works on `str`, as a `fmt::Arguments` needs more space to be passed.
-#[cold]
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cfg_attr(not(bootstrap), rustc_nounwind)]
-#[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+#[rustc_nounwind]
pub fn panic_str_nounwind(msg: &'static str) -> ! {
if cfg!(feature = "panic_immediate_abort") {
super::intrinsics::abort()
@@ -97,10 +94,9 @@ pub fn panic_str_nounwind(msg: &'static str) -> ! {
// above.
/// The underlying implementation of libcore's `panic!` macro when no formatting is used.
-#[cold]
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[rustc_const_unstable(feature = "core_panic", issue = "none")]
@@ -139,8 +135,8 @@ pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("{}", *x));
}
-#[cold]
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic_bounds_check"] // needed by codegen for panic on OOB array/slice access
fn panic_bounds_check(index: usize, len: usize) -> ! {
@@ -155,11 +151,10 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
///
/// This function is called directly by the codegen backend, and must not have
/// any extra arguments (including those synthesized by track_caller).
-#[cold]
-#[inline(never)]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[lang = "panic_no_unwind"] // needed by codegen for panic in nounwind function
-#[cfg_attr(not(bootstrap), rustc_nounwind)]
-#[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+#[rustc_nounwind]
fn panic_no_unwind() -> ! {
panic_str_nounwind("panic in a function that cannot unwind")
}
@@ -187,7 +182,8 @@ pub enum AssertKind {
}
/// Internal function for `assert_eq!` and `assert_ne!` macros
-#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[doc(hidden)]
pub fn assert_failed<T, U>(
@@ -204,7 +200,8 @@ where
}
/// Internal function for `assert_match!`
-#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[doc(hidden)]
pub fn assert_matches_failed<T: fmt::Debug + ?Sized>(
@@ -223,6 +220,8 @@ pub fn assert_matches_failed<T: fmt::Debug + ?Sized>(
}
/// Non-generic version of the above functions, to avoid code bloat.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
fn assert_failed_inner(
kind: AssertKind,
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index ccef35b45..4524fa4c4 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -543,7 +543,7 @@ impl<P: Deref> Pin<P> {
/// let p: Pin<&mut T> = Pin::new_unchecked(&mut a);
/// // This should mean the pointee `a` can never move again.
/// }
- /// mem::swap(&mut a, &mut b);
+ /// mem::swap(&mut a, &mut b); // Potential UB down the road ⚠️
/// // The address of `a` changed to `b`'s stack slot, so `a` got moved even
/// // though we have previously pinned it! We have violated the pinning API contract.
/// }
@@ -563,13 +563,66 @@ impl<P: Deref> Pin<P> {
/// // This should mean the pointee can never move again.
/// }
/// drop(pinned);
- /// let content = Rc::get_mut(&mut x).unwrap();
+ /// let content = Rc::get_mut(&mut x).unwrap(); // Potential UB down the road ⚠️
/// // Now, if `x` was the only reference, we have a mutable reference to
/// // data that we pinned above, which we could use to move it as we have
/// // seen in the previous example. We have violated the pinning API contract.
/// }
/// ```
///
+ /// ## Pinning of closure captures
+ ///
+ /// Particular care is required when using `Pin::new_unchecked` in a closure:
+ /// `Pin::new_unchecked(&mut var)` where `var` is a by-value (moved) closure capture
+ /// implicitly makes the promise that the closure itself is pinned, and that *all* uses
+ /// of this closure capture respect that pinning.
+ /// ```
+ /// use std::pin::Pin;
+ /// use std::task::Context;
+ /// use std::future::Future;
+ ///
+ /// fn move_pinned_closure(mut x: impl Future, cx: &mut Context<'_>) {
+ /// // Create a closure that moves `x`, and then internally uses it in a pinned way.
+ /// let mut closure = move || unsafe {
+ /// let _ignore = Pin::new_unchecked(&mut x).poll(cx);
+ /// };
+ /// // Call the closure, so the future can assume it has been pinned.
+ /// closure();
+ /// // Move the closure somewhere else. This also moves `x`!
+ /// let mut moved = closure;
+ /// // Calling it again means we polled the future from two different locations,
+ /// // violating the pinning API contract.
+ /// moved(); // Potential UB ⚠️
+ /// }
+ /// ```
+ /// When passing a closure to another API, that API might move the closure at any time, so
+ /// `Pin::new_unchecked` on closure captures may only be used if the API explicitly documents
+ /// that the closure is pinned.
+ ///
+ /// The better alternative is to avoid all that trouble and do the pinning in the outer function
+ /// instead (here using the unstable `pin` macro):
+ /// ```
+ /// #![feature(pin_macro)]
+ /// use std::pin::pin;
+ /// use std::task::Context;
+ /// use std::future::Future;
+ ///
+ /// fn move_pinned_closure(mut x: impl Future, cx: &mut Context<'_>) {
+ /// let mut x = pin!(x);
+ /// // Create a closure that captures `x: Pin<&mut _>`, which is safe to move.
+ /// let mut closure = move || {
+ /// let _ignore = x.as_mut().poll(cx);
+ /// };
+ /// // Call the closure, so the future can assume it has been pinned.
+ /// closure();
+ /// // Move the closure somewhere else.
+ /// let mut moved = closure;
+ /// // Calling it again here is fine (except that we might be polling a future that already
+ /// // returned `Poll::Ready`, but that is a separate problem).
+ /// moved();
+ /// }
+ /// ```
+ ///
/// [`mem::swap`]: crate::mem::swap
#[lang = "new_unchecked"]
#[inline(always)]
@@ -1059,7 +1112,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// 8 | let x: Pin<&mut Foo> = {
/// | - borrow later stored here
/// 9 | let x: Pin<&mut Foo> = pin!(Foo { /* … */ });
-/// | ^^^^^^^^^^^^^^^^^^^^^ creates a temporary which is freed while still in use
+/// | ^^^^^^^^^^^^^^^^^^^^^ creates a temporary value which is freed while still in use
/// 10 | x
/// 11 | }; // <- Foo is dropped
/// | - temporary value is freed at the end of this statement
diff --git a/library/core/src/prelude/v1.rs b/library/core/src/prelude/v1.rs
index b566e211c..2d67d742c 100644
--- a/library/core/src/prelude/v1.rs
+++ b/library/core/src/prelude/v1.rs
@@ -75,9 +75,16 @@ pub use crate::macros::builtin::{RustcDecodable, RustcEncodable};
// Do not `doc(no_inline)` so that they become doc items on their own
// (no public module for them to be re-exported from).
+#[cfg(not(bootstrap))]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+pub use crate::macros::builtin::alloc_error_handler;
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
pub use crate::macros::builtin::{bench, derive, global_allocator, test, test_case};
+#[unstable(feature = "derive_const", issue = "none")]
+#[cfg(not(bootstrap))]
+pub use crate::macros::builtin::derive_const;
+
#[unstable(
feature = "cfg_accessible",
issue = "64797",
@@ -91,3 +98,11 @@ pub use crate::macros::builtin::cfg_accessible;
reason = "`cfg_eval` is a recently implemented feature"
)]
pub use crate::macros::builtin::cfg_eval;
+
+#[unstable(
+ feature = "type_ascription",
+ issue = "23416",
+ reason = "placeholder syntax for type ascription"
+)]
+#[cfg(not(bootstrap))]
+pub use crate::macros::builtin::type_ascribe;
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index 331714a99..d6e9da187 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -1493,11 +1493,13 @@ mod prim_ref {}
/// However, a direct cast back is not possible. You need to use `transmute`:
///
/// ```rust
+/// # #[cfg(not(miri))] { // FIXME: use strict provenance APIs once they are stable, then remove this `cfg`
/// # let fnptr: fn(i32) -> i32 = |x| x+2;
/// # let fnptr_addr = fnptr as usize;
/// let fnptr = fnptr_addr as *const ();
/// let fnptr: fn(i32) -> i32 = unsafe { std::mem::transmute(fnptr) };
/// assert_eq!(fnptr(40), 42);
+/// # }
/// ```
///
/// Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index 1390e09dd..64a5290c3 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -9,7 +9,9 @@ use crate::{cmp, fmt, hash, mem, num};
/// Note that particularly large alignments, while representable in this type,
/// are likely not to be supported by actual allocators and linkers.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(transparent)]
pub struct Alignment(AlignmentEnum);
@@ -167,16 +169,18 @@ impl From<Alignment> for usize {
}
}
+#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl cmp::Ord for Alignment {
+impl const cmp::Ord for Alignment {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
- self.as_nonzero().cmp(&other.as_nonzero())
+ self.as_nonzero().get().cmp(&other.as_nonzero().get())
}
}
+#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl cmp::PartialOrd for Alignment {
+impl const cmp::PartialOrd for Alignment {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
@@ -198,7 +202,9 @@ type AlignmentEnum = AlignmentEnum32;
#[cfg(target_pointer_width = "64")]
type AlignmentEnum = AlignmentEnum64;
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u16)]
enum AlignmentEnum16 {
_Align1Shl0 = 1 << 0,
@@ -219,7 +225,9 @@ enum AlignmentEnum16 {
_Align1Shl15 = 1 << 15,
}
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u32)]
enum AlignmentEnum32 {
_Align1Shl0 = 1 << 0,
@@ -256,7 +264,9 @@ enum AlignmentEnum32 {
_Align1Shl31 = 1 << 31,
}
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u64)]
enum AlignmentEnum64 {
_Align1Shl0 = 1 << 0,
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 5a083227b..d34813599 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -45,7 +45,7 @@ impl<T: ?Sized> *const T {
/// Casts to a pointer of another type.
#[stable(feature = "ptr_cast", since = "1.38.0")]
#[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
- #[inline]
+ #[inline(always)]
pub const fn cast<U>(self) -> *const U {
self as _
}
@@ -79,19 +79,14 @@ impl<T: ?Sized> *const T {
/// }
/// ```
#[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, mut val: *const U) -> *const U
+ pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
where
U: ?Sized,
{
- let target = &mut val as *mut *const U as *mut *const u8;
 - // SAFETY: In case of a thin pointer, this operation is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *target = self as *const u8 };
- val
+ from_raw_parts::<U>(self as *const (), metadata(meta))
}
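
Rebuilding the pointer via `from_raw_parts` instead of writing through a pointer-to-pointer is what allows the method to become `const`. A nightly sketch of what it does (feature gate `set_ptr_value`, per the attribute above):

    #![feature(set_ptr_value)]

    fn main() {
        let arr = [1u8, 2, 3];
        let slice: *const [u8] = &arr; // wide pointer: address + length
        let thin = slice as *const u8; // the length metadata is dropped here
        // Re-attach the metadata of `slice` to the address of `thin`.
        let rebuilt: *const [u8] = thin.with_metadata_of(slice);
        assert_eq!(unsafe { &*rebuilt }, &arr[..]);
    }
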
/// Changes constness without changing the type.
@@ -100,6 +95,7 @@ impl<T: ?Sized> *const T {
/// refactored.
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[inline(always)]
pub const fn cast_mut(self) -> *mut T {
self as _
}
@@ -117,13 +113,21 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// let array = [13, 42];
/// let p0: *const i32 = &array[0];
/// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
/// let p1: *const i32 = &array[1];
/// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// # }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `exposed_addr` method, or update your code \
+ to follow the strict provenance rules using its APIs"
+ )]
+ #[inline(always)]
pub fn to_bits(self) -> usize
where
T: Sized,
@@ -140,11 +144,20 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// use std::ptr::NonNull;
/// let dangling: *const u8 = NonNull::dangling().as_ptr();
/// assert_eq!(<*const u8>::from_bits(1), dangling);
+ /// # }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `ptr::from_exposed_addr` function, or update \
+ your code to follow the strict provenance rules using its APIs"
+ )]
+ #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
+ #[inline(always)]
pub fn from_bits(bits: usize) -> Self
where
T: Sized,
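
The deprecation messages point at the strict-provenance replacements. A nightly sketch of the migration (feature gate `strict_provenance`, matching the attributes on `expose_addr` below):

    #![feature(strict_provenance)]

    fn main() {
        let v = 7u32;
        let p: *const u32 = &v;
        let addr = p.expose_addr(); // was: p.to_bits()
        let q = std::ptr::from_exposed_addr::<u32>(addr); // was: from_bits(addr)
        assert_eq!(unsafe { *q }, 7);
    }
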
@@ -176,7 +189,7 @@ impl<T: ?Sized> *const T {
/// might change in the future (including possibly weakening this so it becomes wholly
/// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
@@ -213,7 +226,7 @@ impl<T: ?Sized> *const T {
///
/// [`from_exposed_addr`]: from_exposed_addr
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize
where
@@ -478,8 +491,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
- let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -559,7 +571,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_offset(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
/// Masks out bits of the pointer according to a mask.
@@ -568,12 +580,36 @@ impl<T: ?Sized> *const T {
///
/// For non-`Sized` pointees this operation changes only the data pointer,
/// leaving the metadata untouched.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(ptr_mask, strict_provenance)]
+ /// let v = 17_u32;
+ /// let ptr: *const u32 = &v;
+ ///
    + /// // `u32` is 4-byte aligned,
    + /// // which means that the lower 2 bits are always 0.
+ /// let tag_mask = 0b11;
+ /// let ptr_mask = !tag_mask;
+ ///
+ /// // We can store something in these lower bits
+ /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
+ ///
+ /// // Get the "tag" back
+ /// let tag = tagged_ptr.addr() & tag_mask;
+ /// assert_eq!(tag, 0b10);
+ ///
    + /// // Note that `tagged_ptr` is unaligned; it's UB to read from it.
    + /// // To get the original pointer back, `mask` can be used:
+ /// let masked_ptr = tagged_ptr.mask(ptr_mask);
+ /// assert_eq!(unsafe { *masked_ptr }, 17);
+ /// ```
#[unstable(feature = "ptr_mask", issue = "98290")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
pub fn mask(self, mask: usize) -> *const T {
- let this = intrinsics::ptr_mask(self.cast::<()>(), mask);
- from_raw_parts::<T>(this, metadata(self))
+ intrinsics::ptr_mask(self.cast::<()>(), mask).with_metadata_of(self)
}
/// Calculates the distance between two pointers. The returned value is in
@@ -684,7 +720,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
}
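The relaxed `U: ?Sized` signature lets `origin` point to a different type than `self`, which is convenient for computing field offsets. A sketch, assuming nightly with `#![feature(pointer_byte_offsets)]` (the `Pair` type is made up for illustration):

```rust
#![feature(pointer_byte_offsets)]

#[repr(C)]
struct Pair {
    a: u8,
    b: u32,
}

fn main() {
    let v = Pair { a: 1, b: 2 };
    let base: *const Pair = &v;
    let field: *const u32 = &v.b;
    // `origin` no longer has to share the pointee type with `self`.
    let off = unsafe { field.byte_offset_from(base) };
    assert_eq!(off, 4); // `b` follows `a` plus 3 bytes of padding in this repr(C) layout
}
```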
@@ -914,8 +950,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
- let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer (convenience for
@@ -1001,8 +1036,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
- let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1082,7 +1116,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1162,7 +1196,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
/// Reads the value from `self` without moving it. This leaves the
@@ -1304,6 +1338,8 @@ impl<T: ?Sized> *const T {
/// }
/// # }
/// ```
+ #[must_use]
+ #[inline]
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
pub const fn align_offset(self, align: usize) -> usize
@@ -1314,32 +1350,149 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- fn rt_impl<T>(p: *const T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
- }
+ #[cfg(bootstrap)]
+ {
+ fn rt_impl<T>(p: *const T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
+ usize::MAX
+ }
- const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
- usize::MAX
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`,
 + // algorithm correctness cannot depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
+ #[cfg(not(bootstrap))]
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) }
+ }
}
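Callers are unaffected by the `cfg(bootstrap)` split: `align_offset` still returns the element count needed to reach the requested alignment, or `usize::MAX` when that is impossible (and const evaluation may always answer `usize::MAX`). A small usage sketch with a hypothetical `first_aligned` helper:

```rust
use core::mem;

// Hypothetical helper: index of the first usize-aligned byte in `bytes`, if any.
fn first_aligned(bytes: &[u8]) -> Option<usize> {
    let off = bytes.as_ptr().align_offset(mem::align_of::<usize>());
    // `usize::MAX` (alignment impossible) also fails this bounds check.
    (off < bytes.len()).then_some(off)
}

fn main() {
    let buf = [0u8; 32];
    if let Some(i) = first_aligned(&buf) {
        let p = buf.as_ptr().wrapping_add(i);
        assert_eq!(p.align_offset(mem::align_of::<usize>()), 0);
    }
}
```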
/// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned(self) -> bool
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned(self) -> bool
where
T: Sized,
{
- self.is_aligned_to(core::mem::align_of::<T>())
+ self.is_aligned_to(mem::align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@@ -1350,16 +1503,121 @@ impl<T: ?Sized> *const T {
/// # Panics
///
/// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
 + /// cannot be aligned more strictly than the reference's underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned_to(self, align: usize) -> bool {
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
- // Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() & align - 1 == 0
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ self.cast::<()>().align_offset(align) == 0
}
}
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index caa10f181..a8604843e 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -50,6 +50,7 @@ use crate::hash::{Hash, Hasher};
///
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Pointee {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 565c38d22..48b2e88da 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -35,7 +35,8 @@
//! be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//! underlying object is live and no reference (just raw pointers) is used to
-//! access the same memory.
+//! access the same memory. That is, reference and pointer accesses cannot be
+//! interleaved.
//!
//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
@@ -64,7 +65,6 @@
//! separate allocated object), heap allocations (each allocation created by the global allocator is
//! a separate allocated object), and `static` variables.
//!
-//!
//! # Strict Provenance
//!
//! **The following text is non-normative, insufficiently formal, and is an extremely strict
@@ -613,9 +613,10 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
/// [module documentation][crate::ptr] for details.
#[must_use]
-#[inline]
+#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
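`from_exposed_addr` is the second half of a round trip that starts with `expose_addr`: the exposure records the pointer's provenance so a later integer-to-pointer conversion may pick it back up. A nightly-only sketch, assuming `#![feature(strict_provenance)]`:

```rust
#![feature(strict_provenance)]
use core::ptr;

fn main() {
    let x = 5u32;
    let p: *const u32 = &x;

    // Exposing the address marks its provenance as recoverable...
    let addr = p.expose_addr();
    // ...so this integer-to-pointer conversion is allowed to reuse it.
    let q = ptr::from_exposed_addr::<u32>(addr);
    assert_eq!(unsafe { *q }, 5);
}
```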
@@ -650,9 +651,10 @@ where
/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
/// [module documentation][crate::ptr] for details.
#[must_use]
-#[inline]
+#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -908,21 +910,15 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
);
}
- // NOTE(scottmcm) Miri is disabled here as reading in smaller units is a
- // pessimization for it. Also, if the type contains any unaligned pointers,
- // copying those over multiple reads is difficult to support.
- #[cfg(not(miri))]
+ // Split up the slice into small power-of-two-sized chunks that LLVM is able
+ // to vectorize (unless it's a special type with more-than-pointer alignment,
+ // because we don't want to pessimize things like slices of SIMD vectors.)
+ if mem::align_of::<T>() <= mem::size_of::<usize>()
+ && (!mem::size_of::<T>().is_power_of_two()
+ || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
{
- // Split up the slice into small power-of-two-sized chunks that LLVM is able
- // to vectorize (unless it's a special type with more-than-pointer alignment,
- // because we don't want to pessimize things like slices of SIMD vectors.)
- if mem::align_of::<T>() <= mem::size_of::<usize>()
- && (!mem::size_of::<T>().is_power_of_two()
- || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
- {
- attempt_swap_as_chunks!(usize);
- attempt_swap_as_chunks!(u8);
- }
+ attempt_swap_as_chunks!(usize);
+ attempt_swap_as_chunks!(u8);
}
// SAFETY: Same preconditions as this function
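The chunked copying is purely a codegen strategy; the caller-facing contract of `swap_nonoverlapping` is unchanged (both regions valid, properly aligned, and non-overlapping). For reference, typical usage looks like:

```rust
use core::ptr;

fn main() {
    let mut a = [1u32, 2, 3];
    let mut b = [4u32, 5, 6];
    // SAFETY: `a` and `b` are distinct locals, so the two regions cannot
    // overlap, and each is valid for reads and writes of three `u32`s.
    unsafe { ptr::swap_nonoverlapping(a.as_mut_ptr(), b.as_mut_ptr(), 3) };
    assert_eq!(a, [4, 5, 6]);
    assert_eq!(b, [1, 2, 3]);
}
```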
@@ -1580,10 +1576,14 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
/// Align pointer `p`.
///
-/// Calculate offset (in terms of elements of `stride` stride) that has to be applied
+/// Calculate offset (in terms of elements of `size_of::<T>()` stride) that has to be applied
/// to pointer `p` so that pointer `p` would get aligned to `a`.
///
-/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
+/// # Safety
+/// `a` must be a power of two.
+///
+/// # Notes
+/// This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
@@ -1593,12 +1593,12 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
///
/// Any questions go to @nagisa.
#[lang = "align_offset"]
-pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
+pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
- cttz_nonzero, exact_div, unchecked_rem, unchecked_shl, unchecked_shr, unchecked_sub,
- wrapping_add, wrapping_mul, wrapping_sub,
+ cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl, unchecked_shr,
+ unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
@@ -1610,7 +1610,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
///
/// Implementation of this function shall not panic. Ever.
#[inline]
- unsafe fn mod_inv(x: usize, m: usize) -> usize {
+ const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note, that this table does not contain values where inverse does not exist (i.e., for
@@ -1618,40 +1618,48 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
- /// INV_TABLE_MOD²
- const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
- let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
- if m <= INV_TABLE_MOD {
- table_inverse & m_minus_one
- } else {
- // We iterate "up" using the following formula:
- //
- // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+ let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
+ let mut mod_gate = INV_TABLE_MOD;
+ // We iterate "up" using the following formula:
+ //
+ // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+ //
+ // This application needs to be applied at least until `2²ⁿ ≥ m`, at which point we can
+ // finally reduce the computation to our desired `m` by taking `inverse mod m`.
+ //
+ // This computation is `O(log log m)`, which is to say, that on 64-bit machines this loop
+ // will always finish in at most 4 iterations.
+ loop {
+ // y = y * (2 - xy) mod n
//
- // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
- let mut inverse = table_inverse;
- let mut going_mod = INV_TABLE_MOD_SQUARED;
- loop {
- // y = y * (2 - xy) mod n
- //
- // Note, that we use wrapping operations here intentionally – the original formula
- // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
- // usize::MAX` instead, because we take the result `mod n` at the end
- // anyway.
- inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
- if going_mod >= m {
- return inverse & m_minus_one;
- }
- going_mod = wrapping_mul(going_mod, going_mod);
+ // Note, that we use wrapping operations here intentionally – the original formula
+ // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
+ // usize::MAX` instead, because we take the result `mod n` at the end
+ // anyway.
+ if mod_gate >= m {
+ break;
}
+ inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
+ let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
+ if overflow {
+ break;
+ }
+ mod_gate = new_gate;
}
+ inverse & m_minus_one
}
- let addr = p.addr();
let stride = mem::size_of::<T>();
+
+ // SAFETY: This is just an inlined `p.addr()` (which is not
+ // a `const fn` so we cannot call it).
+ // During const eval, we hook this function to ensure that the pointer never
+ // has provenance, making this sound.
+ let addr: usize = unsafe { mem::transmute(p) };
+
// SAFETY: `a` is a power-of-two, therefore non-zero.
let a_minus_one = unsafe { unchecked_sub(a, 1) };
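The identity driving the `mod_inv` loop, xy ≡ 1 (mod 2ⁿ) → xy(2 − xy) ≡ 1 (mod 2²ⁿ), can be checked in isolation: each step doubles the number of correct low bits, so 64 bits are reached in at most six lifts from a one-bit seed. A standalone sketch (for any odd `x`, `1` is already its inverse modulo 2):

```rust
fn main() {
    let x: u64 = 0xdead_beef | 1; // any odd value is invertible mod 2^k

    let mut inv: u64 = 1; // correct modulo 2^1
    let mut bits = 1;
    while bits < 64 {
        // y := y * (2 - x*y), doubling the precision: 1 -> 2 -> 4 -> ... -> 64 bits.
        inv = inv.wrapping_mul(2u64.wrapping_sub(x.wrapping_mul(inv)));
        bits *= 2;
    }
    // `inv` is now the multiplicative inverse of `x` modulo 2^64.
    assert_eq!(x.wrapping_mul(inv), 1);
}
```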
@@ -1761,7 +1769,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// (which is what the `PartialEq for &T` implementation does).
///
/// When comparing wide pointers, both the address and the metadata are tested for equality.
-/// However, note that comparing trait object pointers (`*const dyn Trait`) is unrealiable: pointers
+/// However, note that comparing trait object pointers (`*const dyn Trait`) is unreliable: pointers
/// to values of the same underlying type can compare inequal (because vtables are duplicated in
/// multiple codegen units), and pointers to values of *different* underlying type can compare equal
/// (since identical vtables can be deduplicated within a codegen unit).
@@ -1793,7 +1801,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
-#[inline]
+#[inline(always)]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
}
@@ -1862,7 +1870,6 @@ macro_rules! fnptr_impls_safety_abi {
fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
};
(@c_unwind $FnTy: ty, $($Arg: ident),*) => {
- #[cfg(not(bootstrap))]
fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
};
(#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 6764002bc..c924a90b1 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -78,23 +78,14 @@ impl<T: ?Sized> *mut T {
/// }
/// ```
#[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, val: *const U) -> *mut U
+ pub const fn with_metadata_of<U>(self, meta: *const U) -> *mut U
where
U: ?Sized,
{
- // Prepare in the type system that we will replace the pointer value with a mutable
- // pointer, taking the mutable provenance from the `self` pointer.
- let mut val = val as *mut U;
- // Pointer to the pointer value within the value.
- let target = &mut val as *mut *mut U as *mut *mut u8;
- // SAFETY: In case of a thin pointer, this operations is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *target = self as *mut u8 };
- val
+ from_raw_parts_mut::<U>(self as *mut (), metadata(meta))
}
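With the pointer value no longer written through memory, `with_metadata_of` is simply `from_raw_parts` with the metadata borrowed from another pointer, which is what makes it `const`-compatible. A nightly-only usage sketch, assuming `#![feature(set_ptr_value)]`:

```rust
#![feature(set_ptr_value)]

fn main() {
    let arr = [1u8, 2, 3];
    let fat: *const [u8] = &arr;      // carries length metadata (3)
    let thin: *const u8 = arr.as_ptr();

    // Rebuild a slice pointer: address from `thin`, metadata from `fat`.
    let rebuilt: *const [u8] = thin.with_metadata_of(fat);
    assert_eq!(unsafe { &*rebuilt }, &arr[..]);
}
```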
/// Changes constness without changing the type.
@@ -109,6 +100,7 @@ impl<T: ?Sized> *mut T {
/// [`cast_mut`]: #method.cast_mut
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[inline(always)]
pub const fn cast_const(self) -> *const T {
self as _
}
@@ -126,14 +118,22 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// let mut array = [13, 42];
/// let mut it = array.iter_mut();
/// let p0: *mut i32 = it.next().unwrap();
/// assert_eq!(<*mut _>::from_bits(p0.to_bits()), p0);
/// let p1: *mut i32 = it.next().unwrap();
/// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `exposed_addr` method, or update your code \
+ to follow the strict provenance rules using its APIs"
+ )]
+ #[inline(always)]
pub fn to_bits(self) -> usize
where
T: Sized,
@@ -150,11 +150,20 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// use std::ptr::NonNull;
/// let dangling: *mut u8 = NonNull::dangling().as_ptr();
/// assert_eq!(<*mut u8>::from_bits(1), dangling);
 + /// # }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `ptr::from_exposed_addr_mut` function, or \
+ update your code to follow the strict provenance rules using its APIs"
+ )]
+ #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
+ #[inline(always)]
pub fn from_bits(bits: usize) -> Self
where
T: Sized,
@@ -186,7 +195,7 @@ impl<T: ?Sized> *mut T {
/// might change in the future (including possibly weakening this so it becomes wholly
/// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
@@ -223,7 +232,7 @@ impl<T: ?Sized> *mut T {
///
/// [`from_exposed_addr_mut`]: from_exposed_addr_mut
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize
where
@@ -496,8 +505,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
- let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -576,10 +584,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
- from_raw_parts_mut::<T>(
- self.cast::<u8>().wrapping_offset(count).cast::<()>(),
- metadata(self),
- )
+ self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
/// Masks out bits of the pointer according to a mask.
@@ -588,12 +593,39 @@ impl<T: ?Sized> *mut T {
///
/// For non-`Sized` pointees this operation changes only the data pointer,
/// leaving the metadata untouched.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(ptr_mask, strict_provenance)]
+ /// let mut v = 17_u32;
+ /// let ptr: *mut u32 = &mut v;
+ ///
 + /// // `u32` is aligned to 4 bytes,
 + /// // which means that the lower 2 bits are always 0.
+ /// let tag_mask = 0b11;
+ /// let ptr_mask = !tag_mask;
+ ///
+ /// // We can store something in these lower bits
+ /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
+ ///
+ /// // Get the "tag" back
+ /// let tag = tagged_ptr.addr() & tag_mask;
+ /// assert_eq!(tag, 0b10);
+ ///
 + /// // Note that `tagged_ptr` is unaligned; it's UB to read from or write to it.
 + /// // To recover the original pointer, `mask` can be used:
+ /// let masked_ptr = tagged_ptr.mask(ptr_mask);
+ /// assert_eq!(unsafe { *masked_ptr }, 17);
+ ///
+ /// unsafe { *masked_ptr = 0 };
+ /// assert_eq!(v, 0);
+ /// ```
#[unstable(feature = "ptr_mask", issue = "98290")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
pub fn mask(self, mask: usize) -> *mut T {
- let this = intrinsics::ptr_mask(self.cast::<()>(), mask) as *mut ();
- from_raw_parts_mut::<T>(this, metadata(self))
+ intrinsics::ptr_mask(self.cast::<()>(), mask).cast_mut().with_metadata_of(self)
}
/// Returns `None` if the pointer is null, or else returns a unique reference to
@@ -861,7 +893,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
}
@@ -1020,8 +1052,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
- let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer (convenience for
@@ -1107,8 +1138,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
- let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1188,7 +1218,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
- from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1268,7 +1298,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
- from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
/// Reads the value from `self` without moving it. This leaves the
@@ -1576,6 +1606,8 @@ impl<T: ?Sized> *mut T {
/// }
/// # }
/// ```
+ #[must_use]
+ #[inline]
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
pub const fn align_offset(self, align: usize) -> usize
@@ -1586,32 +1618,151 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- fn rt_impl<T>(p: *mut T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
+ #[cfg(bootstrap)]
+ {
+ fn rt_impl<T>(p: *mut T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
+ usize::MAX
+ }
+
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`,
 + // algorithm correctness cannot depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
- const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
- usize::MAX
+ #[cfg(not(bootstrap))]
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) }
}
-
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
/// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_mut_refs)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// // Also, note that mutable references are not allowed in the final value of constants.
+ /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *mut AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned(self) -> bool
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned(self) -> bool
where
T: Sized,
{
- self.is_aligned_to(core::mem::align_of::<T>())
+ self.is_aligned_to(mem::align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@@ -1622,16 +1773,123 @@ impl<T: ?Sized> *mut T {
/// # Panics
///
/// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
 + /// cannot be aligned more strictly than the reference's underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_mut_refs)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// // Also, note that mutable references are not allowed in the final value of constants.
+ /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *mut u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned_to(self, align: usize) -> bool {
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
- // Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() & align - 1 == 0
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ self.cast::<()>().align_offset(align) == 0
}
}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index c18264d13..c4348169c 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -330,7 +330,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const fn as_ptr(self) -> *mut T {
self.pointer as *mut T
}
@@ -378,7 +378,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const unsafe fn as_ref<'a>(&self) -> &'a T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
@@ -429,7 +429,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a mutable reference.
@@ -703,7 +703,7 @@ impl<T> NonNull<[T]> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for NonNull<T> {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index 6d2f7330d..c295a0e06 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -31,9 +31,8 @@ where
}
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
@@ -48,19 +47,20 @@ const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[inline]
#[track_caller]
fn slice_start_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range start index {index} out of range for slice of length {len}");
}
+#[inline]
#[track_caller]
const fn slice_start_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice start index is out of range for slice");
}
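These `_rt`/`_ct` pairs exist because formatted panic messages cannot be built during const evaluation; `const_eval_select` picks the right body for each context. The intrinsic is internal to `core`, so the following only illustrates the shape of the pattern and is not expected to compile in ordinary crates:

```rust
#![feature(core_intrinsics)]
use core::intrinsics::const_eval_select;

#[track_caller]
const fn start_index_fail(index: usize, len: usize) -> ! {
    // Runtime path: full formatted message.
    fn rt(index: usize, len: usize) -> ! {
        panic!("range start index {index} out of range for slice of length {len}")
    }
    // Const path: formatting is not available, so a static message.
    const fn ct(_: usize, _: usize) -> ! {
        panic!("slice start index is out of range for slice")
    }
    // SAFETY: both branches diverge with an equivalent panic.
    unsafe { const_eval_select((index, len), ct, rt) }
}
```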
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
@@ -71,19 +71,20 @@ const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[inline]
#[track_caller]
fn slice_end_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range end index {index} out of range for slice of length {len}");
}
+#[inline]
#[track_caller]
const fn slice_end_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice end index is out of range for slice");
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
const fn slice_index_order_fail(index: usize, end: usize) -> ! {
@@ -92,27 +93,27 @@ const fn slice_index_order_fail(index: usize, end: usize) -> ! {
}
// FIXME const-hack
+#[inline]
#[track_caller]
fn slice_index_order_fail_rt(index: usize, end: usize) -> ! {
panic!("slice index starts at {index} but ends at {end}");
}
+#[inline]
#[track_caller]
const fn slice_index_order_fail_ct(_: usize, _: usize) -> ! {
panic!("slice index start is larger than end");
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
const fn slice_start_index_overflow_fail() -> ! {
panic!("attempted to index slice from after maximum usize");
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
const fn slice_end_index_overflow_fail() -> ! {
panic!("attempted to index slice up to maximum usize");
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 8a8962828..062289767 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -1834,6 +1834,20 @@ impl<'a, T> ChunksExact<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.chunks_exact(2);
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// assert_eq!(iter.next(), Some(&['l', 'o'][..]));
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// assert_eq!(iter.next(), Some(&['r', 'e'][..]));
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// ```
#[must_use]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
@@ -2869,7 +2883,7 @@ unsafe impl<T> Sync for RChunksMut<'_, T> where T: Sync {}
/// ```
///
/// [`rchunks_exact`]: slice::rchunks_exact
-/// [`remainder`]: ChunksExact::remainder
+/// [`remainder`]: RChunksExact::remainder
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
@@ -2892,6 +2906,20 @@ impl<'a, T> RChunksExact<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.rchunks_exact(2);
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// assert_eq!(iter.next(), Some(&['e', 'm'][..]));
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// assert_eq!(iter.next(), Some(&['o', 'r'][..]));
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// ```
#[must_use]
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
@@ -3031,7 +3059,7 @@ unsafe impl<'a, T> TrustedRandomAccessNoCoerce for RChunksExact<'a, T> {
/// ```
///
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
-/// [`into_remainder`]: ChunksExactMut::into_remainder
+/// [`into_remainder`]: RChunksExactMut::into_remainder
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 4f1bb1734..d9281a925 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -7,6 +7,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Greater, Less};
+use crate::fmt;
use crate::intrinsics::{assert_unsafe_precondition, exact_div};
use crate::marker::Copy;
use crate::mem::{self, SizedTypeProperties};
@@ -464,7 +465,7 @@ impl<T> [T] {
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
- #[inline]
+ #[inline(always)]
#[must_use]
pub const fn as_ptr(&self) -> *const T {
self as *const [T] as *const T
@@ -494,7 +495,7 @@ impl<T> [T] {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[rustc_allow_const_fn_unstable(const_mut_refs)]
- #[inline]
+ #[inline(always)]
#[must_use]
pub const fn as_mut_ptr(&mut self) -> *mut T {
self as *mut [T] as *mut T
@@ -3467,10 +3468,11 @@ impl<T> [T] {
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
- /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
- /// length possible for a given type and input slice, but only your algorithm's performance
- /// should depend on that, not its correctness. It is permissible for all of the input data to
- /// be returned as the prefix or suffix slice.
+ /// slice of a new type, and the suffix slice. How exactly the slice is split up is not
+ /// specified; the middle part may be smaller than necessary. However, if this fails to return a
+ /// maximal middle part, that is because code is running in a context where performance does not
+ /// matter, such as a sanitizer attempting to find alignment bugs. Regular code running
+ /// in a default (debug or release) execution *will* return a maximal middle part.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
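In practice the weaker wording means callers must always be prepared for a non-empty prefix and suffix; only the total length is guaranteed. A usage sketch of the stable `align_to`:

```rust
fn main() {
    let bytes = [0u8; 13];
    // SAFETY: reinterpreting `u8` as `u16` is valid for any bit pattern.
    let (prefix, middle, suffix) = unsafe { bytes.align_to::<u16>() };
    // The middle is maximal in ordinary builds, but correctness may not
    // depend on that; only the accounting below is guaranteed.
    assert_eq!(prefix.len() + 2 * middle.len() + suffix.len(), 13);
}
```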
@@ -3524,14 +3526,15 @@ impl<T> [T] {
}
}
- /// Transmute the slice to a slice of another type, ensuring alignment of the types is
- /// maintained.
+ /// Transmute the mutable slice to a mutable slice of another type, ensuring alignment of the
+ /// types is maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
- /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
- /// length possible for a given type and input slice, but only your algorithm's performance
- /// should depend on that, not its correctness. It is permissible for all of the input data to
- /// be returned as the prefix or suffix slice.
+ /// slice of a new type, and the suffix slice. How exactly the slice is split up is not
+ /// specified; the middle part may be smaller than necessary. However, if this fails to return a
+ /// maximal middle part, that is because code is running in a context where performance does not
+ /// matter, such as a sanitizer attempting to find alignment bugs. Regular code running
+ /// in a default (debug or release) execution *will* return a maximal middle part.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
@@ -3667,7 +3670,8 @@ impl<T> [T] {
unsafe { self.align_to() }
}
- /// Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
+ /// Split a mutable slice into a mutable prefix, a middle of aligned SIMD types,
+ /// and a mutable suffix.
///
/// This is a safe wrapper around [`slice::align_to_mut`], so has the same weak
/// postconditions as that method. You're only assured that
@@ -3751,9 +3755,9 @@ impl<T> [T] {
/// [`is_sorted`]: slice::is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
#[must_use]
- pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
+ pub fn is_sorted_by<'a, F>(&'a self, mut compare: F) -> bool
where
- F: FnMut(&T, &T) -> Option<Ordering>,
+ F: FnMut(&'a T, &'a T) -> Option<Ordering>,
{
self.iter().is_sorted_by(|a, b| compare(*a, *b))
}
@@ -3777,9 +3781,9 @@ impl<T> [T] {
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
#[must_use]
- pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
+ pub fn is_sorted_by_key<'a, F, K>(&'a self, f: F) -> bool
where
- F: FnMut(&T) -> K,
+ F: FnMut(&'a T) -> K,
K: PartialOrd,
{
self.iter().is_sorted_by_key(f)
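The new `'a` lifetimes tie the closure arguments to the borrow of the slice itself, so a comparator or key function may now return data borrowed from the elements. A nightly-only sketch, assuming `#![feature(is_sorted)]`:

```rust
#![feature(is_sorted)]

fn main() {
    let words = [String::from("apple"), String::from("banana")];
    // The key type may borrow from the element (`K = &'a str`),
    // which the previous higher-ranked signature rejected.
    assert!(words.is_sorted_by_key(|s| s.as_str()));
}
```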
@@ -4081,6 +4085,88 @@ impl<T> [T] {
*self = rem;
Some(last)
}
+
+ /// Returns mutable references to many indices at once, without doing any checks.
+ ///
+ /// For a safe alternative see [`get_many_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with overlapping or out-of-bounds indices is *[undefined behavior]*
+ /// even if the resulting references are not used.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_many_mut)]
+ ///
+ /// let x = &mut [1, 2, 4];
+ ///
+ /// unsafe {
+ /// let [a, b] = x.get_many_unchecked_mut([0, 2]);
+ /// *a *= 10;
+ /// *b *= 100;
+ /// }
+ /// assert_eq!(x, &[10, 2, 400]);
+ /// ```
+ ///
+ /// [`get_many_mut`]: slice::get_many_mut
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[unstable(feature = "get_many_mut", issue = "104642")]
+ #[inline]
+ pub unsafe fn get_many_unchecked_mut<const N: usize>(
+ &mut self,
+ indices: [usize; N],
+ ) -> [&mut T; N] {
+ // NB: This implementation is written as it is because any variation of
+ // `indices.map(|i| self.get_unchecked_mut(i))` would make miri unhappy,
+ // or generate worse code otherwise. This is also why we need to go
+ // through a raw pointer here.
+ let slice: *mut [T] = self;
+ let mut arr: mem::MaybeUninit<[&mut T; N]> = mem::MaybeUninit::uninit();
+ let arr_ptr = arr.as_mut_ptr();
+
 + // SAFETY: We expect `indices` to contain distinct values that are
+ // in bounds of `self`.
+ unsafe {
+ for i in 0..N {
+ let idx = *indices.get_unchecked(i);
+ *(*arr_ptr).get_unchecked_mut(i) = &mut *slice.get_unchecked_mut(idx);
+ }
+ arr.assume_init()
+ }
+ }
+
+ /// Returns mutable references to many indices at once.
+ ///
+ /// Returns an error if any index is out-of-bounds, or if the same index was
+ /// passed more than once.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_many_mut)]
+ ///
+ /// let v = &mut [1, 2, 3];
+ /// if let Ok([a, b]) = v.get_many_mut([0, 2]) {
+ /// *a = 413;
+ /// *b = 612;
+ /// }
+ /// assert_eq!(v, &[413, 2, 612]);
+ /// ```
+ #[unstable(feature = "get_many_mut", issue = "104642")]
+ #[inline]
+ pub fn get_many_mut<const N: usize>(
+ &mut self,
+ indices: [usize; N],
+ ) -> Result<[&mut T; N], GetManyMutError<N>> {
+ if !get_many_check_valid(&indices, self.len()) {
+ return Err(GetManyMutError { _private: () });
+ }
+ // SAFETY: The `get_many_check_valid()` call checked that all indices
 + // are distinct and in bounds.
+ unsafe { Ok(self.get_many_unchecked_mut(indices)) }
+ }
}
impl<T, const N: usize> [[T; N]] {
@@ -4303,3 +4389,56 @@ impl<T, const N: usize> SlicePattern for [T; N] {
self
}
}
+
+/// This checks every index against each other, and against `len`.
+///
+/// This will do `binomial(N + 1, 2) = N * (N + 1) / 2` comparison operations
+/// (i.e. 0, 1, 3, 6, 10, ... for N = 0, 1, 2, 3, 4, ...).
+fn get_many_check_valid<const N: usize>(indices: &[usize; N], len: usize) -> bool {
+ // NB: The optimizer should inline the loops into a sequence
+ // of instructions without additional branching.
+ let mut valid = true;
+ for (i, &idx) in indices.iter().enumerate() {
+ valid &= idx < len;
+ for &idx2 in &indices[..i] {
+ valid &= idx != idx2;
+ }
+ }
+ valid
+}
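
For example, with `N = 4` the loop performs 4 bounds checks plus `binomial(4, 2) = 6` pairwise distinctness checks, `10 = 4 * 5 / 2` comparisons in total, matching the formula above; the `&=` accumulation keeps all of them branch-free.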
+
+/// The error type returned by [`get_many_mut<N>`][`slice::get_many_mut`].
+///
+/// It indicates one of two possible errors:
+/// - An index is out-of-bounds.
+/// - The same index appeared multiple times in the array.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(get_many_mut)]
+///
+/// let v = &mut [1, 2, 3];
+/// assert!(v.get_many_mut([0, 999]).is_err());
+/// assert!(v.get_many_mut([1, 1]).is_err());
+/// ```
+#[unstable(feature = "get_many_mut", issue = "104642")]
+// NB: The `N` parameter exists to stay forward-compatible with adding more
+// details to the error type at a later point
+pub struct GetManyMutError<const N: usize> {
+ _private: (),
+}
+
+#[unstable(feature = "get_many_mut", issue = "104642")]
+impl<const N: usize> fmt::Debug for GetManyMutError<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("GetManyMutError").finish_non_exhaustive()
+ }
+}
+
+#[unstable(feature = "get_many_mut", issue = "104642")]
+impl<const N: usize> fmt::Display for GetManyMutError<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt("an index is out of bounds or appeared multiple times in the array", f)
+ }
+}
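
Since the error carries no public fields, callers are expected to treat both failure modes uniformly; a hedged usage sketch (nightly, `get_many_mut` feature):

```rust
#![feature(get_many_mut)]

fn main() {
    let mut v = [1, 2, 3];
    match v.get_many_mut([0, 0]) {
        Ok([a, b]) => {
            *a += 1;
            *b += 1;
        }
        // Out-of-bounds and duplicate indices both surface here;
        // `{e}` uses the `Display` impl added above.
        Err(e) => println!("lookup failed: {e}"),
    }
}
```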
diff --git a/library/core/src/str/converts.rs b/library/core/src/str/converts.rs
index b0c55ca4f..5f8748206 100644
--- a/library/core/src/str/converts.rs
+++ b/library/core/src/str/converts.rs
@@ -77,7 +77,7 @@ use super::Utf8Error;
/// let sparkle_heart = [240, 159, 146, 150];
///
/// // We know these bytes are valid, so just use `unwrap()`.
-/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
+/// let sparkle_heart: &str = str::from_utf8(&sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index fbc0fc397..45fd2caae 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -396,7 +396,7 @@ impl str {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "rustc_str_as_ptr", since = "1.32.0")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const fn as_ptr(&self) -> *const u8 {
self as *const str as *const u8
}
@@ -411,7 +411,7 @@ impl str {
/// modified in a way that it remains valid UTF-8.
#[stable(feature = "str_as_mut_ptr", since = "1.36.0")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub fn as_mut_ptr(&mut self) -> *mut u8 {
self as *mut str as *mut u8
}
@@ -902,6 +902,12 @@ impl str {
///
/// assert_eq!(None, iter.next());
/// ```
+ ///
+ /// If the string is empty or all whitespace, the iterator yields no string slices:
+ /// ```
+ /// assert_eq!("".split_whitespace().next(), None);
+ /// assert_eq!(" ".split_whitespace().next(), None);
+ /// ```
#[must_use = "this returns the split string as an iterator, \
without modifying the original"]
#[stable(feature = "split_whitespace", since = "1.1.0")]
@@ -946,6 +952,12 @@ impl str {
///
/// assert_eq!(None, iter.next());
/// ```
+ ///
+ /// If the string is empty or all ASCII whitespace, the iterator yields no string slices:
+ /// ```
+ /// assert_eq!("".split_ascii_whitespace().next(), None);
+ /// assert_eq!(" ".split_ascii_whitespace().next(), None);
+ /// ```
#[must_use = "this returns the split string as an iterator, \
without modifying the original"]
#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index ec2cb429e..19da6d2fb 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -39,6 +39,7 @@
)]
use crate::cmp;
+use crate::cmp::Ordering;
use crate::fmt;
use crate::slice::memchr;
@@ -946,6 +947,32 @@ impl<'a, 'b> Pattern<'a> for &'b str {
haystack.as_bytes().starts_with(self.as_bytes())
}
+ /// Checks whether the pattern matches anywhere in the haystack.
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
+ if self.len() == 0 {
+ return true;
+ }
+
+ match self.len().cmp(&haystack.len()) {
+ Ordering::Less => {
+ if self.len() == 1 {
+ return haystack.as_bytes().contains(&self.as_bytes()[0]);
+ }
+
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ if self.len() <= 32 {
+ if let Some(result) = simd_contains(self, haystack) {
+ return result;
+ }
+ }
+
+ self.into_searcher(haystack).next_match().is_some()
+ }
+ _ => self == haystack,
+ }
+ }
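
Since `str::contains` is implemented in terms of `Pattern::is_contained_in`, these fast paths apply to it transparently. A few calls that exercise each branch of the match above (illustrative only):

```rust
fn main() {
    let haystack = "the quick brown fox jumps over the lazy dog";
    assert!(haystack.contains(""));          // empty needle: always true
    assert!(haystack.contains("q"));         // len == 1: plain byte search
    assert!(haystack.contains("brown fox")); // len 2..=32: SIMD probe on x86_64 with SSE2
    assert!(!"fox".contains(haystack));      // needle longer than haystack: false
    assert!("fox".contains("fox"));          // equal lengths: direct comparison
}
```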
+
/// Removes the pattern from the front of haystack, if it matches.
#[inline]
fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
@@ -1684,3 +1711,210 @@ impl TwoWayStrategy for RejectAndMatch {
SearchStep::Match(a, b)
}
}
+
+/// SIMD search for short needles based on
+/// Wojciech Muła's "SIMD-friendly algorithms for substring searching"[0]
+///
+/// It skips ahead by the vector width on each iteration (rather than the needle length as two-way
+/// does) by probing the first and last byte of the needle for the whole vector width
+/// and only doing full needle comparisons when the vectorized probe indicates potential matches.
+///
+/// Since the x86_64 baseline only offers SSE2, we only use u8x16 here.
+/// If we ever ship std for x86-64-v3 or adapt this for other platforms then wider vectors
+/// should be evaluated.
+///
+/// For haystacks smaller than vector-size + needle length it falls back to
+/// a naive O(n*m) search, so this implementation should not be called with long needles.
+///
+/// [0]: http://0x80.pl/articles/simd-strfind.html#sse-avx2
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+#[inline]
+fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
+ let needle = needle.as_bytes();
+ let haystack = haystack.as_bytes();
+
+ debug_assert!(needle.len() > 1);
+
+ use crate::ops::BitAnd;
+ use crate::simd::mask8x16 as Mask;
+ use crate::simd::u8x16 as Block;
+ use crate::simd::{SimdPartialEq, ToBitMask};
+
+ let first_probe = needle[0];
+ let last_byte_offset = needle.len() - 1;
+
+ // the offset used for the 2nd vector
+ let second_probe_offset = if needle.len() == 2 {
+ // never bail out on len=2 needles because the probes will fully cover them and have
+ // no degenerate cases.
+ 1
+ } else {
+ // try a few bytes in case the first and last bytes of the needle are the same
+ let Some(second_probe_offset) = (needle.len().saturating_sub(4)..needle.len()).rfind(|&idx| needle[idx] != first_probe) else {
+ // fall back to other search methods if we can't find any different bytes
+ // since we could otherwise hit some degenerate cases
+ return None;
+ };
+ second_probe_offset
+ };
+
+ // do a naive search if the haystack is too small to fit
+ if haystack.len() < Block::LANES + last_byte_offset {
+ return Some(haystack.windows(needle.len()).any(|c| c == needle));
+ }
+
+ let first_probe: Block = Block::splat(first_probe);
+ let second_probe: Block = Block::splat(needle[second_probe_offset]);
+ // The first byte is already checked by the outer loop. To verify a match only the
+ // remainder has to be compared.
+ let trimmed_needle = &needle[1..];
+
+ // this #[cold] is load-bearing, benchmark before removing it...
+ let check_mask = #[cold]
+ |idx, mask: u16, skip: bool| -> bool {
+ if skip {
+ return false;
+ }
+
+ // and so is this. optimizations are weird.
+ let mut mask = mask;
+
+ while mask != 0 {
+ let trailing = mask.trailing_zeros();
+ let offset = idx + trailing as usize + 1;
+ // SAFETY: The mask has between 0 and 15 trailing zeros; we skip one additional byte that was already compared
+ // and then take trimmed_needle.len() bytes. This is within the bounds defined by the outer loop
+ unsafe {
+ let sub = haystack.get_unchecked(offset..).get_unchecked(..trimmed_needle.len());
+ if small_slice_eq(sub, trimmed_needle) {
+ return true;
+ }
+ }
+ mask &= !(1 << trailing);
+ }
+ return false;
+ };
+
+ let test_chunk = |idx| -> u16 {
+ // SAFETY: this requires at least LANES bytes being readable at idx
+ // that is ensured by the loop ranges (see comments below)
+ let a: Block = unsafe { haystack.as_ptr().add(idx).cast::<Block>().read_unaligned() };
+ // SAFETY: this requires LANES + second_probe_offset bytes being readable at idx
+ let b: Block = unsafe {
+ haystack.as_ptr().add(idx).add(second_probe_offset).cast::<Block>().read_unaligned()
+ };
+ let eq_first: Mask = a.simd_eq(first_probe);
+ let eq_last: Mask = b.simd_eq(second_probe);
+ let both = eq_first.bitand(eq_last);
+ let mask = both.to_bitmask();
+
+ return mask;
+ };
+
+ let mut i = 0;
+ let mut result = false;
+ // The loop conditions must ensure that there's enough headroom to read LANES bytes,
+ // and not only at the current index but also at the index shifted by second_probe_offset
+ const UNROLL: usize = 4;
+ while i + last_byte_offset + UNROLL * Block::LANES < haystack.len() && !result {
+ let mut masks = [0u16; UNROLL];
+ for j in 0..UNROLL {
+ masks[j] = test_chunk(i + j * Block::LANES);
+ }
+ for j in 0..UNROLL {
+ let mask = masks[j];
+ if mask != 0 {
+ result |= check_mask(i + j * Block::LANES, mask, result);
+ }
+ }
+ i += UNROLL * Block::LANES;
+ }
+ while i + last_byte_offset + Block::LANES < haystack.len() && !result {
+ let mask = test_chunk(i);
+ if mask != 0 {
+ result |= check_mask(i, mask, result);
+ }
+ i += Block::LANES;
+ }
+
+ // Process the tail that didn't fit into LANES-sized steps.
+ // This simply repeats the same procedure but as a right-aligned chunk instead
+ // of a left-aligned one. The last byte must be exactly flush with the string end so
+ // we don't miss a single byte or read out of bounds.
+ let i = haystack.len() - last_byte_offset - Block::LANES;
+ let mask = test_chunk(i);
+ if mask != 0 {
+ result |= check_mask(i, mask, result);
+ }
+
+ Some(result)
+}
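
The core idea is to filter candidate windows with two cheap probe-byte checks and only fall through to a full comparison on probe hits. A scalar sketch of that filter, not the vectorized code above (names illustrative; the real code also picks the second probe so it differs from the first byte where possible):

```rust
// A window is fully compared only when both probe bytes match.
fn probe_contains(needle: &[u8], haystack: &[u8]) -> bool {
    if needle.is_empty() {
        return true;
    }
    let (first, last) = (needle[0], needle[needle.len() - 1]);
    haystack.windows(needle.len()).any(|w| {
        // cheap filter first, full comparison only on probe hits
        w[0] == first && w[w.len() - 1] == last && w == needle
    })
}

fn main() {
    assert!(probe_contains(b"fox", b"quick brown fox"));
    assert!(!probe_contains(b"cat", b"quick brown fox"));
}
```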
+
+/// Compares short slices for equality.
+///
+/// It avoids a call to libc's memcmp, which is faster on long slices
+/// due to SIMD optimizations but incurs function-call overhead.
+///
+/// # Safety
+///
+/// Both slices must have the same length.
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] // only called on x86
+#[inline]
+unsafe fn small_slice_eq(x: &[u8], y: &[u8]) -> bool {
+ debug_assert_eq!(x.len(), y.len());
+ // This function is adapted from
+ // https://github.com/BurntSushi/memchr/blob/8037d11b4357b0f07be2bb66dc2659d9cf28ad32/src/memmem/util.rs#L32
+
+ // If we don't have enough bytes to do 4-byte-at-a-time loads, then
+ // fall back to the naive slow version.
+ //
+ // Potential alternative: We could do a copy_nonoverlapping combined with a mask instead
+ // of a loop. Benchmark it.
+ if x.len() < 4 {
+ for (&b1, &b2) in x.iter().zip(y) {
+ if b1 != b2 {
+ return false;
+ }
+ }
+ return true;
+ }
+ // When we have 4 or more bytes to compare, then proceed in chunks of 4 at
+ // a time using unaligned loads.
+ //
+ // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is
+ // that this particular version of memcmp is likely to be called with tiny
+ // needles. That means that if we do 8 byte loads, then a higher proportion
+ // of memcmp calls will use the slower variant above. With that said, this
+ // is a hypothesis and is only loosely supported by benchmarks. There's
+ // likely some improvement that could be made here. The main thing here
+ // though is to optimize for latency, not throughput.
+
+ // SAFETY: Via the conditional above, we know that both `px` and `py`
+ // have the same length, so `px < pxend` implies that `py < pyend`.
+ // Thus, dereferencing both `px` and `py` in the loop below is safe.
+ //
+ // Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
+ // end of `px` and `py`. Thus, the final dereference outside of the
+ // loop is guaranteed to be valid. (The final comparison will overlap with
+ // the last comparison done in the loop for lengths that aren't multiples
+ // of four.)
+ //
+ // Finally, we needn't worry about alignment here, since we do unaligned
+ // loads.
+ unsafe {
+ let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
+ let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
+ while px < pxend {
+ let vx = (px as *const u32).read_unaligned();
+ let vy = (py as *const u32).read_unaligned();
+ if vx != vy {
+ return false;
+ }
+ px = px.add(4);
+ py = py.add(4);
+ }
+ let vx = (pxend as *const u32).read_unaligned();
+ let vy = (pyend as *const u32).read_unaligned();
+ vx == vy
+ }
+}
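
The final overlapping read is the usual flush-with-the-end tail trick. A safe sketch of the same comparison schedule (illustrative; the function above does it with unaligned `u32` loads):

```rust
// 4-byte steps, then one final 4-byte chunk flush with the end that may
// overlap the previous chunk; every byte is covered at least once.
fn chunked_eq(x: &[u8], y: &[u8]) -> bool {
    assert_eq!(x.len(), y.len());
    assert!(x.len() >= 4);
    let mut i = 0;
    while i + 4 < x.len() {
        if x[i..i + 4] != y[i..i + 4] {
            return false;
        }
        i += 4;
    }
    let tail = x.len() - 4;
    x[tail..] == y[tail..]
}

fn main() {
    // len = 6: chunks [0..4) and [2..6) together cover all six bytes.
    assert!(chunked_eq(b"abcdef", b"abcdef"));
    assert!(!chunked_eq(b"abcdef", b"abcxef"));
}
```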
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
index 41f0a25db..f1dc4f7b5 100644
--- a/library/core/src/task/poll.rs
+++ b/library/core/src/task/poll.rs
@@ -9,6 +9,7 @@ use crate::task::Ready;
/// scheduled to receive a wakeup instead.
#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[cfg_attr(not(bootstrap), lang = "Poll")]
#[stable(feature = "futures_api", since = "1.36.0")]
pub enum Poll<T> {
/// Represents that a value is immediately ready.
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index fc91fe468..28275798f 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -22,7 +22,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:PartialEq),+> PartialEq for ($($T,)+)
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl<$($T: ~const PartialEq),+> const PartialEq for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{
@@ -40,7 +41,7 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:Eq),+> Eq for ($($T,)+)
+ impl<$($T: Eq),+> Eq for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{}
@@ -49,7 +50,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:PartialOrd + PartialEq),+> PartialOrd for ($($T,)+)
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl<$($T: ~const PartialOrd + ~const PartialEq),+> const PartialOrd for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{
@@ -79,7 +81,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:Ord),+> Ord for ($($T,)+)
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl<$($T: ~const Ord),+> const Ord for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{
diff --git a/library/core/tests/any.rs b/library/core/tests/any.rs
index 9538b8139..e98dac8d1 100644
--- a/library/core/tests/any.rs
+++ b/library/core/tests/any.rs
@@ -131,6 +131,24 @@ fn distinct_type_names() {
assert_ne!(type_name_of_val(Velocity), type_name_of_val(Velocity(0.0, -9.8)),);
}
+#[cfg(not(bootstrap))]
+#[test]
+fn dyn_type_name() {
+ trait Foo {
+ type Bar;
+ }
+
+ assert_eq!(
+ "dyn core::ops::function::Fn(i32, i32) -> i32",
+ std::any::type_name::<dyn Fn(i32, i32) -> i32>()
+ );
+ assert_eq!(
+ "dyn coretests::any::dyn_type_name::Foo<Bar = i32> \
+ + core::marker::Send + core::marker::Sync",
+ std::any::type_name::<dyn Foo<Bar = i32> + Send + Sync>()
+ );
+}
+
// Test the `Provider` API.
struct SomeConcreteType {
diff --git a/library/core/tests/fmt/float.rs b/library/core/tests/fmt/float.rs
index 47a7400f7..003782f34 100644
--- a/library/core/tests/fmt/float.rs
+++ b/library/core/tests/fmt/float.rs
@@ -5,7 +5,7 @@ fn test_format_f64() {
assert_eq!("10", format!("{:.0}", 9.9f64));
assert_eq!("9.8", format!("{:.1}", 9.849f64));
assert_eq!("9.9", format!("{:.1}", 9.851f64));
- assert_eq!("1", format!("{:.0}", 0.5f64));
+ assert_eq!("0", format!("{:.0}", 0.5f64));
assert_eq!("1.23456789e6", format!("{:e}", 1234567.89f64));
assert_eq!("1.23456789e3", format!("{:e}", 1234.56789f64));
assert_eq!("1.23456789E6", format!("{:E}", 1234567.89f64));
@@ -25,13 +25,73 @@ fn test_format_f64() {
}
#[test]
+fn test_format_f64_rounds_ties_to_even() {
+ assert_eq!("0", format!("{:.0}", 0.5f64));
+ assert_eq!("2", format!("{:.0}", 1.5f64));
+ assert_eq!("2", format!("{:.0}", 2.5f64));
+ assert_eq!("4", format!("{:.0}", 3.5f64));
+ assert_eq!("4", format!("{:.0}", 4.5f64));
+ assert_eq!("6", format!("{:.0}", 5.5f64));
+ assert_eq!("128", format!("{:.0}", 127.5f64));
+ assert_eq!("128", format!("{:.0}", 128.5f64));
+ assert_eq!("0.2", format!("{:.1}", 0.25f64));
+ assert_eq!("0.8", format!("{:.1}", 0.75f64));
+ assert_eq!("0.12", format!("{:.2}", 0.125f64));
+ assert_eq!("0.88", format!("{:.2}", 0.875f64));
+ assert_eq!("0.062", format!("{:.3}", 0.062f64));
+ assert_eq!("-0", format!("{:.0}", -0.5f64));
+ assert_eq!("-2", format!("{:.0}", -1.5f64));
+ assert_eq!("-2", format!("{:.0}", -2.5f64));
+ assert_eq!("-4", format!("{:.0}", -3.5f64));
+ assert_eq!("-4", format!("{:.0}", -4.5f64));
+ assert_eq!("-6", format!("{:.0}", -5.5f64));
+ assert_eq!("-128", format!("{:.0}", -127.5f64));
+ assert_eq!("-128", format!("{:.0}", -128.5f64));
+ assert_eq!("-0.2", format!("{:.1}", -0.25f64));
+ assert_eq!("-0.8", format!("{:.1}", -0.75f64));
+ assert_eq!("-0.12", format!("{:.2}", -0.125f64));
+ assert_eq!("-0.88", format!("{:.2}", -0.875f64));
+ assert_eq!("-0.062", format!("{:.3}", -0.062f64));
+
+ assert_eq!("2e0", format!("{:.0e}", 1.5f64));
+ assert_eq!("2e0", format!("{:.0e}", 2.5f64));
+ assert_eq!("4e0", format!("{:.0e}", 3.5f64));
+ assert_eq!("4e0", format!("{:.0e}", 4.5f64));
+ assert_eq!("6e0", format!("{:.0e}", 5.5f64));
+ assert_eq!("1.28e2", format!("{:.2e}", 127.5f64));
+ assert_eq!("1.28e2", format!("{:.2e}", 128.5f64));
+ assert_eq!("-2e0", format!("{:.0e}", -1.5f64));
+ assert_eq!("-2e0", format!("{:.0e}", -2.5f64));
+ assert_eq!("-4e0", format!("{:.0e}", -3.5f64));
+ assert_eq!("-4e0", format!("{:.0e}", -4.5f64));
+ assert_eq!("-6e0", format!("{:.0e}", -5.5f64));
+ assert_eq!("-1.28e2", format!("{:.2e}", -127.5f64));
+ assert_eq!("-1.28e2", format!("{:.2e}", -128.5f64));
+
+ assert_eq!("2E0", format!("{:.0E}", 1.5f64));
+ assert_eq!("2E0", format!("{:.0E}", 2.5f64));
+ assert_eq!("4E0", format!("{:.0E}", 3.5f64));
+ assert_eq!("4E0", format!("{:.0E}", 4.5f64));
+ assert_eq!("6E0", format!("{:.0E}", 5.5f64));
+ assert_eq!("1.28E2", format!("{:.2E}", 127.5f64));
+ assert_eq!("1.28E2", format!("{:.2E}", 128.5f64));
+ assert_eq!("-2E0", format!("{:.0E}", -1.5f64));
+ assert_eq!("-2E0", format!("{:.0E}", -2.5f64));
+ assert_eq!("-4E0", format!("{:.0E}", -3.5f64));
+ assert_eq!("-4E0", format!("{:.0E}", -4.5f64));
+ assert_eq!("-6E0", format!("{:.0E}", -5.5f64));
+ assert_eq!("-1.28E2", format!("{:.2E}", -127.5f64));
+ assert_eq!("-1.28E2", format!("{:.2E}", -128.5f64));
+}
+
+#[test]
fn test_format_f32() {
assert_eq!("1", format!("{:.0}", 1.0f32));
assert_eq!("9", format!("{:.0}", 9.4f32));
assert_eq!("10", format!("{:.0}", 9.9f32));
assert_eq!("9.8", format!("{:.1}", 9.849f32));
assert_eq!("9.9", format!("{:.1}", 9.851f32));
- assert_eq!("1", format!("{:.0}", 0.5f32));
+ assert_eq!("0", format!("{:.0}", 0.5f32));
assert_eq!("1.2345679e6", format!("{:e}", 1234567.89f32));
assert_eq!("1.2345679e3", format!("{:e}", 1234.56789f32));
assert_eq!("1.2345679E6", format!("{:E}", 1234567.89f32));
@@ -50,6 +110,66 @@ fn test_format_f32() {
assert_eq!("1234.6", format!("{:.1?}", 1234.56789f32));
}
+#[test]
+fn test_format_f32_rounds_ties_to_even() {
+ assert_eq!("0", format!("{:.0}", 0.5f32));
+ assert_eq!("2", format!("{:.0}", 1.5f32));
+ assert_eq!("2", format!("{:.0}", 2.5f32));
+ assert_eq!("4", format!("{:.0}", 3.5f32));
+ assert_eq!("4", format!("{:.0}", 4.5f32));
+ assert_eq!("6", format!("{:.0}", 5.5f32));
+ assert_eq!("128", format!("{:.0}", 127.5f32));
+ assert_eq!("128", format!("{:.0}", 128.5f32));
+ assert_eq!("0.2", format!("{:.1}", 0.25f32));
+ assert_eq!("0.8", format!("{:.1}", 0.75f32));
+ assert_eq!("0.12", format!("{:.2}", 0.125f32));
+ assert_eq!("0.88", format!("{:.2}", 0.875f32));
+ assert_eq!("0.062", format!("{:.3}", 0.062f32));
+ assert_eq!("-0", format!("{:.0}", -0.5f32));
+ assert_eq!("-2", format!("{:.0}", -1.5f32));
+ assert_eq!("-2", format!("{:.0}", -2.5f32));
+ assert_eq!("-4", format!("{:.0}", -3.5f32));
+ assert_eq!("-4", format!("{:.0}", -4.5f32));
+ assert_eq!("-6", format!("{:.0}", -5.5f32));
+ assert_eq!("-128", format!("{:.0}", -127.5f32));
+ assert_eq!("-128", format!("{:.0}", -128.5f32));
+ assert_eq!("-0.2", format!("{:.1}", -0.25f32));
+ assert_eq!("-0.8", format!("{:.1}", -0.75f32));
+ assert_eq!("-0.12", format!("{:.2}", -0.125f32));
+ assert_eq!("-0.88", format!("{:.2}", -0.875f32));
+ assert_eq!("-0.062", format!("{:.3}", -0.062f32));
+
+ assert_eq!("2e0", format!("{:.0e}", 1.5f32));
+ assert_eq!("2e0", format!("{:.0e}", 2.5f32));
+ assert_eq!("4e0", format!("{:.0e}", 3.5f32));
+ assert_eq!("4e0", format!("{:.0e}", 4.5f32));
+ assert_eq!("6e0", format!("{:.0e}", 5.5f32));
+ assert_eq!("1.28e2", format!("{:.2e}", 127.5f32));
+ assert_eq!("1.28e2", format!("{:.2e}", 128.5f32));
+ assert_eq!("-2e0", format!("{:.0e}", -1.5f32));
+ assert_eq!("-2e0", format!("{:.0e}", -2.5f32));
+ assert_eq!("-4e0", format!("{:.0e}", -3.5f32));
+ assert_eq!("-4e0", format!("{:.0e}", -4.5f32));
+ assert_eq!("-6e0", format!("{:.0e}", -5.5f32));
+ assert_eq!("-1.28e2", format!("{:.2e}", -127.5f32));
+ assert_eq!("-1.28e2", format!("{:.2e}", -128.5f32));
+
+ assert_eq!("2E0", format!("{:.0E}", 1.5f32));
+ assert_eq!("2E0", format!("{:.0E}", 2.5f32));
+ assert_eq!("4E0", format!("{:.0E}", 3.5f32));
+ assert_eq!("4E0", format!("{:.0E}", 4.5f32));
+ assert_eq!("6E0", format!("{:.0E}", 5.5f32));
+ assert_eq!("1.28E2", format!("{:.2E}", 127.5f32));
+ assert_eq!("1.28E2", format!("{:.2E}", 128.5f32));
+ assert_eq!("-2E0", format!("{:.0E}", -1.5f32));
+ assert_eq!("-2E0", format!("{:.0E}", -2.5f32));
+ assert_eq!("-4E0", format!("{:.0E}", -3.5f32));
+ assert_eq!("-4E0", format!("{:.0E}", -4.5f32));
+ assert_eq!("-6E0", format!("{:.0E}", -5.5f32));
+ assert_eq!("-1.28E2", format!("{:.2E}", -127.5f32));
+ assert_eq!("-1.28E2", format!("{:.2E}", -128.5f32));
+}
+
fn is_exponential(s: &str) -> bool {
s.contains("e") || s.contains("E")
}
diff --git a/library/core/tests/hash/mod.rs b/library/core/tests/hash/mod.rs
index f7934d062..267245f05 100644
--- a/library/core/tests/hash/mod.rs
+++ b/library/core/tests/hash/mod.rs
@@ -9,16 +9,19 @@ struct MyHasher {
hash: u64,
}
-impl Default for MyHasher {
+impl const Default for MyHasher {
fn default() -> MyHasher {
MyHasher { hash: 0 }
}
}
-impl Hasher for MyHasher {
+impl const Hasher for MyHasher {
fn write(&mut self, buf: &[u8]) {
- for byte in buf {
- self.hash += *byte as u64;
+ // FIXME(const_trait_impl): change to for loop
+ let mut i = 0;
+ while i < buf.len() {
+ self.hash += buf[i] as u64;
+ i += 1;
}
}
fn write_str(&mut self, s: &str) {
@@ -32,12 +35,25 @@ impl Hasher for MyHasher {
#[test]
fn test_writer_hasher() {
- fn hash<T: Hash>(t: &T) -> u64 {
+ const fn hash<T: ~const Hash>(t: &T) -> u64 {
let mut s = MyHasher { hash: 0 };
t.hash(&mut s);
s.finish()
}
+ const {
+ // FIXME(fee1-dead): assert_eq
+ assert!(hash(&()) == 0);
+ assert!(hash(&5_u8) == 5);
+ assert!(hash(&5_u16) == 5);
+ assert!(hash(&5_u32) == 5);
+
+ assert!(hash(&'a') == 97);
+
+ let s: &str = "a";
+ assert!(hash(&s) == 97 + 0xFF);
+ };
+
assert_eq!(hash(&()), 0);
assert_eq!(hash(&5_u8), 5);
@@ -97,7 +113,7 @@ struct CustomHasher {
output: u64,
}
-impl Hasher for CustomHasher {
+impl const Hasher for CustomHasher {
fn finish(&self) -> u64 {
self.output
}
@@ -109,27 +125,29 @@ impl Hasher for CustomHasher {
}
}
-impl Default for CustomHasher {
+impl const Default for CustomHasher {
fn default() -> CustomHasher {
CustomHasher { output: 0 }
}
}
-impl Hash for Custom {
- fn hash<H: Hasher>(&self, state: &mut H) {
+impl const Hash for Custom {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_u64(self.hash);
}
}
#[test]
fn test_custom_state() {
- fn hash<T: Hash>(t: &T) -> u64 {
+ const fn hash<T: ~const Hash>(t: &T) -> u64 {
let mut c = CustomHasher { output: 0 };
t.hash(&mut c);
c.finish()
}
assert_eq!(hash(&Custom { hash: 5 }), 5);
+
+ const { assert!(hash(&Custom { hash: 6 }) == 6) };
}
// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten.
diff --git a/library/core/tests/hash/sip.rs b/library/core/tests/hash/sip.rs
index 877d08418..3abf6efcf 100644
--- a/library/core/tests/hash/sip.rs
+++ b/library/core/tests/hash/sip.rs
@@ -8,7 +8,6 @@ use core::{mem, slice};
struct Bytes<'a>(&'a [u8]);
impl<'a> Hash for Bytes<'a> {
- #[allow(unused_must_use)]
fn hash<H: Hasher>(&self, state: &mut H) {
let Bytes(v) = *self;
state.write(v);
@@ -25,6 +24,20 @@ fn hash<T: Hash>(x: &T) -> u64 {
}
#[test]
+const fn test_const_sip() {
+ let val1 = 0x45;
+ let val2 = 0xfeed;
+
+ const fn const_hash<T: ~const Hash>(x: &T) -> u64 {
+ let mut st = SipHasher::new();
+ x.hash(&mut st);
+ st.finish()
+ }
+
+ assert!(const_hash(&(val1)) != const_hash(&(val2)));
+}
+
+#[test]
#[allow(unused_must_use)]
fn test_siphash_1_3() {
let vecs: [[u8; 8]; 64] = [
diff --git a/library/core/tests/iter/adapters/array_chunks.rs b/library/core/tests/iter/adapters/array_chunks.rs
index 4e9d89e1e..ef4a7e53b 100644
--- a/library/core/tests/iter/adapters/array_chunks.rs
+++ b/library/core/tests/iter/adapters/array_chunks.rs
@@ -139,7 +139,8 @@ fn test_iterator_array_chunks_fold() {
let result =
(0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>().fold(0, |acc, _item| acc + 1);
assert_eq!(result, 3);
- assert_eq!(count.get(), 10);
+ // fold impls may or may not process the remainder
+ assert!(count.get() <= 10 && count.get() >= 9);
}
#[test]
diff --git a/library/core/tests/iter/adapters/take.rs b/library/core/tests/iter/adapters/take.rs
index bfb659f0a..3e26b43a2 100644
--- a/library/core/tests/iter/adapters/take.rs
+++ b/library/core/tests/iter/adapters/take.rs
@@ -146,3 +146,23 @@ fn test_take_try_folds() {
assert_eq!(iter.try_for_each(Err), Err(2));
assert_eq!(iter.try_for_each(Err), Ok(()));
}
+
+#[test]
+fn test_byref_take_consumed_items() {
+ let mut inner = 10..90;
+
+ let mut count = 0;
+ inner.by_ref().take(0).for_each(|_| count += 1);
+ assert_eq!(count, 0);
+ assert_eq!(inner, 10..90);
+
+ let mut count = 0;
+ inner.by_ref().take(10).for_each(|_| count += 1);
+ assert_eq!(count, 10);
+ assert_eq!(inner, 20..90);
+
+ let mut count = 0;
+ inner.by_ref().take(100).for_each(|_| count += 1);
+ assert_eq!(count, 70);
+ assert_eq!(inner, 90..90);
+}
diff --git a/library/core/tests/iter/sources.rs b/library/core/tests/iter/sources.rs
index d0114ade6..a15f3a514 100644
--- a/library/core/tests/iter/sources.rs
+++ b/library/core/tests/iter/sources.rs
@@ -106,3 +106,52 @@ fn test_empty() {
let mut it = empty::<i32>();
assert_eq!(it.next(), None);
}
+
+#[test]
+fn test_repeat_n_drop() {
+ #[derive(Clone, Debug)]
+ struct DropCounter<'a>(&'a Cell<usize>);
+ impl Drop for DropCounter<'_> {
+ fn drop(&mut self) {
+ self.0.set(self.0.get() + 1);
+ }
+ }
+
+ // `repeat_n(x, 0)` drops `x` immediately
+ let count = Cell::new(0);
+ let item = DropCounter(&count);
+ let mut it = repeat_n(item, 0);
+ assert_eq!(count.get(), 1);
+ assert!(it.next().is_none());
+ assert_eq!(count.get(), 1);
+ drop(it);
+ assert_eq!(count.get(), 1);
+
+ // Dropping the iterator needs to drop the item if it's non-empty
+ let count = Cell::new(0);
+ let item = DropCounter(&count);
+ let it = repeat_n(item, 3);
+ assert_eq!(count.get(), 0);
+ drop(it);
+ assert_eq!(count.get(), 1);
+
+ // Dropping the iterator doesn't drop the item if it was exhausted
+ let count = Cell::new(0);
+ let item = DropCounter(&count);
+ let mut it = repeat_n(item, 3);
+ assert_eq!(count.get(), 0);
+ let x0 = it.next().unwrap();
+ assert_eq!(count.get(), 0);
+ let x1 = it.next().unwrap();
+ assert_eq!(count.get(), 0);
+ let x2 = it.next().unwrap();
+ assert_eq!(count.get(), 0);
+ assert!(it.next().is_none());
+ assert_eq!(count.get(), 0);
+ assert!(it.next().is_none());
+ assert_eq!(count.get(), 0);
+ drop(it);
+ assert_eq!(count.get(), 0);
+ drop((x0, x1, x2));
+ assert_eq!(count.get(), 3);
+}
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 51f858ade..99d4a40c4 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -4,18 +4,22 @@
#![feature(array_windows)]
#![feature(bigint_helper_methods)]
#![feature(cell_update)]
+#![feature(const_align_offset)]
#![feature(const_assume)]
+#![feature(const_align_of_val_raw)]
#![feature(const_black_box)]
#![feature(const_bool_to_option)]
#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
#![feature(const_convert)]
+#![feature(const_hash)]
#![feature(const_heap)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_assume_init_read)]
#![feature(const_nonnull_new)]
#![feature(const_num_from_num)]
#![feature(const_pointer_byte_offsets)]
+#![feature(const_pointer_is_aligned)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
@@ -42,6 +46,7 @@
#![feature(try_find)]
#![feature(inline_const)]
#![feature(is_sorted)]
+#![feature(layout_for_ptr)]
#![feature(pattern)]
#![feature(pin_macro)]
#![feature(sort_internals)]
@@ -62,7 +67,6 @@
#![feature(try_trait_v2)]
#![feature(slice_internals)]
#![feature(slice_partition_dedup)]
-#![feature(int_log)]
#![feature(iter_advance_by)]
#![feature(iter_array_chunks)]
#![feature(iter_collect_into)]
@@ -71,6 +75,7 @@
#![feature(iter_is_partitioned)]
#![feature(iter_next_chunk)]
#![feature(iter_order_by)]
+#![feature(iter_repeat_n)]
#![feature(iterator_try_collect)]
#![feature(iterator_try_reduce)]
#![feature(const_mut_refs)]
@@ -79,6 +84,7 @@
#![feature(never_type)]
#![feature(unwrap_infallible)]
#![feature(pointer_byte_offsets)]
+#![feature(pointer_is_aligned)]
#![feature(portable_simd)]
#![feature(ptr_metadata)]
#![feature(once_cell)]
@@ -102,7 +108,9 @@
#![feature(provide_any)]
#![feature(utf8_chunks)]
#![feature(is_ascii_octdigit)]
+#![feature(get_many_mut)]
#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(fuzzy_provenance_casts)]
extern crate test;
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index 0362e1c8a..1cfb4fd9f 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -1,4 +1,5 @@
use core::mem::*;
+use core::ptr;
#[cfg(panic = "unwind")]
use std::rc::Rc;
@@ -76,6 +77,25 @@ fn align_of_val_basic() {
}
#[test]
+#[cfg(not(bootstrap))] // stage 0 doesn't have the fix yet, so the test fails
+fn align_of_val_raw_packed() {
+ #[repr(C, packed)]
+ struct B {
+ f: [u32],
+ }
+ let storage = [0u8; 4];
+ let b: *const B = ptr::from_raw_parts(storage.as_ptr().cast(), 1);
+ assert_eq!(unsafe { align_of_val_raw(b) }, 1);
+
+ const ALIGN_OF_VAL_RAW: usize = {
+ let storage = [0u8; 4];
+ let b: *const B = ptr::from_raw_parts(storage.as_ptr().cast(), 1);
+ unsafe { align_of_val_raw(b) }
+ };
+ assert_eq!(ALIGN_OF_VAL_RAW, 1);
+}
+
+#[test]
fn test_swap() {
let mut x = 31337;
let mut y = 42;
diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
index 798473bbd..30843cc3d 100644
--- a/library/core/tests/num/flt2dec/mod.rs
+++ b/library/core/tests/num/flt2dec/mod.rs
@@ -138,7 +138,7 @@ where
// check exact rounding for zero- and negative-width cases
let start;
- if expected[0] >= b'5' {
+ if expected[0] > b'5' {
try_fixed!(f(&decoded) => &mut buf, expectedk, b"1", expectedk + 1;
"zero-width rounding-up mismatch for v={v}: \
actual {actual:?}, expected {expected:?}",
@@ -1007,7 +1007,7 @@ where
assert_eq!(to_string(f, 999.5, Minus, 3), "999.500");
assert_eq!(to_string(f, 999.5, Minus, 30), "999.500000000000000000000000000000");
- assert_eq!(to_string(f, 0.5, Minus, 0), "1");
+ assert_eq!(to_string(f, 0.5, Minus, 0), "0");
assert_eq!(to_string(f, 0.5, Minus, 1), "0.5");
assert_eq!(to_string(f, 0.5, Minus, 2), "0.50");
assert_eq!(to_string(f, 0.5, Minus, 3), "0.500");
diff --git a/library/core/tests/option.rs b/library/core/tests/option.rs
index f36f7c268..dca6321cf 100644
--- a/library/core/tests/option.rs
+++ b/library/core/tests/option.rs
@@ -57,7 +57,7 @@ fn test_get_resource() {
}
#[test]
-#[cfg_attr(not(bootstrap), allow(for_loops_over_fallibles))]
+#[allow(for_loops_over_fallibles)]
fn test_option_dance() {
let x = Some(());
let mut y = Some(5);
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 97a369810..90bc83510 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -359,6 +359,23 @@ fn align_offset_zst() {
}
#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_zst_const() {
+ const {
+ // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
+ // all, because no number of elements will align the pointer.
+ let mut p = 1;
+ while p < 1024 {
+ assert!(ptr::invalid::<()>(p).align_offset(p) == 0);
+ if p != 1 {
+ assert!(ptr::invalid::<()>(p + 1).align_offset(p) == !0);
+ }
+ p = (p + 1).next_power_of_two();
+ }
+ }
+}
+
+#[test]
fn align_offset_stride_one() {
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
// number of bytes.
@@ -380,6 +397,26 @@ fn align_offset_stride_one() {
}
#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_stride_one_const() {
+ const {
+ // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
+ // number of bytes.
+ let mut align = 1;
+ while align < 1024 {
+ let mut ptr = 1;
+ while ptr < 2 * align {
+ let expected = ptr % align;
+ let offset = if expected == 0 { 0 } else { align - expected };
+ assert!(ptr::invalid::<u8>(ptr).align_offset(align) == offset);
+ ptr += 1;
+ }
+ align = (align + 1).next_power_of_two();
+ }
+ }
+}
+
+#[test]
fn align_offset_various_strides() {
unsafe fn test_stride<T>(ptr: *const T, align: usize) -> bool {
let numptr = ptr as usize;
@@ -456,6 +493,260 @@ fn align_offset_various_strides() {
}
#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_various_strides_const() {
+ const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
+ let mut expected = usize::MAX;
+ // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
+ let mut el = 0;
+ while el < align {
+ if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
+ expected = el;
+ break;
+ }
+ el += 1;
+ }
+ let got = ptr.align_offset(align);
+ assert!(got == expected);
+ }
+
+ const {
+ // For pointers of stride != 1, we verify the algorithm against the naivest possible
+ // implementation
+ let mut align = 1;
+ let limit = 32;
+ while align < limit {
+ let mut ptr = 1;
+ while ptr < 4 * align {
+ unsafe {
+ #[repr(packed)]
+ struct A3(u16, u8);
+ test_stride::<A3>(ptr::invalid::<A3>(ptr), ptr, align);
+
+ struct A4(u32);
+ test_stride::<A4>(ptr::invalid::<A4>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A5(u32, u8);
+ test_stride::<A5>(ptr::invalid::<A5>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A6(u32, u16);
+ test_stride::<A6>(ptr::invalid::<A6>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A7(u32, u16, u8);
+ test_stride::<A7>(ptr::invalid::<A7>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A8(u32, u32);
+ test_stride::<A8>(ptr::invalid::<A8>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A9(u32, u32, u8);
+ test_stride::<A9>(ptr::invalid::<A9>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A10(u32, u32, u16);
+ test_stride::<A10>(ptr::invalid::<A10>(ptr), ptr, align);
+
+ test_stride::<u32>(ptr::invalid::<u32>(ptr), ptr, align);
+ test_stride::<u128>(ptr::invalid::<u128>(ptr), ptr, align);
+ }
+ ptr += 1;
+ }
+ align = (align + 1).next_power_of_two();
+ }
+ }
+}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_with_provenance_const() {
+ const {
+ // On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
+ #[repr(align(4))]
+ struct AlignedI32(i32);
+
+ let data = AlignedI32(42);
+
+ // `stride % align == 0` (usual case)
+
+ let ptr: *const i32 = &data.0;
+ assert!(ptr.align_offset(1) == 0);
+ assert!(ptr.align_offset(2) == 0);
+ assert!(ptr.align_offset(4) == 0);
+ assert!(ptr.align_offset(8) == usize::MAX);
+ assert!(ptr.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr.wrapping_byte_add(1).align_offset(2) == usize::MAX);
+ assert!(ptr.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr.wrapping_byte_add(2).align_offset(4) == usize::MAX);
+ assert!(ptr.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr.wrapping_byte_add(3).align_offset(2) == usize::MAX);
+
+ assert!(ptr.wrapping_add(42).align_offset(4) == 0);
+ assert!(ptr.wrapping_add(42).align_offset(8) == usize::MAX);
+
+ let ptr1: *const i8 = ptr.cast();
+ assert!(ptr1.align_offset(1) == 0);
+ assert!(ptr1.align_offset(2) == 0);
+ assert!(ptr1.align_offset(4) == 0);
+ assert!(ptr1.align_offset(8) == usize::MAX);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(2) == 1);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(4) == 3);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(8) == usize::MAX);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(4) == 2);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(2) == 1);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(4) == 1);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(8) == usize::MAX);
+
+ let ptr2: *const i16 = ptr.cast();
+ assert!(ptr2.align_offset(1) == 0);
+ assert!(ptr2.align_offset(2) == 0);
+ assert!(ptr2.align_offset(4) == 0);
+ assert!(ptr2.align_offset(8) == usize::MAX);
+ assert!(ptr2.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr2.wrapping_byte_add(1).align_offset(2) == usize::MAX);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(4) == 1);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr2.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr2.wrapping_byte_add(3).align_offset(2) == usize::MAX);
+
+ let ptr3: *const i64 = ptr.cast();
+ assert!(ptr3.align_offset(1) == 0);
+ assert!(ptr3.align_offset(2) == 0);
+ assert!(ptr3.align_offset(4) == 0);
+ assert!(ptr3.align_offset(8) == usize::MAX);
+ assert!(ptr3.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr3.wrapping_byte_add(1).align_offset(2) == usize::MAX);
+
+ // `stride % align != 0` (edge case)
+
+ let ptr4: *const [u8; 3] = ptr.cast();
+ assert!(ptr4.align_offset(1) == 0);
+ assert!(ptr4.align_offset(2) == 0);
+ assert!(ptr4.align_offset(4) == 0);
+ assert!(ptr4.align_offset(8) == usize::MAX);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(2) == 1);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(4) == 1);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(8) == usize::MAX);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(4) == 2);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(2) == 1);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(4) == 3);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(8) == usize::MAX);
+
+ let ptr5: *const [u8; 5] = ptr.cast();
+ assert!(ptr5.align_offset(1) == 0);
+ assert!(ptr5.align_offset(2) == 0);
+ assert!(ptr5.align_offset(4) == 0);
+ assert!(ptr5.align_offset(8) == usize::MAX);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(2) == 1);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(4) == 3);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(8) == usize::MAX);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(4) == 2);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(2) == 1);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(4) == 1);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(8) == usize::MAX);
+ }
+}
+
+#[test]
+fn align_offset_issue_103361() {
+ #[cfg(target_pointer_width = "64")]
+ const SIZE: usize = 1 << 47;
+ #[cfg(target_pointer_width = "32")]
+ const SIZE: usize = 1 << 30;
+ #[cfg(target_pointer_width = "16")]
+ const SIZE: usize = 1 << 13;
+ struct HugeSize([u8; SIZE - 1]);
+ let _ = ptr::invalid::<HugeSize>(SIZE).align_offset(SIZE);
+}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_issue_103361_const() {
+ #[cfg(target_pointer_width = "64")]
+ const SIZE: usize = 1 << 47;
+ #[cfg(target_pointer_width = "32")]
+ const SIZE: usize = 1 << 30;
+ #[cfg(target_pointer_width = "16")]
+ const SIZE: usize = 1 << 13;
+ struct HugeSize([u8; SIZE - 1]);
+
+ const {
+ assert!(ptr::invalid::<HugeSize>(SIZE - 1).align_offset(SIZE) == SIZE - 1);
+ assert!(ptr::invalid::<HugeSize>(SIZE).align_offset(SIZE) == 0);
+ assert!(ptr::invalid::<HugeSize>(SIZE + 1).align_offset(SIZE) == 1);
+ }
+}
+
+#[test]
+fn is_aligned() {
+ let data = 42;
+ let ptr: *const i32 = &data;
+ assert!(ptr.is_aligned());
+ assert!(ptr.is_aligned_to(1));
+ assert!(ptr.is_aligned_to(2));
+ assert!(ptr.is_aligned_to(4));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(1));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+
+ // At runtime either `ptr` or `ptr+1` is aligned to 8.
+ assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn is_aligned_const() {
+ const {
+ let data = 42;
+ let ptr: *const i32 = &data;
+ assert!(ptr.is_aligned());
+ assert!(ptr.is_aligned_to(1));
+ assert!(ptr.is_aligned_to(2));
+ assert!(ptr.is_aligned_to(4));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(1));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+
+ // At compile time neither `ptr` nor `ptr+1` is aligned to 8.
+ assert!(!ptr.is_aligned_to(8));
+ assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ }
+}
+
+#[test]
+#[cfg(bootstrap)]
+fn is_aligned_const() {
+ const {
+ let data = 42;
+ let ptr: *const i32 = &data;
+ // The bootstrap compiler always returns false for is_aligned.
+ assert!(!ptr.is_aligned());
+ assert!(!ptr.is_aligned_to(1));
+ }
+}
+
+#[test]
fn offset_from() {
let mut a = [0; 5];
let ptr1: *mut i32 = &mut a[1];
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 9e1fbea79..4e06e0f43 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -2595,3 +2595,63 @@ fn test_flatten_mut_size_overflow() {
let x = &mut [[(); usize::MAX]; 2][..];
let _ = x.flatten_mut();
}
+
+#[test]
+fn test_get_many_mut_normal_2() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a, b] = v.get_many_mut([3, 0]).unwrap();
+ *a += 10;
+ *b += 100;
+ assert_eq!(v, vec![101, 2, 3, 14, 5]);
+}
+
+#[test]
+fn test_get_many_mut_normal_3() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a, b, c] = v.get_many_mut([0, 4, 2]).unwrap();
+ *a += 10;
+ *b += 100;
+ *c += 1000;
+ assert_eq!(v, vec![11, 2, 1003, 4, 105]);
+}
+
+#[test]
+fn test_get_many_mut_empty() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [] = v.get_many_mut([]).unwrap();
+ assert_eq!(v, vec![1, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_get_many_mut_single_first() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a] = v.get_many_mut([0]).unwrap();
+ *a += 10;
+ assert_eq!(v, vec![11, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_get_many_mut_single_last() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a] = v.get_many_mut([4]).unwrap();
+ *a += 10;
+ assert_eq!(v, vec![1, 2, 3, 4, 15]);
+}
+
+#[test]
+fn test_get_many_mut_oob_nonempty() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ assert!(v.get_many_mut([5]).is_err());
+}
+
+#[test]
+fn test_get_many_mut_oob_empty() {
+ let mut v: Vec<i32> = vec![];
+ assert!(v.get_many_mut([0]).is_err());
+}
+
+#[test]
+fn test_get_many_mut_duplicate() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ assert!(v.get_many_mut([1, 3, 3, 4]).is_err());
+}