author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
commit    9918693037dce8aa4bb6f08741b6812923486c18 (patch)
tree      21d2b40bec7e6a7ea664acee056eb3d08e15a1cf /library/core/src
parent    Releasing progress-linux version 1.75.0+dfsg1-5~progress7.99u1. (diff)
Merging upstream version 1.76.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core/src')
-rw-r--r--  library/core/src/alloc/layout.rs | 6
-rw-r--r--  library/core/src/any.rs | 44
-rw-r--r--  library/core/src/array/iter.rs | 2
-rw-r--r--  library/core/src/array/mod.rs | 4
-rw-r--r--  library/core/src/async_iter/async_iter.rs | 25
-rw-r--r--  library/core/src/cell.rs | 11
-rw-r--r--  library/core/src/char/methods.rs | 1
-rw-r--r--  library/core/src/clone.rs | 2
-rw-r--r--  library/core/src/cmp.rs | 14
-rw-r--r--  library/core/src/convert/num.rs | 2
-rw-r--r--  library/core/src/escape.rs | 16
-rw-r--r--  library/core/src/ffi/c_str.rs | 4
-rw-r--r--  library/core/src/ffi/mod.rs | 6
-rw-r--r--  library/core/src/fmt/num.rs | 17
-rw-r--r--  library/core/src/future/future.rs | 1
-rw-r--r--  library/core/src/hash/mod.rs | 29
-rw-r--r--  library/core/src/hash/sip.rs | 26
-rw-r--r--  library/core/src/internal_macros.rs | 4
-rw-r--r--  library/core/src/intrinsics.rs | 49
-rw-r--r--  library/core/src/intrinsics/mir.rs | 90
-rw-r--r--  library/core/src/intrinsics/simd.rs | 473
-rw-r--r--  library/core/src/iter/adapters/array_chunks.rs | 34
-rw-r--r--  library/core/src/iter/adapters/chain.rs | 2
-rw-r--r--  library/core/src/iter/adapters/copied.rs | 2
-rw-r--r--  library/core/src/iter/adapters/enumerate.rs | 10
-rw-r--r--  library/core/src/iter/adapters/filter.rs | 11
-rw-r--r--  library/core/src/iter/adapters/filter_map.rs | 18
-rw-r--r--  library/core/src/iter/adapters/flatten.rs | 125
-rw-r--r--  library/core/src/iter/adapters/fuse.rs | 24
-rw-r--r--  library/core/src/iter/adapters/inspect.rs | 11
-rw-r--r--  library/core/src/iter/adapters/map.rs | 11
-rw-r--r--  library/core/src/iter/adapters/map_while.rs | 7
-rw-r--r--  library/core/src/iter/adapters/map_windows.rs | 2
-rw-r--r--  library/core/src/iter/adapters/mod.rs | 13
-rw-r--r--  library/core/src/iter/adapters/scan.rs | 7
-rw-r--r--  library/core/src/iter/adapters/skip.rs | 9
-rw-r--r--  library/core/src/iter/adapters/skip_while.rs | 12
-rw-r--r--  library/core/src/iter/adapters/step_by.rs | 6
-rw-r--r--  library/core/src/iter/adapters/take.rs | 11
-rw-r--r--  library/core/src/iter/adapters/take_while.rs | 12
-rw-r--r--  library/core/src/iter/adapters/zip.rs | 16
-rw-r--r--  library/core/src/iter/mod.rs | 2
-rw-r--r--  library/core/src/iter/sources/from_coroutine.rs | 3
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 2
-rw-r--r--  library/core/src/iter/traits/marker.rs | 28
-rw-r--r--  library/core/src/iter/traits/mod.rs | 2
-rw-r--r--  library/core/src/lib.rs | 16
-rw-r--r--  library/core/src/mem/maybe_uninit.rs | 5
-rw-r--r--  library/core/src/mem/mod.rs | 7
-rw-r--r--  library/core/src/net/ip_addr.rs | 54
-rw-r--r--  library/core/src/num/f32.rs | 14
-rw-r--r--  library/core/src/num/f64.rs | 14
-rw-r--r--  library/core/src/num/mod.rs | 6
-rw-r--r--  library/core/src/num/nonzero.rs | 66
-rw-r--r--  library/core/src/ops/arith.rs | 10
-rw-r--r--  library/core/src/ops/coroutine.rs | 10
-rw-r--r--  library/core/src/ops/function.rs | 12
-rw-r--r--  library/core/src/ops/index_range.rs | 13
-rw-r--r--  library/core/src/option.rs | 21
-rw-r--r--  library/core/src/panic.rs | 30
-rw-r--r--  library/core/src/panicking.rs | 66
-rw-r--r--  library/core/src/pin.rs | 6
-rw-r--r--  library/core/src/primitive_docs.rs | 113
-rw-r--r--  library/core/src/ptr/alignment.rs | 57
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 59
-rw-r--r--  library/core/src/ptr/mod.rs | 110
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 50
-rw-r--r--  library/core/src/ptr/non_null.rs | 1049
-rw-r--r--  library/core/src/ptr/unique.rs | 1
-rw-r--r--  library/core/src/result.rs | 8
-rw-r--r--  library/core/src/slice/ascii.rs | 3
-rw-r--r--  library/core/src/slice/index.rs | 72
-rw-r--r--  library/core/src/slice/iter.rs | 2
-rw-r--r--  library/core/src/slice/memchr.rs | 18
-rw-r--r--  library/core/src/slice/mod.rs | 116
-rw-r--r--  library/core/src/str/iter.rs | 50
-rw-r--r--  library/core/src/str/mod.rs | 79
-rw-r--r--  library/core/src/str/pattern.rs | 20
-rw-r--r--  library/core/src/str/traits.rs | 46
-rw-r--r--  library/core/src/task/poll.rs | 1
-rw-r--r--  library/core/src/task/wake.rs | 8
-rw-r--r--  library/core/src/time.rs | 21
-rw-r--r--  library/core/src/tuple.rs | 158
83 files changed, 3031 insertions, 566 deletions
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 65946e09f..9ef0a7d76 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -450,7 +450,11 @@ impl Layout {
return Err(LayoutError);
}
- let array_size = element_size * n;
+ // SAFETY: We just checked that we won't overflow `usize` when we multiply.
+ // This is a useless hint inside this function, but after inlining this helps
+ // deduplicate checks for whether the overall capacity is zero (e.g., in RawVec's
+ // allocation path) before/after this multiplication.
+ let array_size = unsafe { element_size.unchecked_mul(n) };
// SAFETY: We just checked above that the `array_size` will not
// exceed `isize::MAX` even when rounded up to the alignment.
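Note: the pattern in this hunk is check-then-hint — prove the multiplication cannot overflow, then use `unchecked_mul` so the optimizer can drop duplicate checks downstream. A minimal nightly sketch of the same shape (illustrative names, not the real `Layout` internals, which also account for alignment rounding):

```rust
#![feature(unchecked_math)] // `usize::unchecked_mul` was nightly-only in 1.76

fn array_size(element_size: usize, n: usize) -> Result<usize, ()> {
    if element_size != 0 && n > (isize::MAX as usize) / element_size {
        return Err(()); // would overflow the `isize::MAX` allocation limit
    }
    // SAFETY: the guard above proves `element_size * n <= isize::MAX`,
    // and the explicit hint lets the compiler deduplicate later checks.
    Ok(unsafe { element_size.unchecked_mul(n) })
}
```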
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 8f5404d97..e8f00e876 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -115,6 +115,11 @@ use crate::intrinsics;
pub trait Any: 'static {
/// Gets the `TypeId` of `self`.
///
+ /// If called on a `dyn Any` trait object
+ /// (or a trait object of a subtrait of `Any`),
+ /// this returns the `TypeId` of the underlying
+ /// concrete type, not that of `dyn Any` itself.
+ ///
/// # Examples
///
/// ```
@@ -690,44 +695,41 @@ pub const fn type_name<T: ?Sized>() -> &'static str {
intrinsics::type_name::<T>()
}
-/// Returns the name of the type of the pointed-to value as a string slice.
+/// Returns the type name of the pointed-to value as a string slice.
+///
/// This is the same as `type_name::<T>()`, but can be used where the type of a
/// variable is not easily available.
///
/// # Note
///
-/// This is intended for diagnostic use. The exact contents and format of the
-/// string are not specified, other than being a best-effort description of the
-/// type. For example, `type_name_of_val::<Option<String>>(None)` could return
-/// `"Option<String>"` or `"std::option::Option<std::string::String>"`, but not
-/// `"foobar"`. In addition, the output may change between versions of the
-/// compiler.
+/// Like [`type_name`], this is intended for diagnostic use and the exact output is not
+/// guaranteed. It provides a best-effort description, but the output may change between
+/// versions of the compiler.
///
-/// This function does not resolve trait objects,
-/// meaning that `type_name_of_val(&7u32 as &dyn Debug)`
-/// may return `"dyn Debug"`, but not `"u32"`.
+/// In short: use this for debugging, avoid using the output to affect program behavior. More
+/// information is available at [`type_name`].
///
-/// The type name should not be considered a unique identifier of a type;
-/// multiple types may share the same type name.
-///
-/// The current implementation uses the same infrastructure as compiler
-/// diagnostics and debuginfo, but this is not guaranteed.
+/// Additionally, this function does not resolve trait objects. This means that
+/// `type_name_of_val(&7u32 as &dyn Debug)` may return `"dyn Debug"`, but will not return `"u32"`
+/// at this time.
///
/// # Examples
///
/// Prints the default integer and float types.
///
/// ```rust
-/// #![feature(type_name_of_val)]
/// use std::any::type_name_of_val;
///
-/// let x = 1;
-/// println!("{}", type_name_of_val(&x));
-/// let y = 1.0;
-/// println!("{}", type_name_of_val(&y));
+/// let s = "foo";
+/// let x: i32 = 1;
+/// let y: f32 = 1.0;
+///
+/// assert!(type_name_of_val(&s).contains("str"));
+/// assert!(type_name_of_val(&x).contains("i32"));
+/// assert!(type_name_of_val(&y).contains("f32"));
/// ```
#[must_use]
-#[unstable(feature = "type_name_of_val", issue = "66359")]
+#[stable(feature = "type_name_of_val", since = "1.76.0")]
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
type_name::<T>()
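A small stable-Rust illustration of the two doc changes above: `type_id` called through `dyn Any` reports the concrete type, while a `'static` container such as `Box<dyn Any>` has its own `TypeId`:

```rust
use std::any::{Any, TypeId};

let value: Box<dyn Any> = Box::new(3u32);
// Through `dyn Any`, `type_id` reports the underlying concrete type...
assert_eq!((*value).type_id(), TypeId::of::<u32>());
// ...but the box itself is also `'static`, so it has its own `TypeId`.
assert_eq!(value.type_id(), TypeId::of::<Box<dyn Any>>());
```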
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index 321357a15..2b22488b8 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -4,7 +4,7 @@ use crate::num::NonZeroUsize;
use crate::{
fmt,
intrinsics::transmute_unchecked,
- iter::{self, ExactSizeIterator, FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce},
+ iter::{self, FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce},
mem::MaybeUninit,
ops::{IndexRange, Range},
ptr,
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index ebd4a8c05..34213637a 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -206,7 +206,7 @@ where
#[inline]
fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
- <&Self>::try_from(slice).map(|r| *r)
+ <&Self>::try_from(slice).copied()
}
}
@@ -297,7 +297,7 @@ impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] {
/// ```
/// use std::hash::BuildHasher;
///
-/// let b = std::collections::hash_map::RandomState::new();
+/// let b = std::hash::RandomState::new();
/// let a: [u8; 3] = [0xa8, 0x3c, 0x09];
/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
/// assert_eq!(b.hash_one(a), b.hash_one(s));
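For the `try_from` change above, `Result::copied` dereference-copies the `&[T; N]` on success, so the new form is equivalent to the removed `.map(|r| *r)`:

```rust
let slice: &[u8] = &[0xa8, 0x3c, 0x09];
let by_ref: Result<&[u8; 3], _> = <&[u8; 3]>::try_from(slice);
// `Result::copied` turns `Ok(&[T; N])` into `Ok([T; N])` for `T: Copy`.
let owned: [u8; 3] = by_ref.copied().unwrap();
assert_eq!(owned, [0xa8, 0x3c, 0x09]);
```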
diff --git a/library/core/src/async_iter/async_iter.rs b/library/core/src/async_iter/async_iter.rs
index 12a47f9fc..8a45bd36f 100644
--- a/library/core/src/async_iter/async_iter.rs
+++ b/library/core/src/async_iter/async_iter.rs
@@ -13,6 +13,7 @@ use crate::task::{Context, Poll};
#[unstable(feature = "async_iterator", issue = "79024")]
#[must_use = "async iterators do nothing unless polled"]
#[doc(alias = "Stream")]
+#[cfg_attr(not(bootstrap), lang = "async_iterator")]
pub trait AsyncIterator {
/// The type of items yielded by the async iterator.
type Item;
@@ -109,3 +110,27 @@ where
(**self).size_hint()
}
}
+
+#[unstable(feature = "async_gen_internals", issue = "none")]
+impl<T> Poll<Option<T>> {
+ /// A helper function for internal desugaring -- produces `Ready(Some(t))`,
+ /// which corresponds to the async iterator yielding a value.
+ #[unstable(feature = "async_gen_internals", issue = "none")]
+ #[cfg_attr(not(bootstrap), lang = "AsyncGenReady")]
+ pub fn async_gen_ready(t: T) -> Self {
+ Poll::Ready(Some(t))
+ }
+
+ /// A helper constant for internal desugaring -- produces `Pending`,
+ /// which corresponds to the async iterator pending on an `.await`.
+ #[unstable(feature = "async_gen_internals", issue = "none")]
+ #[cfg_attr(not(bootstrap), lang = "AsyncGenPending")]
+ // FIXME(gen_blocks): This probably could be deduplicated.
+ pub const PENDING: Self = Poll::Pending;
+
+ /// A helper constant for internal desugaring -- produces `Ready(None)`,
+ /// which corresponds to the async iterator finishing its iteration.
+ #[unstable(feature = "async_gen_internals", issue = "none")]
+ #[cfg_attr(not(bootstrap), lang = "AsyncGenFinished")]
+ pub const FINISHED: Self = Poll::Ready(None);
+}
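In stable terms, the three lang-item helpers map onto plain `Poll` values; a sketch of what the `async gen` desugaring produces:

```rust
use std::task::Poll;

let yielded: Poll<Option<u32>> = Poll::Ready(Some(7)); // AsyncGenReady
let pending: Poll<Option<u32>> = Poll::Pending;        // AsyncGenPending
let finished: Poll<Option<u32>> = Poll::Ready(None);   // AsyncGenFinished
assert_eq!(yielded, Poll::Ready(Some(7)));
assert!(pending.is_pending());
assert_eq!(finished, Poll::Ready(None));
```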
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 0978b3c92..030040ba0 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -143,17 +143,17 @@
//!
//! ```
//! # #![allow(dead_code)]
-//! use std::cell::RefCell;
+//! use std::cell::OnceCell;
//!
//! struct Graph {
//! edges: Vec<(i32, i32)>,
-//! span_tree_cache: RefCell<Option<Vec<(i32, i32)>>>
+//! span_tree_cache: OnceCell<Vec<(i32, i32)>>
//! }
//!
//! impl Graph {
//! fn minimum_spanning_tree(&self) -> Vec<(i32, i32)> {
-//! self.span_tree_cache.borrow_mut()
-//! .get_or_insert_with(|| self.calc_span_tree())
+//! self.span_tree_cache
+//! .get_or_init(|| self.calc_span_tree())
//! .clone()
//! }
//!
@@ -409,8 +409,7 @@ impl<T> Cell<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn set(&self, val: T) {
- let old = self.replace(val);
- drop(old);
+ self.replace(val);
}
/// Swaps the values of two `Cell`s.
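The doc example's switch from `RefCell<Option<_>>` to `OnceCell` works because `get_or_init` runs its closure at most once; a standalone sketch:

```rust
use std::cell::OnceCell;

let cache: OnceCell<Vec<i32>> = OnceCell::new();
// The closure runs on the first call only; later calls reuse the stored value.
let first = cache.get_or_init(|| vec![1, 2, 3]);
assert_eq!(first, &vec![1, 2, 3]);
let again = cache.get_or_init(|| unreachable!());
assert_eq!(again, &vec![1, 2, 3]);
```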
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index 7ce33bdd4..a93b94867 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -1,6 +1,5 @@
//! impl char {}
-use crate::ascii;
use crate::slice;
use crate::str::from_utf8_unchecked_mut;
use crate::unicode::printable::is_printable;
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index d7ca9c22d..ba86334f9 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -210,8 +210,6 @@ pub struct AssertParamIsCopy<T: Copy + ?Sized> {
/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
/// in `rustc_trait_selection`.
mod impls {
- use super::Clone;
-
macro_rules! impl_clone {
($($t:ty)*) => {
$(
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index fadf2fcc9..d7c41ac5c 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -224,11 +224,13 @@ use self::Ordering::*;
append_const_msg
)]
#[rustc_diagnostic_item = "PartialEq"]
+#[cfg_attr(not(bootstrap), const_trait)]
pub trait PartialEq<Rhs: ?Sized = Self> {
/// This method tests for `self` and `other` values to be equal, and is used
/// by `==`.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialeq_eq")]
fn eq(&self, other: &Rhs) -> bool;
/// This method tests for `!=`. The default implementation is almost always
@@ -236,6 +238,7 @@ pub trait PartialEq<Rhs: ?Sized = Self> {
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialeq_ne")]
fn ne(&self, other: &Rhs) -> bool {
!self.eq(other)
}
@@ -1414,12 +1417,23 @@ mod impls {
macro_rules! partial_eq_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(bootstrap)]
impl PartialEq for $t {
#[inline]
fn eq(&self, other: &$t) -> bool { (*self) == (*other) }
#[inline]
fn ne(&self, other: &$t) -> bool { (*self) != (*other) }
}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ #[cfg(not(bootstrap))]
+ impl const PartialEq for $t {
+ #[inline]
+ fn eq(&self, other: &$t) -> bool { (*self) == (*other) }
+ #[inline]
+ fn ne(&self, other: &$t) -> bool { (*self) != (*other) }
+ }
)*)
}
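Primitive comparisons already work in const contexts on stable; the `impl const PartialEq` above expresses that same fact through the unstable const-trait machinery. For instance:

```rust
// Stable today: `==` on primitives inside a `const fn`.
const fn is_answer(x: u32) -> bool {
    x == 42
}
const _: () = assert!(is_answer(42));
```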
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index b048b5135..08dc8f48d 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -1,4 +1,4 @@
-use super::{From, TryFrom};
+use super::TryFrom;
use crate::num::TryFromIntError;
mod private {
diff --git a/library/core/src/escape.rs b/library/core/src/escape.rs
index 24bb9ad1a..60b5df752 100644
--- a/library/core/src/escape.rs
+++ b/library/core/src/escape.rs
@@ -21,12 +21,16 @@ pub(crate) fn escape_ascii_into(output: &mut [ascii::Char; 4], byte: u8) -> Rang
b'\\' => backslash(ascii::Char::ReverseSolidus),
b'\'' => backslash(ascii::Char::Apostrophe),
b'\"' => backslash(ascii::Char::QuotationMark),
- _ => if let Some(a) = byte.as_ascii() && !byte.is_ascii_control() {
- ([a, ascii::Char::Null, ascii::Char::Null, ascii::Char::Null], 1)
- } else {
- let hi = HEX_DIGITS[usize::from(byte >> 4)];
- let lo = HEX_DIGITS[usize::from(byte & 0xf)];
- ([ascii::Char::ReverseSolidus, ascii::Char::SmallX, hi, lo], 4)
+ _ => {
+ if let Some(a) = byte.as_ascii()
+ && !byte.is_ascii_control()
+ {
+ ([a, ascii::Char::Null, ascii::Char::Null, ascii::Char::Null], 1)
+ } else {
+ let hi = HEX_DIGITS[usize::from(byte >> 4)];
+ let lo = HEX_DIGITS[usize::from(byte & 0xf)];
+ ([ascii::Char::ReverseSolidus, ascii::Char::SmallX, hi, lo], 4)
+ }
}
};
*output = data;
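The reformatted branch distinguishes printable ASCII, named escapes, and the `\xNN` fallback; the stable `u8::escape_ascii` exposes the same behavior:

```rust
assert_eq!(b'a'.escape_ascii().to_string(), "a");        // printable: kept as-is
assert_eq!(b'\t'.escape_ascii().to_string(), "\\t");     // named escape
assert_eq!(b'\x9d'.escape_ascii().to_string(), "\\x9d"); // `\xNN` fallback
```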
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index e7ec1fb73..bb839a71e 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -205,7 +205,7 @@ impl CStr {
/// * The memory pointed to by `ptr` must contain a valid nul terminator at the
/// end of the string.
///
- /// * `ptr` must be [valid] for reads of bytes up to and including the null terminator.
+ /// * `ptr` must be [valid] for reads of bytes up to and including the nul terminator.
/// This means in particular:
///
/// * The entire memory range of this `CStr` must be contained within a single allocated object!
@@ -415,7 +415,7 @@ impl CStr {
let mut i = bytes.len().saturating_sub(1);
assert!(!bytes.is_empty() && bytes[i] == 0, "input was not nul-terminated");
- // Ending null byte exists, skip to the rest.
+ // Ending nul byte exists, skip to the rest.
while i != 0 {
i -= 1;
let byte = bytes[i];
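For reference, `CStr::from_bytes_with_nul` enforces exactly the property the reworded docs describe — one nul byte, at the end:

```rust
use std::ffi::CStr;

assert!(CStr::from_bytes_with_nul(b"hello\0").is_ok());
assert!(CStr::from_bytes_with_nul(b"hello").is_err());     // not nul-terminated
assert!(CStr::from_bytes_with_nul(b"he\0llo\0").is_err()); // interior nul byte
```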
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index 6908c824f..7340ad90d 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -241,7 +241,6 @@ impl fmt::Debug for c_void {
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -270,7 +269,6 @@ pub struct VaListImpl<'f> {
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -395,7 +393,6 @@ pub struct VaList<'a, 'f: 'a> {
any(target_os = "macos", target_os = "ios", target_os = "tvos")
),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -413,7 +410,6 @@ pub struct VaList<'a, 'f: 'a> {
not(any(target_os = "macos", target_os = "ios", target_os = "tvos"))
),
not(target_family = "wasm"),
- not(target_arch = "asmjs"),
not(target_os = "uefi"),
not(windows),
))]
@@ -431,7 +427,6 @@ pub struct VaList<'a, 'f: 'a> {
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
- target_arch = "asmjs",
target_os = "uefi",
windows,
))]
@@ -461,7 +456,6 @@ impl<'f> VaListImpl<'f> {
not(any(target_os = "macos", target_os = "ios", target_os = "tvos"))
),
not(target_family = "wasm"),
- not(target_arch = "asmjs"),
not(target_os = "uefi"),
not(windows),
))]
diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs
index 4f42f73eb..ab2158394 100644
--- a/library/core/src/fmt/num.rs
+++ b/library/core/src/fmt/num.rs
@@ -15,7 +15,7 @@ trait DisplayInt:
fn zero() -> Self;
fn from_u8(u: u8) -> Self;
fn to_u8(&self) -> u8;
- fn to_u16(&self) -> u16;
+ #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
fn to_u32(&self) -> u32;
fn to_u64(&self) -> u64;
fn to_u128(&self) -> u128;
@@ -27,7 +27,7 @@ macro_rules! impl_int {
fn zero() -> Self { 0 }
fn from_u8(u: u8) -> Self { u as Self }
fn to_u8(&self) -> u8 { *self as u8 }
- fn to_u16(&self) -> u16 { *self as u16 }
+ #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
fn to_u32(&self) -> u32 { *self as u32 }
fn to_u64(&self) -> u64 { *self as u64 }
fn to_u128(&self) -> u128 { *self as u128 }
@@ -40,7 +40,7 @@ macro_rules! impl_uint {
fn zero() -> Self { 0 }
fn from_u8(u: u8) -> Self { u as Self }
fn to_u8(&self) -> u8 { *self as u8 }
- fn to_u16(&self) -> u16 { *self as u16 }
+ #[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
fn to_u32(&self) -> u32 { *self as u32 }
fn to_u64(&self) -> u64 { *self as u64 }
fn to_u128(&self) -> u128 { *self as u128 }
@@ -309,7 +309,6 @@ macro_rules! impl_Exp {
n /= 10;
exponent += 1;
}
-
let (added_precision, subtracted_precision) = match f.precision() {
Some(fmt_prec) => {
// number of decimal digits minus 1
@@ -331,9 +330,15 @@ macro_rules! impl_Exp {
let rem = n % 10;
n /= 10;
exponent += 1;
- // round up last digit
- if rem >= 5 {
+ // round up last digit, round to even on a tie
+ if rem > 5 || (rem == 5 && (n % 2 != 0 || subtracted_precision > 1 )) {
n += 1;
+ // if the digit is rounded to the next power
+ // instead adjust the exponent
+ if n.ilog10() > (n - 1).ilog10() {
+ n /= 10;
+ exponent += 1;
+ }
}
}
(n, exponent, exponent, added_precision)
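The new tie-breaking rule is round-half-to-even; a `5` is also rounded up when further digits were already discarded (`subtracted_precision > 1`), since the true value is then strictly above the halfway point. A minimal sketch of the even-rounding step, not the actual formatting code:

```rust
// Sketch only: drop the last decimal digit of `n`, rounding half-to-even.
fn drop_digit_round_even(n: u64) -> u64 {
    let (q, rem) = (n / 10, n % 10);
    if rem > 5 || (rem == 5 && q % 2 != 0) { q + 1 } else { q }
}
assert_eq!(drop_digit_round_even(25), 2); // exact tie, quotient already even
assert_eq!(drop_digit_round_even(35), 4); // exact tie, round up to even
assert_eq!(drop_digit_round_even(26), 3); // above the halfway point
```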
diff --git a/library/core/src/future/future.rs b/library/core/src/future/future.rs
index 8c7111cb3..71b9464ef 100644
--- a/library/core/src/future/future.rs
+++ b/library/core/src/future/future.rs
@@ -1,6 +1,5 @@
#![stable(feature = "futures_api", since = "1.36.0")]
-use crate::marker::Unpin;
use crate::ops;
use crate::pin::Pin;
use crate::task::{Context, Poll};
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 35b757dc1..153971a59 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -12,8 +12,7 @@
//! # Examples
//!
//! ```rust
-//! use std::collections::hash_map::DefaultHasher;
-//! use std::hash::{Hash, Hasher};
+//! use std::hash::{DefaultHasher, Hash, Hasher};
//!
//! #[derive(Hash)]
//! struct Person {
@@ -46,8 +45,7 @@
//! the [`Hash`] trait:
//!
//! ```rust
-//! use std::collections::hash_map::DefaultHasher;
-//! use std::hash::{Hash, Hasher};
+//! use std::hash::{DefaultHasher, Hash, Hasher};
//!
//! struct Person {
//! id: u32,
@@ -194,8 +192,7 @@ pub trait Hash {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::{Hash, Hasher};
+ /// use std::hash::{DefaultHasher, Hash, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// 7920.hash(&mut hasher);
@@ -224,8 +221,7 @@ pub trait Hash {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::{Hash, Hasher};
+ /// use std::hash::{DefaultHasher, Hash, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// let numbers = [6, 28, 496, 8128];
@@ -300,8 +296,7 @@ pub use macros::Hash;
/// # Examples
///
/// ```
-/// use std::collections::hash_map::DefaultHasher;
-/// use std::hash::Hasher;
+/// use std::hash::{DefaultHasher, Hasher};
///
/// let mut hasher = DefaultHasher::new();
///
@@ -329,8 +324,7 @@ pub trait Hasher {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::Hasher;
+ /// use std::hash::{DefaultHasher, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// hasher.write(b"Cool!");
@@ -347,8 +341,7 @@ pub trait Hasher {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::DefaultHasher;
- /// use std::hash::Hasher;
+ /// use std::hash::{DefaultHasher, Hasher};
///
/// let mut hasher = DefaultHasher::new();
/// let data = [0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
@@ -627,8 +620,7 @@ impl<H: Hasher + ?Sized> Hasher for &mut H {
/// # Examples
///
/// ```
-/// use std::collections::hash_map::RandomState;
-/// use std::hash::{BuildHasher, Hasher};
+/// use std::hash::{BuildHasher, Hasher, RandomState};
///
/// let s = RandomState::new();
/// let mut hasher_1 = s.build_hasher();
@@ -656,8 +648,7 @@ pub trait BuildHasher {
/// # Examples
///
/// ```
- /// use std::collections::hash_map::RandomState;
- /// use std::hash::BuildHasher;
+ /// use std::hash::{BuildHasher, RandomState};
///
/// let s = RandomState::new();
/// let new_s = s.build_hasher();
@@ -690,7 +681,7 @@ pub trait BuildHasher {
/// }
///
/// // Then later, in a `#[test]` for the type...
- /// let bh = std::collections::hash_map::RandomState::new();
+ /// let bh = std::hash::RandomState::new();
/// assert_eq!(
/// bh.hash_one(OrderAmbivalentPair(1, 2)),
/// bh.hash_one(OrderAmbivalentPair(2, 1))
diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs
index 6b9f2e842..78a232faa 100644
--- a/library/core/src/hash/sip.rs
+++ b/library/core/src/hash/sip.rs
@@ -14,7 +14,7 @@ use crate::ptr;
///
/// See: <https://131002.net/siphash>
#[unstable(feature = "hashmap_internals", issue = "none")]
-#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
#[doc(hidden)]
pub struct SipHasher13 {
@@ -25,7 +25,7 @@ pub struct SipHasher13 {
///
/// See: <https://131002.net/siphash/>
#[unstable(feature = "hashmap_internals", issue = "none")]
-#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
struct SipHasher24 {
hasher: Hasher<Sip24Rounds>,
@@ -44,7 +44,7 @@ struct SipHasher24 {
/// it is not intended for cryptographic purposes. As such, all
/// cryptographic uses of this implementation are _strongly discouraged_.
#[stable(feature = "rust1", since = "1.0.0")]
-#[deprecated(since = "1.13.0", note = "use `std::collections::hash_map::DefaultHasher` instead")]
+#[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
pub struct SipHasher(SipHasher24);
@@ -147,10 +147,7 @@ impl SipHasher {
/// Creates a new `SipHasher` with the two initial keys set to 0.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
pub const fn new() -> SipHasher {
@@ -160,10 +157,7 @@ impl SipHasher {
/// Creates a `SipHasher` that is keyed off the provided keys.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
@@ -175,10 +169,7 @@ impl SipHasher13 {
/// Creates a new `SipHasher13` with the two initial keys set to 0.
#[inline]
#[unstable(feature = "hashmap_internals", issue = "none")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
pub const fn new() -> SipHasher13 {
SipHasher13::new_with_keys(0, 0)
@@ -187,10 +178,7 @@ impl SipHasher13 {
/// Creates a `SipHasher13` that is keyed off the provided keys.
#[inline]
#[unstable(feature = "hashmap_internals", issue = "none")]
- #[deprecated(
- since = "1.13.0",
- note = "use `std::collections::hash_map::DefaultHasher` instead"
- )]
+ #[deprecated(since = "1.13.0", note = "use `std::hash::DefaultHasher` instead")]
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
SipHasher13 { hasher: Hasher::new_with_keys(key0, key1) }
diff --git a/library/core/src/internal_macros.rs b/library/core/src/internal_macros.rs
index 5774107f5..bf53b2245 100644
--- a/library/core/src/internal_macros.rs
+++ b/library/core/src/internal_macros.rs
@@ -31,6 +31,7 @@ macro_rules! forward_ref_binop {
type Output = <$t as $imp<$u>>::Output;
#[inline]
+ #[track_caller]
fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
@@ -41,6 +42,7 @@ macro_rules! forward_ref_binop {
type Output = <$t as $imp<$u>>::Output;
#[inline]
+ #[track_caller]
fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
@@ -51,6 +53,7 @@ macro_rules! forward_ref_binop {
type Output = <$t as $imp<$u>>::Output;
#[inline]
+ #[track_caller]
fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
@@ -69,6 +72,7 @@ macro_rules! forward_ref_op_assign {
#[$attr]
impl $imp<&$u> for $t {
#[inline]
+ #[track_caller]
fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
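`#[track_caller]` makes a panic inside these forwarding shims (e.g. arithmetic overflow in debug builds) report the user's call site rather than the macro-generated impl. A hypothetical standalone shim, not the macro itself:

```rust
// With `#[track_caller]`, a panic in the body is attributed to the caller.
#[track_caller]
fn forward_add(a: &i32, b: &i32) -> i32 {
    *a + *b
}
assert_eq!(forward_add(&1, &2), 3);
```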
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index c5aef67b5..5107ba1a9 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -59,6 +59,7 @@ use crate::marker::Tuple;
use crate::mem;
pub mod mir;
+pub mod simd;
// These imports are used for simplifying intra-doc links
#[allow(unused_imports)]
@@ -341,6 +342,9 @@ extern "rust-intrinsic" {
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
#[rustc_nounwind]
pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
+ /// Do NOT use this intrinsic; "unordered" operations do not exist in our memory model!
+ /// In terms of the Rust Abstract Machine, this operation is equivalent to `src.read()`,
+ /// i.e., it performs a non-atomic read.
#[rustc_nounwind]
pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
@@ -365,6 +369,9 @@ extern "rust-intrinsic" {
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::store`].
#[rustc_nounwind]
pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
+ /// Do NOT use this intrinsic; "unordered" operations do not exist in our memory model!
+ /// In terms of the Rust Abstract Machine, this operation is equivalent to `dst.write(val)`,
+ /// i.e., it performs a non-atomic write.
#[rustc_nounwind]
pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
@@ -1900,6 +1907,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::ctlz;
///
@@ -1912,6 +1920,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::ctlz;
///
@@ -1933,6 +1942,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::ctlz_nonzero;
///
@@ -1959,6 +1969,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::cttz;
///
@@ -1971,6 +1982,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::cttz;
///
@@ -1992,6 +2004,7 @@ extern "rust-intrinsic" {
///
/// ```
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
///
/// use std::intrinsics::cttz_nonzero;
///
@@ -2312,6 +2325,10 @@ extern "rust-intrinsic" {
/// Emits a `!nontemporal` store according to LLVM (see their docs).
/// Probably will never become stable.
+ ///
+ /// Do NOT use this intrinsic; "nontemporal" operations do not exist in our memory model!
+ /// It exists to support current stdarch, but the plan is to change stdarch and remove this intrinsic.
+ /// See <https://github.com/rust-lang/rust/issues/114582> for some more discussion.
#[rustc_nounwind]
pub fn nontemporal_store<T>(ptr: *mut T, val: T);
@@ -2453,6 +2470,7 @@ extern "rust-intrinsic" {
/// ```no_run
/// #![feature(const_eval_select)]
/// #![feature(core_intrinsics)]
+ /// # #![allow(internal_features)]
/// use std::hint::unreachable_unchecked;
/// use std::intrinsics::const_eval_select;
///
@@ -2487,12 +2505,6 @@ extern "rust-intrinsic" {
where
G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>;
-
- /// This method creates a pointer to any `Some` value. If the argument is
- /// `None`, an invalid within-bounds pointer (that is still acceptable for
- /// constructing an empty slice) is returned.
- #[rustc_nounwind]
- pub fn option_payload_ptr<T>(arg: *const Option<T>) -> *const T;
}
// Some functions are defined here because they accidentally got made
@@ -2855,3 +2867,28 @@ pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
write_bytes(dst, val, count)
}
}
+
+/// Inform Miri that a given pointer definitely has a certain alignment.
+#[cfg(miri)]
+pub(crate) const fn miri_promise_symbolic_alignment(ptr: *const (), align: usize) {
+ extern "Rust" {
+ /// Miri-provided extern function to promise that a given pointer is properly aligned for
+ /// "symbolic" alignment checks. Will fail if the pointer is not actually aligned or `align` is
+ /// not a power of two. Has no effect when alignment checks are concrete (which is the default).
+ fn miri_promise_symbolic_alignment(ptr: *const (), align: usize);
+ }
+
+ fn runtime(ptr: *const (), align: usize) {
+ // SAFETY: this call is always safe.
+ unsafe {
+ miri_promise_symbolic_alignment(ptr, align);
+ }
+ }
+
+ const fn compiletime(_ptr: *const (), _align: usize) {}
+
+ // SAFETY: the extra behavior at runtime is for UB checks only.
+ unsafe {
+ const_eval_select((ptr, align), compiletime, runtime);
+ }
+}
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index b26a17ec3..34a61e76f 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -110,15 +110,15 @@
//! let popped;
//!
//! {
-//! Call(_unused = Vec::push(v, value), pop)
+//! Call(_unused = Vec::push(v, value), pop, UnwindContinue())
//! }
//!
//! pop = {
-//! Call(popped = Vec::pop(v), drop)
+//! Call(popped = Vec::pop(v), drop, UnwindContinue())
//! }
//!
//! drop = {
-//! Drop(popped, ret)
+//! Drop(popped, ret, UnwindContinue())
//! }
//!
//! ret = {
@@ -193,7 +193,7 @@
//! 27 | | )
//! | |_____- binding declared here but left uninitialized
//!
-//! error: aborting due to previous error
+//! error: aborting due to 1 previous error
//!
//! For more information about this error, try `rustc --explain E0381`.
//! ```
@@ -238,10 +238,6 @@
//!
//! #### Terminators
//!
-//! Custom MIR does not currently support cleanup blocks or non-trivial unwind paths. As such, there
-//! are no resume and abort terminators, and terminators that might unwind do not have any way to
-//! indicate the unwind block.
-//!
//! - [`Goto`], [`Return`], [`Unreachable`] and [`Drop`](Drop()) have associated functions.
//! - `match some_int_operand` becomes a `SwitchInt`. Each arm should be `literal => basic_block`
//! - The exception is the last arm, which must be `_ => basic_block` and corresponds to the
@@ -260,7 +256,26 @@
/// Type representing basic blocks.
///
/// All terminators will have this type as a return type. It helps achieve some type safety.
-pub struct BasicBlock;
+#[rustc_diagnostic_item = "mir_basic_block"]
+pub enum BasicBlock {
+ /// A non-cleanup basic block.
+ Normal,
+ /// A basic block that lies on an unwind path.
+ Cleanup,
+}
+
+/// The reason we are terminating the process during unwinding.
+#[rustc_diagnostic_item = "mir_unwind_terminate_reason"]
+pub enum UnwindTerminateReason {
+ /// Unwinding is just not possible given the ABI of this function.
+ Abi,
+ /// We were already cleaning up for an ongoing unwind, and a *second*, *nested* unwind was
+ /// triggered by the drop glue.
+ InCleanup,
+}
+
+pub use UnwindTerminateReason::Abi as ReasonAbi;
+pub use UnwindTerminateReason::InCleanup as ReasonInCleanup;
macro_rules! define {
($name:literal, $( #[ $meta:meta ] )* fn $($sig:tt)*) => {
@@ -271,11 +286,41 @@ macro_rules! define {
}
}
+// Unwind actions
+define!(
+ "mir_unwind_continue",
+ /// An unwind action that continues unwinding.
+ fn UnwindContinue()
+);
+define!(
+ "mir_unwind_unreachable",
+ /// An unwind action that triggers undefined behaviour.
+ fn UnwindUnreachable() -> BasicBlock
+);
+define!(
+ "mir_unwind_terminate",
+ /// An unwind action that terminates the execution.
+ ///
+ /// `UnwindTerminate` can also be used as a terminator.
+ fn UnwindTerminate(reason: UnwindTerminateReason)
+);
+define!(
+ "mir_unwind_cleanup",
+ /// An unwind action that continues execution in a given basic block.
+ fn UnwindCleanup(goto: BasicBlock)
+);
+
+// Terminators
define!("mir_return", fn Return() -> BasicBlock);
define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
define!("mir_unreachable", fn Unreachable() -> BasicBlock);
-define!("mir_drop", fn Drop<T>(place: T, goto: BasicBlock));
-define!("mir_call", fn Call(call: (), goto: BasicBlock));
+define!("mir_drop", fn Drop<T, U>(place: T, goto: BasicBlock, unwind_action: U));
+define!("mir_call", fn Call<U>(call: (), goto: BasicBlock, unwind_action: U));
+define!("mir_unwind_resume",
+ /// A terminator that resumes the unwinding.
+ fn UnwindResume()
+);
+
define!("mir_storage_live", fn StorageLive<T>(local: T));
define!("mir_storage_dead", fn StorageDead<T>(local: T));
define!("mir_deinit", fn Deinit<T>(place: T));
@@ -382,16 +427,15 @@ pub macro mir {
}
$(
- $block_name:ident = {
+ $block_name:ident $(($block_cleanup:ident))? = {
$($block:tt)*
}
)*
) => {{
// First, we declare all basic blocks.
- $(
- let $block_name: ::core::intrinsics::mir::BasicBlock;
- )*
-
+ __internal_declare_basic_blocks!($(
+ $block_name $(($block_cleanup))?
+ )*);
{
// Now all locals
#[allow(non_snake_case)]
@@ -585,3 +629,17 @@ pub macro __internal_remove_let {
}
},
}
+
+/// Helper macro that declares the basic blocks.
+#[doc(hidden)]
+pub macro __internal_declare_basic_blocks {
+ () => {},
+ ($name:ident (cleanup) $($rest:tt)*) => {
+ let $name = ::core::intrinsics::mir::BasicBlock::Cleanup;
+ __internal_declare_basic_blocks!($($rest)*)
+ },
+ ($name:ident $($rest:tt)*) => {
+ let $name = ::core::intrinsics::mir::BasicBlock::Normal;
+ __internal_declare_basic_blocks!($($rest)*)
+ },
+}
diff --git a/library/core/src/intrinsics/simd.rs b/library/core/src/intrinsics/simd.rs
new file mode 100644
index 000000000..68c8a335b
--- /dev/null
+++ b/library/core/src/intrinsics/simd.rs
@@ -0,0 +1,473 @@
+//! SIMD compiler intrinsics.
+//!
+//! In this module, a "vector" is any `repr(simd)` type.
+
+extern "platform-intrinsic" {
+ /// Add two simd vectors elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ pub fn simd_add<T>(x: T, y: T) -> T;
+
+ /// Subtract `rhs` from `lhs` elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ pub fn simd_sub<T>(lhs: T, rhs: T) -> T;
+
+ /// Multiply two simd vectors elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ pub fn simd_mul<T>(x: T, y: T) -> T;
+
+ /// Divide `lhs` by `rhs` elementwise.
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// # Safety
+ /// For integers, `rhs` must not contain any zero elements.
+ /// Additionally for signed integers, `<int>::MIN / -1` is undefined behavior.
+ pub fn simd_div<T>(lhs: T, rhs: T) -> T;
+
+ /// Remainder of two vectors elementwise
+ ///
+ /// `T` must be a vector of integer or floating point primitive types.
+ ///
+ /// # Safety
+ /// For integers, `rhs` must not contain any zero elements.
+ /// Additionally for signed integers, `<int>::MIN / -1` is undefined behavior.
+ pub fn simd_rem<T>(lhs: T, rhs: T) -> T;
+
+ /// Elementwise vector left shift, with UB on overflow.
+ ///
+ /// Shift `lhs` left by `rhs`, shifting in zeros.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// # Safety
+ ///
+ /// Each element of `rhs` must be less than `<int>::BITS`.
+ pub fn simd_shl<T>(lhs: T, rhs: T) -> T;
+
+ /// Elementwise vector right shift, with UB on overflow.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// Shift `lhs` right by `rhs`, shifting in sign bits for signed types.
+ ///
+ /// # Safety
+ ///
+ /// Each element of `rhs` must be less than `<int>::BITS`.
+ pub fn simd_shr<T>(lhs: T, rhs: T) -> T;
+
+ /// Elementwise vector "and".
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_and<T>(x: T, y: T) -> T;
+
+ /// Elementwise vector "or".
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_or<T>(x: T, y: T) -> T;
+
+ /// Elementwise vector "exclusive or".
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_xor<T>(x: T, y: T) -> T;
+
+ /// Numerically cast a vector, elementwise.
+ ///
+ /// `T` and `U` must be vectors of integer or floating point primitive types, and must have the
+ /// same length.
+ ///
+ /// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB.
+ /// When casting integers to floats, the result is rounded.
+ /// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
+ ///
+ /// # Safety
+ /// Casting from integer types is always safe.
+ /// Casting between two float types is also always safe.
+ ///
+ /// Casting floats to integers truncates, following the same rules as `to_int_unchecked`.
+ /// Specifically, each element must:
+ /// * Not be `NaN`
+ /// * Not be infinite
+ /// * Be representable in the return type, after truncating off its fractional part
+ pub fn simd_cast<T, U>(x: T) -> U;
+
+ /// Numerically cast a vector, elementwise.
+ ///
+ /// `T` and `U` must be vectors of integer or floating point primitive types, and must have the
+ /// same length.
+ ///
+ /// Like `simd_cast`, but saturates float-to-integer conversions (NaN becomes 0).
+ /// This matches regular `as` and is always safe.
+ ///
+ /// When casting floats to integers, the result is truncated.
+ /// When casting integers to floats, the result is rounded.
+ /// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
+ pub fn simd_as<T, U>(x: T) -> U;
+
+ /// Elementwise negation of a vector.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// Rust panics for `-<int>::MIN` due to overflow, but it is not UB with this intrinsic.
+ pub fn simd_neg<T>(x: T) -> T;
+
+ /// Elementwise absolute value of a vector.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ pub fn simd_fabs<T>(x: T) -> T;
+
+ /// Elementwise minimum of a vector.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// Follows IEEE-754 `minNum` semantics.
+ pub fn simd_fmin<T>(x: T, y: T) -> T;
+
+ /// Elementwise maximum of a vector.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// Follows IEEE-754 `maxNum` semantics.
+ pub fn simd_fmax<T>(x: T, y: T) -> T;
+
+ /// Tests elementwise equality of two vectors.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_eq<T, U>(x: T, y: T) -> U;
+
+ /// Tests elementwise inequality of two vectors.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_ne<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is less than `y`, elementwise.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_lt<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is less than or equal to `y`, elementwise.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_le<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is greater than `y`, elementwise.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_gt<T, U>(x: T, y: T) -> U;
+
+ /// Tests if `x` is greater than or equal to `y`, elementwise.
+ ///
+ /// `T` must be a vector of floating-point primitive types.
+ ///
+ /// `U` must be a vector of integers with the same number of elements and element size as `T`.
+ ///
+ /// Returns `0` for false and `!0` for true.
+ pub fn simd_ge<T, U>(x: T, y: T) -> U;
+
+ /// Shuffle two vectors by const indices.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a const array of `i32`s.
+ ///
+ /// `V` must be a vector with the same element type as `T` and the same length as `U`.
+ ///
+ /// Concatenates `x` and `y`, then returns a new vector such that each element is selected from
+ /// the concatenation by the matching index in `idx`.
+ ///
+ /// `idx` must be a constant: either naming a constant item, or an inline
+ /// `const {}` expression.
+ pub fn simd_shuffle<T, U, V>(x: T, y: T, idx: U) -> V;
+
+ /// Read a vector of pointers.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a vector of pointers to the element type of `T`, with the same length as `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each pointer in `ptr`, if the corresponding value in `mask` is `!0`, read the pointer.
+ /// Otherwise if the corresponding value in `mask` is `0`, return the corresponding value from
+ /// `val`.
+ ///
+ /// # Safety
+ /// Unmasked values in `T` must be readable as if by `<ptr>::read` (e.g. aligned to the element
+ /// type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ pub fn simd_gather<T, U, V>(val: T, ptr: U, mask: V) -> T;
+
+ /// Write to a vector of pointers.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a vector of pointers to the element type of `T`, with the same length as `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each pointer in `ptr`, if the corresponding value in `mask` is `!0`, write the
+ /// corresponding value in `val` to the pointer.
+ /// Otherwise if the corresponding value in `mask` is `0`, do nothing.
+ ///
+ /// # Safety
+ /// Unmasked values in `T` must be writeable as if by `<ptr>::write` (e.g. aligned to the element
+ /// type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ pub fn simd_scatter<T, U, V>(val: T, ptr: U, mask: V);
+
+ /// Read a vector of pointers.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a vector of pointers to the element type of `T`, with the same length as `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each element, if the corresponding value in `mask` is `!0`, read the corresponding
+ /// pointer from `ptr`.
+ /// Otherwise if the corresponding value in `mask` is `0`, return the corresponding value from
+ /// `val`.
+ ///
+ /// # Safety
+ /// Unmasked values in `T` must be readable as if by `<ptr>::read` (e.g. aligned to the element
+ /// type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ #[cfg(not(bootstrap))]
+ pub fn simd_masked_load<V, U, T>(mask: V, ptr: U, val: T) -> T;
+
+ /// Write to a vector of pointers.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// `U` must be a vector of pointers to the element type of `T`, with the same length as `T`.
+ ///
+ /// `V` must be a vector of integers with the same length as `T` (but any element size).
+ ///
+ /// For each element, if the corresponding value in `mask` is `!0`, write the corresponding
+ /// value in `val` to the pointer.
+ /// Otherwise if the corresponding value in `mask` is `0`, do nothing.
+ ///
+ /// # Safety
+ /// Unmasked values in `T` must be writeable as if by `<ptr>::write` (e.g. aligned to the element
+ /// type).
+ ///
+ /// `mask` must only contain `0` or `!0` values.
+ #[cfg(not(bootstrap))]
+ pub fn simd_masked_store<V, U, T>(mask: V, ptr: U, val: T);
+
+ /// Add two simd vectors elementwise, with saturation.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ pub fn simd_saturating_add<T>(x: T, y: T) -> T;
+
+ /// Subtract two simd vectors elementwise, with saturation.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// Subtract `rhs` from `lhs`.
+ pub fn simd_saturating_sub<T>(lhs: T, rhs: T) -> T;
+
+ /// Add elements within a vector from left to right.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// Starting with the value `y`, add the elements of `x` and accumulate.
+ pub fn simd_reduce_add_ordered<T, U>(x: T, y: U) -> U;
+
+ /// Multiply elements within a vector from left to right.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// Starting with the value `y`, multiply the elements of `x` and accumulate.
+ pub fn simd_reduce_mul_ordered<T, U>(x: T, y: U) -> U;
+
+ /// Check if all mask values are true.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// # Safety
+ /// `x` must contain only `0` or `!0`.
+ pub fn simd_reduce_all<T>(x: T) -> bool;
+
+ /// Check if any mask value is true.
+ ///
+ /// `T` must be a vector of integer primitive types.
+ ///
+ /// # Safety
+ /// `x` must contain only `0` or `!0`.
+ pub fn simd_reduce_any<T>(x: T) -> bool;
+
+ /// Return the maximum element of a vector.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// For floating-point values, uses IEEE-754 `maxNum`.
+ pub fn simd_reduce_max<T, U>(x: T) -> U;
+
+ /// Return the minimum element of a vector.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ ///
+ /// For floating-point values, uses IEEE-754 `minNum`.
+ pub fn simd_reduce_min<T, U>(x: T) -> U;
+
+ /// Logical "and" all elements together.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ pub fn simd_reduce_and<T, U>(x: T) -> U;
+
+ /// Logical "or" all elements together.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ pub fn simd_reduce_or<T, U>(x: T) -> U;
+
+ /// Logical "exclusive or" all elements together.
+ ///
+ /// `T` must be a vector of integer or floating-point primitive types.
+ ///
+ /// `U` must be the element type of `T`.
+ pub fn simd_reduce_xor<T, U>(x: T) -> U;
+
+ /// Truncate an integer vector to a bitmask.
+ ///
+ /// `T` must be an integer vector.
+ ///
+ /// `U` must be either the smallest unsigned integer with at least as many bits as the length
+ /// of `T`, or the smallest array of `u8` with as many bits as the length of `T`.
+ ///
+ /// Each element is truncated to a single bit and packed into the result.
+ ///
+ /// No matter whether the output is an array or an unsigned integer, it is treated as a single
+ /// contiguous list of bits. The bitmask is always packed on the least-significant side of the
+ /// output, and padded with 0s in the most-significant bits. The order of the bits depends on
+ /// endianness:
+ ///
+ /// * On little endian, the least significant bit corresponds to the first vector element.
+ /// * On big endian, the least significant bit corresponds to the last vector element.
+ ///
+ /// For example, `[!0, 0, !0, !0]` packs to `0b1101` on little endian and `0b1011` on big
+ /// endian.
+ ///
+ /// To consider a larger example, `[!0, 0, 0, 0, 0, 0, 0, 0, !0, !0, 0, 0, 0, 0, !0, 0]` packs
+ /// to `[0b00000001, 0b01000011]` or `0b0100001100000001` on little endian, and `[0b10000000,
+ /// 0b11000010]` or `0b1000000011000010` on big endian.
+ ///
+ /// # Safety
+ /// `x` must contain only `0` and `!0`.
+ pub fn simd_bitmask<T, U>(x: T) -> U;
+
+ /// Select elements from a mask.
+ ///
+ /// `M` must be an integer vector.
+ ///
+ /// `T` must be a vector with the same number of elements as `M`.
+ ///
+ /// For each element, if the corresponding value in `mask` is `!0`, select the element from
+ /// `if_true`. If the corresponding value in `mask` is `0`, select the element from
+ /// `if_false`.
+ ///
+ /// # Safety
+ /// `mask` must only contain `0` and `!0`.
+ pub fn simd_select<M, T>(mask: M, if_true: T, if_false: T) -> T;
+
+ /// Select elements from a bitmask.
+ ///
+ /// `M` must be an unsigned integer or array of `u8`, matching `simd_bitmask`.
+ ///
+ /// `T` must be a vector.
+ ///
+ /// For each element, if the bit in `mask` is `1`, select the element from
+ /// `if_true`. If the corresponding bit in `mask` is `0`, select the element from
+ /// `if_false`.
+ ///
+ /// The bitmask bit order matches `simd_bitmask`.
+ ///
+ /// # Safety
+ /// Padding bits must be all zero.
+ pub fn simd_select_bitmask<M, T>(m: M, yes: T, no: T) -> T;
+
+ /// Elementwise calculates the offset from a pointer vector, potentially wrapping.
+ ///
+ /// `T` must be a vector of pointers.
+ ///
+ /// `U` must be a vector of `isize` or `usize` with the same number of elements as `T`.
+ ///
+ /// Operates as if by `<ptr>::wrapping_offset`.
+ pub fn simd_arith_offset<T, U>(ptr: T, offset: U) -> T;
+
+ /// Cast a vector of pointers.
+ ///
+ /// `T` and `U` must be vectors of pointers with the same number of elements.
+ pub fn simd_cast_ptr<T, U>(ptr: T) -> U;
+
+ /// Expose a vector of pointers as a vector of addresses.
+ ///
+ /// `T` must be a vector of pointers.
+ ///
+ /// `U` must be a vector of `usize` with the same length as `T`.
+ pub fn simd_expose_addr<T, U>(ptr: T) -> U;
+
+ /// Create a vector of pointers from a vector of addresses.
+ ///
+ /// `T` must be a vector of `usize`.
+ ///
+ /// `U` must be a vector of pointers, with the same length as `T`.
+ pub fn simd_from_exposed_addr<T, U>(addr: T) -> U;
+
+ /// Swap bytes of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_bswap<T>(x: T) -> T;
+
+ /// Reverse bits of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_bitreverse<T>(x: T) -> T;
+
+ /// Count the leading zeros of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_ctlz<T>(x: T) -> T;
+
+ /// Count the trailing zeros of each element.
+ ///
+ /// `T` must be a vector of integers.
+ pub fn simd_cttz<T>(x: T) -> T;
+}
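Calling these intrinsics directly needs a nightly compiler and a `repr(simd)` type; `std::simd` is the intended safe wrapper. A hedged sketch with a hand-rolled vector type (all names illustrative):

```rust
#![feature(repr_simd, platform_intrinsics)] // nightly-only, as of 1.76

#[repr(simd)]
#[derive(Copy, Clone)]
struct I32x4(i32, i32, i32, i32);

extern "platform-intrinsic" {
    fn simd_add<T>(x: T, y: T) -> T;
}

fn main() {
    let a = I32x4(1, 2, 3, 4);
    let b = I32x4(10, 20, 30, 40);
    // SAFETY: `I32x4` is a `repr(simd)` vector of integer primitives.
    let sum = unsafe { simd_add(a, b) };
    // SAFETY: `I32x4` has the same size and layout as `[i32; 4]`.
    let lanes: [i32; 4] = unsafe { std::mem::transmute(sum) };
    assert_eq!(lanes, [11, 22, 33, 44]);
}
```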
diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs
index 13719c727..946d0051c 100644
--- a/library/core/src/iter/adapters/array_chunks.rs
+++ b/library/core/src/iter/adapters/array_chunks.rs
@@ -1,5 +1,9 @@
use crate::array;
-use crate::iter::{ByRefSized, FusedIterator, Iterator, TrustedRandomAccessNoCoerce};
+use crate::iter::adapters::SourceIter;
+use crate::iter::{
+ ByRefSized, FusedIterator, InPlaceIterable, TrustedFused, TrustedRandomAccessNoCoerce,
+};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, NeverShortCircuit, Try};
/// An iterator over `N` elements of the iterator at a time.
@@ -159,6 +163,9 @@ where
#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
impl<I, const N: usize> FusedIterator for ArrayChunks<I, N> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I, const N: usize> TrustedFused for ArrayChunks<I, N> where I: TrustedFused + Iterator {}
+
#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
impl<I, const N: usize> ExactSizeIterator for ArrayChunks<I, N>
where
@@ -229,3 +236,28 @@ where
accum
}
}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, const N: usize> SourceIter for ArrayChunks<I, N>
+where
+ I: SourceIter + Iterator,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable + Iterator, const N: usize> InPlaceIterable for ArrayChunks<I, N> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = const {
+ match (I::MERGE_BY, NonZeroUsize::new(N)) {
+ (Some(m), Some(n)) => m.checked_mul(n),
+ _ => None,
+ }
+ };
+}
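For context, the adapter being wired up here is the unstable `Iterator::array_chunks`; `MERGE_BY` is multiplied by `N` because each yielded chunk consumes `N` source items. Usage looks like:

```rust
#![feature(iter_array_chunks)] // unstable, tracking issue #100450

let mut chunks = (0..5).array_chunks::<2>();
assert_eq!(chunks.next(), Some([0, 1]));
assert_eq!(chunks.next(), Some([2, 3]));
assert_eq!(chunks.next(), None);
// The element that does not fill a whole chunk is kept as a remainder.
assert_eq!(chunks.into_remainder().unwrap().as_slice(), &[4]);
```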
diff --git a/library/core/src/iter/adapters/chain.rs b/library/core/src/iter/adapters/chain.rs
index 26aa959e6..c748336cd 100644
--- a/library/core/src/iter/adapters/chain.rs
+++ b/library/core/src/iter/adapters/chain.rs
@@ -1,4 +1,4 @@
-use crate::iter::{DoubleEndedIterator, FusedIterator, Iterator, TrustedLen};
+use crate::iter::{FusedIterator, TrustedLen};
use crate::num::NonZeroUsize;
use crate::ops::Try;
diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs
index 8f6b2904e..7a2c9d839 100644
--- a/library/core/src/iter/adapters/copied.rs
+++ b/library/core/src/iter/adapters/copied.rs
@@ -193,7 +193,7 @@ where
T: Copy,
{
default fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>> {
- array::iter_next_chunk(&mut self.map(|e| *e))
+ array::iter_next_chunk(&mut self.copied())
}
}
diff --git a/library/core/src/iter/adapters/enumerate.rs b/library/core/src/iter/adapters/enumerate.rs
index 00c1c377b..92f465ccd 100644
--- a/library/core/src/iter/adapters/enumerate.rs
+++ b/library/core/src/iter/adapters/enumerate.rs
@@ -1,7 +1,7 @@
use crate::iter::adapters::{
zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
-use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen};
+use crate::iter::{FusedIterator, InPlaceIterable, TrustedFused, TrustedLen};
use crate::num::NonZeroUsize;
use crate::ops::Try;
@@ -243,6 +243,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused> TrustedFused for Enumerate<I> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for Enumerate<I> where I: TrustedLen {}
@@ -261,7 +264,10 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {}
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<I: Default> Default for Enumerate<I> {
diff --git a/library/core/src/iter/adapters/filter.rs b/library/core/src/iter/adapters/filter.rs
index 723657b9e..882f3e3bc 100644
--- a/library/core/src/iter/adapters/filter.rs
+++ b/library/core/src/iter/adapters/filter.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
use core::array;
use core::mem::{ManuallyDrop, MaybeUninit};
@@ -189,6 +190,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator, P> FusedIterator for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for Filter<I, F> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<P, I> SourceIter for Filter<I, P>
where
@@ -204,4 +208,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, P> InPlaceIterable for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
+unsafe impl<I: InPlaceIterable, P> InPlaceIterable for Filter<I, P> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/filter_map.rs b/library/core/src/iter/adapters/filter_map.rs
index 693479977..81ac0eaa6 100644
--- a/library/core/src/iter/adapters/filter_map.rs
+++ b/library/core/src/iter/adapters/filter_map.rs
@@ -1,5 +1,6 @@
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
use crate::mem::{ManuallyDrop, MaybeUninit};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
use crate::{array, fmt};
@@ -97,9 +98,11 @@ where
// SAFETY: Loop conditions ensure the index is in bounds.
unsafe {
- let opt_payload_at = core::intrinsics::option_payload_ptr(&val);
+ let opt_payload_at: *const MaybeUninit<B> = (&val as *const Option<B>)
+ .byte_add(core::mem::offset_of!(Option<B>, Some.0))
+ .cast();
let dst = guard.array.as_mut_ptr().add(idx);
- crate::ptr::copy_nonoverlapping(opt_payload_at.cast(), dst, 1);
+ crate::ptr::copy_nonoverlapping(opt_payload_at, dst, 1);
crate::mem::forget(val);
};
@@ -188,6 +191,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<B, I: FusedIterator, F> FusedIterator for FilterMap<I, F> where F: FnMut(I::Item) -> Option<B> {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for FilterMap<I, F> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I, F> SourceIter for FilterMap<I, F>
where
@@ -203,7 +209,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for FilterMap<I, F> where
- F: FnMut(I::Item) -> Option<B>
-{
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for FilterMap<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs
index eee6e5bcc..6122332da 100644
--- a/library/core/src/iter/adapters/flatten.rs
+++ b/library/core/src/iter/adapters/flatten.rs
@@ -1,7 +1,13 @@
-use crate::fmt;
-use crate::iter::{DoubleEndedIterator, Fuse, FusedIterator, Iterator, Map, TrustedLen};
+use crate::iter::adapters::SourceIter;
+use crate::iter::{
+ Cloned, Copied, Filter, FilterMap, Fuse, FusedIterator, InPlaceIterable, Map, TrustedFused,
+ TrustedLen,
+};
+use crate::iter::{Once, OnceWith};
use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
+use crate::result;
+use crate::{array, fmt, option};
/// An iterator that maps each element to an iterator, and yields the elements
/// of the produced iterators.
@@ -145,6 +151,91 @@ where
{
}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, U, F> InPlaceIterable for FlatMap<I, U, F>
+where
+ I: InPlaceIterable,
+ U: BoundedSize + IntoIterator,
+{
+ const EXPAND_BY: Option<NonZeroUsize> = const {
+ match (I::EXPAND_BY, U::UPPER_BOUND) {
+ (Some(m), Some(n)) => m.checked_mul(n),
+ _ => None,
+ }
+ };
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I, U, F> SourceIter for FlatMap<I, U, F>
+where
+ I: SourceIter + TrustedFused,
+ U: IntoIterator,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.inner.iter) }
+ }
+}
+
+/// Marker trait for iterators/iterables which have a statically known upper
+/// bound of the number of items they can produce.
+///
+/// # Safety
+///
+/// Implementations must not yield more elements than indicated by `UPPER_BOUND` if it is `Some`.
+/// Used in specializations. Implementations must not be conditional on lifetimes or
+/// user-implementable traits.
+#[rustc_specialization_trait]
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe trait BoundedSize {
+ const UPPER_BOUND: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for Option<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for option::IntoIter<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T, U> BoundedSize for Result<T, U> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for result::IntoIter<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for Once<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T> BoundedSize for OnceWith<T> {}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T, const N: usize> BoundedSize for [T; N] {
+ const UPPER_BOUND: Option<NonZeroUsize> = NonZeroUsize::new(N);
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<T, const N: usize> BoundedSize for array::IntoIter<T, N> {
+ const UPPER_BOUND: Option<NonZeroUsize> = NonZeroUsize::new(N);
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize, P> BoundedSize for Filter<I, P> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize, P> BoundedSize for FilterMap<I, P> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize, F> BoundedSize for Map<I, F> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize> BoundedSize for Copied<I> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: BoundedSize> BoundedSize for Cloned<I> {
+ const UPPER_BOUND: Option<NonZeroUsize> = I::UPPER_BOUND;
+}
+
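
These impls give `Flatten`/`FlatMap` a compile-time cap on how much a single upstream item can expand: `Option`, `Result`, `Once`, and `OnceWith` cap at 1, while `[T; N]` caps at `N`. A small stable illustration of the fixed expansion factor that `UPPER_BOUND` records:

```rust
fn main() {
    // Each [u8; 4] item expands into exactly 4 elements, so flattening a
    // 2-item iterator yields 2 * 4 = 8: the factor used for EXPAND_BY.
    let nested: Vec<[u8; 4]> = vec![[1, 2, 3, 4], [5, 6, 7, 8]];
    let flat: Vec<u8> = nested.into_iter().flatten().collect();
    assert_eq!(flat.len(), 8);

    // Option<T> has UPPER_BOUND = 1: flattening it can only shrink the count.
    let opts = vec![Some(1), None, Some(3)];
    assert_eq!(opts.into_iter().flatten().count(), 2);
}
```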
/// An iterator that flattens one level of nesting in an iterator of things
/// that can be turned into iterators.
///
@@ -289,6 +380,36 @@ where
{
}
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> InPlaceIterable for Flatten<I>
+where
+ I: InPlaceIterable + Iterator,
+ <I as Iterator>::Item: IntoIterator + BoundedSize,
+{
+ const EXPAND_BY: Option<NonZeroUsize> = const {
+ match (I::EXPAND_BY, I::Item::UPPER_BOUND) {
+ (Some(m), Some(n)) => m.checked_mul(n),
+ _ => None,
+ }
+ };
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> SourceIter for Flatten<I>
+where
+ I: SourceIter + TrustedFused + Iterator,
+ <I as Iterator>::Item: IntoIterator,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.inner.iter) }
+ }
+}
+
#[stable(feature = "default_iters", since = "1.70.0")]
impl<I> Default for Flatten<I>
where
diff --git a/library/core/src/iter/adapters/fuse.rs b/library/core/src/iter/adapters/fuse.rs
index b1fa4f921..462a7e877 100644
--- a/library/core/src/iter/adapters/fuse.rs
+++ b/library/core/src/iter/adapters/fuse.rs
@@ -1,8 +1,8 @@
use crate::intrinsics;
use crate::iter::adapters::zip::try_get_unchecked;
+use crate::iter::adapters::SourceIter;
use crate::iter::{
- DoubleEndedIterator, ExactSizeIterator, FusedIterator, TrustedLen, TrustedRandomAccess,
- TrustedRandomAccessNoCoerce,
+ FusedIterator, TrustedFused, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
use crate::ops::Try;
@@ -29,6 +29,9 @@ impl<I> Fuse<I> {
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Fuse<I> where I: Iterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I> TrustedFused for Fuse<I> where I: TrustedFused {}
+
// Any specialized implementation here is made internal
// to avoid exposing default fns outside this trait.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -418,6 +421,23 @@ where
}
}
+// This is used by Flatten's SourceIter impl
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I> SourceIter for Fuse<I>
+where
+ I: SourceIter + TrustedFused,
+{
+ type Source = I::Source;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut I::Source {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements.
+ // TrustedFused guarantees that we'll never encounter a case where `self.iter` would
+ // be set to None.
+ unsafe { SourceIter::as_inner(self.iter.as_mut().unwrap_unchecked()) }
+ }
+}
+
#[inline]
fn and_then_or_clear<T, U>(opt: &mut Option<T>, f: impl FnOnce(&mut T) -> Option<U>) -> Option<U> {
let x = f(opt.as_mut()?);
diff --git a/library/core/src/iter/adapters/inspect.rs b/library/core/src/iter/adapters/inspect.rs
index 19839fdfe..fd2d830b6 100644
--- a/library/core/src/iter/adapters/inspect.rs
+++ b/library/core/src/iter/adapters/inspect.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that calls a function with a reference to each element before
@@ -148,6 +149,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator, F> FusedIterator for Inspect<I, F> where F: FnMut(&I::Item) {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for Inspect<I, F> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I, F> SourceIter for Inspect<I, F>
where
@@ -163,4 +167,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Inspect<I, F> where F: FnMut(&I::Item) {}
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Inspect<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs
index 31d02a4da..e27fc7257 100644
--- a/library/core/src/iter/adapters/map.rs
+++ b/library/core/src/iter/adapters/map.rs
@@ -2,7 +2,8 @@ use crate::fmt;
use crate::iter::adapters::{
zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
-use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen, UncheckedIterator};
+use crate::iter::{FusedIterator, InPlaceIterable, TrustedFused, TrustedLen, UncheckedIterator};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that maps the values of `iter` with `f`.
@@ -179,6 +180,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<B, I: FusedIterator, F> FusedIterator for Map<I, F> where F: FnMut(I::Item) -> B {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, F> TrustedFused for Map<I, F> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<B, I, F> TrustedLen for Map<I, F>
where
@@ -228,4 +232,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for Map<I, F> where F: FnMut(I::Item) -> B {}
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Map<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/map_while.rs b/library/core/src/iter/adapters/map_while.rs
index fbdeca4d4..bcae73cbe 100644
--- a/library/core/src/iter/adapters/map_while.rs
+++ b/library/core/src/iter/adapters/map_while.rs
@@ -1,5 +1,6 @@
use crate::fmt;
use crate::iter::{adapters::SourceIter, InPlaceIterable};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator that only accepts elements while `predicate` returns `Some(_)`.
@@ -82,7 +83,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<B, I: InPlaceIterable, P> InPlaceIterable for MapWhile<I, P> where
- P: FnMut(I::Item) -> Option<B>
-{
+unsafe impl<I: InPlaceIterable, P> InPlaceIterable for MapWhile<I, P> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/map_windows.rs b/library/core/src/iter/adapters/map_windows.rs
index 3c0e80b25..5f39b2458 100644
--- a/library/core/src/iter/adapters/map_windows.rs
+++ b/library/core/src/iter/adapters/map_windows.rs
@@ -1,6 +1,6 @@
use crate::{
fmt,
- iter::{ExactSizeIterator, FusedIterator},
+ iter::FusedIterator,
mem::{self, MaybeUninit},
ptr,
};
diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs
index 6f4fa7010..4037e2e28 100644
--- a/library/core/src/iter/adapters/mod.rs
+++ b/library/core/src/iter/adapters/mod.rs
@@ -1,4 +1,5 @@
-use crate::iter::{InPlaceIterable, Iterator};
+use crate::iter::InPlaceIterable;
+use crate::num::NonZeroUsize;
use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
mod array_chunks;
@@ -119,8 +120,9 @@ pub unsafe trait SourceIter {
///
/// # Safety
///
- /// Implementations of must return the same mutable reference for their lifetime, unless
+ /// Implementations must return the same mutable reference for their lifetime, unless
/// replaced by a caller.
+ ///
/// Callers may only replace the reference when they stopped iteration and drop the
/// iterator pipeline after extracting the source.
///
@@ -228,7 +230,10 @@ where
// in order to return `Some(_)`. Since `iter` has type `I: InPlaceIterable` it's
// guaranteed that at least one item will be moved out from the underlying source.
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I, T, R> InPlaceIterable for GenericShunt<'_, I, R> where
- I: Iterator<Item: Try<Output = T, Residual = R>> + InPlaceIterable
+unsafe impl<I, R> InPlaceIterable for GenericShunt<'_, I, R>
+where
+ I: InPlaceIterable,
{
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/scan.rs b/library/core/src/iter/adapters/scan.rs
index 62470512c..635bad199 100644
--- a/library/core/src/iter/adapters/scan.rs
+++ b/library/core/src/iter/adapters/scan.rs
@@ -1,5 +1,6 @@
use crate::fmt;
use crate::iter::{adapters::SourceIter, InPlaceIterable};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator to maintain state while iterating another iterator.
@@ -92,7 +93,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<St, F, B, I: InPlaceIterable> InPlaceIterable for Scan<I, St, F> where
- F: FnMut(&mut St, I::Item) -> Option<B>
-{
+unsafe impl<St, F, I: InPlaceIterable> InPlaceIterable for Scan<I, St, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs
index 306338bc7..e6c946e7f 100644
--- a/library/core/src/iter/adapters/skip.rs
+++ b/library/core/src/iter/adapters/skip.rs
@@ -1,4 +1,5 @@
use crate::intrinsics::unlikely;
+use crate::iter::TrustedFused;
use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
@@ -214,6 +215,9 @@ where
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Skip<I> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused> TrustedFused for Skip<I> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I> SourceIter for Skip<I>
where
@@ -229,4 +233,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable> InPlaceIterable for Skip<I> {}
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Skip<I> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
diff --git a/library/core/src/iter/adapters/skip_while.rs b/library/core/src/iter/adapters/skip_while.rs
index f29661779..3a661973e 100644
--- a/library/core/src/iter/adapters/skip_while.rs
+++ b/library/core/src/iter/adapters/skip_while.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that rejects elements while `predicate` returns `true`.
@@ -104,6 +105,9 @@ where
{
}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, P> TrustedFused for SkipWhile<I, P> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<P, I> SourceIter for SkipWhile<I, P>
where
@@ -119,7 +123,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, F> InPlaceIterable for SkipWhile<I, F> where
- F: FnMut(&I::Item) -> bool
-{
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for SkipWhile<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/step_by.rs b/library/core/src/iter/adapters/step_by.rs
index 7f58f7d17..9e83584e3 100644
--- a/library/core/src/iter/adapters/step_by.rs
+++ b/library/core/src/iter/adapters/step_by.rs
@@ -83,11 +83,7 @@ where
// last element. Used in the `DoubleEndedIterator` implementation.
fn next_back_index(&self) -> usize {
let rem = self.iter.len() % (self.step + 1);
- if self.first_take {
- if rem == 0 { self.step } else { rem - 1 }
- } else {
- rem
- }
+ if self.first_take { if rem == 0 { self.step } else { rem - 1 } } else { rem }
}
}
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
index c1d8cc4ff..80e06066d 100644
--- a/library/core/src/iter/adapters/take.rs
+++ b/library/core/src/iter/adapters/take.rs
@@ -1,6 +1,7 @@
use crate::cmp;
use crate::iter::{
- adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen, TrustedRandomAccess,
+ adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused, TrustedLen,
+ TrustedRandomAccess,
};
use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
@@ -143,7 +144,10 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable> InPlaceIterable for Take<I> {}
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Take<I> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
+}
#[stable(feature = "double_ended_take_iterator", since = "1.38.0")]
impl<I> DoubleEndedIterator for Take<I>
@@ -241,6 +245,9 @@ impl<I> ExactSizeIterator for Take<I> where I: ExactSizeIterator {}
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Take<I> where I: FusedIterator {}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused> TrustedFused for Take<I> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I: TrustedLen> TrustedLen for Take<I> {}
diff --git a/library/core/src/iter/adapters/take_while.rs b/library/core/src/iter/adapters/take_while.rs
index ec66dc3ae..e55d55a6d 100644
--- a/library/core/src/iter/adapters/take_while.rs
+++ b/library/core/src/iter/adapters/take_while.rs
@@ -1,5 +1,6 @@
use crate::fmt;
-use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedFused};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator that only accepts elements while `predicate` returns `true`.
@@ -105,6 +106,9 @@ where
{
}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<I: TrustedFused, P> TrustedFused for TakeWhile<I, P> {}
+
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<P, I> SourceIter for TakeWhile<I, P>
where
@@ -120,7 +124,7 @@ where
}
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<I: InPlaceIterable, F> InPlaceIterable for TakeWhile<I, F> where
- F: FnMut(&I::Item) -> bool
-{
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for TakeWhile<I, F> {
+ const EXPAND_BY: Option<NonZeroUsize> = I::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = I::MERGE_BY;
}
diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs
index 77ccf5085..b33400fab 100644
--- a/library/core/src/iter/adapters/zip.rs
+++ b/library/core/src/iter/adapters/zip.rs
@@ -1,7 +1,8 @@
use crate::cmp;
use crate::fmt::{self, Debug};
-use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator};
+use crate::iter::{FusedIterator, TrustedFused};
use crate::iter::{InPlaceIterable, SourceIter, TrustedLen, UncheckedIterator};
+use crate::num::NonZeroUsize;
/// An iterator that iterates two other iterators simultaneously.
///
@@ -446,6 +447,14 @@ where
{
}
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<A, B> TrustedFused for Zip<A, B>
+where
+ A: TrustedFused,
+ B: TrustedFused,
+{
+}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A, B> TrustedLen for Zip<A, B>
where
@@ -479,7 +488,10 @@ where
// Since SourceIter forwards the left hand side we do the same here
#[unstable(issue = "none", feature = "inplace_iteration")]
-unsafe impl<A: InPlaceIterable, B: Iterator> InPlaceIterable for Zip<A, B> {}
+unsafe impl<A: InPlaceIterable, B> InPlaceIterable for Zip<A, B> {
+ const EXPAND_BY: Option<NonZeroUsize> = A::EXPAND_BY;
+ const MERGE_BY: Option<NonZeroUsize> = A::MERGE_BY;
+}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Debug, B: Debug> Debug for Zip<A, B> {
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index 937a149ac..44fef3e14 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -417,6 +417,8 @@ pub use self::sources::{successors, Successors};
pub use self::traits::FusedIterator;
#[unstable(issue = "none", feature = "inplace_iteration")]
pub use self::traits::InPlaceIterable;
+#[unstable(issue = "none", feature = "trusted_fused")]
+pub use self::traits::TrustedFused;
#[unstable(feature = "trusted_len", issue = "37572")]
pub use self::traits::TrustedLen;
#[unstable(feature = "trusted_step", issue = "85731")]
diff --git a/library/core/src/iter/sources/from_coroutine.rs b/library/core/src/iter/sources/from_coroutine.rs
index 16fbca9b6..bf413b24d 100644
--- a/library/core/src/iter/sources/from_coroutine.rs
+++ b/library/core/src/iter/sources/from_coroutine.rs
@@ -11,8 +11,7 @@ use crate::pin::Pin;
/// # Examples
///
/// ```
-/// #![cfg_attr(bootstrap, feature(generators))]
-/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutines)]
/// #![feature(iter_from_coroutine)]
///
/// let it = std::iter::from_coroutine(|| {
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 6adea4442..8e2c887a1 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -69,7 +69,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
message = "`{Self}` is not an iterator"
)]
#[doc(notable_trait)]
-#[cfg_attr(not(bootstrap), lang = "iterator")]
+#[lang = "iterator"]
#[rustc_diagnostic_item = "Iterator"]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub trait Iterator {
diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs
index c21a2aac1..e7c1f195a 100644
--- a/library/core/src/iter/traits/marker.rs
+++ b/library/core/src/iter/traits/marker.rs
@@ -1,4 +1,16 @@
use crate::iter::Step;
+use crate::num::NonZeroUsize;
+
+/// Same as `FusedIterator`.
+///
+/// # Safety
+///
+/// This is used for specialization. Therefore implementations must not
+/// be lifetime-dependent.
+#[unstable(issue = "none", feature = "trusted_fused")]
+#[doc(hidden)]
+#[rustc_specialization_trait]
+pub unsafe trait TrustedFused {}
/// An iterator that always continues to yield `None` when exhausted.
///
@@ -14,6 +26,8 @@ use crate::iter::Step;
/// [`Fuse`]: crate::iter::Fuse
#[stable(feature = "fused", since = "1.26.0")]
#[rustc_unsafe_specialization_marker]
+// FIXME: this should be a #[marker] and have another blanket impl for T: TrustedFused
+// but that ICEs iter::Fuse specializations.
pub trait FusedIterator: Iterator {}
#[stable(feature = "fused", since = "1.26.0")]
@@ -71,7 +85,19 @@ unsafe impl<I: TrustedLen + ?Sized> TrustedLen for &mut I {}
/// [`try_fold()`]: Iterator::try_fold
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-pub unsafe trait InPlaceIterable: Iterator {}
+#[rustc_specialization_trait]
+pub unsafe trait InPlaceIterable {
+ /// The product of one-to-many item expansions that happen throughout the iterator pipeline.
+ /// E.g. `[[u8; 4]; 4].iter().flatten().flatten()` would have an `EXPAND_BY` of 16.
+ /// This is an upper bound, i.e. the transformations will produce at most this many items per
+ /// input. It's meant for layout calculations.
+ const EXPAND_BY: Option<NonZeroUsize>;
+ /// The product of many-to-one item reductions that happen throughout the iterator pipeline.
+ /// E.g. `[u8].iter().array_chunks::<4>().array_chunks::<4>()` would have a `MERGE_BY` of 16.
+ /// This is a lower bound, i.e. the transformations will consume at least this many items per
+ /// output.
+ const MERGE_BY: Option<NonZeroUsize>;
+}
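
A sketch of how the adapters in this patch combine these constants. `mul_bounds` is a hypothetical stand-in for the `match`-plus-`checked_mul` pattern that the `ArrayChunks` and `Flatten` impls evaluate in a `const` block: `None` means unknown/unbounded and absorbs the product, and `checked_mul` turns overflow into `None` as well.

```rust
use std::num::NonZeroUsize;

// Hypothetical helper mirroring the in-tree pattern: multiply two optional
// non-zero factors, treating None ("unknown/unbounded") as absorbing.
fn mul_bounds(a: Option<NonZeroUsize>, b: Option<NonZeroUsize>) -> Option<NonZeroUsize> {
    match (a, b) {
        (Some(m), Some(n)) => m.checked_mul(n), // overflow also yields None
        _ => None,
    }
}

fn main() {
    let four = NonZeroUsize::new(4);
    assert_eq!(mul_bounds(four, four), NonZeroUsize::new(16));
    assert_eq!(mul_bounds(four, None), None);
}
```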
/// A type that upholds all invariants of [`Step`].
///
diff --git a/library/core/src/iter/traits/mod.rs b/library/core/src/iter/traits/mod.rs
index 41ea29e6a..d4c9cc4b1 100644
--- a/library/core/src/iter/traits/mod.rs
+++ b/library/core/src/iter/traits/mod.rs
@@ -18,6 +18,8 @@ pub use self::{
#[unstable(issue = "none", feature = "inplace_iteration")]
pub use self::marker::InPlaceIterable;
+#[unstable(issue = "none", feature = "trusted_fused")]
+pub use self::marker::TrustedFused;
#[unstable(feature = "trusted_step", issue = "85731")]
pub use self::marker::TrustedStep;
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 5a6d242a7..07720f235 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -24,10 +24,9 @@
//! which are generated by Rust codegen backends. Additionally, this library can make explicit
//! calls to `strlen`. Their signatures are the same as found in C, but there are extra
//! assumptions about their semantics: For `memcpy`, `memmove`, `memset`, `memcmp`, and `bcmp`, if
-//! the `n` parameter is 0, the function is assumed to not be UB. Furthermore, for `memcpy`, if
-//! source and target pointer are equal, the function is assumed to not be UB.
-//! (Note that these are standard assumptions among compilers:
-//! [clang](https://reviews.llvm.org/D86993) and [GCC](https://gcc.gnu.org/bugzilla/show_bug.cgi?id=32667) do the same.)
+//! the `n` parameter is 0, the function is assumed to not be UB, even if the pointers are NULL or
+//! dangling. (Note that making extra assumptions about these functions is common among compilers:
+//! [clang](https://reviews.llvm.org/D86993) and [GCC](https://gcc.gnu.org/onlinedocs/gcc/Standards.html#C-Language) do the same.)
//! These functions are often provided by the system libc, but can also be provided by the
//! [compiler-builtins crate](https://crates.io/crates/compiler_builtins).
//! Note that the library does not guarantee that it will always make these assumptions, so Rust
@@ -69,7 +68,7 @@
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
)]
-#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![doc(rust_logo)]
#![doc(cfg_hide(
not(test),
any(not(feature = "miri-test-libstd"), test, doctest),
@@ -178,6 +177,9 @@
#![feature(is_ascii_octdigit)]
#![feature(isqrt)]
#![feature(maybe_uninit_uninit_array)]
+#![feature(non_null_convenience)]
+#![feature(offset_of)]
+#![feature(offset_of_enum)]
#![feature(ptr_alignment_type)]
#![feature(ptr_metadata)]
#![feature(set_ptr_value)]
@@ -219,6 +221,7 @@
#![feature(doc_cfg)]
#![feature(doc_cfg_hide)]
#![feature(doc_notable_trait)]
+#![feature(effects)]
#![feature(exhaustive_patterns)]
#![feature(extern_types)]
#![feature(fundamental)]
@@ -443,9 +446,10 @@ pub mod arch;
#[unstable(feature = "portable_simd", issue = "86656")]
mod core_simd;
-#[doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
#[unstable(feature = "portable_simd", issue = "86656")]
pub mod simd {
+ #![doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
+
#[unstable(feature = "portable_simd", issue = "86656")]
pub use crate::core_simd::simd::*;
}
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index 8a210c195..53e9a32e3 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -691,10 +691,7 @@ impl<T> MaybeUninit<T> {
/// // they both get dropped!
/// ```
#[stable(feature = "maybe_uninit_extra", since = "1.60.0")]
- #[rustc_const_stable(
- feature = "const_maybe_uninit_assume_init_read",
- since = "1.75.0"
- )]
+ #[rustc_const_stable(feature = "const_maybe_uninit_assume_init_read", since = "1.75.0")]
#[inline(always)]
#[track_caller]
pub const unsafe fn assume_init_read(&self) -> T {
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index eef214528..c1687abb7 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -10,7 +10,7 @@ use crate::cmp;
use crate::fmt;
use crate::hash;
use crate::intrinsics;
-use crate::marker::{Copy, DiscriminantKind, Sized};
+use crate::marker::DiscriminantKind;
use crate::ptr;
mod manually_drop;
@@ -1360,7 +1360,7 @@ impl<T> SizedTypeProperties for T {}
///
/// ```
/// #![feature(offset_of)]
-/// # #![cfg_attr(not(bootstrap), feature(offset_of_enum))]
+/// # #![feature(offset_of_enum)]
///
/// use std::mem;
/// #[repr(C)]
@@ -1390,12 +1390,9 @@ impl<T> SizedTypeProperties for T {}
/// B { one: u8, two: u16 },
/// }
///
-/// # #[cfg(not(bootstrap))]
/// assert_eq!(mem::offset_of!(Enum, A.0), 1);
-/// # #[cfg(not(bootstrap))]
/// assert_eq!(mem::offset_of!(Enum, B.two), 2);
///
-/// # #[cfg(not(bootstrap))]
/// assert_eq!(mem::offset_of!(Option<&u8>, Some.0), 0);
/// ```
#[unstable(feature = "offset_of", issue = "106655")]
diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs
index 77f85215d..8bf15c736 100644
--- a/library/core/src/net/ip_addr.rs
+++ b/library/core/src/net/ip_addr.rs
@@ -468,7 +468,13 @@ impl Ipv4Addr {
#[unstable(feature = "ip_bits", issue = "113744")]
pub const BITS: u32 = 32;
- /// Converts an IPv4 address into host byte order `u32`.
+ /// Converts an IPv4 address into a `u32` representation using native byte order.
+ ///
+ /// Although IPv4 addresses are big-endian, the `u32` value will use the target platform's
+ /// native byte order. That is, the `u32` value is an integer representation of the IPv4
+ /// address and not an integer interpretation of the IPv4 address's big-endian bitstring. This
+ /// means that the `u32` value masked with `0xffffff00` will set the last octet in the address
+ /// to 0, regardless of the target platform's endianness.
///
/// # Examples
///
@@ -479,6 +485,16 @@ impl Ipv4Addr {
/// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
/// assert_eq!(0x12345678, addr.to_bits());
/// ```
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv4Addr;
+ ///
+ /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
+ /// let addr_bits = addr.to_bits() & 0xffffff00;
+ /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x00), Ipv4Addr::from_bits(addr_bits));
+ /// ```
#[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
#[unstable(feature = "ip_bits", issue = "113744")]
#[must_use]
@@ -487,7 +503,9 @@ impl Ipv4Addr {
u32::from_be_bytes(self.octets)
}
- /// Converts a host byte order `u32` into an IPv4 address.
+ /// Converts a native byte order `u32` into an IPv4 address.
+ ///
+ /// See [`Ipv4Addr::to_bits`] for an explanation on endianness.
///
/// # Examples
///
@@ -1224,7 +1242,13 @@ impl Ipv6Addr {
#[unstable(feature = "ip_bits", issue = "113744")]
pub const BITS: u32 = 128;
- /// Converts an IPv6 address into host byte order `u128`.
+ /// Converts an IPv6 address into a `u128` representation using native byte order.
+ ///
+ /// Although IPv6 addresses are big-endian, the `u128` value will use the target platform's
+ /// native byte order. That is, the `u128` value is an integer representation of the IPv6
+ /// address and not an integer interpretation of the IPv6 address's big-endian bitstring. This
+ /// means that the `u128` value masked with `0xffffffffffffffffffffffffffff0000_u128` will set
+ /// the last segment in the address to 0, regardless of the target platform's endianness.
///
/// # Examples
///
@@ -1238,6 +1262,24 @@ impl Ipv6Addr {
/// );
/// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
/// ```
+ ///
+ /// ```
+ /// #![feature(ip_bits)]
+ /// use std::net::Ipv6Addr;
+ ///
+ /// let addr = Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
+ /// );
+ /// let addr_bits = addr.to_bits() & 0xffffffffffffffffffffffffffff0000_u128;
+ /// assert_eq!(
+ /// Ipv6Addr::new(
+ /// 0x1020, 0x3040, 0x5060, 0x7080,
+ /// 0x90A0, 0xB0C0, 0xD0E0, 0x0000,
+ /// ),
+ /// Ipv6Addr::from_bits(addr_bits));
+ /// ```
#[rustc_const_unstable(feature = "ip_bits", issue = "113744")]
#[unstable(feature = "ip_bits", issue = "113744")]
#[must_use]
@@ -1246,7 +1288,9 @@ impl Ipv6Addr {
u128::from_be_bytes(self.octets)
}
- /// Converts a host byte order `u128` into an IPv6 address.
+ /// Converts a native byte order `u128` into an IPv6 address.
+ ///
+ /// See [`Ipv6Addr::to_bits`] for an explanation on endianness.
///
/// # Examples
///
@@ -1393,7 +1437,7 @@ impl Ipv6Addr {
/// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified))
/// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback))
/// - IPv4-mapped addresses
- /// - Addresses reserved for benchmarking
+ /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv6Addr::is_benchmarking))
/// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation))
/// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local))
/// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local))
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index f60626b00..709eba2ff 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -1424,9 +1424,17 @@ impl f32 {
/// ];
///
/// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
- /// # assert!(bois.into_iter().map(|b| b.weight)
- /// # .zip([-5.0, 0.1, 10.0, 99.0, f32::INFINITY, f32::NAN].iter())
- /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ ///
+ /// // `f32::NAN` could be positive or negative, which will affect the sort order.
+ /// if f32::NAN.is_sign_negative() {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([f32::NAN, -5.0, 0.1, 10.0, 99.0, f32::INFINITY].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// } else {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([-5.0, 0.1, 10.0, 99.0, f32::INFINITY, f32::NAN].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// }
/// ```
#[stable(feature = "total_cmp", since = "1.62.0")]
#[must_use]
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index 0a87021d8..73fa61574 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -1422,9 +1422,17 @@ impl f64 {
/// ];
///
/// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
- /// # assert!(bois.into_iter().map(|b| b.weight)
- /// # .zip([-5.0, 0.1, 10.0, 99.0, f64::INFINITY, f64::NAN].iter())
- /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ ///
+ /// // `f64::NAN` could be positive or negative, which will affect the sort order.
+ /// if f64::NAN.is_sign_negative() {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([f64::NAN, -5.0, 0.1, 10.0, 99.0, f64::INFINITY].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// } else {
+ /// assert!(bois.into_iter().map(|b| b.weight)
+ /// .zip([-5.0, 0.1, 10.0, 99.0, f64::INFINITY, f64::NAN].iter())
+ /// .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// }
/// ```
#[stable(feature = "total_cmp", since = "1.62.0")]
#[must_use]
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 2a0b31404..695e87aaa 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -474,7 +474,7 @@ impl isize {
}
}
-/// If 6th bit is set ascii is lower case.
+/// If the 6th bit is set, the ASCII character is lowercase.
const ASCII_CASE_MASK: u8 = 0b0010_0000;
impl u8 {
@@ -549,7 +549,7 @@ impl u8 {
#[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
#[inline]
pub const fn to_ascii_uppercase(&self) -> u8 {
- // Toggle the fifth bit if this is a lowercase letter
+ // Toggle the 6th bit if this is a lowercase letter
*self ^ ((self.is_ascii_lowercase() as u8) * ASCII_CASE_MASK)
}
@@ -574,7 +574,7 @@ impl u8 {
#[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.52.0")]
#[inline]
pub const fn to_ascii_lowercase(&self) -> u8 {
- // Set the fifth bit if this is an uppercase letter
+ // Set the 6th bit if this is an uppercase letter
*self | (self.is_ascii_uppercase() as u8 * ASCII_CASE_MASK)
}
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 7f8d673c1..f5ecf501c 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -75,12 +75,12 @@ macro_rules! nonzero_integers {
#[must_use]
#[inline]
pub const unsafe fn new_unchecked(n: $Int) -> Self {
+ crate::panic::debug_assert_nounwind!(
+ n != 0,
+ concat!(stringify!($Ty), "::new_unchecked requires a non-zero argument")
+ );
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
- core::intrinsics::assert_unsafe_precondition!(
- concat!(stringify!($Ty), "::new_unchecked requires a non-zero argument"),
- (n: $Int) => n != 0
- );
Self(n)
}
}
@@ -353,8 +353,13 @@ macro_rules! nonzero_unsigned_operations {
#[inline]
pub const fn checked_add(self, other: $Int) -> Option<$Ty> {
if let Some(result) = self.get().checked_add(other) {
- // SAFETY: $Int::checked_add returns None on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `checked_add` returns `None` on overflow
+ // - `self` is non-zero
+ // - the only way to get zero from an addition without overflow is for both
+ // sides to be zero
+ //
+ // So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
@@ -386,8 +391,13 @@ macro_rules! nonzero_unsigned_operations {
without modifying the original"]
#[inline]
pub const fn saturating_add(self, other: $Int) -> $Ty {
- // SAFETY: $Int::saturating_add returns $Int::MAX on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `saturating_add` returns `u*::MAX` on overflow, which is non-zero
+ // - `self` is non-zero
+ // - the only way to get zero from an addition without overflow is for both
+ // sides to be zero
+ //
+ // So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_add(other)) }
}
@@ -1000,9 +1010,13 @@ macro_rules! nonzero_unsigned_signed_operations {
#[inline]
pub const fn checked_mul(self, other: $Ty) -> Option<$Ty> {
if let Some(result) = self.get().checked_mul(other.get()) {
- // SAFETY: checked_mul returns None on overflow
- // and `other` is also non-null
- // so the result cannot be zero.
+ // SAFETY:
+ // - `checked_mul` returns `None` on overflow
+ // - `self` and `other` are non-zero
+ // - the only way to get zero from a multiplication without overflow is for one
+ // of the sides to be zero
+ //
+ // So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
@@ -1034,9 +1048,14 @@ macro_rules! nonzero_unsigned_signed_operations {
without modifying the original"]
#[inline]
pub const fn saturating_mul(self, other: $Ty) -> $Ty {
- // SAFETY: saturating_mul returns u*::MAX on overflow
- // and `other` is also non-null
- // so the result cannot be zero.
+ // SAFETY:
+ // - `saturating_mul` returns `u*::MAX`/`i*::MAX`/`i*::MIN` on overflow/underflow,
+ // all of which are non-zero
+ // - `self` and `other` are non-zero
+ // - the only way to get zero from a multiplication without overflow is for one
+ // of the sides to be zero
+ //
+ // So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_mul(other.get())) }
}
@@ -1107,8 +1126,13 @@ macro_rules! nonzero_unsigned_signed_operations {
#[inline]
pub const fn checked_pow(self, other: u32) -> Option<$Ty> {
if let Some(result) = self.get().checked_pow(other) {
- // SAFETY: checked_pow returns None on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `checked_pow` returns `None` on overflow/underflow
+ // - `self` is non-zero
+ // - the only way to get zero from an exponentiation without overflow is
+ // for base to be zero
+ //
+ // So the result cannot be zero.
Some(unsafe { $Ty::new_unchecked(result) })
} else {
None
@@ -1149,8 +1173,14 @@ macro_rules! nonzero_unsigned_signed_operations {
without modifying the original"]
#[inline]
pub const fn saturating_pow(self, other: u32) -> $Ty {
- // SAFETY: saturating_pow returns u*::MAX on overflow
- // so the result cannot be zero.
+ // SAFETY:
+ // - `saturating_pow` returns `u*::MAX`/`i*::MAX`/`i*::MIN` on overflow/underflow,
+ // all of which are non-zero
+ // - `self` is non-zero
+ // - the only way to get zero from an exponentiation without overflow is
+ // for base to be zero
+ //
+ // So the result cannot be zero.
unsafe { $Ty::new_unchecked(self.get().saturating_pow(other)) }
}
}
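
The expanded SAFETY comments all reduce to one argument: every path that could produce zero is either diverted to `None` by the checked operation or saturated to a non-zero extreme. The stable `NonZero` surface behaves accordingly:

```rust
use std::num::NonZeroU32;

fn main() {
    let two = NonZeroU32::new(2).unwrap();
    // checked_add returns None on overflow, so a wrap to zero is impossible.
    assert_eq!(two.checked_add(3).map(NonZeroU32::get), Some(5));
    assert_eq!(NonZeroU32::new(u32::MAX).unwrap().checked_add(1), None);
    // saturating_mul clamps at u32::MAX, which is itself non-zero.
    let max = NonZeroU32::new(u32::MAX).unwrap();
    assert_eq!(two.saturating_mul(max).get(), u32::MAX);
}
```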
diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs
index 840c8cd2f..1773fdbf3 100644
--- a/library/core/src/ops/arith.rs
+++ b/library/core/src/ops/arith.rs
@@ -98,6 +98,7 @@ macro_rules! add_impl {
type Output = $t;
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn add(self, other: $t) -> $t { self + other }
}
@@ -206,6 +207,7 @@ macro_rules! sub_impl {
type Output = $t;
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn sub(self, other: $t) -> $t { self - other }
}
@@ -335,6 +337,7 @@ macro_rules! mul_impl {
type Output = $t;
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn mul(self, other: $t) -> $t { self * other }
}
@@ -474,6 +477,7 @@ macro_rules! div_impl_integer {
type Output = $t;
#[inline]
+ #[track_caller]
fn div(self, other: $t) -> $t { self / other }
}
@@ -575,6 +579,7 @@ macro_rules! rem_impl_integer {
type Output = $t;
#[inline]
+ #[track_caller]
fn rem(self, other: $t) -> $t { self % other }
}
@@ -749,6 +754,7 @@ macro_rules! add_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl AddAssign for $t {
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn add_assign(&mut self, other: $t) { *self += other }
}
@@ -815,6 +821,7 @@ macro_rules! sub_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl SubAssign for $t {
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn sub_assign(&mut self, other: $t) { *self -= other }
}
@@ -872,6 +879,7 @@ macro_rules! mul_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl MulAssign for $t {
#[inline]
+ #[track_caller]
#[rustc_inherit_overflow_checks]
fn mul_assign(&mut self, other: $t) { *self *= other }
}
@@ -929,6 +937,7 @@ macro_rules! div_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl DivAssign for $t {
#[inline]
+ #[track_caller]
fn div_assign(&mut self, other: $t) { *self /= other }
}
@@ -989,6 +998,7 @@ macro_rules! rem_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl RemAssign for $t {
#[inline]
+ #[track_caller]
fn rem_assign(&mut self, other: $t) { *self %= other }
}
diff --git a/library/core/src/ops/coroutine.rs b/library/core/src/ops/coroutine.rs
index cd5ca988f..e58c9068a 100644
--- a/library/core/src/ops/coroutine.rs
+++ b/library/core/src/ops/coroutine.rs
@@ -1,4 +1,3 @@
-use crate::marker::Unpin;
use crate::pin::Pin;
/// The result of a coroutine resumption.
@@ -7,8 +6,7 @@ use crate::pin::Pin;
/// possible return values of a coroutine. Currently this corresponds to either
/// a suspension point (`Yielded`) or a termination point (`Complete`).
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
-#[cfg_attr(bootstrap, lang = "generator_state")]
-#[cfg_attr(not(bootstrap), lang = "coroutine_state")]
+#[lang = "coroutine_state"]
#[unstable(feature = "coroutine_trait", issue = "43122")]
pub enum CoroutineState<Y, R> {
/// The coroutine suspended with a value.
@@ -40,8 +38,7 @@ pub enum CoroutineState<Y, R> {
/// closure-like:
///
/// ```rust
-/// #![cfg_attr(bootstrap, feature(generators))]
-/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutines)]
/// #![feature(coroutine_trait)]
///
/// use std::ops::{Coroutine, CoroutineState};
@@ -68,8 +65,7 @@ pub enum CoroutineState<Y, R> {
///
/// [RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033
/// [unstable book]: ../../unstable-book/language-features/coroutines.html
-#[cfg_attr(bootstrap, lang = "generator")]
-#[cfg_attr(not(bootstrap), lang = "coroutine")]
+#[lang = "coroutine"]
#[unstable(feature = "coroutine_trait", issue = "43122")]
#[fundamental]
pub trait Coroutine<R = ()> {
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 51e304dd7..3a3d3fcf1 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -56,7 +56,7 @@ use crate::marker::Tuple;
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
+#[rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -69,7 +69,7 @@ use crate::marker::Tuple;
),
message = "expected a `{Trait}` closure, found `{Self}`",
label = "expected an `{Trait}` closure, found `{Self}`"
-))]
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
@@ -143,7 +143,7 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
+#[rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -156,7 +156,7 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
),
message = "expected a `{Trait}` closure, found `{Self}`",
label = "expected an `{Trait}` closure, found `{Self}`"
-))]
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
@@ -222,7 +222,7 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
-#[cfg_attr(not(bootstrap), rustc_on_unimplemented(
+#[rustc_on_unimplemented(
on(
Args = "()",
note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
@@ -235,7 +235,7 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
),
message = "expected a `{Trait}` closure, found `{Self}`",
label = "expected an `{Trait}` closure, found `{Self}`"
-))]
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
// FIXME(effects) #[const_trait]
diff --git a/library/core/src/ops/index_range.rs b/library/core/src/ops/index_range.rs
index 265022a39..743799c4b 100644
--- a/library/core/src/ops/index_range.rs
+++ b/library/core/src/ops/index_range.rs
@@ -1,4 +1,4 @@
-use crate::intrinsics::{assert_unsafe_precondition, unchecked_add, unchecked_sub};
+use crate::intrinsics::{unchecked_add, unchecked_sub};
use crate::iter::{FusedIterator, TrustedLen};
use crate::num::NonZeroUsize;
@@ -19,13 +19,10 @@ impl IndexRange {
/// - `start <= end`
#[inline]
pub const unsafe fn new_unchecked(start: usize, end: usize) -> Self {
- // SAFETY: comparisons on usize are pure
- unsafe {
- assert_unsafe_precondition!(
- "IndexRange::new_unchecked requires `start <= end`",
- (start: usize, end: usize) => start <= end
- )
- };
+ crate::panic::debug_assert_nounwind!(
+ start <= end,
+ "IndexRange::new_unchecked requires `start <= end`"
+ );
IndexRange { start, end }
}
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index 89d4532de..ff4353492 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -119,7 +119,7 @@
//! # Representation
//!
//! Rust guarantees to optimize the following types `T` such that
-//! [`Option<T>`] has the same size and alignment as `T`. In some
+//! [`Option<T>`] has the same size, alignment, and [function call ABI] as `T`. In some
//! of these cases, Rust further guarantees that
//! `transmute::<_, Option<T>>([0u8; size_of::<T>()])` is sound and
//! produces `Option::<T>::None`. These cases are identified by the
@@ -127,7 +127,7 @@
//!
//! | `T` | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? |
//! |---------------------------------------------------------------------|----------------------------------------------------------------------|
-//! | [`Box<U>`] | when `U: Sized` |
+//! | [`Box<U>`] (specifically, only `Box<U, Global>`) | when `U: Sized` |
//! | `&U` | when `U: Sized` |
//! | `&mut U` | when `U: Sized` |
//! | `fn`, `extern "C" fn`[^extern_fn] | always |
@@ -135,11 +135,12 @@
//! | [`ptr::NonNull<U>`] | when `U: Sized` |
//! | `#[repr(transparent)]` struct around one of the types in this list. | when it holds for the inner type |
//!
-//! [^extern_fn]: this remains true for any other ABI: `extern "abi" fn` (_e.g._, `extern "system" fn`)
+//! [^extern_fn]: this remains true for any argument/return types and any other ABI: `extern "abi" fn` (_e.g._, `extern "system" fn`)
//!
//! [`Box<U>`]: ../../std/boxed/struct.Box.html
//! [`num::NonZero*`]: crate::num
//! [`ptr::NonNull<U>`]: crate::ptr::NonNull
+//! [function call ABI]: ../primitive.fn.html#abi-compatibility
//!
//! This is called the "null pointer optimization" or NPO.
//!
@@ -779,7 +780,7 @@ impl<T> Option<T> {
// `None` case it's just padding).
unsafe {
slice::from_raw_parts(
- crate::intrinsics::option_payload_ptr(crate::ptr::from_ref(self)),
+ (self as *const Self).byte_add(core::mem::offset_of!(Self, Some.0)).cast(),
usize::from(self.is_some()),
)
}
@@ -835,8 +836,7 @@ impl<T> Option<T> {
// the `None` case it's just padding).
unsafe {
slice::from_raw_parts_mut(
- crate::intrinsics::option_payload_ptr(crate::ptr::from_mut(self).cast_const())
- .cast_mut(),
+ (self as *mut Self).byte_add(core::mem::offset_of!(Self, Some.0)).cast(),
usize::from(self.is_some()),
)
}
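
The removed `option_payload_ptr` intrinsic is replaced by plain pointer arithmetic over `offset_of!`. A nightly-only sketch of the same projection outside core, assuming the feature gates as named in this 1.76-era snapshot:

```rust
#![feature(offset_of)]
#![feature(offset_of_enum)]

use std::mem::offset_of;

fn main() {
    let x: Option<u32> = Some(5);
    // Project a pointer to the Some payload; reading it is only valid here
    // because x is known to be Some.
    let payload = unsafe {
        *(&x as *const Option<u32>)
            .byte_add(offset_of!(Option<u32>, Some.0))
            .cast::<u32>()
    };
    assert_eq!(payload, 5);
}
```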
@@ -1079,8 +1079,6 @@ impl<T> Option<T> {
/// # Examples
///
/// ```
- /// #![feature(result_option_inspect)]
- ///
/// let v = vec![1, 2, 3, 4, 5];
///
/// // prints "got: 4"
@@ -1090,11 +1088,8 @@ impl<T> Option<T> {
/// let x: Option<&usize> = v.get(5).inspect(|x| println!("got: {x}"));
/// ```
#[inline]
- #[unstable(feature = "result_option_inspect", issue = "91345")]
- pub fn inspect<F>(self, f: F) -> Self
- where
- F: FnOnce(&T),
- {
+ #[stable(feature = "result_option_inspect", since = "1.76.0")]
+ pub fn inspect<F: FnOnce(&T)>(self, f: F) -> Self {
if let Some(ref x) = self {
f(x);
}
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index a00fd322b..4ca5af1ea 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -47,7 +47,7 @@ pub macro panic_2015 {
#[allow_internal_unstable(core_panic, const_format_args)]
#[rustc_diagnostic_item = "core_panic_2021_macro"]
#[rustc_macro_transparency = "semitransparent"]
-#[cfg(any(bootstrap, feature = "panic_immediate_abort"))]
+#[cfg(feature = "panic_immediate_abort")]
pub macro panic_2021 {
() => (
$crate::panicking::panic("explicit panic")
@@ -75,7 +75,7 @@ pub macro panic_2021 {
)]
#[rustc_diagnostic_item = "core_panic_2021_macro"]
#[rustc_macro_transparency = "semitransparent"]
-#[cfg(not(any(bootstrap, feature = "panic_immediate_abort")))]
+#[cfg(not(feature = "panic_immediate_abort"))]
pub macro panic_2021 {
() => ({
// Create a function so that the argument for `track_caller`
@@ -139,6 +139,32 @@ pub macro unreachable_2021 {
),
}
+/// Asserts that a boolean expression is `true`, and performs a non-unwinding panic otherwise.
+///
+/// This macro is similar to `debug_assert!`, but is intended to be used in code that should not
+/// unwind: for example, checks in `_unchecked` functions that are intended for debugging but must
+/// not compromise unwind safety.
+#[doc(hidden)]
+#[unstable(feature = "core_panic", issue = "none")]
+#[allow_internal_unstable(core_panic, const_format_args)]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro debug_assert_nounwind {
+ ($cond:expr $(,)?) => {
+ if $crate::cfg!(debug_assertions) {
+ if !$cond {
+ $crate::panicking::panic_nounwind($crate::concat!("assertion failed: ", $crate::stringify!($cond)));
+ }
+ }
+ },
+ ($cond:expr, $($arg:tt)+) => {
+ if $crate::cfg!(debug_assertions) {
+ if !$cond {
+ $crate::panicking::panic_nounwind_fmt($crate::const_format_args!($($arg)+), false);
+ }
+ }
+ },
+}
+
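
`debug_assert_nounwind!` is internal to core and bottoms out in `panic_nounwind`, which aborts rather than unwinds. A hypothetical user-space analogue (the macro name and the use of `process::abort` are invented here as the closest stable approximation):

```rust
// Hypothetical stand-in: the real macro calls core::panicking::panic_nounwind,
// which cannot unwind; aborting is the nearest stable equivalent.
macro_rules! my_debug_assert_nounwind {
    ($cond:expr, $($msg:tt)+) => {
        if cfg!(debug_assertions) && !$cond {
            eprintln!($($msg)+);
            std::process::abort();
        }
    };
}

fn main() {
    let n = 1u32;
    my_debug_assert_nounwind!(n != 0, "n must be non-zero, got {}", n);
}
```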
/// An internal trait used by std to pass data from std to `panic_unwind` and
/// other panic runtimes. Not intended to be stabilized any time soon, do not
/// use.
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index 39a5e8d9f..1b6e77b96 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -82,28 +82,45 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
// and unwinds anyway, we will hit the "unwinding out of nounwind function" guard,
// which causes a "panic in a function that cannot unwind".
#[rustc_nounwind]
-pub fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>, force_no_backtrace: bool) -> ! {
- if cfg!(feature = "panic_immediate_abort") {
- super::intrinsics::abort()
- }
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>, force_no_backtrace: bool) -> ! {
+ #[inline] // this should always be inlined into `panic_nounwind_fmt`
+ #[track_caller]
+ fn runtime(fmt: fmt::Arguments<'_>, force_no_backtrace: bool) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
- // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
- // that gets resolved to the `#[panic_handler]` function.
- extern "Rust" {
- #[lang = "panic_impl"]
- fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
+ // that gets resolved to the `#[panic_handler]` function.
+ extern "Rust" {
+ #[lang = "panic_impl"]
+ fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ }
+
+ // PanicInfo with the `can_unwind` flag set to false forces an abort.
+ let pi = PanicInfo::internal_constructor(
+ Some(&fmt),
+ Location::caller(),
+ /* can_unwind */ false,
+ force_no_backtrace,
+ );
+
+ // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
+ unsafe { panic_impl(&pi) }
}
- // PanicInfo with the `can_unwind` flag set to false forces an abort.
- let pi = PanicInfo::internal_constructor(
- Some(&fmt),
- Location::caller(),
- /* can_unwind */ false,
- force_no_backtrace,
- );
+ #[inline]
+ #[track_caller]
+ const fn comptime(fmt: fmt::Arguments<'_>, _force_no_backtrace: bool) -> ! {
+ // We don't unwind anyway at compile-time so we can call the regular `panic_fmt`.
+ panic_fmt(fmt);
+ }
- // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
- unsafe { panic_impl(&pi) }
+ // SAFETY: const panic does not care about unwinding
+ unsafe {
+ super::intrinsics::const_eval_select((fmt, force_no_backtrace), comptime, runtime);
+ }
}
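The rewrite above uses the perma-unstable `const_eval_select` intrinsic to pick a compile-time or a runtime body. A minimal sketch of the same pattern follows, assuming a nightly compiler of this era with the `core_intrinsics` and `const_eval_select` gates; `log2_floor` is a hypothetical example, and both branches must be semantically interchangeable for the result to be meaningful.

```rust
#![feature(core_intrinsics, const_eval_select)]

use std::intrinsics::const_eval_select;

const fn log2_floor(x: u32) -> u32 {
    const fn comptime(x: u32) -> u32 {
        // Portable fallback usable during const evaluation.
        31 - x.leading_zeros()
    }
    fn runtime(x: u32) -> u32 {
        // At runtime this could dispatch to a faster target-specific path;
        // here it is intentionally identical to the const branch.
        31 - x.leading_zeros()
    }
    // SAFETY: both branches compute the same function.
    unsafe { const_eval_select((x,), comptime, runtime) }
}

fn main() {
    const AT_COMPILE_TIME: u32 = log2_floor(64);
    assert_eq!(AT_COMPILE_TIME, 6);
    assert_eq!(log2_floor(1024), 10);
}
```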
// Next we define a bunch of higher-level wrappers that all bottom out in the two core functions
@@ -127,12 +144,14 @@ pub const fn panic(expr: &'static str) -> ! {
panic_fmt(fmt::Arguments::new_const(&[expr]));
}
-/// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize.
+/// Like `panic`, but without unwinding and `track_caller`, to reduce the code-size impact on the caller.
+/// If you want `#[track_caller]` for nicer errors, call `panic_nounwind_fmt` directly.
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[lang = "panic_nounwind"] // needed by codegen for non-unwinding panics
#[rustc_nounwind]
-pub fn panic_nounwind(expr: &'static str) -> ! {
+#[rustc_const_unstable(feature = "core_panic", issue = "none")]
+pub const fn panic_nounwind(expr: &'static str) -> ! {
panic_nounwind_fmt(fmt::Arguments::new_const(&[expr]), /* force_no_backtrace */ false);
}
@@ -170,9 +189,8 @@ pub fn unreachable_display<T: fmt::Display>(x: &T) -> ! {
#[inline]
#[track_caller]
#[rustc_do_not_const_check] // hooked by const-eval
-#[cfg_attr(bootstrap, lang = "panic_display")]
// enforce a &&str argument in const-check and hook this by const-eval
-#[cfg_attr(not(bootstrap), rustc_const_panic_str)]
+#[rustc_const_panic_str]
#[rustc_const_unstable(feature = "core_panic", issue = "none")]
pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("{}", *x));
@@ -190,8 +208,8 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
panic!("index out of bounds: the len is {len} but the index is {index}")
}
-#[cold]
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic_misaligned_pointer_dereference"] // needed by codegen for panic on misaligned pointer deref
#[rustc_nounwind] // `CheckAlignment` MIR pass requires this function to never unwind
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index bca97d4ee..7d8c881ea 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -381,10 +381,9 @@
#![stable(feature = "pin", since = "1.33.0")]
-use crate::cmp::{self, PartialEq, PartialOrd};
+use crate::cmp;
use crate::fmt;
use crate::hash::{Hash, Hasher};
-use crate::marker::{Sized, Unpin};
use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Receiver};
/// A pinned pointer.
@@ -1088,8 +1087,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### With `Coroutine`s
///
/// ```rust
-/// #![cfg_attr(bootstrap, feature(generators))]
-/// #![cfg_attr(not(bootstrap), feature(coroutines))]
+/// #![feature(coroutines)]
/// #![feature(coroutine_trait)]
/// use core::{
/// ops::{Coroutine, CoroutineState},
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index 87e492108..99208fba6 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -330,7 +330,7 @@ mod prim_never {}
/// the future ("reserved"); some will never be a character ("noncharacters"); and some may be given
/// different meanings by different users ("private use").
///
-/// `char` is guaranteed to have the same size and alignment as `u32` on all
+/// `char` is guaranteed to have the same size, alignment, and function call ABI as `u32` on all
/// platforms.
/// ```
/// use std::alloc::Layout;
@@ -1493,7 +1493,7 @@ mod prim_ref {}
///
/// ### Casting to and from integers
///
-/// You cast function pointers directly to integers:
+/// You can cast function pointers directly to integers:
///
/// ```rust
/// let fnptr: fn(i32) -> i32 = |x| x+2;
@@ -1519,9 +1519,116 @@ mod prim_ref {}
/// Note that all of this is not portable to platforms where function pointers and data pointers
/// have different sizes.
///
+/// ### ABI compatibility
+///
+/// Generally, when a function is declared with one signature and called via a function pointer with
+/// a different signature, the two signatures must be *ABI-compatible* or else calling the function
+/// via that function pointer is Undefined Behavior. ABI compatibility is a lot stricter than merely
+/// having the same memory layout; for example, even if `i32` and `f32` have the same size and
+/// alignment, they might be passed in different registers and hence not be ABI-compatible.
+///
+/// ABI compatibility as a concern only arises in code that alters the type of function pointers,
+/// code that imports functions via `extern` blocks, and in code that combines `#[target_feature]`
+/// with `extern fn`. Altering the type of function pointers is wildly unsafe (as in, a lot more
+/// unsafe than even [`transmute_copy`][mem::transmute_copy]), and should only occur in the most
+/// exceptional circumstances. Most Rust code just imports functions via `use`. `#[target_feature]`
+/// is also used rarely. So, most likely you do not have to worry about ABI compatibility.
+///
+/// But assuming such circumstances, what are the rules? For this section, we are only considering
+/// the ABI of direct Rust-to-Rust calls, not linking in general -- once functions are imported via
+/// `extern` blocks, there are more things to consider that we do not go into here.
+///
+/// For two signatures to be considered *ABI-compatible*, they must use a compatible ABI string,
+/// must take the same number of arguments, the individual argument types and the return types must
+/// be ABI-compatible, and the target feature requirements must be met (see the subsection below for
+/// the last point). The ABI string is declared via `extern "ABI" fn(...) -> ...`; note that
+/// `fn name(...) -> ...` implicitly uses the `"Rust"` ABI string and `extern fn name(...) -> ...`
+/// implicitly uses the `"C"` ABI string.
+///
+/// The ABI strings are guaranteed to be compatible if they are the same, or if the caller ABI
+/// string is `$X-unwind` and the callee ABI string is `$X`, where `$X` is one of the following:
+/// "C", "aapcs", "fastcall", "stdcall", "system", "sysv64", "thiscall", "vectorcall", "win64".
+///
+/// The following types are guaranteed to be ABI-compatible:
+///
+/// - `*const T`, `*mut T`, `&T`, `&mut T`, `Box<T>` (specifically, only `Box<T, Global>`), and
+/// `NonNull<T>` are all ABI-compatible with each other for all `T`. They are also ABI-compatible
+/// with each other for _different_ `T` if they have the same metadata type (`<T as
+///   Pointee>::Metadata`); see the sketch after this list.
+/// - `usize` is ABI-compatible with the `uN` integer type of the same size, and likewise `isize` is
+/// ABI-compatible with the `iN` integer type of the same size.
+/// - `char` is ABI-compatible with `u32`.
+/// - Any two `fn` (function pointer) types are ABI-compatible with each other if they have the same
+/// ABI string or the ABI string only differs in a trailing `-unwind`, independent of the rest of
+/// their signature. (This means you can pass `fn()` to a function expecting `fn(i32)`, and the
+/// call will be valid ABI-wise. The callee receives the result of transmuting the function pointer
+///   from `fn()` to `fn(i32)`; that transmutation is itself a well-defined operation, but it is
+///   almost certainly UB to later call that function pointer.)
+/// - Any two types with size 0 and alignment 1 are ABI-compatible.
+/// - A `repr(transparent)` type `T` is ABI-compatible with its unique non-trivial field, i.e., the
+/// unique field that doesn't have size 0 and alignment 1 (if there is such a field).
+/// - `i32` is ABI-compatible with `NonZeroI32`, and similar for all other integer types with their
+/// matching `NonZero*` type.
+/// - If `T` is guaranteed to be subject to the [null pointer
+/// optimization](option/index.html#representation), then `T` and `Option<T>` are ABI-compatible.
+///
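A minimal sketch of the first rule in this list, using only stable APIs. The transmuted call is valid because `&T` and `*const T` share the same metadata type; `print_it` is a hypothetical example function.

```rust
use std::mem;

fn print_it(x: &u8) {
    println!("{x}");
}

fn main() {
    // `fn(&u8)` and `fn(*const u8)` differ only in an ABI-compatible
    // argument type, so calling through the transmuted pointer is valid.
    let fnptr: fn(*const u8) = unsafe { mem::transmute(print_it as fn(&u8)) };
    let v = 7u8;
    // The argument is transmuted `*const u8 -> &u8` at the call, so the
    // pointer must satisfy all the validity requirements of `&u8`.
    fnptr(&v);
}
```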
+/// Furthermore, ABI compatibility satisfies the following general properties:
+///
+/// - Every type is ABI-compatible with itself.
+/// - If `T1` and `T2` are ABI-compatible and `T2` and `T3` are ABI-compatible, then so are `T1` and
+/// `T3` (i.e., ABI-compatibility is transitive).
+/// - If `T1` and `T2` are ABI-compatible, then so are `T2` and `T1` (i.e., ABI-compatibility is
+/// symmetric).
+///
+/// More signatures can be ABI-compatible on specific targets, but that should not be relied upon
+/// since it is not portable and not a stable guarantee.
+///
+/// Noteworthy cases of types *not* being ABI-compatible in general are:
+/// * `bool` vs `u8`, `i32` vs `u32`, `char` vs `i32`: on some targets, the calling conventions for
+/// these types differ in terms of what they guarantee for the remaining bits in the register that
+/// are not used by the value.
+/// * `i32` vs `f32` are not compatible either, as has already been mentioned above.
+/// * `struct Foo(u32)` and `u32` are not compatible (without `repr(transparent)`) since structs are
+/// aggregate types and often passed in a different way than primitives like `i32`.
+///
+/// Note that these rules describe when two completely known types are ABI-compatible. When
+/// considering ABI compatibility of a type declared in another crate (including the standard
+/// library), consider that any type that has a private field or the `#[non_exhaustive]` attribute
+/// may change its layout as a non-breaking update unless documented otherwise -- so for instance,
+/// even if such a type is a 1-ZST or `repr(transparent)` right now, this might change with any
+/// library version bump.
+///
+/// If the declared signature and the signature of the function pointer are ABI-compatible, then the
+/// function call behaves as if every argument was [`transmute`d][mem::transmute] from the
+/// type in the function pointer to the type at the function declaration, and the return value is
+/// [`transmute`d][mem::transmute] from the type in the declaration to the type in the
+/// pointer. All the usual caveats and concerns around transmutation apply; for instance, if the
+/// function expects a `NonNullI32` and the function pointer uses the ABI-compatible type
+/// `Option<NonNullI32>`, and the value used for the argument is `None`, then this call is Undefined
+/// Behavior since transmuting `None::<NonNullI32>` to `NonNullI32` violates the non-null
+/// requirement.
+///
+/// #### Requirements concerning target features
+///
+/// Under some conditions, the signature used by the caller and the callee can be ABI-incompatible
+/// even if the exact same ABI string and types are being used. As an example, the
+/// `std::arch::x86_64::__m256` type has a different `extern "C"` ABI when the `avx` feature is
+/// enabled vs when it is not enabled.
+///
+/// Therefore, to ensure ABI compatibility when code using different target features is combined
+/// (such as via `#[target_feature]`), we further require that one of the following conditions is
+/// met:
+///
+/// - The function uses the `"Rust"` ABI string (which is the default without `extern`).
+/// - Caller and callee are using the exact same set of target features. For the callee we consider
+/// the features enabled (via `#[target_feature]` and `-C target-feature`/`-C target-cpu`) at the
+/// declaration site; for the caller we consider the features enabled at the call site.
+/// - Neither any argument nor the return value involves a SIMD type (`#[repr(simd)]`) that is not
+/// behind a pointer indirection (i.e., `*mut __m256` is fine, but `(i32, __m256)` is not).
+///
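A sketch of the third condition above (x86_64 assumed; the module is compiled out elsewhere, and `identity_by_ptr` is a hypothetical example). No `__m256` value crosses the call boundary directly, so this signature stays ABI-compatible regardless of whether `avx` is enabled on either side.

```rust
#[cfg(target_arch = "x86_64")]
mod sketch {
    use std::arch::x86_64::__m256;

    // OK per the rules above: the SIMD type sits behind a pointer
    // indirection, so the `extern "C"` ABI does not depend on `avx`.
    pub extern "C" fn identity_by_ptr(p: *const __m256) -> *const __m256 {
        p
    }
}

fn main() {}
```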
/// ### Trait implementations
///
-/// In this documentation the shorthand `fn (T₁, T₂, …, Tₙ)` is used to represent non-variadic
+/// In this documentation the shorthand `fn(T₁, T₂, …, Tₙ)` is used to represent non-variadic
/// function pointers of varying length. Note that this is a convenience notation to avoid
/// repetitive documentation, not valid Rust syntax.
///
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index bbf7199ff..ce176e6fc 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -1,5 +1,4 @@
use crate::convert::{TryFrom, TryInto};
-use crate::intrinsics::assert_unsafe_precondition;
use crate::num::NonZeroUsize;
use crate::{cmp, fmt, hash, mem, num};
@@ -42,6 +41,7 @@ impl Alignment {
/// This provides the same numerical value as [`mem::align_of`],
/// but in an `Alignment` instead of a `usize`.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn of<T>() -> Self {
// SAFETY: rustc ensures that type alignment is always a power of two.
@@ -53,6 +53,7 @@ impl Alignment {
///
/// Note that `0` is not a power of two, nor a valid alignment.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn new(align: usize) -> Option<Self> {
if align.is_power_of_two() {
@@ -75,13 +76,10 @@ impl Alignment {
#[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const unsafe fn new_unchecked(align: usize) -> Self {
- // SAFETY: Precondition passed to the caller.
- unsafe {
- assert_unsafe_precondition!(
- "Alignment::new_unchecked requires a power of two",
- (align: usize) => align.is_power_of_two()
- )
- };
+ crate::panic::debug_assert_nounwind!(
+ align.is_power_of_two(),
+ "Alignment::new_unchecked requires a power of two"
+ );
// SAFETY: By precondition, this must be a power of two, and
// our variants encompass all possible powers of two.
@@ -98,6 +96,7 @@ impl Alignment {
/// Returns the alignment as a [`NonZeroUsize`]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn as_nonzero(self) -> NonZeroUsize {
// SAFETY: All the discriminants are non-zero.
@@ -118,10 +117,42 @@ impl Alignment {
/// assert_eq!(Alignment::new(1024).unwrap().log2(), 10);
/// ```
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
- pub fn log2(self) -> u32 {
+ pub const fn log2(self) -> u32 {
self.as_nonzero().trailing_zeros()
}
+
+ /// Returns a bit mask that can be used to match this alignment.
+ ///
+ /// This is equivalent to `!(self.as_usize() - 1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_alignment_type)]
+ /// #![feature(ptr_mask)]
+ /// use std::ptr::{Alignment, NonNull};
+ ///
+ /// #[repr(align(1))] struct Align1(u8);
+ /// #[repr(align(2))] struct Align2(u16);
+ /// #[repr(align(4))] struct Align4(u32);
+ /// let one = <NonNull<Align1>>::dangling().as_ptr();
+ /// let two = <NonNull<Align2>>::dangling().as_ptr();
+ /// let four = <NonNull<Align4>>::dangling().as_ptr();
+ ///
+ /// assert_eq!(four.mask(Alignment::of::<Align1>().mask()), four);
+ /// assert_eq!(four.mask(Alignment::of::<Align2>().mask()), four);
+ /// assert_eq!(four.mask(Alignment::of::<Align4>().mask()), four);
+ /// assert_ne!(one.mask(Alignment::of::<Align4>().mask()), one);
+ /// ```
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn mask(self) -> usize {
+ // SAFETY: The alignment is always nonzero, and therefore decrementing won't overflow.
+ !(unsafe { self.as_usize().unchecked_sub(1) })
+ }
}
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
@@ -193,6 +224,14 @@ impl hash::Hash for Alignment {
}
}
+/// Returns [`Alignment::MIN`], which is valid for any type.
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl Default for Alignment {
+ fn default() -> Alignment {
+ Alignment::MIN
+ }
+}
+
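A quick use of the new impl; a sketch assuming the `ptr_alignment_type` nightly gate shown in this diff.

```rust
#![feature(ptr_alignment_type)]

use std::ptr::Alignment;

fn main() {
    // `Default` yields the 1-byte alignment, which every type satisfies.
    assert_eq!(Alignment::default(), Alignment::MIN);
    assert_eq!(Alignment::default().as_usize(), 1);
}
```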
#[cfg(target_pointer_width = "16")]
type AlignmentEnum = AlignmentEnum16;
#[cfg(target_pointer_width = "32")]
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 36685f756..a444c30c7 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,7 +1,7 @@
use super::*;
-use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics::{self, const_eval_select};
-use crate::mem::{self, SizedTypeProperties};
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::intrinsics::const_eval_select;
+use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *const T {
@@ -186,10 +186,10 @@ impl<T: ?Sized> *const T {
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
- /// required provenance, use [`expose_addr`][pointer::expose_addr] and
- /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes
- /// your code less portable and less amenable to tools that check for compliance with the Rust
- /// memory model.
+ /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
+ /// or [`expose_addr`][pointer::expose_addr] and [`from_exposed_addr`][from_exposed_addr]
+ /// instead. However, note that this makes your code less portable and less amenable to tools
+ /// that check for compliance with the Rust memory model.
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
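For comparison, this is the Strict Provenance way to do address arithmetic without losing provenance; a sketch assuming the `strict_provenance` nightly gate of this era.

```rust
#![feature(strict_provenance)]

fn main() {
    let x = 1u16;
    let p: *const u16 = &x;
    // `u16` is 2-aligned, so the low address bit is free to use as a tag.
    // `map_addr` changes the address while keeping the original provenance.
    let tagged = p.map_addr(|a| a | 1);
    let untagged = tagged.map_addr(|a| a & !1);
    // SAFETY: `untagged` has the address and provenance of `p`.
    assert_eq!(unsafe { *untagged }, 1);
}
```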
@@ -219,7 +219,8 @@ impl<T: ?Sized> *const T {
/// later call [`from_exposed_addr`][] to reconstitute the original pointer including its
/// provenance. (Reconstructing address space information, if required, is your responsibility.)
///
- /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// Using this method means that code is *not* following [Strict
+ /// Provenance][../index.html#strict-provenance] rules. Supporting
/// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`addr`][pointer::addr] wherever possible.
@@ -230,13 +231,13 @@ impl<T: ?Sized> *const T {
/// side-effect which is required for [`from_exposed_addr`][] to work is typically not
/// available.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
- /// [module documentation][crate::ptr] for details.
+ /// It is unclear whether this method can be given a satisfying unambiguous specification. This
+ /// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
///
/// [`from_exposed_addr`]: from_exposed_addr
#[must_use]
#[inline(always)]
- #[unstable(feature = "strict_provenance", issue = "95228")]
+ #[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
@@ -1367,10 +1368,16 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(self, align) }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ let ret = unsafe { align_offset(self, align) };
+
+ // Inform Miri that we want to consider the resulting pointer to be suitably aligned.
+ #[cfg(miri)]
+ if ret != usize::MAX {
+ intrinsics::miri_promise_symbolic_alignment(self.wrapping_add(ret).cast(), align);
}
+
+ ret
}
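For reference, a minimal sketch of the stable entry point this hunk instruments:

```rust
fn main() {
    let buf = [0u8; 32];
    let off = buf.as_ptr().align_offset(4);
    // Advancing `off` elements yields a 4-aligned pointer, or `off` is
    // `usize::MAX` if no such offset exists.
    assert!(off == usize::MAX || (buf.as_ptr() as usize + off) % 4 == 0);
}
```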
/// Returns whether the pointer is properly aligned for `T`.
@@ -1644,6 +1651,24 @@ impl<T> *const [T] {
metadata(self)
}
+ /// Returns `true` if the raw slice has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_len)]
+ /// use std::ptr;
+ ///
+ /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
+ /// assert!(!slice.is_empty());
+ /// ```
+ #[inline(always)]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn is_empty(self) -> bool {
+ self.len() == 0
+ }
+
/// Returns a raw pointer to the slice's buffer.
///
/// This is equivalent to casting `self` to `*const T`, but more type-safe.
@@ -1747,6 +1772,7 @@ impl<T> *const [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &*const T) -> bool {
*self == *other
}
@@ -1759,6 +1785,7 @@ impl<T: ?Sized> Eq for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
@@ -1778,21 +1805,25 @@ impl<T: ?Sized> PartialOrd for *const T {
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn lt(&self, other: &*const T) -> bool {
*self < *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn le(&self, other: &*const T) -> bool {
*self <= *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn gt(&self, other: &*const T) -> bool {
*self > *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn ge(&self, other: &*const T) -> bool {
*self >= *other
}
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index d71079dd0..390e07371 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -312,22 +312,30 @@
//! For instance, ARM explicitly supports high-bit tagging, and so CHERI on ARM inherits
//! that and should support it.
//!
-//! ## Pointer-usize-pointer roundtrips and 'exposed' provenance
+//! ## Exposed Provenance
//!
-//! **This section is *non-normative* and is part of the [Strict Provenance] experiment.**
+//! **This section is *non-normative* and is an extension to the [Strict Provenance] experiment.**
//!
//! As discussed above, pointer-usize-pointer roundtrips are not possible under [Strict Provenance].
-//! However, there exists legacy Rust code that is full of such roundtrips, and legacy platform APIs
-//! regularly assume that `usize` can capture all the information that makes up a pointer. There
-//! also might be code that cannot be ported to Strict Provenance (which is something we would [like
-//! to hear about][Strict Provenance]).
-//!
-//! For situations like this, there is a fallback plan, a way to 'opt out' of Strict Provenance.
-//! However, note that this makes your code a lot harder to specify, and the code will not work
-//! (well) with tools like [Miri] and [CHERI].
-//!
-//! This fallback plan is provided by the [`expose_addr`] and [`from_exposed_addr`] methods (which
-//! are equivalent to `as` casts between pointers and integers). [`expose_addr`] is a lot like
+//! This is by design: the goal of Strict Provenance is to provide a clear specification that we are
+//! confident can be formalized unambiguously and can be subject to precise formal reasoning.
+//!
+//! However, there exist situations where pointer-usize-pointer roundtrips cannot be avoided, or
+//! where avoiding them would require major refactoring. Legacy platform APIs also regularly assume
+//! that `usize` can capture all the information that makes up a pointer. The goal of Strict
+//! Provenance is not to rule out such code; the goal is to put all the *other* pointer-manipulating
+//! code onto a more solid foundation. Strict Provenance is about improving the situation where
+//! possible (all the code that can be written with Strict Provenance) without making things worse
+//! for situations where Strict Provenance is insufficient.
+//!
+//! For these situations, there is a highly experimental extension to Strict Provenance called
+//! *Exposed Provenance*. This extension permits pointer-usize-pointer roundtrips. However, its
+//! semantics are on much less solid footing than Strict Provenance, and at this point it is not yet
+//! clear where a satisfying unambiguous semantics can be defined for Exposed Provenance.
+//! Furthermore, Exposed Provenance will not work (well) with tools like [Miri] and [CHERI].
+//!
+//! Exposed Provenance is provided by the [`expose_addr`] and [`from_exposed_addr`] methods, which
+//! are meant to replace `as` casts between pointers and integers. [`expose_addr`] is a lot like
//! [`addr`], but additionally adds the provenance of the pointer to a global list of 'exposed'
//! provenances. (This list is purely conceptual, it exists for the purpose of specifying Rust but
//! is not materialized in actual executions, except in tools like [Miri].) [`from_exposed_addr`]
@@ -341,10 +349,11 @@
//! there is *no* previously 'exposed' provenance that justifies the way the returned pointer will
//! be used, the program has undefined behavior.
//!
-//! Using [`expose_addr`] or [`from_exposed_addr`] (or the equivalent `as` casts) means that code is
+//! Using [`expose_addr`] or [`from_exposed_addr`] (or the `as` casts) means that code is
//! *not* following Strict Provenance rules. The goal of the Strict Provenance experiment is to
-//! determine whether it is possible to use Rust without [`expose_addr`] and [`from_exposed_addr`].
-//! If this is successful, it would be a major win for avoiding specification complexity and to
+//! determine how far one can get in Rust without the use of [`expose_addr`] and
+//! [`from_exposed_addr`], and to encourage code to be written with Strict Provenance APIs only.
+//! Maximizing the amount of such code is a major win for avoiding specification complexity and to
//! facilitate adoption of tools like [CHERI] and [Miri] that can be a big help in increasing the
//! confidence in (unsafe) Rust code.
//!
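The Exposed Provenance roundtrip in code; a sketch assuming the `exposed_provenance` nightly gate introduced by this diff.

```rust
#![feature(exposed_provenance)]

fn main() {
    let x = 42u32;
    let ptr: *const u32 = &x;
    // Expose the provenance and carry the address around as a plain integer.
    let addr: usize = ptr.expose_addr();
    // Later: reconstitute a pointer, picking up the exposed provenance.
    let ptr2 = std::ptr::from_exposed_addr::<u32>(addr);
    assert_eq!(unsafe { *ptr2 }, 42);
}
```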
@@ -619,12 +628,12 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// Convert an address back to a pointer, picking up a previously 'exposed' provenance.
///
-/// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously exposed by passing it to [`expose_addr`][pointer::expose_addr],
-/// or a `ptr as usize` cast. In addition, memory which is outside the control of the Rust abstract
-/// machine (MMIO registers, for example) is always considered to be exposed, so long as this memory
-/// is disjoint from memory that will be used by the abstract machine such as the stack, heap,
-/// and statics.
+/// This is a more rigorously specified alternative to `addr as *const T`. The provenance of the
+/// returned pointer is that of *any* pointer that was previously exposed by passing it to
+/// [`expose_addr`][pointer::expose_addr], or a `ptr as usize` cast. In addition, memory which is
+/// outside the control of the Rust abstract machine (MMIO registers, for example) is always
+/// considered to be exposed, so long as this memory is disjoint from memory that will be used by
+/// the abstract machine such as the stack, heap, and statics.
///
/// If there is no 'exposed' provenance that justifies the way this pointer will be used,
/// the program has undefined behavior. In particular, the aliasing rules still apply: pointers
@@ -639,7 +648,8 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
///
-/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// Using this function means that code is *not* following [Strict
+/// Provenance][../index.html#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible.
@@ -649,13 +659,13 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// since it is generally not possible to actually *compute* which provenance the returned
/// pointer has to pick up.
///
-/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
-/// [module documentation][crate::ptr] for details.
+/// It is unclear whether this function can be given a satisfying unambiguous specification. This
+/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
#[must_use]
#[inline(always)]
-#[unstable(feature = "strict_provenance", issue = "95228")]
+#[unstable(feature = "exposed_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
+#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
@@ -666,18 +676,20 @@ where
/// Convert an address back to a mutable pointer, picking up a previously 'exposed' provenance.
///
-/// This is equivalent to `addr as *mut T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize`
-/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be
-/// used, the program has undefined behavior. Note that there is no algorithm that decides which
-/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess
-/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined
-/// behavior, then that is the guess that will be taken.
+/// This is a more rigorously specified alternative to `addr as *mut T`. The provenance of the
+/// returned pointer is that of *any* pointer that was previously passed to
+/// [`expose_addr`][pointer::expose_addr] or a `ptr as usize` cast. If there is no previously
+/// 'exposed' provenance that justifies the way this pointer will be used, the program has undefined
+/// behavior. Note that there is no algorithm that decides which provenance will be used. You can
+/// think of this as "guessing" the right provenance, and the guess will be "maximally in your
+/// favor", in the sense that if there is any way to avoid undefined behavior, then that is the
+/// guess that will be taken.
///
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
///
-/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// Using this function means that code is *not* following [Strict
+/// Provenance][../index.html#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible.
@@ -687,13 +699,13 @@ where
/// since it is generally not possible to actually *compute* which provenance the returned
/// pointer has to pick up.
///
-/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
-/// [module documentation][crate::ptr] for details.
+/// It is unclear whether this function can be given a satisfying unambiguous specification. This
+/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
#[must_use]
#[inline(always)]
-#[unstable(feature = "strict_provenance", issue = "95228")]
+#[unstable(feature = "exposed_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
+#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -708,7 +720,8 @@ where
/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_const_stable(feature = "ptr_from_ref", since = "1.76.0")]
#[rustc_never_returns_null_ptr]
#[rustc_diagnostic_item = "ptr_from_ref"]
pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
@@ -721,7 +734,9 @@ pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_const_stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_allow_const_fn_unstable(const_mut_refs)]
#[rustc_never_returns_null_ptr]
pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
r
@@ -1885,6 +1900,7 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
#[inline(always)]
#[must_use = "pointer comparison produces a value"]
#[rustc_diagnostic_item = "ptr_eq"]
+#[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))] // it's actually clear here
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
}
@@ -1898,14 +1914,15 @@ pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
/// # Examples
///
/// ```
-/// #![feature(ptr_addr_eq)]
+/// use std::ptr;
///
/// let whole: &[i32; 3] = &[1, 2, 3];
/// let first: &i32 = &whole[0];
-/// assert!(std::ptr::addr_eq(whole, first));
-/// assert!(!std::ptr::eq::<dyn std::fmt::Debug>(whole, first));
+///
+/// assert!(ptr::addr_eq(whole, first));
+/// assert!(!ptr::eq::<dyn std::fmt::Debug>(whole, first));
/// ```
-#[unstable(feature = "ptr_addr_eq", issue = "116324")]
+#[stable(feature = "ptr_addr_eq", since = "1.76.0")]
#[inline(always)]
#[must_use = "pointer comparison produces a value"]
pub fn addr_eq<T: ?Sized, U: ?Sized>(p: *const T, q: *const U) -> bool {
@@ -1921,8 +1938,7 @@ pub fn addr_eq<T: ?Sized, U: ?Sized>(p: *const T, q: *const U) -> bool {
/// # Examples
///
/// ```
-/// use std::collections::hash_map::DefaultHasher;
-/// use std::hash::{Hash, Hasher};
+/// use std::hash::{DefaultHasher, Hash, Hasher};
/// use std::ptr;
///
/// let five = 5;
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index bc362fb62..9e7b8ec64 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1,6 +1,6 @@
use super::*;
-use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics::{self, const_eval_select};
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::intrinsics::const_eval_select;
use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};
@@ -193,10 +193,10 @@ impl<T: ?Sized> *mut T {
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
- /// required provenance, use [`expose_addr`][pointer::expose_addr] and
- /// [`from_exposed_addr_mut`][from_exposed_addr_mut] instead. However, note that this makes
- /// your code less portable and less amenable to tools that check for compliance with the Rust
- /// memory model.
+ /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
+ /// or [`expose_addr`][pointer::expose_addr] and [`from_exposed_addr`][from_exposed_addr]
+ /// instead. However, note that this makes your code less portable and less amenable to tools
+ /// that check for compliance with the Rust memory model.
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
@@ -226,7 +226,8 @@ impl<T: ?Sized> *mut T {
/// later call [`from_exposed_addr_mut`][] to reconstitute the original pointer including its
/// provenance. (Reconstructing address space information, if required, is your responsibility.)
///
- /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// Using this method means that code is *not* following [Strict
+ /// Provenance][../index.html#strict-provenance] rules. Supporting
/// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported
/// by tools that help you to stay conformant with the Rust memory model, so it is recommended
/// to use [`addr`][pointer::addr] wherever possible.
@@ -237,13 +238,13 @@ impl<T: ?Sized> *mut T {
/// side-effect which is required for [`from_exposed_addr_mut`][] to work is typically not
/// available.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
- /// [module documentation][crate::ptr] for details.
+ /// It is unclear whether this method can be given a satisfying unambiguous specification. This
+ /// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
///
/// [`from_exposed_addr_mut`]: from_exposed_addr_mut
#[must_use]
#[inline(always)]
- #[unstable(feature = "strict_provenance", issue = "95228")]
+ #[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
@@ -259,7 +260,7 @@ impl<T: ?Sized> *mut T {
/// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
/// `self` to the given address, and therefore has all the same capabilities and restrictions.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// This API and its claimed semantics are an extension to the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
@@ -1634,10 +1635,19 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(self, align) }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ let ret = unsafe { align_offset(self, align) };
+
+ // Inform Miri that we want to consider the resulting pointer to be suitably aligned.
+ #[cfg(miri)]
+ if ret != usize::MAX {
+ intrinsics::miri_promise_symbolic_alignment(
+ self.wrapping_add(ret).cast_const().cast(),
+ align,
+ );
}
+
+ ret
}
/// Returns whether the pointer is properly aligned for `T`.
@@ -1920,10 +1930,10 @@ impl<T> *mut [T] {
///
/// ```
/// #![feature(slice_ptr_len)]
+ /// use std::ptr;
///
- /// let mut a = [1, 2, 3];
- /// let ptr = &mut a as *mut [_];
- /// assert!(!ptr.is_empty());
+ /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
+ /// assert!(!slice.is_empty());
/// ```
#[inline(always)]
#[unstable(feature = "slice_ptr_len", issue = "71146")]
@@ -2189,6 +2199,7 @@ impl<T> *mut [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &*mut T) -> bool {
*self == *other
}
@@ -2200,6 +2211,7 @@ impl<T: ?Sized> Eq for *mut T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *mut T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn cmp(&self, other: &*mut T) -> Ordering {
if self < other {
Less
@@ -2219,21 +2231,25 @@ impl<T: ?Sized> PartialOrd for *mut T {
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn lt(&self, other: &*mut T) -> bool {
*self < *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn le(&self, other: &*mut T) -> bool {
*self <= *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn gt(&self, other: &*mut T) -> bool {
*self > *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn ge(&self, other: &*mut T) -> bool {
*self >= *other
}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index ae673b779..77961506e 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -1,12 +1,14 @@
use crate::cmp::Ordering;
-use crate::convert::From;
use crate::fmt;
use crate::hash;
+use crate::intrinsics;
use crate::intrinsics::assert_unsafe_precondition;
use crate::marker::Unsize;
+use crate::mem::SizedTypeProperties;
use crate::mem::{self, MaybeUninit};
use crate::num::NonZeroUsize;
use crate::ops::{CoerceUnsized, DispatchFromDyn};
+use crate::ptr;
use crate::ptr::Unique;
use crate::slice::{self, SliceIndex};
@@ -471,41 +473,1047 @@ impl<T: ?Sized> NonNull<T> {
unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) }
}
- /// See [`pointer::add`] for semantics and safety requirements.
+ /// Calculates the offset from a pointer.
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+    ///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
+ ///
+    /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut s = [1, 2, 3];
+ /// let ptr: NonNull<u32> = NonNull::new(s.as_mut_ptr()).unwrap();
+ ///
+ /// unsafe {
+ /// println!("{}", ptr.offset(1).read());
+ /// println!("{}", ptr.offset(2).read());
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset(self, count: isize) -> NonNull<T>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Additionally safety contract of `offset` guarantees that the resulting pointer is
+ // pointing to an allocation, there can't be an allocation at null, thus it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } }
+ }
+
+ /// Calculates the offset from a pointer in bytes.
+ ///
+ /// `count` is in units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [offset][pointer::offset] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset(self, count: isize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `offset` and `byte_offset` has
+ // the same safety contract.
+ // Additionally safety contract of `offset` guarantees that the resulting pointer is
+ // pointing to an allocation, there can't be an allocation at null, thus it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_offset(count) } }
+ }
+
+ /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+    /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let s: &str = "123";
+ /// let ptr: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap();
+ ///
+ /// unsafe {
+ /// println!("{}", ptr.add(1).read() as char);
+ /// println!("{}", ptr.add(2).read() as char);
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Additionally safety contract of `offset` guarantees that the resulting pointer is
+ // pointing to an allocation, there can't be an allocation at null, thus it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`add`][NonNull::add] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_add(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `add` and `byte_add` has the same
+ // safety contract.
+ // Additionally safety contract of `add` guarantees that the resulting pointer is pointing
+ // to an allocation, there can't be an allocation at null, thus it's safe to construct
+ // `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_add(count) } }
+ }
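A runnable sketch of `byte_add`, assuming the `non_null_convenience` nightly gate used throughout this hunk:

```rust
#![feature(non_null_convenience)]

use std::mem::size_of;
use std::ptr::NonNull;

fn main() {
    let mut a = [1u16, 2u16];
    let base: NonNull<u16> = NonNull::new(a.as_mut_ptr()).unwrap();
    // SAFETY: `size_of::<u16>()` bytes forward is element 1, still in bounds.
    let second = unsafe { base.byte_add(size_of::<u16>()) };
    assert_eq!(unsafe { second.as_ptr().read() }, 2);
}
```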
+
+ /// Calculates the offset from a pointer (convenience for
+ /// `.offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+    ///   space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+    /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let s: &str = "123";
+ ///
+ /// unsafe {
+ /// let end: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap().add(3);
+ /// println!("{}", end.sub(1).read() as char);
+ /// println!("{}", end.sub(2).read() as char);
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ // We could always go back to wrapping if unchecked becomes unacceptable
+ #[rustc_allow_const_fn_unstable(const_int_unchecked_arith)]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ if T::IS_ZST {
+ // Pointer arithmetic does nothing when the pointee is a ZST.
+ self
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Because the pointee is *not* a ZST, that means that `count` is
+ // at most `isize::MAX`, and thus the negation cannot overflow.
+ unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) }
+ }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for
+ /// `.byte_offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`sub`][NonNull::sub] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_sub(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `sub` and `byte_sub` has the same
+ // safety contract.
+ // Additionally safety contract of `sub` guarantees that the resulting pointer is pointing
+ // to an allocation, there can't be an allocation at null, thus it's safe to construct
+ // `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_sub(count) } }
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+ ///
+ /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+ /// except that it has a lot more opportunities for UB, in exchange for the compiler
+ /// better understanding what you are doing.
+ ///
+ /// The primary motivation of this method is for computing the `len` of an array/slice
+ /// of `T` that you are currently representing as a "start" and "end" pointer
+ /// (and "end" is "one past the end" of the array).
+ /// In that case, `end.offset_from(start)` gets you the length of the array.
+ ///
+    /// All of the following safety requirements are trivially satisfied for this use case.
+ ///
+ /// [`offset`]: #method.offset
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both `self` and `origin` must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * Both pointers must be *derived from* a pointer to the same object.
+ /// (See below for an example.)
+ ///
+ /// * The distance between the pointers, in bytes, must be an exact multiple
+ /// of the size of `T`.
+ ///
+ /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The distance being in bounds cannot rely on "wrapping around" the address space.
+ ///
+ /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
+ /// address space, so two pointers within some value of any Rust type `T` will always satisfy
+ /// the last two conditions. The standard library also generally ensures that allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
+ /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
+ /// always satisfies the last two conditions.
+ ///
+ /// Most platforms fundamentally can't even construct such a large allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
+ /// such large allocations either.)
+ ///
+ /// The requirement for pointers to be derived from the same allocated object is primarily
+ /// needed for `const`-compatibility: the distance between pointers into *different* allocated
+ /// objects is not known at compile-time. However, the requirement also exists at
+ /// runtime and may be exploited by optimizations. If you wish to compute the difference between
+ /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
+ /// origin as isize) / mem::size_of::<T>()`.
+ // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+ ///
+ /// [`add`]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let a = [0; 5];
+ /// let ptr1: NonNull<u32> = NonNull::from(&a[1]);
+ /// let ptr2: NonNull<u32> = NonNull::from(&a[3]);
+ /// unsafe {
+ /// assert_eq!(ptr2.offset_from(ptr1), 2);
+ /// assert_eq!(ptr1.offset_from(ptr2), -2);
+ /// assert_eq!(ptr1.offset(2), ptr2);
+ /// assert_eq!(ptr2.offset(-2), ptr1);
+ /// }
+ /// ```
+ ///
+ /// *Incorrect* usage:
+ ///
+ /// ```rust,no_run
+ /// #![feature(non_null_convenience, strict_provenance)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let ptr1 = NonNull::new(Box::into_raw(Box::new(0u8))).unwrap();
+ /// let ptr2 = NonNull::new(Box::into_raw(Box::new(1u8))).unwrap();
+ /// let diff = (ptr2.addr().get() as isize).wrapping_sub(ptr1.addr().get() as isize);
+ /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
+ /// let ptr2_other = NonNull::new(ptr1.as_ptr().wrapping_byte_offset(diff)).unwrap();
+ /// assert_eq!(ptr2.addr(), ptr2_other.addr());
+ /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
+ /// // computing their offset is undefined behavior, even though
+ /// // they point to the same address!
+ /// unsafe {
+ /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset_from(self, origin: NonNull<T>) -> isize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset_from`.
+ unsafe { self.pointer.offset_from(origin.pointer) }
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`offset_from`][NonNull::offset_from] on it. See that method for
+ /// documentation and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointers,
+ /// ignoring the metadata.
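+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let a = [0u16; 4];
+    /// let base: NonNull<u16> = NonNull::from(&a).cast();
+    /// unsafe {
+    ///     let third = base.add(2);
+    ///     // Two `u16` elements apart is four bytes apart.
+    ///     assert_eq!(third.byte_offset_from(base), 4);
+    /// }
+    /// ```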
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: NonNull<U>) -> isize {
+ // SAFETY: the caller must uphold the safety contract for `byte_offset_from`.
+ unsafe { self.pointer.byte_offset_from(origin.pointer) }
+ }
+
+    // N.B. `wrapping_offset`, `wrapping_add`, etc. are not implemented because they can wrap to null
+
+ /// Calculates the distance between two pointers, *where it's known that
+ /// `self` is equal to or greater than `origin`*. The returned value is in
+ /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+ ///
+ /// This computes the same value that [`offset_from`](#method.offset_from)
+ /// would compute, but with the added precondition that the offset is
+ /// guaranteed to be non-negative. This method is equivalent to
+ /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
+ /// but it provides slightly more information to the optimizer, which can
+ /// sometimes allow it to optimize slightly better with some backends.
+ ///
+    /// This method can be thought of as recovering the `count` that was passed
+ /// to [`add`](#method.add) (or, with the parameters in the other order,
+ /// to [`sub`](#method.sub)). The following are all equivalent, assuming
+ /// that their safety preconditions are met:
+ /// ```rust
+ /// # #![feature(non_null_convenience)]
+ /// # unsafe fn blah(ptr: std::ptr::NonNull<u32>, origin: std::ptr::NonNull<u32>, count: usize) -> bool {
+ /// ptr.sub_ptr(origin) == count
+ /// # &&
+ /// origin.add(count) == ptr
+ /// # &&
+ /// ptr.sub(count) == origin
+ /// # }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - The distance between the pointers must be non-negative (`self >= origin`)
+ ///
+ /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
+ /// apply to this method as well; see it for the full details.
+ ///
+ /// Importantly, despite the return type of this method being able to represent
+ /// a larger offset, it's still *not permitted* to pass pointers which differ
+ /// by more than `isize::MAX` *bytes*. As such, the result of this method will
+ /// always be less than or equal to `isize::MAX as usize`.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let a = [0; 5];
+ /// let ptr1: NonNull<u32> = NonNull::from(&a[1]);
+ /// let ptr2: NonNull<u32> = NonNull::from(&a[3]);
+ /// unsafe {
+ /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
+ /// assert_eq!(ptr1.add(2), ptr2);
+ /// assert_eq!(ptr2.sub(2), ptr1);
+ /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
+ /// }
+ ///
+ /// // This would be incorrect, as the pointers are not correctly ordered:
+ /// // ptr1.sub_ptr(ptr2)
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ // #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
+ // #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub_ptr(self, subtracted: NonNull<T>) -> usize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `sub_ptr`.
+ unsafe { self.pointer.sub_ptr(subtracted.pointer) }
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// See [`ptr::read`] for safety concerns and examples.
+ ///
+ /// [`ptr::read`]: crate::ptr::read()
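+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let x = 12u32;
+    /// let ptr = NonNull::from(&x);
+    /// assert_eq!(unsafe { ptr.read() }, 12);
+    /// ```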
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read`.
+ unsafe { ptr::read(self.pointer) }
+ }
+
+ /// Performs a volatile read of the value from `self` without moving it. This
+ /// leaves the memory in `self` unchanged.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::read_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
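+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch on ordinary memory (real uses typically target
+    /// memory-mapped I/O), assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let x = 7u8;
+    /// let ptr = NonNull::from(&x);
+    /// assert_eq!(unsafe { ptr.read_volatile() }, 7);
+    /// ```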
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
- pub(crate) const unsafe fn add(self, delta: usize) -> Self
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn read_volatile(self) -> T
where
T: Sized,
{
- // SAFETY: We require that the delta stays in-bounds of the object, and
- // thus it cannot become null, as that would require wrapping the
- // address space, which no legal objects are allowed to do.
- // And the caller promised the `delta` is sound to add.
- unsafe { NonNull { pointer: self.pointer.add(delta) } }
+ // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+ unsafe { ptr::read_volatile(self.pointer) }
}
- /// See [`pointer::sub`] for semantics and safety requirements.
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// Unlike `read`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::read_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
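+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let bytes = [0u8; 8];
+    /// let base: NonNull<u8> = NonNull::from(&bytes).cast();
+    /// unsafe {
+    ///     // One byte into the buffer is (usually) misaligned for `u32`.
+    ///     let ptr = base.add(1).cast::<u32>();
+    ///     assert_eq!(ptr.read_unaligned(), 0);
+    /// }
+    /// ```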
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
- pub(crate) const unsafe fn sub(self, delta: usize) -> Self
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read_unaligned(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+ unsafe { ptr::read_unaligned(self.pointer) }
+ }
+
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
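+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let src = [1u8, 2, 3];
+    /// let mut dst = [0u8; 3];
+    /// let src_ptr: NonNull<u8> = NonNull::from(&src).cast();
+    /// let dst_ptr: NonNull<u8> = NonNull::from(&mut dst).cast();
+    /// // `self` is the source and `dest` the destination, as in `ptr::copy`.
+    /// unsafe { src_ptr.copy_to(dst_ptr, 3) };
+    /// assert_eq!(dst, [1, 2, 3]);
+    /// ```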
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to(self, dest: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { ptr::copy(self.pointer, dest.as_ptr(), count) }
+ }
+
+    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to_nonoverlapping(self, dest: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { ptr::copy_nonoverlapping(self.pointer, dest.as_ptr(), count) }
+ }
+
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
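+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let src = [7u8, 8];
+    /// let mut dst = [0u8; 2];
+    /// let src_ptr: NonNull<u8> = NonNull::from(&src).cast();
+    /// let dst_ptr: NonNull<u8> = NonNull::from(&mut dst).cast();
+    /// // Note the flipped roles: `self` is the destination here.
+    /// unsafe { dst_ptr.copy_from(src_ptr, 2) };
+    /// assert_eq!(dst, [7, 8]);
+    /// ```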
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from(self, src: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { ptr::copy(src.pointer, self.as_ptr(), count) }
+ }
+
+    /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from_nonoverlapping(self, src: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { ptr::copy_nonoverlapping(src.pointer, self.as_ptr(), count) }
+ }
+
+ /// Executes the destructor (if any) of the pointed-to value.
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns and examples.
+ ///
+ /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place()
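+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::mem::MaybeUninit;
+    /// use std::ptr::NonNull;
+    ///
+    /// let mut slot = MaybeUninit::new(String::from("owned"));
+    /// let ptr = NonNull::new(slot.as_mut_ptr()).unwrap();
+    /// // Runs the `String` destructor; `slot` is uninitialized afterwards.
+    /// unsafe { ptr.drop_in_place() };
+    /// ```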
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ pub unsafe fn drop_in_place(self) {
+ // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
+ unsafe { ptr::drop_in_place(self.as_ptr()) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// See [`ptr::write`] for safety concerns and examples.
+ ///
+ /// [`ptr::write`]: crate::ptr::write()
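+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let mut x = 0u32;
+    /// let ptr = NonNull::from(&mut x);
+    /// unsafe { ptr.write(5) };
+    /// assert_eq!(x, 5);
+    /// ```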
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write`.
+ unsafe { ptr::write(self.as_ptr(), val) }
+ }
+
+ /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
+ /// bytes of memory starting at `self` to `val`.
+ ///
+ /// See [`ptr::write_bytes`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_bytes`]: crate::ptr::write_bytes()
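+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let mut buf = [0u8; 4];
+    /// let ptr: NonNull<u8> = NonNull::from(&mut buf).cast();
+    /// unsafe { ptr.write_bytes(0xfe, 4) };
+    /// assert_eq!(buf, [0xfe; 4]);
+    /// ```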
+ #[doc(alias = "memset")]
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_bytes(self, val: u8, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_bytes`.
+ unsafe { ptr::write_bytes(self.as_ptr(), val, count) }
+ }
+
+ /// Performs a volatile write of a memory location with the given value without
+ /// reading or dropping the old value.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::write_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_volatile`]: crate::ptr::write_volatile()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn write_volatile(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_volatile`.
+ unsafe { ptr::write_volatile(self.as_ptr(), val) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// Unlike `write`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::write_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned()
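+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let mut bytes = [0u8; 8];
+    /// let base: NonNull<u8> = NonNull::from(&mut bytes).cast();
+    /// unsafe {
+    ///     // One byte into the buffer is (usually) misaligned for `u32`.
+    ///     let ptr = base.add(1).cast::<u32>();
+    ///     ptr.write_unaligned(u32::MAX);
+    /// }
+    /// assert_eq!(bytes, [0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0]);
+    /// ```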
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_unaligned(self, val: T)
where
T: Sized,
{
- // SAFETY: We require that the delta stays in-bounds of the object, and
- // thus it cannot become null, as no legal objects can be allocated
- // in such as way that the null address is part of them.
- // And the caller promised the `delta` is sound to subtract.
- unsafe { NonNull { pointer: self.pointer.sub(delta) } }
+ // SAFETY: the caller must uphold the safety contract for `write_unaligned`.
+ unsafe { ptr::write_unaligned(self.as_ptr(), val) }
}
- /// See [`pointer::sub_ptr`] for semantics and safety requirements.
+ /// Replaces the value at `self` with `src`, returning the old
+ /// value, without dropping either.
+ ///
+ /// See [`ptr::replace`] for safety concerns and examples.
+ ///
+ /// [`ptr::replace`]: crate::ptr::replace()
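+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let mut x = 1u32;
+    /// let ptr = NonNull::from(&mut x);
+    /// let old = unsafe { ptr.replace(2) };
+    /// assert_eq!(old, 1);
+    /// assert_eq!(x, 2);
+    /// ```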
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ pub unsafe fn replace(self, src: T) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `replace`.
+ unsafe { ptr::replace(self.as_ptr(), src) }
+ }
+
+ /// Swaps the values at two mutable locations of the same type, without
+ /// deinitializing either. They may overlap, unlike `mem::swap` which is
+ /// otherwise equivalent.
+ ///
+ /// See [`ptr::swap`] for safety concerns and examples.
+ ///
+ /// [`ptr::swap`]: crate::ptr::swap()
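+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming the unstable `non_null_convenience` feature:
+    ///
+    /// ```
+    /// #![feature(non_null_convenience)]
+    /// use std::ptr::NonNull;
+    ///
+    /// let mut a = 1u8;
+    /// let mut b = 2u8;
+    /// let pa = NonNull::from(&mut a);
+    /// let pb = NonNull::from(&mut b);
+    /// unsafe { pa.swap(pb) };
+    /// assert_eq!((a, b), (2, 1));
+    /// ```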
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+ #[inline(always)]
+ pub const unsafe fn swap(self, with: NonNull<T>)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `swap`.
+ unsafe { ptr::swap(self.as_ptr(), with.as_ptr()) }
+ }
+
+ /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
+ /// `align`.
+ ///
+ /// If it is not possible to align the pointer, the implementation returns
+ /// `usize::MAX`. It is permissible for the implementation to *always*
+ /// return `usize::MAX`. Only your algorithm's performance can depend
+ /// on getting a usable offset here, not its correctness.
+ ///
+    /// The offset is expressed as a number of `T` elements, not bytes.
+ ///
+ /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
+ /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
+ /// the returned offset is correct in all terms other than alignment.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two.
+ ///
+ /// # Examples
+ ///
+ /// Accessing adjacent `u8` as `u16`
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::mem::align_of;
+ /// use std::ptr::NonNull;
+ ///
+ /// # unsafe {
+ /// let x = [5_u8, 6, 7, 8, 9];
+ /// let ptr = NonNull::new(x.as_ptr() as *mut u8).unwrap();
+ /// let offset = ptr.align_offset(align_of::<u16>());
+ ///
+ /// if offset < x.len() - 1 {
+ /// let u16_ptr = ptr.add(offset).cast::<u16>();
+ /// assert!(u16_ptr.read() == u16::from_ne_bytes([5, 6]) || u16_ptr.read() == u16::from_ne_bytes([6, 7]));
+ /// } else {
+ /// // while the pointer can be aligned via `offset`, it would point
+ /// // outside the allocation
+ /// }
+ /// # }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
+ #[must_use]
#[inline]
- pub(crate) const unsafe fn sub_ptr(self, subtrahend: Self) -> usize
+ pub const fn align_offset(self, align: usize) -> usize
where
T: Sized,
{
- // SAFETY: The caller promised that this is safe to do, and
- // the non-nullness is irrelevant to the operation.
- unsafe { self.pointer.sub_ptr(subtrahend.pointer) }
+ if !align.is_power_of_two() {
+ panic!("align_offset: align is not a power-of-two");
+ }
+
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above.
+ unsafe { ptr::align_offset(self.pointer, align) }
+ }
+ }
+
+ /// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = NonNull::<AlignedI32>::from(&data);
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!NonNull::new(ptr.as_ptr().wrapping_byte_add(1)).unwrap().is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(non_null_convenience)]
+ /// #![feature(const_option)]
+ /// #![feature(const_nonnull_new)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let data = [AlignedI32(42), AlignedI32(42)];
+ /// let ptr = NonNull::<AlignedI32>::new(&data[0] as *const _ as *mut _).unwrap();
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = unsafe { ptr.add(1).cast::<AlignedI64>() };
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_option)]
+ /// #![feature(const_nonnull_new)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = NonNull::new(40 as *mut AlignedI32).unwrap();
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = NonNull::new(ptr.as_ptr().wrapping_add(1)).unwrap().cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ #[must_use]
+ #[inline]
+ pub const fn is_aligned(self) -> bool
+ where
+ T: Sized,
+ {
+ self.pointer.is_aligned()
+ }
+
+ /// Returns whether the pointer is aligned to `align`.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointer,
+ /// ignoring the metadata.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+    /// cannot be aligned more strictly than the reference's underlying allocation.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ #[must_use]
+ #[inline]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
+ self.pointer.is_aligned_to(align)
}
}
@@ -783,6 +1791,7 @@ impl<T: ?Sized> Eq for NonNull<T> {}
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialEq for NonNull<T> {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index bf8b86677..067f1541e 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -1,4 +1,3 @@
-use crate::convert::From;
use crate::fmt;
use crate::marker::{PhantomData, Unsize};
use crate::ops::{CoerceUnsized, DispatchFromDyn};
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 50127b27f..eff1b9b59 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -835,8 +835,6 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(result_option_inspect)]
- ///
/// let x: u8 = "4"
/// .parse::<u8>()
/// .inspect(|x| println!("original: {x}"))
@@ -844,7 +842,7 @@ impl<T, E> Result<T, E> {
/// .expect("failed to parse number");
/// ```
#[inline]
- #[unstable(feature = "result_option_inspect", issue = "91345")]
+ #[stable(feature = "result_option_inspect", since = "1.76.0")]
pub fn inspect<F: FnOnce(&T)>(self, f: F) -> Self {
if let Ok(ref t) = self {
f(t);
@@ -858,8 +856,6 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(result_option_inspect)]
- ///
/// use std::{fs, io};
///
/// fn read() -> io::Result<String> {
@@ -868,7 +864,7 @@ impl<T, E> Result<T, E> {
/// }
/// ```
#[inline]
- #[unstable(feature = "result_option_inspect", issue = "91345")]
+ #[stable(feature = "result_option_inspect", since = "1.76.0")]
pub fn inspect_err<F: FnOnce(&E)>(self, f: F) -> Self {
if let Err(ref e) = self {
f(e);
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
index 4cfccd2e3..ce04a9f40 100644
--- a/library/core/src/slice/ascii.rs
+++ b/library/core/src/slice/ascii.rs
@@ -125,6 +125,7 @@ impl [u8] {
/// assert_eq!(b"".trim_ascii_start(), b"");
/// ```
#[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[inline]
pub const fn trim_ascii_start(&self) -> &[u8] {
let mut bytes = self;
// Note: A pattern matching based approach (instead of indexing) allows
@@ -154,6 +155,7 @@ impl [u8] {
/// assert_eq!(b"".trim_ascii_end(), b"");
/// ```
#[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[inline]
pub const fn trim_ascii_end(&self) -> &[u8] {
let mut bytes = self;
// Note: A pattern matching based approach (instead of indexing) allows
@@ -184,6 +186,7 @@ impl [u8] {
/// assert_eq!(b"".trim_ascii(), b"");
/// ```
#[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[inline]
pub const fn trim_ascii(&self) -> &[u8] {
self.trim_ascii_start().trim_ascii_end()
}
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index 1da3a87e1..373b4aee4 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -1,9 +1,9 @@
//! Indexing implementations for `[T]`.
-use crate::intrinsics::assert_unsafe_precondition;
use crate::intrinsics::const_eval_select;
use crate::intrinsics::unchecked_sub;
use crate::ops;
+use crate::panic::debug_assert_nounwind;
use crate::ptr;
#[stable(feature = "rust1", since = "1.0.0")]
@@ -225,31 +225,28 @@ unsafe impl<T> SliceIndex<[T]> for usize {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
- let this = self;
+ debug_assert_nounwind!(
+ self < slice.len(),
+ "slice::get_unchecked requires that the index is within the slice",
+ );
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked requires that the index is within the slice",
- [T](this: usize, slice: *const [T]) => this < slice.len()
- );
+ crate::intrinsics::assume(self < slice.len());
slice.as_ptr().add(self)
}
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
- let this = self;
+ debug_assert_nounwind!(
+ self < slice.len(),
+ "slice::get_unchecked_mut requires that the index is within the slice",
+ );
// SAFETY: see comments for `get_unchecked` above.
- unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked_mut requires that the index is within the slice",
- [T](this: usize, slice: *mut [T]) => this < slice.len()
- );
- slice.as_mut_ptr().add(self)
- }
+ unsafe { slice.as_mut_ptr().add(self) }
}
#[inline]
@@ -293,32 +290,25 @@ unsafe impl<T> SliceIndex<[T]> for ops::IndexRange {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
- let end = self.end();
+ debug_assert_nounwind!(
+ self.end() <= slice.len(),
+ "slice::get_unchecked requires that the index is within the slice"
+ );
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
-
- unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked requires that the index is within the slice",
- [T](end: usize, slice: *const [T]) => end <= slice.len()
- );
- ptr::slice_from_raw_parts(slice.as_ptr().add(self.start()), self.len())
- }
+ unsafe { ptr::slice_from_raw_parts(slice.as_ptr().add(self.start()), self.len()) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
- let end = self.end();
+ debug_assert_nounwind!(
+ self.end() <= slice.len(),
+ "slice::get_unchecked_mut requires that the index is within the slice",
+ );
// SAFETY: see comments for `get_unchecked` above.
- unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked_mut requires that the index is within the slice",
- [T](end: usize, slice: *mut [T]) => end <= slice.len()
- );
- ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start()), self.len())
- }
+ unsafe { ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start()), self.len()) }
}
#[inline]
@@ -369,17 +359,15 @@ unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
- let this = ops::Range { ..self };
+ debug_assert_nounwind!(
+ self.end >= self.start && self.end <= slice.len(),
+ "slice::get_unchecked requires that the range is within the slice",
+ );
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe and the length calculation cannot overflow.
unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked requires that the range is within the slice",
- [T](this: ops::Range<usize>, slice: *const [T]) =>
- this.end >= this.start && this.end <= slice.len()
- );
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), new_len)
}
@@ -387,14 +375,12 @@ unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
- let this = ops::Range { ..self };
+ debug_assert_nounwind!(
+ self.end >= self.start && self.end <= slice.len(),
+ "slice::get_unchecked_mut requires that the range is within the slice",
+ );
// SAFETY: see comments for `get_unchecked` above.
unsafe {
- assert_unsafe_precondition!(
- "slice::get_unchecked_mut requires that the range is within the slice",
- [T](this: ops::Range<usize>, slice: *mut [T]) =>
- this.end >= this.start && this.end <= slice.len()
- );
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), new_len)
}
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 5e229bf52..fc54ea237 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -10,7 +10,7 @@ use crate::intrinsics::assume;
use crate::iter::{
FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, UncheckedIterator,
};
-use crate::marker::{PhantomData, Send, Sized, Sync};
+use crate::marker::PhantomData;
use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
use crate::ptr::{self, invalid, invalid_mut, NonNull};
diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index 3a8b59d72..da7ceb2dd 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -20,20 +20,6 @@ const fn contains_zero_byte(x: usize) -> bool {
x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
-#[inline]
-#[cfg(target_pointer_width = "16")]
-#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")]
-const fn repeat_byte(b: u8) -> usize {
- (b as usize) << 8 | b as usize
-}
-
-#[inline]
-#[cfg(not(target_pointer_width = "16"))]
-#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")]
-const fn repeat_byte(b: u8) -> usize {
- (b as usize) * (usize::MAX / 255)
-}
-
/// Returns the first index matching the byte `x` in `text`.
#[inline]
#[must_use]
@@ -93,7 +79,7 @@ const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
}
// search the body of the text
- let repeated_x = repeat_byte(x);
+ let repeated_x = usize::repeat_u8(x);
while offset <= len - 2 * USIZE_BYTES {
// SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
// between the offset and the end of the slice.
@@ -149,7 +135,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
// Search the body of the text, make sure we don't cross min_aligned_offset.
// offset is always aligned, so just testing `>` is sufficient and avoids possible
// overflow.
- let repeated_x = repeat_byte(x);
+ let repeated_x = usize::repeat_u8(x);
let chunk_bytes = mem::size_of::<Chunk>();
while offset > min_aligned_offset {
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 6cf5d48a1..b14d97127 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -6,18 +6,14 @@
#![stable(feature = "rust1", since = "1.0.0")]
-use crate::cmp::Ordering::{self, Greater, Less};
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::fmt;
-use crate::intrinsics::{assert_unsafe_precondition, exact_div};
-use crate::marker::Copy;
+use crate::intrinsics::exact_div;
use crate::mem::{self, SizedTypeProperties};
use crate::num::NonZeroUsize;
-use crate::ops::{Bound, FnMut, OneSidedRange, Range, RangeBounds};
-use crate::option::Option;
-use crate::option::Option::{None, Some};
+use crate::ops::{Bound, OneSidedRange, Range, RangeBounds};
+use crate::panic::debug_assert_nounwind;
use crate::ptr;
-use crate::result::Result;
-use crate::result::Result::{Err, Ok};
use crate::simd::{self, Simd};
use crate::slice;
@@ -929,14 +925,14 @@ impl<T> [T] {
#[unstable(feature = "slice_swap_unchecked", issue = "88539")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
- let this = self;
- let ptr = this.as_mut_ptr();
+ debug_assert_nounwind!(
+ a < self.len() && b < self.len(),
+ "slice::swap_unchecked requires that the indices are within the slice",
+ );
+
+ let ptr = self.as_mut_ptr();
// SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()`
unsafe {
- assert_unsafe_precondition!(
- "slice::swap_unchecked requires that the indices are within the slice",
- [T](a: usize, b: usize, this: &mut [T]) => a < this.len() && b < this.len()
- );
ptr::swap(ptr.add(a), ptr.add(b));
}
}
@@ -1044,11 +1040,11 @@ impl<T> [T] {
/// # Examples
///
/// ```
- /// let slice = ['r', 'u', 's', 't'];
- /// let mut iter = slice.windows(2);
- /// assert_eq!(iter.next().unwrap(), &['r', 'u']);
- /// assert_eq!(iter.next().unwrap(), &['u', 's']);
- /// assert_eq!(iter.next().unwrap(), &['s', 't']);
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.windows(3);
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o', 'r']);
+ /// assert_eq!(iter.next().unwrap(), &['o', 'r', 'e']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e', 'm']);
/// assert!(iter.next().is_none());
/// ```
///
@@ -1269,15 +1265,12 @@ impl<T> [T] {
#[inline]
#[must_use]
pub const unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
- let this = self;
+ debug_assert_nounwind!(
+ N != 0 && self.len() % N == 0,
+ "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
+ );
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
- let new_len = unsafe {
- assert_unsafe_precondition!(
- "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
- [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0
- );
- exact_div(self.len(), N)
- };
+ let new_len = unsafe { exact_div(self.len(), N) };
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
@@ -1426,15 +1419,12 @@ impl<T> [T] {
#[inline]
#[must_use]
pub const unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
- let this = &*self;
+ debug_assert_nounwind!(
+ N != 0 && self.len() % N == 0,
+ "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
+ );
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
- let new_len = unsafe {
- assert_unsafe_precondition!(
- "slice::as_chunks_unchecked_mut requires `N != 0` and the slice to split exactly into `N`-element chunks",
- [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0
- );
- exact_div(this.len(), N)
- };
+ let new_len = unsafe { exact_div(self.len(), N) };
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), new_len) }
@@ -1967,14 +1957,13 @@ impl<T> [T] {
let len = self.len();
let ptr = self.as_ptr();
+ debug_assert_nounwind!(
+ mid <= len,
+ "slice::split_at_unchecked requires the index to be within the slice",
+ );
+
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
- unsafe {
- assert_unsafe_precondition!(
- "slice::split_at_unchecked requires the index to be within the slice",
- (mid: usize, len: usize) => mid <= len
- );
- (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid))
- }
+ unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
@@ -2018,17 +2007,16 @@ impl<T> [T] {
let len = self.len();
let ptr = self.as_mut_ptr();
+ debug_assert_nounwind!(
+ mid <= len,
+ "slice::split_at_mut_unchecked requires the index to be within the slice",
+ );
+
// SAFETY: Caller has to check that `0 <= mid <= self.len()`.
//
// `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
// is fine.
- unsafe {
- assert_unsafe_precondition!(
- "slice::split_at_mut_unchecked requires the index to be within the slice",
- (mid: usize, len: usize) => mid <= len
- );
- (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid))
- }
+ unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Divides one slice into an array and a remainder slice at an index.
@@ -2854,14 +2842,13 @@ impl<T> [T] {
// we have `left + size/2 < self.len()`, and this is in-bounds.
let cmp = f(unsafe { self.get_unchecked(mid) });
- // The reason why we use if/else control flow rather than match
- // is because match reorders comparison operations, which is perf sensitive.
- // This is x86 asm for u8: https://rust.godbolt.org/z/8Y8Pra.
- if cmp == Less {
- left = mid + 1;
- } else if cmp == Greater {
- right = mid;
- } else {
+ // This control flow produces conditional moves, which results in
+ // fewer branches and instructions than if/else or matching on
+ // cmp::Ordering.
+ // This is x86 asm for u8: https://rust.godbolt.org/z/698eYffTx.
+ left = if cmp == Less { mid + 1 } else { left };
+ right = if cmp == Greater { mid } else { right };
+ if cmp == Equal {
// SAFETY: same as the `get_unchecked` above
unsafe { crate::intrinsics::assume(mid < self.len()) };
return Ok(mid);
@@ -3876,6 +3863,12 @@ impl<T> [T] {
} else {
let (left, rest) = self.split_at(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
+ // Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
+ #[cfg(miri)]
+ crate::intrinsics::miri_promise_symbolic_alignment(
+ rest.as_ptr().cast(),
+ mem::align_of::<U>(),
+ );
// SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
// since the caller guarantees that we can transmute `T` to `U` safely.
unsafe {
@@ -3946,6 +3939,12 @@ impl<T> [T] {
let (us_len, ts_len) = rest.align_to_offsets::<U>();
let rest_len = rest.len();
let mut_ptr = rest.as_mut_ptr();
+ // Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
+ #[cfg(miri)]
+ crate::intrinsics::miri_promise_symbolic_alignment(
+ mut_ptr.cast() as *const (),
+ mem::align_of::<U>(),
+ );
// We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
// SAFETY: see comments for `align_to`.
unsafe {
@@ -3987,7 +3986,7 @@ impl<T> [T] {
///
/// ```
/// #![feature(portable_simd)]
- /// use core::simd::SimdFloat;
+ /// use core::simd::prelude::*;
///
/// let short = &[1, 2, 3];
/// let (prefix, middle, suffix) = short.as_simd::<4>();
@@ -3999,7 +3998,6 @@ impl<T> [T] {
///
/// fn basic_simd_sum(x: &[f32]) -> f32 {
/// use std::ops::Add;
- /// use std::simd::f32x4;
/// let (prefix, middle, suffix) = x.as_simd();
/// let sums = f32x4::from_array([
/// prefix.iter().copied().sum(),
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index c30f01b3c..dd2efb005 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -8,6 +8,7 @@ use crate::iter::{TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use crate::ops::Try;
use crate::option;
use crate::slice::{self, Split as SliceSplit};
+use core::num::NonZeroUsize;
use super::from_utf8_unchecked;
use super::pattern::Pattern;
@@ -50,6 +51,55 @@ impl<'a> Iterator for Chars<'a> {
}
#[inline]
+ fn advance_by(&mut self, mut remainder: usize) -> Result<(), NonZeroUsize> {
+ const CHUNK_SIZE: usize = 32;
+
+ if remainder >= CHUNK_SIZE {
+ let mut chunks = self.iter.as_slice().array_chunks::<CHUNK_SIZE>();
+ let mut bytes_skipped: usize = 0;
+
+ while remainder > CHUNK_SIZE
+ && let Some(chunk) = chunks.next()
+ {
+ bytes_skipped += CHUNK_SIZE;
+
+ let mut start_bytes = [false; CHUNK_SIZE];
+
+ for i in 0..CHUNK_SIZE {
+ start_bytes[i] = !super::validations::utf8_is_cont_byte(chunk[i]);
+ }
+
+ remainder -= start_bytes.into_iter().map(|i| i as u8).sum::<u8>() as usize;
+ }
+
+            // SAFETY: we just iterated over these `bytes_skipped` bytes, so they
+            // exist and `advance_by` will succeed.
+ unsafe { self.iter.advance_by(bytes_skipped).unwrap_unchecked() };
+
+ // skip trailing continuation bytes
+ while self.iter.len() > 0 {
+ let b = self.iter.as_slice()[0];
+ if !super::validations::utf8_is_cont_byte(b) {
+ break;
+ }
+ // SAFETY: We just peeked at the byte, therefore it exists
+ unsafe { self.iter.advance_by(1).unwrap_unchecked() };
+ }
+ }
+
+ while (remainder > 0) && (self.iter.len() > 0) {
+ remainder -= 1;
+ let b = self.iter.as_slice()[0];
+ let slurp = super::validations::utf8_char_width(b);
+ // SAFETY: utf8 validity requires that the string must contain
+ // the continuation bytes (if any)
+ unsafe { self.iter.advance_by(slurp).unwrap_unchecked() };
+ }
+
+ NonZeroUsize::new(remainder).map_or(Ok(()), Err)
+ }
+
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.iter.len();
// `(len + 3)` can't overflow, because we know that the `slice::Iter`
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index 27178328b..a22c46edc 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -2423,6 +2423,85 @@ impl str {
me.make_ascii_lowercase()
}
+ /// Returns a string slice with leading ASCII whitespace removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// [`u8::is_ascii_whitespace`].
+ ///
+ /// [`u8::is_ascii_whitespace`]: u8::is_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!(" \t \u{3000}hello world\n".trim_ascii_start(), "\u{3000}hello world\n");
+ /// assert_eq!(" ".trim_ascii_start(), "");
+ /// assert_eq!("".trim_ascii_start(), "");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ pub const fn trim_ascii_start(&self) -> &str {
+ // SAFETY: Removing ASCII characters from a `&str` does not invalidate
+ // UTF-8.
+ unsafe { core::str::from_utf8_unchecked(self.as_bytes().trim_ascii_start()) }
+ }
+
+ /// Returns a string slice with trailing ASCII whitespace removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// [`u8::is_ascii_whitespace`].
+ ///
+ /// [`u8::is_ascii_whitespace`]: u8::is_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!("\r hello world\u{3000}\n ".trim_ascii_end(), "\r hello world\u{3000}");
+ /// assert_eq!(" ".trim_ascii_end(), "");
+ /// assert_eq!("".trim_ascii_end(), "");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ pub const fn trim_ascii_end(&self) -> &str {
+ // SAFETY: Removing ASCII characters from a `&str` does not invalidate
+ // UTF-8.
+ unsafe { core::str::from_utf8_unchecked(self.as_bytes().trim_ascii_end()) }
+ }
+
+ /// Returns a string slice with leading and trailing ASCII whitespace
+ /// removed.
+ ///
+ /// 'Whitespace' refers to the definition used by
+ /// [`u8::is_ascii_whitespace`].
+ ///
+ /// [`u8::is_ascii_whitespace`]: u8::is_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(byte_slice_trim_ascii)]
+ ///
+ /// assert_eq!("\r hello world\n ".trim_ascii(), "hello world");
+ /// assert_eq!(" ".trim_ascii(), "");
+ /// assert_eq!("".trim_ascii(), "");
+ /// ```
+ #[unstable(feature = "byte_slice_trim_ascii", issue = "94035")]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[inline]
+ pub const fn trim_ascii(&self) -> &str {
+ // SAFETY: Removing ASCII characters from a `&str` does not invalidate
+ // UTF-8.
+ unsafe { core::str::from_utf8_unchecked(self.as_bytes().trim_ascii()) }
+ }
+
/// Return an iterator that escapes each char in `self` with [`char::escape_debug`].
///
/// Note: only extended grapheme codepoints that begin the string will be
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index 701e61e66..caa54e00f 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -1740,9 +1740,9 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
debug_assert!(needle.len() > 1);
use crate::ops::BitAnd;
+ use crate::simd::cmp::SimdPartialEq;
use crate::simd::mask8x16 as Mask;
use crate::simd::u8x16 as Block;
- use crate::simd::{SimdPartialEq, ToBitMask};
let first_probe = needle[0];
let last_byte_offset = needle.len() - 1;
@@ -1765,7 +1765,7 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
};
// do a naive search if the haystack is too small to fit
- if haystack.len() < Block::LANES + last_byte_offset {
+ if haystack.len() < Block::LEN + last_byte_offset {
return Some(haystack.windows(needle.len()).any(|c| c == needle));
}
@@ -1812,7 +1812,7 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
let eq_first: Mask = a.simd_eq(first_probe);
let eq_last: Mask = b.simd_eq(second_probe);
let both = eq_first.bitand(eq_last);
- let mask = both.to_bitmask();
+ let mask = both.to_bitmask() as u16;
return mask;
};
@@ -1822,32 +1822,32 @@ fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
// The loop condition must ensure that there's enough headroom to read LANE bytes,
// and not only at the current index but also at the index shifted by block_offset
const UNROLL: usize = 4;
- while i + last_byte_offset + UNROLL * Block::LANES < haystack.len() && !result {
+ while i + last_byte_offset + UNROLL * Block::LEN < haystack.len() && !result {
let mut masks = [0u16; UNROLL];
for j in 0..UNROLL {
- masks[j] = test_chunk(i + j * Block::LANES);
+ masks[j] = test_chunk(i + j * Block::LEN);
}
for j in 0..UNROLL {
let mask = masks[j];
if mask != 0 {
- result |= check_mask(i + j * Block::LANES, mask, result);
+ result |= check_mask(i + j * Block::LEN, mask, result);
}
}
- i += UNROLL * Block::LANES;
+ i += UNROLL * Block::LEN;
}
- while i + last_byte_offset + Block::LANES < haystack.len() && !result {
+ while i + last_byte_offset + Block::LEN < haystack.len() && !result {
let mask = test_chunk(i);
if mask != 0 {
result |= check_mask(i, mask, result);
}
- i += Block::LANES;
+ i += Block::LEN;
}
// Process the tail that didn't fit into LANES-sized steps.
// This simply repeats the same procedure but as right-aligned chunk instead
// of a left-aligned one. The last byte must be exactly flush with the string end so
// we don't miss a single byte or read out of bounds.
- let i = haystack.len() - last_byte_offset - Block::LANES;
+ let i = haystack.len() - last_byte_offset - Block::LEN;
let mask = test_chunk(i);
if mask != 0 {
result |= check_mask(i, mask, result);
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
index 16fb1dad7..777ad0d81 100644
--- a/library/core/src/str/traits.rs
+++ b/library/core/src/str/traits.rs
@@ -1,8 +1,8 @@
//! Trait implementations for `str`.
use crate::cmp::Ordering;
-use crate::intrinsics::assert_unsafe_precondition;
use crate::ops;
+use crate::panic::debug_assert_nounwind;
use crate::ptr;
use crate::slice::SliceIndex;
@@ -191,39 +191,35 @@ unsafe impl SliceIndex<str> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
let slice = slice as *const [u8];
+
+ debug_assert_nounwind!(
+ // We'd like to check that the bounds are on char boundaries,
+ // but there's not really a way to do so without reading
+ // behind the pointer, which has aliasing implications.
+ // It's also not possible to move this check up to
+ // `str::get_unchecked` without adding a special function
+ // to `SliceIndex` just for this.
+ self.end >= self.start && self.end <= slice.len(),
+ "str::get_unchecked requires that the range is within the string slice",
+ );
+
// SAFETY: the caller guarantees that `self` is in bounds of `slice`
// which satisfies all the conditions for `add`.
- let ptr = unsafe {
- let this = ops::Range { ..self };
- assert_unsafe_precondition!(
- "str::get_unchecked requires that the range is within the string slice",
- (this: ops::Range<usize>, slice: *const [u8]) =>
- // We'd like to check that the bounds are on char boundaries,
- // but there's not really a way to do so without reading
- // behind the pointer, which has aliasing implications.
- // It's also not possible to move this check up to
- // `str::get_unchecked` without adding a special function
- // to `SliceIndex` just for this.
- this.end >= this.start && this.end <= slice.len()
- );
- slice.as_ptr().add(self.start)
- };
+ let ptr = unsafe { slice.as_ptr().add(self.start) };
let len = self.end - self.start;
ptr::slice_from_raw_parts(ptr, len) as *const str
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
let slice = slice as *mut [u8];
+
+ debug_assert_nounwind!(
+ self.end >= self.start && self.end <= slice.len(),
+ "str::get_unchecked_mut requires that the range is within the string slice",
+ );
+
// SAFETY: see comments for `get_unchecked`.
- let ptr = unsafe {
- let this = ops::Range { ..self };
- assert_unsafe_precondition!(
- "str::get_unchecked_mut requires that the range is within the string slice",
- (this: ops::Range<usize>, slice: *mut [u8]) =>
- this.end >= this.start && this.end <= slice.len()
- );
- slice.as_mut_ptr().add(self.start)
- };
+ let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
let len = self.end - self.start;
ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
}
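The precondition asserted here is the caller-facing safety contract of `str::get_unchecked`: the range must be in bounds, and it must lie on `char` boundaries — the part the assertion cannot check. A small sketch of correct usage on stable Rust:

```
fn main() {
    let s = "héllo"; // 'é' occupies bytes 1..3
    // SAFETY: 0..1 and 3..6 are in bounds and on char boundaries.
    let first = unsafe { s.get_unchecked(0..1) };
    let rest = unsafe { s.get_unchecked(3..6) };
    assert_eq!(first, "h");
    assert_eq!(rest, "llo");
    // A range such as 0..2 here would split 'é' and be undefined
    // behavior even though it is in bounds: the new debug assertion
    // catches out-of-bounds ranges in debug builds of core, but not
    // mid-char ones.
}
```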
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
index 0a0f702f6..bfa1cf096 100644
--- a/library/core/src/task/poll.rs
+++ b/library/core/src/task/poll.rs
@@ -2,7 +2,6 @@
use crate::convert;
use crate::ops::{self, ControlFlow};
-use crate::result::Result;
/// Indicates whether a value is available or if the current task has been
/// scheduled to receive a wakeup instead.
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
index 817e39942..9c41b8b4f 100644
--- a/library/core/src/task/wake.rs
+++ b/library/core/src/task/wake.rs
@@ -1,7 +1,7 @@
#![stable(feature = "futures_api", since = "1.36.0")]
use crate::fmt;
-use crate::marker::{PhantomData, Unpin};
+use crate::marker::PhantomData;
use crate::ptr;
/// A `RawWaker` allows the implementor of a task executor to create a [`Waker`]
@@ -48,7 +48,7 @@ impl RawWaker {
/// Get the `data` pointer used to create this `RawWaker`.
#[inline]
#[must_use]
- #[unstable(feature = "waker_getters", issue = "87021")]
+ #[unstable(feature = "waker_getters", issue = "96992")]
pub fn data(&self) -> *const () {
self.data
}
@@ -56,7 +56,7 @@ impl RawWaker {
/// Get the `vtable` pointer used to create this `RawWaker`.
#[inline]
#[must_use]
- #[unstable(feature = "waker_getters", issue = "87021")]
+ #[unstable(feature = "waker_getters", issue = "96992")]
pub fn vtable(&self) -> &'static RawWakerVTable {
self.vtable
}
@@ -371,7 +371,7 @@ impl Waker {
/// Get a reference to the underlying [`RawWaker`].
#[inline]
#[must_use]
- #[unstable(feature = "waker_getters", issue = "87021")]
+ #[unstable(feature = "waker_getters", issue = "96992")]
pub fn as_raw(&self) -> &RawWaker {
&self.waker
}
diff --git a/library/core/src/time.rs b/library/core/src/time.rs
index 6ef35d841..b67777644 100644
--- a/library/core/src/time.rs
+++ b/library/core/src/time.rs
@@ -461,6 +461,27 @@ impl Duration {
self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos.0 as u128
}
+ /// Computes the absolute difference between `self` and `other`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(duration_abs_diff)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(100, 0).abs_diff(Duration::new(80, 0)), Duration::new(20, 0));
+ /// assert_eq!(Duration::new(100, 400_000_000).abs_diff(Duration::new(110, 0)), Duration::new(9, 600_000_000));
+ /// ```
+ #[unstable(feature = "duration_abs_diff", issue = "117618")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn abs_diff(self, other: Duration) -> Duration {
+ if let Some(res) = self.checked_sub(other) { res } else { other.checked_sub(self).unwrap() }
+ }
+
/// Checked `Duration` addition. Computes `self + other`, returning [`None`]
/// if overflow occurred.
///
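The implementation avoids the panic that plain `Duration` subtraction would hit on underflow: one of the two `checked_sub` calls must succeed, so the `unwrap` is infallible. A stable-Rust sketch of the same pattern, with an illustrative free function in place of the unstable method:

```
use std::time::Duration;

// Equivalent of the new `abs_diff`: whichever operand is larger,
// exactly one direction of checked subtraction succeeds.
fn duration_abs_diff(a: Duration, b: Duration) -> Duration {
    a.checked_sub(b).unwrap_or_else(|| b - a)
}

fn main() {
    let a = Duration::new(100, 400_000_000);
    let b = Duration::new(110, 0);
    assert_eq!(duration_abs_diff(a, b), Duration::new(9, 600_000_000));
    assert_eq!(duration_abs_diff(a, b), duration_abs_diff(b, a)); // symmetric
}
```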
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index ff292ff2d..3689312e6 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -8,6 +8,7 @@ use crate::marker::{StructuralEq, StructuralPartialEq};
//
// Also provides implementations for tuples with lesser arity. For example, tuple_impls!(A B C)
// will implement everything for (A, B, C), (A, B) and (A,).
+#[cfg(bootstrap)]
macro_rules! tuple_impls {
// Stopping criteria (1-ary tuple)
($T:ident) => {
@@ -50,22 +51,19 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[unstable(feature = "structural_match", issue = "31434")]
- impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+)
- {}
+ impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+) {}
}
maybe_tuple_doc! {
$($T)+ @
#[unstable(feature = "structural_match", issue = "31434")]
- impl<$($T),+> StructuralPartialEq for ($($T,)+)
- {}
+ impl<$($T),+> StructuralPartialEq for ($($T,)+) {}
}
maybe_tuple_doc! {
$($T)+ @
#[unstable(feature = "structural_match", issue = "31434")]
- impl<$($T),+> StructuralEq for ($($T,)+)
- {}
+ impl<$($T),+> StructuralEq for ($($T,)+) {}
}
maybe_tuple_doc! {
@@ -118,7 +116,7 @@ macro_rules! tuple_impls {
impl<$($T: Default),+> Default for ($($T,)+) {
#[inline]
fn default() -> ($($T,)+) {
- ($({ let x: $T = Default::default(); x},)+)
+ ($($T::default(),)+)
}
}
}
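The `Default` cleanup replaces a block that bound a temporary (so that inference would pick `$T`) with a direct trait-method call. For a pair, both arms boil down to the same thing; the hypothetical `default_pair` below shows the shape of the new expansion:

```
// Roughly what the new macro arm produces for a 2-tuple.
fn default_pair<A: Default, B: Default>() -> (A, B) {
    (A::default(), B::default())
}

fn main() {
    let p: (u8, String) = default_pair();
    assert_eq!(p, (0, String::new()));
}
```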
@@ -145,6 +143,148 @@ macro_rules! tuple_impls {
}
}
+// Recursive macro for implementing n-ary tuple functions and operations
+//
+// Also provides implementations for tuples with lesser arity. For example, tuple_impls!(A B C)
+// will implement everything for (A, B, C), (A, B) and (A,).
+#[cfg(not(bootstrap))]
+macro_rules! tuple_impls {
+ // Stopping criteria (1-ary tuple)
+ ($T:ident) => {
+ tuple_impls!(@impl $T);
+ };
+ // Running criteria (n-ary tuple, with n >= 2)
+ ($T:ident $( $U:ident )+) => {
+ tuple_impls!($( $U )+);
+ tuple_impls!(@impl $T $( $U )+);
+ };
+ // "Private" internal implementation
+ (@impl $( $T:ident )+) => {
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: PartialEq),+> PartialEq for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn eq(&self, other: &($($T,)+)) -> bool {
+ $( ${ignore($T)} self.${index()} == other.${index()} )&&+
+ }
+ #[inline]
+ fn ne(&self, other: &($($T,)+)) -> bool {
+ $( ${ignore($T)} self.${index()} != other.${index()} )||+
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: Eq),+> Eq for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T),+> StructuralPartialEq for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T),+> StructuralEq for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: PartialOrd),+> PartialOrd for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn partial_cmp(&self, other: &($($T,)+)) -> Option<Ordering> {
+ lexical_partial_cmp!($( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn lt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(lt, Less, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn le(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(le, Less, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn ge(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(ge, Greater, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ #[inline]
+ fn gt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(gt, Greater, $( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: Ord),+> Ord for ($($T,)+)
+ where
+ last_type!($($T,)+): ?Sized
+ {
+ #[inline]
+ fn cmp(&self, other: &($($T,)+)) -> Ordering {
+ lexical_cmp!($( ${ignore($T)} self.${index()}, other.${index()} ),+)
+ }
+ }
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T: Default),+> Default for ($($T,)+) {
+ #[inline]
+ fn default() -> ($($T,)+) {
+ ($({ let x: $T = Default::default(); x},)+)
+ }
+ }
+ }
+
+ #[stable(feature = "array_tuple_conv", since = "1.71.0")]
+ impl<T> From<[T; ${count($T)}]> for ($(${ignore($T)} T,)+) {
+ #[inline]
+ #[allow(non_snake_case)]
+ fn from(array: [T; ${count($T)}]) -> Self {
+ let [$($T,)+] = array;
+ ($($T,)+)
+ }
+ }
+
+ #[stable(feature = "array_tuple_conv", since = "1.71.0")]
+ impl<T> From<($(${ignore($T)} T,)+)> for [T; ${count($T)}] {
+ #[inline]
+ #[allow(non_snake_case)]
+ fn from(tuple: ($(${ignore($T)} T,)+)) -> Self {
+ let ($($T,)+) = tuple;
+ [$($T,)+]
+ }
+ }
+ }
+}
+
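The new macro bodies lean on `${index()}`, `${count()}` and `${ignore()}` from the unstable macro_metavar_expr feature, but the peeling recursion described in the comment works on stable Rust. A minimal sketch with an illustrative `Arity` trait (not part of core): each invocation recurses on the tail first, so `arity_impls!(A B C)` covers `(A, B, C)`, `(A, B)` and `(A,)`.

```
trait Arity {
    const ARITY: usize;
}

macro_rules! arity_impls {
    // Stopping criteria (1-ary tuple)
    ($T:ident) => { arity_impls!(@impl $T); };
    // Running criteria (n-ary tuple, with n >= 2)
    ($T:ident $($U:ident)+) => {
        arity_impls!($($U)+);
        arity_impls!(@impl $T $($U)+);
    };
    // "Private" internal implementation
    (@impl $($T:ident)+) => {
        impl<$($T),+> Arity for ($($T,)+) {
            const ARITY: usize = [$(stringify!($T)),+].len();
        }
    };
}

arity_impls!(A B C);

fn main() {
    assert_eq!(<(u8,)>::ARITY, 1);
    assert_eq!(<(u8, u16)>::ARITY, 2);
    assert_eq!(<(u8, u16, u32)>::ARITY, 3);
}
```

The `From` impls added at the end of the macro are the array/tuple conversions stabilized in 1.71, usable as `let t: (i32, i32, i32) = [1, 2, 3].into();` or `let a: [i32; 3] = (1, 2, 3).into();`.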
// If this is a unary tuple, it adds a doc comment.
// Otherwise, it hides the docs entirely.
macro_rules! maybe_tuple_doc {
@@ -196,7 +336,7 @@ macro_rules! lexical_partial_cmp {
($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
match ($a).partial_cmp(&$b) {
Some(Equal) => lexical_partial_cmp!($($rest_a, $rest_b),+),
- ordering => ordering
+ ordering => ordering
}
};
($a:expr, $b:expr) => { ($a).partial_cmp(&$b) };
@@ -206,7 +346,7 @@ macro_rules! lexical_cmp {
($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
match ($a).cmp(&$b) {
Equal => lexical_cmp!($($rest_a, $rest_b),+),
- ordering => ordering
+ ordering => ordering
}
};
($a:expr, $b:expr) => { ($a).cmp(&$b) };