author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
commit    9918693037dce8aa4bb6f08741b6812923486c18 (patch)
tree      21d2b40bec7e6a7ea664acee056eb3d08e15a1cf /library/core/src/ptr
parent    Releasing progress-linux version 1.75.0+dfsg1-5~progress7.99u1. (diff)
Merging upstream version 1.76.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core/src/ptr')
-rw-r--r--  library/core/src/ptr/alignment.rs     57
-rw-r--r--  library/core/src/ptr/const_ptr.rs     59
-rw-r--r--  library/core/src/ptr/mod.rs          110
-rw-r--r--  library/core/src/ptr/mut_ptr.rs       50
-rw-r--r--  library/core/src/ptr/non_null.rs    1049
-rw-r--r--  library/core/src/ptr/unique.rs         1
6 files changed, 1218 insertions, 108 deletions
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index bbf7199ff..ce176e6fc 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -1,5 +1,4 @@
use crate::convert::{TryFrom, TryInto};
-use crate::intrinsics::assert_unsafe_precondition;
use crate::num::NonZeroUsize;
use crate::{cmp, fmt, hash, mem, num};
@@ -42,6 +41,7 @@ impl Alignment {
/// This provides the same numerical value as [`mem::align_of`],
/// but in an `Alignment` instead of a `usize`.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn of<T>() -> Self {
// SAFETY: rustc ensures that type alignment is always a power of two.
@@ -53,6 +53,7 @@ impl Alignment {
///
/// Note that `0` is not a power of two, nor a valid alignment.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn new(align: usize) -> Option<Self> {
if align.is_power_of_two() {
@@ -75,13 +76,10 @@ impl Alignment {
#[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const unsafe fn new_unchecked(align: usize) -> Self {
- // SAFETY: Precondition passed to the caller.
- unsafe {
- assert_unsafe_precondition!(
- "Alignment::new_unchecked requires a power of two",
- (align: usize) => align.is_power_of_two()
- )
- };
+ crate::panic::debug_assert_nounwind!(
+ align.is_power_of_two(),
+ "Alignment::new_unchecked requires a power of two"
+ );
// SAFETY: By precondition, this must be a power of two, and
// our variants encompass all possible powers of two.
@@ -98,6 +96,7 @@ impl Alignment {
/// Returns the alignment as a [`NonZeroUsize`]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
pub const fn as_nonzero(self) -> NonZeroUsize {
// SAFETY: All the discriminants are non-zero.
@@ -118,10 +117,42 @@ impl Alignment {
/// assert_eq!(Alignment::new(1024).unwrap().log2(), 10);
/// ```
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
#[inline]
- pub fn log2(self) -> u32 {
+ pub const fn log2(self) -> u32 {
self.as_nonzero().trailing_zeros()
}
+
+ /// Returns a bit mask that can be used to match this alignment.
+ ///
+ /// This is equivalent to `!(self.as_usize() - 1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_alignment_type)]
+ /// #![feature(ptr_mask)]
+ /// use std::ptr::{Alignment, NonNull};
+ ///
+ /// #[repr(align(1))] struct Align1(u8);
+ /// #[repr(align(2))] struct Align2(u16);
+ /// #[repr(align(4))] struct Align4(u32);
+ /// let one = <NonNull<Align1>>::dangling().as_ptr();
+ /// let two = <NonNull<Align2>>::dangling().as_ptr();
+ /// let four = <NonNull<Align4>>::dangling().as_ptr();
+ ///
+ /// assert_eq!(four.mask(Alignment::of::<Align1>().mask()), four);
+ /// assert_eq!(four.mask(Alignment::of::<Align2>().mask()), four);
+ /// assert_eq!(four.mask(Alignment::of::<Align4>().mask()), four);
+ /// assert_ne!(one.mask(Alignment::of::<Align4>().mask()), one);
+ /// ```
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn mask(self) -> usize {
+ // SAFETY: The alignment is always nonzero, and therefore decrementing won't overflow.
+ !(unsafe { self.as_usize().unchecked_sub(1) })
+ }
}
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
@@ -193,6 +224,14 @@ impl hash::Hash for Alignment {
}
}
+/// Returns [`Alignment::MIN`], which is valid for any type.
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl Default for Alignment {
+ fn default() -> Alignment {
+ Alignment::MIN
+ }
+}
+
#[cfg(target_pointer_width = "16")]
type AlignmentEnum = AlignmentEnum16;
#[cfg(target_pointer_width = "32")]
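Taken together, the `alignment.rs` changes const-ify `Alignment::of`, `Alignment::new`, `as_nonzero`, and `log2`, add a const `mask` method, and provide a `Default` impl returning `Alignment::MIN`. A minimal sketch of the resulting API, assuming a nightly toolchain carrying this version; the `#[repr(align(8))]` type is purely illustrative:

```rust
#![feature(ptr_alignment_type)]
use std::ptr::Alignment;

#[repr(align(8))]
struct Aligned8(u8);

fn main() {
    // `of` is now const-callable, so the alignment can live in a `const`.
    const A: Alignment = Alignment::of::<Aligned8>();
    assert_eq!(A.log2(), 3);       // 8 == 2^3
    assert_eq!(A.mask(), !7usize); // !(align - 1)
    // The new `Default` impl returns `Alignment::MIN`, valid for any type.
    assert_eq!(Alignment::default(), Alignment::MIN);
}
```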
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 36685f756..a444c30c7 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,7 +1,7 @@
use super::*;
-use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics::{self, const_eval_select};
-use crate::mem::{self, SizedTypeProperties};
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::intrinsics::const_eval_select;
+use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *const T {
@@ -186,10 +186,10 @@ impl<T: ?Sized> *const T {
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
- /// required provenance, use [`expose_addr`][pointer::expose_addr] and
- /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes
- /// your code less portable and less amenable to tools that check for compliance with the Rust
- /// memory model.
+ /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
+ /// or [`expose_addr`][pointer::expose_addr] and [`from_exposed_addr`][from_exposed_addr]
+ /// instead. However, note that this makes your code less portable and less amenable to tools
+ /// that check for compliance with the Rust memory model.
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
@@ -219,7 +219,8 @@ impl<T: ?Sized> *const T {
/// later call [`from_exposed_addr`][] to reconstitute the original pointer including its
/// provenance. (Reconstructing address space information, if required, is your responsibility.)
///
- /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// Using this method means that code is *not* following [Strict
+ /// Provenance][../index.html#strict-provenance] rules. Supporting
/// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`addr`][pointer::addr] wherever possible.
@@ -230,13 +231,13 @@ impl<T: ?Sized> *const T {
/// side-effect which is required for [`from_exposed_addr`][] to work is typically not
/// available.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
- /// [module documentation][crate::ptr] for details.
+ /// It is unclear whether this method can be given a satisfying unambiguous specification. This
+ /// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
///
/// [`from_exposed_addr`]: from_exposed_addr
#[must_use]
#[inline(always)]
- #[unstable(feature = "strict_provenance", issue = "95228")]
+ #[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
@@ -1367,10 +1368,16 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(self, align) }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ let ret = unsafe { align_offset(self, align) };
+
+ // Inform Miri that we want to consider the resulting pointer to be suitably aligned.
+ #[cfg(miri)]
+ if ret != usize::MAX {
+ intrinsics::miri_promise_symbolic_alignment(self.wrapping_add(ret).cast(), align);
}
+
+ ret
}
/// Returns whether the pointer is properly aligned for `T`.
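For context, the caller-side pattern that this Miri hook blesses is the usual `align_offset` dance, available on stable Rust; a sketch:

```rust
fn main() {
    let buf = [0u8; 32];
    let p = buf.as_ptr();
    // `align_offset` returns how many elements to advance to reach the
    // requested alignment, or `usize::MAX` if that cannot be computed.
    let off = p.align_offset(8);
    if off != usize::MAX {
        let aligned = p.wrapping_add(off);
        // This is exactly the pointer the new code promises to Miri is aligned.
        assert_eq!(aligned as usize % 8, 0);
    }
}
```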
@@ -1644,6 +1651,24 @@ impl<T> *const [T] {
metadata(self)
}
+ /// Returns `true` if the raw slice has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_len)]
+ /// use std::ptr;
+ ///
+ /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
+ /// assert!(!slice.is_empty());
+ /// ```
+ #[inline(always)]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn is_empty(self) -> bool {
+ self.len() == 0
+ }
+
/// Returns a raw pointer to the slice's buffer.
///
/// This is equivalent to casting `self` to `*const T`, but more type-safe.
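The new raw-slice `is_empty` inspects only the length metadata and never dereferences, which is why a null data pointer is fine in the doc example above. A sketch, assuming a nightly toolchain with `slice_ptr_len`:

```rust
#![feature(slice_ptr_len)]
use std::ptr;

fn main() {
    let empty: *const [u8] = ptr::slice_from_raw_parts(ptr::null(), 0);
    let full: *const [u8] = ptr::slice_from_raw_parts(ptr::null(), 3);
    // Only the metadata is consulted; the data pointer is never read through.
    assert!(empty.is_empty());
    assert!(!full.is_empty());
}
```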
@@ -1747,6 +1772,7 @@ impl<T> *const [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &*const T) -> bool {
*self == *other
}
@@ -1759,6 +1785,7 @@ impl<T: ?Sized> Eq for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
@@ -1778,21 +1805,25 @@ impl<T: ?Sized> PartialOrd for *const T {
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn lt(&self, other: &*const T) -> bool {
*self < *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn le(&self, other: &*const T) -> bool {
*self <= *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn gt(&self, other: &*const T) -> bool {
*self > *other
}
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn ge(&self, other: &*const T) -> bool {
*self >= *other
}
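The `ambiguous_wide_pointer_comparisons` allows are needed because `==` on wide pointers compares both the data pointer and the metadata (e.g. the vtable pointer), and vtables may be duplicated or unified across codegen units. Code that only cares about the address can use `ptr::addr_eq`, stabilized in the `mod.rs` hunks below; a sketch:

```rust
use std::fmt::Debug;
use std::ptr;

fn main() {
    let x = 5i32;
    let a: *const dyn Debug = &x;
    let b: *const dyn Debug = &x;
    // `addr_eq` compares addresses only, sidestepping the vtable ambiguity
    // that a full wide-pointer `==` would carry.
    assert!(ptr::addr_eq(a, b));
}
```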
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index d71079dd0..390e07371 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -312,22 +312,30 @@
//! For instance, ARM explicitly supports high-bit tagging, and so CHERI on ARM inherits
//! that and should support it.
//!
-//! ## Pointer-usize-pointer roundtrips and 'exposed' provenance
+//! ## Exposed Provenance
//!
-//! **This section is *non-normative* and is part of the [Strict Provenance] experiment.**
+//! **This section is *non-normative* and is an extension to the [Strict Provenance] experiment.**
//!
//! As discussed above, pointer-usize-pointer roundtrips are not possible under [Strict Provenance].
-//! However, there exists legacy Rust code that is full of such roundtrips, and legacy platform APIs
-//! regularly assume that `usize` can capture all the information that makes up a pointer. There
-//! also might be code that cannot be ported to Strict Provenance (which is something we would [like
-//! to hear about][Strict Provenance]).
-//!
-//! For situations like this, there is a fallback plan, a way to 'opt out' of Strict Provenance.
-//! However, note that this makes your code a lot harder to specify, and the code will not work
-//! (well) with tools like [Miri] and [CHERI].
-//!
-//! This fallback plan is provided by the [`expose_addr`] and [`from_exposed_addr`] methods (which
-//! are equivalent to `as` casts between pointers and integers). [`expose_addr`] is a lot like
+//! This is by design: the goal of Strict Provenance is to provide a clear specification that we are
+//! confident can be formalized unambiguously and can be subject to precise formal reasoning.
+//!
+//! However, there exist situations where pointer-usize-pointer roundtrips cannot be avoided, or
+//! where avoiding them would require major refactoring. Legacy platform APIs also regularly assume
+//! that `usize` can capture all the information that makes up a pointer. The goal of Strict
+//! Provenance is not to rule out such code; the goal is to put all the *other* pointer-manipulating
+//! code onto a more solid foundation. Strict Provenance is about improving the situation where
+//! possible (all the code that can be written with Strict Provenance) without making things worse
+//! for situations where Strict Provenance is insufficient.
+//!
+//! For these situations, there is a highly experimental extension to Strict Provenance called
+//! *Exposed Provenance*. This extension permits pointer-usize-pointer roundtrips. However, its
+//! semantics are on much less solid footing than Strict Provenance, and at this point it is not yet
+//! clear whether a satisfying unambiguous semantics can be defined for Exposed Provenance.
+//! Furthermore, Exposed Provenance will not work (well) with tools like [Miri] and [CHERI].
+//!
+//! Exposed Provenance is provided by the [`expose_addr`] and [`from_exposed_addr`] methods, which
+//! are meant to replace `as` casts between pointers and integers. [`expose_addr`] is a lot like
//! [`addr`], but additionally adds the provenance of the pointer to a global list of 'exposed'
//! provenances. (This list is purely conceptual, it exists for the purpose of specifying Rust but
//! is not materialized in actual executions, except in tools like [Miri].) [`from_exposed_addr`]
@@ -341,10 +349,11 @@
//! there is *no* previously 'exposed' provenance that justifies the way the returned pointer will
//! be used, the program has undefined behavior.
//!
-//! Using [`expose_addr`] or [`from_exposed_addr`] (or the equivalent `as` casts) means that code is
+//! Using [`expose_addr`] or [`from_exposed_addr`] (or the `as` casts) means that code is
//! *not* following Strict Provenance rules. The goal of the Strict Provenance experiment is to
-//! determine whether it is possible to use Rust without [`expose_addr`] and [`from_exposed_addr`].
-//! If this is successful, it would be a major win for avoiding specification complexity and to
+//! determine how far one can get in Rust without the use of [`expose_addr`] and
+//! [`from_exposed_addr`], and to encourage code to be written with Strict Provenance APIs only.
+//! Maximizing the amount of such code is a major win for avoiding specification complexity and to
//! facilitate adoption of tools like [CHERI] and [Miri] that can be a big help in increasing the
//! confidence in (unsafe) Rust code.
//!
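As a sketch of the round-trip that Exposed Provenance permits (on a nightly toolchain where, after this change, the methods sit behind the `exposed_provenance` feature):

```rust
#![feature(exposed_provenance)]
use std::ptr;

fn main() {
    let x = 42u8;
    let p: *const u8 = &x;
    // `expose_addr` returns the address *and* marks the provenance as exposed.
    let addr: usize = p.expose_addr();
    // `from_exposed_addr` may pick up any previously exposed provenance,
    // so reading through `q` is justified by the exposure above.
    let q: *const u8 = ptr::from_exposed_addr(addr);
    assert_eq!(unsafe { *q }, 42);
}
```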
@@ -619,12 +628,12 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// Convert an address back to a pointer, picking up a previously 'exposed' provenance.
///
-/// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously exposed by passing it to [`expose_addr`][pointer::expose_addr],
-/// or a `ptr as usize` cast. In addition, memory which is outside the control of the Rust abstract
-/// machine (MMIO registers, for example) is always considered to be exposed, so long as this memory
-/// is disjoint from memory that will be used by the abstract machine such as the stack, heap,
-/// and statics.
+/// This is a more rigorously specified alternative to `addr as *const T`. The provenance of the
+/// returned pointer is that of *any* pointer that was previously exposed by passing it to
+/// [`expose_addr`][pointer::expose_addr], or a `ptr as usize` cast. In addition, memory which is
+/// outside the control of the Rust abstract machine (MMIO registers, for example) is always
+/// considered to be exposed, so long as this memory is disjoint from memory that will be used by
+/// the abstract machine such as the stack, heap, and statics.
///
/// If there is no 'exposed' provenance that justifies the way this pointer will be used,
/// the program has undefined behavior. In particular, the aliasing rules still apply: pointers
@@ -639,7 +648,8 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
///
-/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// Using this function means that code is *not* following [Strict
+/// Provenance][../index.html#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible.
@@ -649,13 +659,13 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// since it is generally not possible to actually *compute* which provenance the returned
/// pointer has to pick up.
///
-/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
-/// [module documentation][crate::ptr] for details.
+/// It is unclear whether this function can be given a satisfying unambiguous specification. This
+/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
#[must_use]
#[inline(always)]
-#[unstable(feature = "strict_provenance", issue = "95228")]
+#[unstable(feature = "exposed_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
+#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
@@ -666,18 +676,20 @@ where
/// Convert an address back to a mutable pointer, picking up a previously 'exposed' provenance.
///
-/// This is equivalent to `addr as *mut T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize`
-/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be
-/// used, the program has undefined behavior. Note that there is no algorithm that decides which
-/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess
-/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined
-/// behavior, then that is the guess that will be taken.
+/// This is a more rigorously specified alternative to `addr as *mut T`. The provenance of the
+/// returned pointer is that of *any* pointer that was previously passed to
+/// [`expose_addr`][pointer::expose_addr] or a `ptr as usize` cast. If there is no previously
+/// 'exposed' provenance that justifies the way this pointer will be used, the program has undefined
+/// behavior. Note that there is no algorithm that decides which provenance will be used. You can
+/// think of this as "guessing" the right provenance, and the guess will be "maximally in your
+/// favor", in the sense that if there is any way to avoid undefined behavior, then that is the
+/// guess that will be taken.
///
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
///
-/// Using this method means that code is *not* following strict provenance rules. "Guessing" a
+/// Using this function means that code is *not* following [Strict
+/// Provenance][../index.html#strict-provenance] rules. "Guessing" a
/// suitable provenance complicates specification and reasoning and may not be supported by
/// tools that help you to stay conformant with the Rust memory model, so it is recommended to
/// use [`with_addr`][pointer::with_addr] wherever possible.
@@ -687,13 +699,13 @@ where
/// since it is generally not possible to actually *compute* which provenance the returned
/// pointer has to pick up.
///
-/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
-/// [module documentation][crate::ptr] for details.
+/// It is unclear whether this function can be given a satisfying unambiguous specification. This
+/// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
#[must_use]
#[inline(always)]
-#[unstable(feature = "strict_provenance", issue = "95228")]
+#[unstable(feature = "exposed_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
+#[allow(fuzzy_provenance_casts)] // this *is* the explicit provenance API one should use instead
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -708,7 +720,8 @@ where
/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_const_stable(feature = "ptr_from_ref", since = "1.76.0")]
#[rustc_never_returns_null_ptr]
#[rustc_diagnostic_item = "ptr_from_ref"]
pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
@@ -721,7 +734,9 @@ pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_const_stable(feature = "ptr_from_ref", since = "1.76.0")]
+#[rustc_allow_const_fn_unstable(const_mut_refs)]
#[rustc_never_returns_null_ptr]
pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
r
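With this stabilization, raw pointers can be obtained on stable 1.76 without `as` casts, keeping the intended type and mutability explicit through refactorings; a small sketch:

```rust
use std::ptr;

fn main() {
    let mut x = 7u32;
    let q: *mut u32 = ptr::from_mut(&mut x); // was `&mut x as *mut u32`
    unsafe { *q += 1 };
    let p: *const u32 = ptr::from_ref(&x);   // was `&x as *const u32`
    assert_eq!(unsafe { p.read() }, 8);
}
```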
@@ -1885,6 +1900,7 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
#[inline(always)]
#[must_use = "pointer comparison produces a value"]
#[rustc_diagnostic_item = "ptr_eq"]
+#[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))] // it's actually clear here
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
}
@@ -1898,14 +1914,15 @@ pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
/// # Examples
///
/// ```
-/// #![feature(ptr_addr_eq)]
+/// use std::ptr;
///
/// let whole: &[i32; 3] = &[1, 2, 3];
/// let first: &i32 = &whole[0];
-/// assert!(std::ptr::addr_eq(whole, first));
-/// assert!(!std::ptr::eq::<dyn std::fmt::Debug>(whole, first));
+///
+/// assert!(ptr::addr_eq(whole, first));
+/// assert!(!ptr::eq::<dyn std::fmt::Debug>(whole, first));
/// ```
-#[unstable(feature = "ptr_addr_eq", issue = "116324")]
+#[stable(feature = "ptr_addr_eq", since = "1.76.0")]
#[inline(always)]
#[must_use = "pointer comparison produces a value"]
pub fn addr_eq<T: ?Sized, U: ?Sized>(p: *const T, q: *const U) -> bool {
@@ -1921,8 +1938,7 @@ pub fn addr_eq<T: ?Sized, U: ?Sized>(p: *const T, q: *const U) -> bool {
/// # Examples
///
/// ```
-/// use std::collections::hash_map::DefaultHasher;
-/// use std::hash::{Hash, Hasher};
+/// use std::hash::{DefaultHasher, Hash, Hasher};
/// use std::ptr;
///
/// let five = 5;
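The import change reflects that `DefaultHasher` is re-exported from `std::hash` as of 1.76. For reference, `ptr::hash` hashes the address, matching `Hash` on the raw pointer itself; a sketch:

```rust
use std::hash::{DefaultHasher, Hash, Hasher};
use std::ptr;

fn main() {
    let five = 5i32;
    let p: *const i32 = &five;

    let mut h1 = DefaultHasher::new();
    ptr::hash(p, &mut h1);
    let mut h2 = DefaultHasher::new();
    p.hash(&mut h2);

    // Hashing through `ptr::hash` and hashing the pointer value agree.
    assert_eq!(h1.finish(), h2.finish());
}
```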
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index bc362fb62..9e7b8ec64 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1,6 +1,6 @@
use super::*;
-use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics::{self, const_eval_select};
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::intrinsics::const_eval_select;
use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};
@@ -193,10 +193,10 @@ impl<T: ?Sized> *mut T {
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
- /// required provenance, use [`expose_addr`][pointer::expose_addr] and
- /// [`from_exposed_addr_mut`][from_exposed_addr_mut] instead. However, note that this makes
- /// your code less portable and less amenable to tools that check for compliance with the Rust
- /// memory model.
+ /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
+ /// or [`expose_addr`][pointer::expose_addr] and [`from_exposed_addr_mut`][from_exposed_addr_mut]
+ /// instead. However, note that this makes your code less portable and less amenable to tools
+ /// that check for compliance with the Rust memory model.
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
@@ -226,7 +226,8 @@ impl<T: ?Sized> *mut T {
/// later call [`from_exposed_addr_mut`][] to reconstitute the original pointer including its
/// provenance. (Reconstructing address space information, if required, is your responsibility.)
///
- /// Using this method means that code is *not* following Strict Provenance rules. Supporting
+ /// Using this method means that code is *not* following [Strict
+ /// Provenance][../index.html#strict-provenance] rules. Supporting
/// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported
/// by tools that help you to stay conformant with the Rust memory model, so it is recommended
/// to use [`addr`][pointer::addr] wherever possible.
@@ -237,13 +238,13 @@ impl<T: ?Sized> *mut T {
/// side-effect which is required for [`from_exposed_addr_mut`][] to work is typically not
/// available.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment, see the
- /// [module documentation][crate::ptr] for details.
+ /// It is unclear whether this method can be given a satisfying unambiguous specification. This
+ /// API and its claimed semantics are part of [Exposed Provenance][../index.html#exposed-provenance].
///
/// [`from_exposed_addr_mut`]: from_exposed_addr_mut
#[must_use]
#[inline(always)]
- #[unstable(feature = "strict_provenance", issue = "95228")]
+ #[unstable(feature = "exposed_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self.cast::<()>() as usize
@@ -259,7 +260,7 @@ impl<T: ?Sized> *mut T {
/// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
/// `self` to the given address, and therefore has all the same capabilities and restrictions.
///
- /// This API and its claimed semantics are part of the Strict Provenance experiment,
+ /// This API and its claimed semantics are an extension to the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
@@ -1634,10 +1635,19 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(self, align) }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ let ret = unsafe { align_offset(self, align) };
+
+ // Inform Miri that we want to consider the resulting pointer to be suitably aligned.
+ #[cfg(miri)]
+ if ret != usize::MAX {
+ intrinsics::miri_promise_symbolic_alignment(
+ self.wrapping_add(ret).cast_const().cast(),
+ align,
+ );
}
+
+ ret
}
/// Returns whether the pointer is properly aligned for `T`.
@@ -1920,10 +1930,10 @@ impl<T> *mut [T] {
///
/// ```
/// #![feature(slice_ptr_len)]
+ /// use std::ptr;
///
- /// let mut a = [1, 2, 3];
- /// let ptr = &mut a as *mut [_];
- /// assert!(!ptr.is_empty());
+ /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
+ /// assert!(!slice.is_empty());
/// ```
#[inline(always)]
#[unstable(feature = "slice_ptr_len", issue = "71146")]
@@ -2189,6 +2199,7 @@ impl<T> *mut [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &*mut T) -> bool {
*self == *other
}
@@ -2200,6 +2211,7 @@ impl<T: ?Sized> Eq for *mut T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *mut T {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn cmp(&self, other: &*mut T) -> Ordering {
if self < other {
Less
@@ -2219,21 +2231,25 @@ impl<T: ?Sized> PartialOrd for *mut T {
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn lt(&self, other: &*mut T) -> bool {
*self < *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn le(&self, other: &*mut T) -> bool {
*self <= *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn gt(&self, other: &*mut T) -> bool {
*self > *other
}
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn ge(&self, other: &*mut T) -> bool {
*self >= *other
}
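These docs steer code toward `with_addr`/`map_addr` where possible; a sketch of provenance-preserving pointer tagging via `map_addr`, assuming a nightly toolchain with `strict_provenance`:

```rust
#![feature(strict_provenance)]

fn main() {
    let mut x = 0u64;
    let p: *mut u64 = &mut x;
    // Tag the low bit (free because `u64` is 8-aligned) without a bare
    // usize -> pointer cast, so the pointer keeps its provenance.
    let tagged = p.map_addr(|a| a | 1);
    let untagged = tagged.map_addr(|a| a & !1);
    unsafe { *untagged = 7 };
    assert_eq!(x, 7);
}
```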
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index ae673b779..77961506e 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -1,12 +1,14 @@
use crate::cmp::Ordering;
-use crate::convert::From;
use crate::fmt;
use crate::hash;
+use crate::intrinsics;
use crate::intrinsics::assert_unsafe_precondition;
use crate::marker::Unsize;
+use crate::mem::SizedTypeProperties;
use crate::mem::{self, MaybeUninit};
use crate::num::NonZeroUsize;
use crate::ops::{CoerceUnsized, DispatchFromDyn};
+use crate::ptr;
use crate::ptr::Unique;
use crate::slice::{self, SliceIndex};
@@ -471,41 +473,1047 @@ impl<T: ?Sized> NonNull<T> {
unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) }
}
- /// See [`pointer::add`] for semantics and safety requirements.
+ /// Calculates the offset from a pointer.
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut s = [1, 2, 3];
+ /// let ptr: NonNull<u32> = NonNull::new(s.as_mut_ptr()).unwrap();
+ ///
+ /// unsafe {
+ /// println!("{}", ptr.offset(1).read());
+ /// println!("{}", ptr.offset(2).read());
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset(self, count: isize) -> NonNull<T>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Additionally safety contract of `offset` guarantees that the resulting pointer is
+ // pointing to an allocation, there can't be an allocation at null, thus it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } }
+ }
+
+ /// Calculates the offset from a pointer in bytes.
+ ///
+ /// `count` is in units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [offset][pointer::offset] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset(self, count: isize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `offset` and `byte_offset` has
+ // the same safety contract.
+ // Additionally safety contract of `offset` guarantees that the resulting pointer is
+ // pointing to an allocation, there can't be an allocation at null, thus it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_offset(count) } }
+ }
+
+ /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let s: &str = "123";
+ /// let ptr: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap();
+ ///
+ /// unsafe {
+ /// println!("{}", ptr.add(1).read() as char);
+ /// println!("{}", ptr.add(2).read() as char);
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Additionally safety contract of `offset` guarantees that the resulting pointer is
+ // pointing to an allocation, there can't be an allocation at null, thus it's safe to
+ // construct `NonNull`.
+ unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`add`][NonNull::add] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_add(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `add` and `byte_add` has the same
+ // safety contract.
+ // Additionally safety contract of `add` guarantees that the resulting pointer is pointing
+ // to an allocation, there can't be an allocation at null, thus it's safe to construct
+ // `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_add(count) } }
+ }
+
+ /// Calculates the offset from a pointer (convenience for
+ /// `.offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let s: &str = "123";
+ ///
+ /// unsafe {
+ /// let end: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap().add(3);
+ /// println!("{}", end.sub(1).read() as char);
+ /// println!("{}", end.sub(2).read() as char);
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ // We could always go back to wrapping if unchecked becomes unacceptable
+ #[rustc_allow_const_fn_unstable(const_int_unchecked_arith)]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ if T::IS_ZST {
+ // Pointer arithmetic does nothing when the pointee is a ZST.
+ self
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // Because the pointee is *not* a ZST, that means that `count` is
+ // at most `isize::MAX`, and thus the negation cannot overflow.
+ unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) }
+ }
+ }
+
+ /// Calculates the offset from a pointer in bytes (convenience for
+ /// `.byte_offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of bytes.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`sub`][NonNull::sub] on it. See that method for documentation
+ /// and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[must_use]
+ #[inline(always)]
+ #[rustc_allow_const_fn_unstable(set_ptr_value)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_sub(self, count: usize) -> Self {
+ // SAFETY: the caller must uphold the safety contract for `sub` and `byte_sub` has the same
+ // safety contract.
+ // Additionally safety contract of `sub` guarantees that the resulting pointer is pointing
+ // to an allocation, there can't be an allocation at null, thus it's safe to construct
+ // `NonNull`.
+ unsafe { NonNull { pointer: self.pointer.byte_sub(count) } }
+ }
+
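A sketch tying the new `NonNull` arithmetic together, assuming a nightly toolchain with `non_null_convenience`:

```rust
#![feature(non_null_convenience)]
use std::ptr::NonNull;

fn main() {
    let mut a = [10u16, 20, 30];
    let base = NonNull::new(a.as_mut_ptr()).unwrap();
    unsafe {
        let third = base.add(2);        // element-wise: 2 * size_of::<u16>() bytes
        assert_eq!(third.read(), 30);
        let second = third.byte_sub(2); // byte-wise: one `u16` back
        assert_eq!(second.read(), 20);
        assert_eq!(second.sub(1).read(), 10);
    }
}
```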
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
+ ///
+ /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
+ /// except that it has a lot more opportunities for UB, in exchange for the compiler
+ /// better understanding what you are doing.
+ ///
+ /// The primary motivation of this method is for computing the `len` of an array/slice
+ /// of `T` that you are currently representing as a "start" and "end" pointer
+ /// (and "end" is "one past the end" of the array).
+ /// In that case, `end.offset_from(start)` gets you the length of the array.
+ ///
+ /// All of the following safety requirements are trivially satisfied for this use case.
+ ///
+ /// [`offset`]: #method.offset
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both `self` and `origin` must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * Both pointers must be *derived from* a pointer to the same object.
+ /// (See below for an example.)
+ ///
+ /// * The distance between the pointers, in bytes, must be an exact multiple
+ /// of the size of `T`.
+ ///
+ /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The distance being in bounds cannot rely on "wrapping around" the address space.
+ ///
+ /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
+ /// address space, so two pointers within some value of any Rust type `T` will always satisfy
+ /// the last two conditions. The standard library also generally ensures that allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
+ /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
+ /// always satisfies the last two conditions.
+ ///
+ /// Most platforms fundamentally can't even construct such a large allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
+ /// such large allocations either.)
+ ///
+ /// The requirement for pointers to be derived from the same allocated object is primarily
+ /// needed for `const`-compatibility: the distance between pointers into *different* allocated
+ /// objects is not known at compile-time. However, the requirement also exists at
+ /// runtime and may be exploited by optimizations. If you wish to compute the difference between
+ /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
+ /// origin as isize) / mem::size_of::<T>()`.
+ // FIXME: recommend `addr()` instead of `as usize` once that is stable.
+ ///
+ /// [`add`]: #method.add
+ /// [allocated object]: crate::ptr#allocated-object
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let a = [0; 5];
+ /// let ptr1: NonNull<u32> = NonNull::from(&a[1]);
+ /// let ptr2: NonNull<u32> = NonNull::from(&a[3]);
+ /// unsafe {
+ /// assert_eq!(ptr2.offset_from(ptr1), 2);
+ /// assert_eq!(ptr1.offset_from(ptr2), -2);
+ /// assert_eq!(ptr1.offset(2), ptr2);
+ /// assert_eq!(ptr2.offset(-2), ptr1);
+ /// }
+ /// ```
+ ///
+ /// *Incorrect* usage:
+ ///
+ /// ```rust,no_run
+ /// #![feature(non_null_convenience, strict_provenance)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let ptr1 = NonNull::new(Box::into_raw(Box::new(0u8))).unwrap();
+ /// let ptr2 = NonNull::new(Box::into_raw(Box::new(1u8))).unwrap();
+ /// let diff = (ptr2.addr().get() as isize).wrapping_sub(ptr1.addr().get() as isize);
+ /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
+ /// let ptr2_other = NonNull::new(ptr1.as_ptr().wrapping_byte_offset(diff)).unwrap();
+ /// assert_eq!(ptr2.addr(), ptr2_other.addr());
+ /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
+ /// // computing their offset is undefined behavior, even though
+ /// // they point to the same address!
+ /// unsafe {
+ /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
+ /// }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn offset_from(self, origin: NonNull<T>) -> isize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset_from`.
+ unsafe { self.pointer.offset_from(origin.pointer) }
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of **bytes**.
+ ///
+ /// This is purely a convenience for casting to a `u8` pointer and
+ /// using [`offset_from`][NonNull::offset_from] on it. See that method for
+ /// documentation and safety requirements.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointers,
+ /// ignoring the metadata.
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: NonNull<U>) -> isize {
+ // SAFETY: the caller must uphold the safety contract for `byte_offset_from`.
+ unsafe { self.pointer.byte_offset_from(origin.pointer) }
+ }
+
+ // N.B. `wrapping_offset`, `wrapping_add`, etc. are not implemented because they can wrap to null
+
+ /// Calculates the distance between two pointers, *where it's known that
+ /// `self` is equal to or greater than `origin`*. The returned value is in
+ /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+ ///
+ /// This computes the same value that [`offset_from`](#method.offset_from)
+ /// would compute, but with the added precondition that the offset is
+ /// guaranteed to be non-negative. This method is equivalent to
+ /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
+ /// but it provides slightly more information to the optimizer, which can
+ /// sometimes allow it to optimize slightly better with some backends.
+ ///
+ /// This method can be thought of as recovering the `count` that was passed
+ /// to [`add`](#method.add) (or, with the parameters in the other order,
+ /// to [`sub`](#method.sub)). The following are all equivalent, assuming
+ /// that their safety preconditions are met:
+ /// ```rust
+ /// # #![feature(non_null_convenience)]
+ /// # unsafe fn blah(ptr: std::ptr::NonNull<u32>, origin: std::ptr::NonNull<u32>, count: usize) -> bool {
+ /// ptr.sub_ptr(origin) == count
+ /// # &&
+ /// origin.add(count) == ptr
+ /// # &&
+ /// ptr.sub(count) == origin
+ /// # }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - The distance between the pointers must be non-negative (`self >= origin`)
+ ///
+ /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
+ /// apply to this method as well; see it for the full details.
+ ///
+ /// Importantly, despite the return type of this method being able to represent
+ /// a larger offset, it's still *not permitted* to pass pointers which differ
+ /// by more than `isize::MAX` *bytes*. As such, the result of this method will
+ /// always be less than or equal to `isize::MAX as usize`.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let a = [0; 5];
+ /// let ptr1: NonNull<u32> = NonNull::from(&a[1]);
+ /// let ptr2: NonNull<u32> = NonNull::from(&a[3]);
+ /// unsafe {
+ /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
+ /// assert_eq!(ptr1.add(2), ptr2);
+ /// assert_eq!(ptr2.sub(2), ptr1);
+ /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
+ /// }
+ ///
+ /// // This would be incorrect, as the pointers are not correctly ordered:
+ /// // ptr1.sub_ptr(ptr2)
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ // #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
+ // #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub_ptr(self, subtracted: NonNull<T>) -> usize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `sub_ptr`.
+ unsafe { self.pointer.sub_ptr(subtracted.pointer) }
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// See [`ptr::read`] for safety concerns and examples.
+ ///
+ /// [`ptr::read`]: crate::ptr::read()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read`.
+ unsafe { ptr::read(self.pointer) }
+ }
+
+ /// Performs a volatile read of the value from `self` without moving it. This
+ /// leaves the memory in `self` unchanged.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::read_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
- pub(crate) const unsafe fn add(self, delta: usize) -> Self
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn read_volatile(self) -> T
where
T: Sized,
{
- // SAFETY: We require that the delta stays in-bounds of the object, and
- // thus it cannot become null, as that would require wrapping the
- // address space, which no legal objects are allowed to do.
- // And the caller promised the `delta` is sound to add.
- unsafe { NonNull { pointer: self.pointer.add(delta) } }
+ // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+ unsafe { ptr::read_volatile(self.pointer) }
}
- /// See [`pointer::sub`] for semantics and safety requirements.
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// Unlike `read`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::read_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
- pub(crate) const unsafe fn sub(self, delta: usize) -> Self
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn read_unaligned(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+ unsafe { ptr::read_unaligned(self.pointer) }
+ }
+
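The readers defer to the `ptr::read*` docs for examples; one hedged sketch of `read_unaligned` on a deliberately misaligned address, again under `non_null_convenience`:

```rust
#![feature(non_null_convenience)]
use std::ptr::NonNull;

fn main() {
    // A u32 stored at offset 1, i.e. at a possibly unaligned address.
    let bytes = [0u8, 0x78, 0x56, 0x34, 0x12];
    let p = NonNull::new(bytes[1..].as_ptr().cast_mut()).unwrap().cast::<u32>();
    // `read` would require 4-byte alignment; `read_unaligned` does not.
    let v = unsafe { p.read_unaligned() };
    assert_eq!(v, u32::from_ne_bytes([0x78, 0x56, 0x34, 0x12]));
}
```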
+ /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to(self, dest: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { ptr::copy(self.pointer, dest.as_ptr(), count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_to_nonoverlapping(self, dest: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { ptr::copy_nonoverlapping(self.pointer, dest.as_ptr(), count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: crate::ptr::copy()
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from(self, src: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { ptr::copy(src.pointer, self.as_ptr(), count) }
+ }
+
+ /// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
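+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (unstable `non_null_convenience` feature), again with
+ /// `self` as the destination and two separate allocations:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let src = [7u16, 8, 9];
+ /// let mut dst = [0u16; 3];
+ /// let dst_ptr = NonNull::new(dst.as_mut_ptr()).unwrap();
+ /// let src_ptr = NonNull::new(src.as_ptr() as *mut u16).unwrap();
+ /// // SAFETY: distinct allocations cannot overlap.
+ /// unsafe { dst_ptr.copy_from_nonoverlapping(src_ptr, 3) };
+ /// assert_eq!(dst, [7, 8, 9]);
+ /// ```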
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn copy_from_nonoverlapping(self, src: NonNull<T>, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { ptr::copy_nonoverlapping(src.pointer, self.as_ptr(), count) }
+ }
+
+ /// Executes the destructor (if any) of the pointed-to value.
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns and examples.
+ ///
+ /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place()
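+ ///
+ /// # Examples
+ ///
+ /// A sketch (unstable `non_null_convenience` feature) of manually running a
+ /// destructor exactly once, using `ManuallyDrop` to suppress the automatic drop:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::mem::ManuallyDrop;
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut slot = ManuallyDrop::new(String::from("hello"));
+ /// let ptr = NonNull::from(&mut *slot);
+ /// // SAFETY: the `String` is initialized, and `slot` is never touched again,
+ /// // so the value is dropped exactly once.
+ /// unsafe { ptr.drop_in_place() };
+ /// ```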
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ pub unsafe fn drop_in_place(self) {
+ // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
+ unsafe { ptr::drop_in_place(self.as_ptr()) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// See [`ptr::write`] for safety concerns and examples.
+ ///
+ /// [`ptr::write`]: crate::ptr::write()
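+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (unstable `non_null_convenience` feature):
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = NonNull::from(&mut x);
+ /// // SAFETY: `ptr` is valid for writes and properly aligned.
+ /// unsafe { ptr.write(42) };
+ /// // SAFETY: `ptr` is valid for reads and now points to an initialized value.
+ /// assert_eq!(unsafe { ptr.read() }, 42);
+ /// ```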
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write`.
+ unsafe { ptr::write(self.as_ptr(), val) }
+ }
+
+ /// Invokes `memset` on the specified pointer, setting `count * size_of::<T>()`
+ /// bytes of memory starting at `self` to `val`.
+ ///
+ /// See [`ptr::write_bytes`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_bytes`]: crate::ptr::write_bytes()
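+ ///
+ /// # Examples
+ ///
+ /// A short sketch (unstable `non_null_convenience` feature) of zeroing a buffer:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut buf = [1u8; 8];
+ /// let ptr = NonNull::new(buf.as_mut_ptr()).unwrap();
+ /// // SAFETY: `ptr` is valid for writes of 8 bytes.
+ /// unsafe { ptr.write_bytes(0, 8) };
+ /// assert_eq!(buf, [0u8; 8]);
+ /// ```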
+ #[doc(alias = "memset")]
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_bytes(self, val: u8, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_bytes`.
+ unsafe { ptr::write_bytes(self.as_ptr(), val, count) }
+ }
+
+ /// Performs a volatile write of a memory location with the given value without
+ /// reading or dropping the old value.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// See [`ptr::write_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_volatile`]: crate::ptr::write_volatile()
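+ ///
+ /// # Examples
+ ///
+ /// A sketch (unstable `non_null_convenience` feature); volatile accesses are
+ /// also permitted on ordinary memory, as here:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut x = 0u32;
+ /// let ptr = NonNull::from(&mut x);
+ /// // SAFETY: `ptr` is valid for writes and properly aligned.
+ /// unsafe { ptr.write_volatile(12) };
+ /// // SAFETY: `ptr` is valid for reads and properly aligned.
+ /// assert_eq!(unsafe { ptr.read_volatile() }, 12);
+ /// ```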
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub unsafe fn write_volatile(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_volatile`.
+ unsafe { ptr::write_volatile(self.as_ptr(), val) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// Unlike `write`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::write_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned()
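+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (unstable `non_null_convenience` feature) writing a `u32`
+ /// to a deliberately misaligned position in a byte buffer:
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut buf = [0u8; 8];
+ /// // A pointer one byte into the buffer is (usually) not aligned for `u32`.
+ /// let ptr = NonNull::new(buf[1..].as_mut_ptr()).unwrap().cast::<u32>();
+ /// // SAFETY: the pointer is valid for writes of 4 bytes; alignment is not required.
+ /// unsafe { ptr.write_unaligned(u32::from_ne_bytes([1, 2, 3, 4])) };
+ /// assert_eq!(buf[1..5], [1, 2, 3, 4]);
+ /// ```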
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[inline(always)]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn write_unaligned(self, val: T)
where
T: Sized,
{
- // SAFETY: We require that the delta stays in-bounds of the object, and
- // thus it cannot become null, as no legal objects can be allocated
- // in such as way that the null address is part of them.
- // And the caller promised the `delta` is sound to subtract.
- unsafe { NonNull { pointer: self.pointer.sub(delta) } }
+ // SAFETY: the caller must uphold the safety contract for `write_unaligned`.
+ unsafe { ptr::write_unaligned(self.as_ptr(), val) }
}
- /// See [`pointer::sub_ptr`] for semantics and safety requirements.
+ /// Replaces the value at `self` with `src`, returning the old
+ /// value, without dropping either.
+ ///
+ /// See [`ptr::replace`] for safety concerns and examples.
+ ///
+ /// [`ptr::replace`]: crate::ptr::replace()
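+ ///
+ /// # Examples
+ ///
+ /// A brief sketch (unstable `non_null_convenience` feature):
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut s = String::from("old");
+ /// let ptr = NonNull::from(&mut s);
+ /// // SAFETY: `ptr` is valid for reads and writes and points to an initialized value.
+ /// let old = unsafe { ptr.replace(String::from("new")) };
+ /// assert_eq!(old, "old");
+ /// assert_eq!(s, "new");
+ /// ```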
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[inline(always)]
+ pub unsafe fn replace(self, src: T) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `replace`.
+ unsafe { ptr::replace(self.as_ptr(), src) }
+ }
+
+ /// Swaps the values at two mutable locations of the same type, without
+ /// deinitializing either. They may overlap, unlike `mem::swap`, which is
+ /// otherwise equivalent.
+ ///
+ /// See [`ptr::swap`] for safety concerns and examples.
+ ///
+ /// [`ptr::swap`]: crate::ptr::swap()
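+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (unstable `non_null_convenience` feature):
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let mut a = 1u8;
+ /// let mut b = 2u8;
+ /// let pa = NonNull::from(&mut a);
+ /// let pb = NonNull::from(&mut b);
+ /// // SAFETY: both pointers are valid for reads and writes of a `u8`.
+ /// unsafe { pa.swap(pb) };
+ /// assert_eq!((a, b), (2, 1));
+ /// ```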
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+ #[inline(always)]
+ pub const unsafe fn swap(self, with: NonNull<T>)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `swap`.
+ unsafe { ptr::swap(self.as_ptr(), with.as_ptr()) }
+ }
+
+ /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
+ /// `align`.
+ ///
+ /// If it is not possible to align the pointer, the implementation returns
+ /// `usize::MAX`. It is permissible for the implementation to *always*
+ /// return `usize::MAX`. Only your algorithm's performance can depend
+ /// on getting a usable offset here, not its correctness.
+ ///
+ /// The offset is expressed in number of `T` elements, and not bytes.
+ ///
+ /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
+ /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
+ /// the returned offset is correct in all terms other than alignment.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power of two.
+ ///
+ /// # Examples
+ ///
+ /// Accessing adjacent `u8` as `u16`
+ ///
+ /// ```
+ /// #![feature(non_null_convenience)]
+ /// use std::mem::align_of;
+ /// use std::ptr::NonNull;
+ ///
+ /// # unsafe {
+ /// let x = [5_u8, 6, 7, 8, 9];
+ /// let ptr = NonNull::new(x.as_ptr() as *mut u8).unwrap();
+ /// let offset = ptr.align_offset(align_of::<u16>());
+ ///
+ /// if offset < x.len() - 1 {
+ /// let u16_ptr = ptr.add(offset).cast::<u16>();
+ /// assert!(u16_ptr.read() == u16::from_ne_bytes([5, 6]) || u16_ptr.read() == u16::from_ne_bytes([6, 7]));
+ /// } else {
+ /// // while the pointer can be aligned via `offset`, it would point
+ /// // outside the allocation
+ /// }
+ /// # }
+ /// ```
+ #[unstable(feature = "non_null_convenience", issue = "117691")]
+ #[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
+ //#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
+ #[must_use]
#[inline]
- pub(crate) const unsafe fn sub_ptr(self, subtrahend: Self) -> usize
+ pub const fn align_offset(self, align: usize) -> usize
where
T: Sized,
{
- // SAFETY: The caller promised that this is safe to do, and
- // the non-nullness is irrelevant to the operation.
- unsafe { self.pointer.sub_ptr(subtrahend.pointer) }
+ if !align.is_power_of_two() {
+ panic!("align_offset: align is not a power-of-two");
+ }
+
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above.
+ unsafe { ptr::align_offset(self.pointer, align) }
+ }
+ }
+
+ /// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = NonNull::<AlignedI32>::from(&data);
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!NonNull::new(ptr.as_ptr().wrapping_byte_add(1)).unwrap().is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(non_null_convenience)]
+ /// #![feature(const_option)]
+ /// #![feature(const_nonnull_new)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let data = [AlignedI32(42), AlignedI32(42)];
+ /// let ptr = NonNull::<AlignedI32>::new(&data[0] as *const _ as *mut _).unwrap();
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = unsafe { ptr.add(1).cast::<AlignedI64>() };
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_option)]
+ /// #![feature(const_nonnull_new)]
+ /// use std::ptr::NonNull;
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = NonNull::new(40 as *mut AlignedI32).unwrap();
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = NonNull::new(ptr.as_ptr().wrapping_add(1)).unwrap().cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ #[must_use]
+ #[inline]
+ pub const fn is_aligned(self) -> bool
+ where
+ T: Sized,
+ {
+ self.pointer.is_aligned()
+ }
+
+ /// Returns whether the pointer is aligned to `align`.
+ ///
+ /// For non-`Sized` pointees this operation considers only the data pointer,
+ /// ignoring the metadata.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power of two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// cannot be aligned more strictly than the reference's underlying allocation.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
+ #[unstable(feature = "pointer_is_aligned", issue = "96284")]
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ #[must_use]
+ #[inline]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
+ self.pointer.is_aligned_to(align)
}
}
@@ -783,6 +1791,7 @@ impl<T: ?Sized> Eq for NonNull<T> {}
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialEq for NonNull<T> {
#[inline]
+ #[cfg_attr(not(bootstrap), allow(ambiguous_wide_pointer_comparisons))]
fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index bf8b86677..067f1541e 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -1,4 +1,3 @@
-use crate::convert::From;
use crate::fmt;
use crate::marker::{PhantomData, Unsize};
use crate::ops::{CoerceUnsized, DispatchFromDyn};