author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 12:11:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 12:11:28 +0000
commit    94a0819fe3a0d679c3042a77bfe6a2afc505daea (patch)
tree      2b827afe6a05f3538db3f7803a88c4587fe85648 /library/core/src/ptr
parent    Adding upstream version 1.64.0+dfsg1. (diff)
Adding upstream version 1.66.0+dfsg1. (tag: upstream/1.66.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core/src/ptr')
-rw-r--r--  library/core/src/ptr/alignment.rs | 326
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 111
-rw-r--r--  library/core/src/ptr/metadata.rs  |  32
-rw-r--r--  library/core/src/ptr/mod.rs       | 130
-rw-r--r--  library/core/src/ptr/mut_ptr.rs   | 105
-rw-r--r--  library/core/src/ptr/non_null.rs  |   6
6 files changed, 536 insertions, 174 deletions
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
new file mode 100644
index 000000000..1390e09dd
--- /dev/null
+++ b/library/core/src/ptr/alignment.rs
@@ -0,0 +1,326 @@
+use crate::convert::{TryFrom, TryInto};
+use crate::intrinsics::assert_unsafe_precondition;
+use crate::num::NonZeroUsize;
+use crate::{cmp, fmt, hash, mem, num};
+
+/// A type storing a `usize` which is a power of two, and thus
+/// represents a possible alignment in the Rust abstract machine.
+///
+/// Note that particularly large alignments, while representable in this type,
+/// are likely not to be supported by actual allocators and linkers.
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(transparent)]
+pub struct Alignment(AlignmentEnum);
+
+// Alignment is `repr(usize)`, but via extra steps.
+const _: () = assert!(mem::size_of::<Alignment>() == mem::size_of::<usize>());
+const _: () = assert!(mem::align_of::<Alignment>() == mem::align_of::<usize>());
+
+fn _alignment_can_be_structurally_matched(a: Alignment) -> bool {
+ matches!(a, Alignment::MIN)
+}
+
+impl Alignment {
+ /// The smallest possible alignment, 1.
+ ///
+ /// All addresses are always aligned at least this much.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_alignment_type)]
+ /// use std::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::MIN.as_usize(), 1);
+ /// ```
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ pub const MIN: Self = Self(AlignmentEnum::_Align1Shl0);
+
+ /// Returns the alignment for a type.
+ ///
+ /// This provides the same numerical value as [`mem::align_of`],
+ /// but in an `Alignment` instead of a `usize`.
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn of<T>() -> Self {
+ // SAFETY: rustc ensures that type alignment is always a power of two.
+ unsafe { Alignment::new_unchecked(mem::align_of::<T>()) }
+ }
+
+ /// Creates an `Alignment` from a `usize`, or returns `None` if it's
+ /// not a power of two.
+ ///
+ /// Note that `0` is not a power of two, nor a valid alignment.
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn new(align: usize) -> Option<Self> {
+ if align.is_power_of_two() {
+ // SAFETY: Just checked it only has one bit set
+ Some(unsafe { Self::new_unchecked(align) })
+ } else {
+ None
+ }
+ }
+
+ /// Creates an `Alignment` from a power-of-two `usize`.
+ ///
+ /// # Safety
+ ///
+ /// `align` must be a power of two.
+ ///
+ /// Equivalently, it must be `1 << exp` for some `exp` in `0..usize::BITS`.
+ /// It must *not* be zero.
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const unsafe fn new_unchecked(align: usize) -> Self {
+ // SAFETY: Precondition passed to the caller.
+ unsafe {
+ assert_unsafe_precondition!(
+ "Alignment::new_unchecked requires a power of two",
+ (align: usize) => align.is_power_of_two()
+ )
+ };
+
+ // SAFETY: By precondition, this must be a power of two, and
+ // our variants encompass all possible powers of two.
+ unsafe { mem::transmute::<usize, Alignment>(align) }
+ }
+
+ /// Returns the alignment as a [`usize`]
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn as_usize(self) -> usize {
+ self.0 as usize
+ }
+
+ /// Returns the alignment as a [`NonZeroUsize`]
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub const fn as_nonzero(self) -> NonZeroUsize {
+ // SAFETY: All the discriminants are non-zero.
+ unsafe { NonZeroUsize::new_unchecked(self.as_usize()) }
+ }
+
+ /// Returns the base-2 logarithm of the alignment.
+ ///
+ /// This is always exact, as `self` represents a power of two.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_alignment_type)]
+ /// use std::ptr::Alignment;
+ ///
+ /// assert_eq!(Alignment::of::<u8>().log2(), 0);
+ /// assert_eq!(Alignment::new(1024).unwrap().log2(), 10);
+ /// ```
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")]
+ #[inline]
+ pub fn log2(self) -> u32 {
+ self.as_nonzero().trailing_zeros()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl fmt::Debug for Alignment {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?} (1 << {:?})", self.as_nonzero(), self.log2())
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl TryFrom<NonZeroUsize> for Alignment {
+ type Error = num::TryFromIntError;
+
+ #[inline]
+ fn try_from(align: NonZeroUsize) -> Result<Alignment, Self::Error> {
+ align.get().try_into()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl TryFrom<usize> for Alignment {
+ type Error = num::TryFromIntError;
+
+ #[inline]
+ fn try_from(align: usize) -> Result<Alignment, Self::Error> {
+ Self::new(align).ok_or(num::TryFromIntError(()))
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl From<Alignment> for NonZeroUsize {
+ #[inline]
+ fn from(align: Alignment) -> NonZeroUsize {
+ align.as_nonzero()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl From<Alignment> for usize {
+ #[inline]
+ fn from(align: Alignment) -> usize {
+ align.as_usize()
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl cmp::Ord for Alignment {
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.as_nonzero().cmp(&other.as_nonzero())
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl cmp::PartialOrd for Alignment {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+impl hash::Hash for Alignment {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.as_nonzero().hash(state)
+ }
+}
+
+#[cfg(target_pointer_width = "16")]
+type AlignmentEnum = AlignmentEnum16;
+#[cfg(target_pointer_width = "32")]
+type AlignmentEnum = AlignmentEnum32;
+#[cfg(target_pointer_width = "64")]
+type AlignmentEnum = AlignmentEnum64;
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(u16)]
+enum AlignmentEnum16 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(u32)]
+enum AlignmentEnum32 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+ _Align1Shl16 = 1 << 16,
+ _Align1Shl17 = 1 << 17,
+ _Align1Shl18 = 1 << 18,
+ _Align1Shl19 = 1 << 19,
+ _Align1Shl20 = 1 << 20,
+ _Align1Shl21 = 1 << 21,
+ _Align1Shl22 = 1 << 22,
+ _Align1Shl23 = 1 << 23,
+ _Align1Shl24 = 1 << 24,
+ _Align1Shl25 = 1 << 25,
+ _Align1Shl26 = 1 << 26,
+ _Align1Shl27 = 1 << 27,
+ _Align1Shl28 = 1 << 28,
+ _Align1Shl29 = 1 << 29,
+ _Align1Shl30 = 1 << 30,
+ _Align1Shl31 = 1 << 31,
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+#[repr(u64)]
+enum AlignmentEnum64 {
+ _Align1Shl0 = 1 << 0,
+ _Align1Shl1 = 1 << 1,
+ _Align1Shl2 = 1 << 2,
+ _Align1Shl3 = 1 << 3,
+ _Align1Shl4 = 1 << 4,
+ _Align1Shl5 = 1 << 5,
+ _Align1Shl6 = 1 << 6,
+ _Align1Shl7 = 1 << 7,
+ _Align1Shl8 = 1 << 8,
+ _Align1Shl9 = 1 << 9,
+ _Align1Shl10 = 1 << 10,
+ _Align1Shl11 = 1 << 11,
+ _Align1Shl12 = 1 << 12,
+ _Align1Shl13 = 1 << 13,
+ _Align1Shl14 = 1 << 14,
+ _Align1Shl15 = 1 << 15,
+ _Align1Shl16 = 1 << 16,
+ _Align1Shl17 = 1 << 17,
+ _Align1Shl18 = 1 << 18,
+ _Align1Shl19 = 1 << 19,
+ _Align1Shl20 = 1 << 20,
+ _Align1Shl21 = 1 << 21,
+ _Align1Shl22 = 1 << 22,
+ _Align1Shl23 = 1 << 23,
+ _Align1Shl24 = 1 << 24,
+ _Align1Shl25 = 1 << 25,
+ _Align1Shl26 = 1 << 26,
+ _Align1Shl27 = 1 << 27,
+ _Align1Shl28 = 1 << 28,
+ _Align1Shl29 = 1 << 29,
+ _Align1Shl30 = 1 << 30,
+ _Align1Shl31 = 1 << 31,
+ _Align1Shl32 = 1 << 32,
+ _Align1Shl33 = 1 << 33,
+ _Align1Shl34 = 1 << 34,
+ _Align1Shl35 = 1 << 35,
+ _Align1Shl36 = 1 << 36,
+ _Align1Shl37 = 1 << 37,
+ _Align1Shl38 = 1 << 38,
+ _Align1Shl39 = 1 << 39,
+ _Align1Shl40 = 1 << 40,
+ _Align1Shl41 = 1 << 41,
+ _Align1Shl42 = 1 << 42,
+ _Align1Shl43 = 1 << 43,
+ _Align1Shl44 = 1 << 44,
+ _Align1Shl45 = 1 << 45,
+ _Align1Shl46 = 1 << 46,
+ _Align1Shl47 = 1 << 47,
+ _Align1Shl48 = 1 << 48,
+ _Align1Shl49 = 1 << 49,
+ _Align1Shl50 = 1 << 50,
+ _Align1Shl51 = 1 << 51,
+ _Align1Shl52 = 1 << 52,
+ _Align1Shl53 = 1 << 53,
+ _Align1Shl54 = 1 << 54,
+ _Align1Shl55 = 1 << 55,
+ _Align1Shl56 = 1 << 56,
+ _Align1Shl57 = 1 << 57,
+ _Align1Shl58 = 1 << 58,
+ _Align1Shl59 = 1 << 59,
+ _Align1Shl60 = 1 << 60,
+ _Align1Shl61 = 1 << 61,
+ _Align1Shl62 = 1 << 62,
+ _Align1Shl63 = 1 << 63,
+}
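
The enum encoding above is what lets `Alignment` be a plain `usize` in memory while making zero an impossible value. A minimal sketch of the resulting API, assuming a nightly toolchain with the unstable `ptr_alignment_type` feature:

```rust
#![feature(ptr_alignment_type)]

use std::mem::{align_of, size_of};
use std::ptr::Alignment;

fn main() {
    // Zero is not a valid discriminant, so in practice `Option<Alignment>`
    // can use it as a niche and stay pointer-sized.
    assert_eq!(size_of::<Option<Alignment>>(), size_of::<usize>());

    assert_eq!(Alignment::of::<u16>().as_usize(), align_of::<u16>());
    assert!(Alignment::new(3).is_none()); // 3 is not a power of two
    assert_eq!(Alignment::new(1024).unwrap().log2(), 10);
}
```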
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index e0655d68d..5a083227b 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -36,7 +36,10 @@ impl<T: ?Sized> *const T {
pub const fn is_null(self) -> bool {
// Compare via a cast to a thin pointer, so fat pointers are only
// considering their "data" part for null-ness.
- (self as *const u8).guaranteed_eq(null())
+ match (self as *const u8).guaranteed_eq(null()) {
+ None => false,
+ Some(res) => res,
+ }
}
/// Casts to a pointer of another type.
@@ -95,8 +98,8 @@ impl<T: ?Sized> *const T {
///
/// This is a bit safer than `as` because it wouldn't silently change the type if the code is
/// refactored.
- #[unstable(feature = "ptr_const_cast", issue = "92675")]
- #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")]
+ #[stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
pub const fn cast_mut(self) -> *mut T {
self as _
}
@@ -154,7 +157,7 @@ impl<T: ?Sized> *const T {
/// This is similar to `self as usize`, which semantically discards *provenance* and
/// *address-space* information. However, unlike `self as usize`, casting the returned address
/// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
- /// properly restore the lost information and obtain a dereferencable pointer, use
+ /// properly restore the lost information and obtain a dereferenceable pointer, use
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
@@ -249,7 +252,7 @@ impl<T: ?Sized> *const T {
let offset = dest_addr.wrapping_sub(self_addr);
// This is the canonical desugaring of this operation
- self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+ self.wrapping_byte_offset(offset)
}
/// Creates a new pointer by mapping `self`'s address to a new one.
@@ -559,6 +562,20 @@ impl<T: ?Sized> *const T {
from_raw_parts::<T>(self.cast::<u8>().wrapping_offset(count).cast::<()>(), metadata(self))
}
+ /// Masks out bits of the pointer according to a mask.
+ ///
+ /// This is a convenience for `ptr.map_addr(|a| a & mask)`.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "ptr_mask", issue = "98290")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ pub fn mask(self, mask: usize) -> *const T {
+ let this = intrinsics::ptr_mask(self.cast::<()>(), mask);
+ from_raw_parts::<T>(this, metadata(self))
+ }
+
/// Calculates the distance between two pointers. The returned value is in
/// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
///
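
The new `mask` method is the provenance-preserving way to implement tagged pointers. A sketch assuming a nightly toolchain with the unstable `ptr_mask` and `strict_provenance` features; the tagging scheme itself is hypothetical:

```rust
#![feature(ptr_mask, strict_provenance)]

fn main() {
    let v: u64 = 0xAB; // u64's alignment leaves the low address bit free
    let ptr: *const u64 = &v;

    // Hypothetical tagging scheme: stash a flag in the lowest bit.
    let tagged = ptr.map_addr(|a| a | 1);

    // `mask` clears the tag bit but keeps the original provenance, so
    // the result is still dereferenceable.
    let untagged = tagged.mask(!1);
    assert_eq!(unsafe { *untagged }, 0xAB);
}
```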
@@ -641,7 +658,7 @@ impl<T: ?Sized> *const T {
/// }
/// ```
#[stable(feature = "ptr_offset_from", since = "1.47.0")]
- #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn offset_from(self, origin: *const T) -> isize
@@ -677,7 +694,7 @@ impl<T: ?Sized> *const T {
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
///
/// This computes the same value that [`offset_from`](#method.offset_from)
- /// would compute, but with the added precondition that that the offset is
+ /// would compute, but with the added precondition that the offset is
/// guaranteed to be non-negative. This method is equivalent to
/// `usize::from(self.offset_from(origin)).unwrap_unchecked()`,
/// but it provides slightly more information to the optimizer, which can
@@ -740,9 +757,15 @@ impl<T: ?Sized> *const T {
where
T: Sized,
{
+ let this = self;
// SAFETY: The comparison has no side-effects, and the intrinsic
// does this check internally in the CTFE implementation.
- unsafe { assert_unsafe_precondition!(self >= origin) };
+ unsafe {
+ assert_unsafe_precondition!(
+ "ptr::sub_ptr requires `this >= origin`",
+ [T](this: *const T, origin: *const T) => this >= origin
+ )
+ };
let pointee_size = mem::size_of::<T>();
assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
@@ -752,20 +775,16 @@ impl<T: ?Sized> *const T {
/// Returns whether two pointers are guaranteed to be equal.
///
- /// At runtime this function behaves like `self == other`.
+ /// At runtime this function behaves like `Some(self == other)`.
/// However, in some contexts (e.g., compile-time evaluation),
/// it is not always possible to determine equality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be equal.
- /// But when it returns `true`, the pointers are guaranteed to be equal.
+ /// spuriously return `None` for pointers that later actually turn out to have their equality known.
+ /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
///
- /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
- ///
- /// [`guaranteed_ne`]: #method.guaranteed_ne
- ///
- /// The return value may change depending on the compiler version and unsafe code must not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+ /// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -774,29 +793,28 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const fn guaranteed_eq(self, other: *const T) -> bool
+ pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_eq(self, other)
+ match intrinsics::ptr_guaranteed_cmp(self as _, other as _) {
+ 2 => None,
+ other => Some(other == 1),
+ }
}
- /// Returns whether two pointers are guaranteed to be unequal.
+ /// Returns whether two pointers are guaranteed to be inequal.
///
- /// At runtime this function behaves like `self != other`.
+ /// At runtime this function behaves like `Some(self != other)`.
/// However, in some contexts (e.g., compile-time evaluation),
- /// it is not always possible to determine the inequality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be unequal.
- /// But when it returns `true`, the pointers are guaranteed to be unequal.
- ///
- /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
- ///
- /// [`guaranteed_eq`]: #method.guaranteed_eq
+ /// it is not always possible to determine inequality of two pointers, so this function may
+ /// spuriously return `None` for pointers that later actually turn out to have their inequality known.
+ /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
///
- /// The return value may change depending on the compiler version and unsafe code must not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+ /// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -805,11 +823,14 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const fn guaranteed_ne(self, other: *const T) -> bool
+ pub const fn guaranteed_ne(self, other: *const T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_ne(self, other)
+ match self.guaranteed_eq(other) {
+ None => None,
+ Some(eq) => Some(!eq),
+ }
}
/// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
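
The `Option<bool>` signature forces callers to handle the "not decidable at compile time" case explicitly, just as `is_null` above now does. A sketch of the typical pattern, assuming nightly with `const_raw_ptr_comparison`; the helper is hypothetical:

```rust
#![feature(const_raw_ptr_comparison)]

// `None` means the comparison is not decidable here, so callers fall
// back conservatively.
const fn definitely_equal<T>(a: *const T, b: *const T) -> bool {
    match a.guaranteed_eq(b) {
        Some(eq) => eq,
        None => false, // unknown: report "not guaranteed equal"
    }
}

fn main() {
    let x = 1u8;
    let p: *const u8 = &x;
    assert!(definitely_equal(p, p)); // at runtime this is just `p == p`
}
```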
@@ -1267,20 +1288,21 @@ impl<T: ?Sized> *const T {
/// Accessing adjacent `u8` as `u16`
///
/// ```
- /// # fn foo(n: usize) {
- /// # use std::mem::align_of;
+ /// use std::mem::align_of;
+ ///
/// # unsafe {
- /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
- /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let x = [5_u8, 6, 7, 8, 9];
+ /// let ptr = x.as_ptr();
/// let offset = ptr.align_offset(align_of::<u16>());
- /// if offset < x.len() - n - 1 {
- /// let u16_ptr = ptr.add(offset) as *const u16;
- /// assert_ne!(*u16_ptr, 500);
+ ///
+ /// if offset < x.len() - 1 {
+ /// let u16_ptr = ptr.add(offset).cast::<u16>();
+ /// assert!(*u16_ptr == u16::from_ne_bytes([5, 6]) || *u16_ptr == u16::from_ne_bytes([6, 7]));
/// } else {
/// // while the pointer can be aligned via `offset`, it would point
/// // outside the allocation
/// }
- /// # } }
+ /// # }
/// ```
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
@@ -1336,11 +1358,8 @@ impl<T: ?Sized> *const T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // SAFETY: `is_power_of_two()` will return `false` for zero.
- unsafe { core::intrinsics::assume(align != 0) };
-
// Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() % align == 0
+ self.cast::<u8>().addr() & align - 1 == 0
}
}
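
The replacement of `%` with a mask in `is_aligned_to` relies on the identity `addr % align == addr & (align - 1)` for power-of-two `align`, which the preceding `is_power_of_two` check guarantees. A quick demonstration:

```rust
fn main() {
    let align = 16usize; // a power of two, as `is_aligned_to` requires
    for addr in 0..64usize {
        // For power-of-two divisors, the remainder is just the low bits.
        assert_eq!(addr % align, addr & (align - 1));
    }
}
```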
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index cd5edee04..caa10f181 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -135,16 +135,16 @@ pub const fn from_raw_parts_mut<T: ?Sized>(
}
#[repr(C)]
-pub(crate) union PtrRepr<T: ?Sized> {
- pub(crate) const_ptr: *const T,
- pub(crate) mut_ptr: *mut T,
- pub(crate) components: PtrComponents<T>,
+union PtrRepr<T: ?Sized> {
+ const_ptr: *const T,
+ mut_ptr: *mut T,
+ components: PtrComponents<T>,
}
#[repr(C)]
-pub(crate) struct PtrComponents<T: ?Sized> {
- pub(crate) data_address: *const (),
- pub(crate) metadata: <T as Pointee>::Metadata,
+struct PtrComponents<T: ?Sized> {
+ data_address: *const (),
+ metadata: <T as Pointee>::Metadata,
}
// Manual impl needed to avoid `T: Copy` bound.
@@ -180,7 +180,6 @@ pub struct DynMetadata<Dyn: ?Sized> {
phantom: crate::marker::PhantomData<Dyn>,
}
-#[cfg(not(bootstrap))]
extern "C" {
/// Opaque type for accessing vtables.
///
@@ -189,17 +188,6 @@ extern "C" {
type VTable;
}
-/// The common prefix of all vtables. It is followed by function pointers for trait methods.
-///
-/// Private implementation detail of `DynMetadata::size_of` etc.
-#[repr(C)]
-#[cfg(bootstrap)]
-struct VTable {
- drop_in_place: fn(*mut ()),
- size_of: usize,
- align_of: usize,
-}
-
impl<Dyn: ?Sized> DynMetadata<Dyn> {
/// Returns the size of the type associated with this vtable.
#[inline]
@@ -207,9 +195,6 @@ impl<Dyn: ?Sized> DynMetadata<Dyn> {
// Note that "size stored in vtable" is *not* the same as "result of size_of_val_raw".
// Consider a reference like `&(i32, dyn Send)`: the vtable will only store the size of the
// `Send` part!
- #[cfg(bootstrap)]
- return self.vtable_ptr.size_of;
- #[cfg(not(bootstrap))]
// SAFETY: DynMetadata always contains a valid vtable pointer
return unsafe {
crate::intrinsics::vtable_size(self.vtable_ptr as *const VTable as *const ())
@@ -219,9 +204,6 @@ impl<Dyn: ?Sized> DynMetadata<Dyn> {
/// Returns the alignment of the type associated with this vtable.
#[inline]
pub fn align_of(self) -> usize {
- #[cfg(bootstrap)]
- return self.vtable_ptr.align_of;
- #[cfg(not(bootstrap))]
// SAFETY: DynMetadata always contains a valid vtable pointer
return unsafe {
crate::intrinsics::vtable_align(self.vtable_ptr as *const VTable as *const ())
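
With the vtable intrinsics in place, `DynMetadata::size_of` and `align_of` behave as before from the caller's perspective. A small sketch, assuming nightly with the unstable `ptr_metadata` feature:

```rust
#![feature(ptr_metadata)]

use std::fmt::Debug;
use std::mem;
use std::ptr;

fn main() {
    let value: &dyn Debug = &7_u32;
    let meta = ptr::metadata(value as *const dyn Debug);
    // Size and alignment of the erased type, read from the vtable.
    assert_eq!(meta.size_of(), mem::size_of::<u32>());
    assert_eq!(meta.align_of(), mem::align_of::<u32>());
}
```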
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 40e28e636..565c38d22 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -90,7 +90,7 @@
//! isn't *pointer*-sized but address-space/offset/allocation-sized (we'll probably continue
//! to conflate these notions). This would potentially make it possible to more efficiently
//! target platforms where pointers are larger than offsets, such as CHERI and maybe some
-//! segmented architecures.
+//! segmented architectures.
//!
//! ## Provenance
//!
@@ -172,7 +172,7 @@
//! a pointer to a usize is generally an operation which *only* extracts the address. It is
//! therefore *impossible* to construct a valid pointer from a usize because there is no way
//! to restore the address-space and provenance. In other words, pointer-integer-pointer
-//! roundtrips are not possible (in the sense that the resulting pointer is not dereferencable).
+//! roundtrips are not possible (in the sense that the resulting pointer is not dereferenceable).
//!
//! The key insight to making this model *at all* viable is the [`with_addr`][] method:
//!
@@ -272,7 +272,7 @@
//!
//! * Create an invalid pointer from just an address (see [`ptr::invalid`][]). This can
//! be used for sentinel values like `null` *or* to represent a tagged pointer that will
-//! never be dereferencable. In general, it is always sound for an integer to pretend
+//! never be dereferenceable. In general, it is always sound for an integer to pretend
//! to be a pointer "for fun" as long as you don't use operations on it which require
//! it to be valid (offset, read, write, etc).
//!
@@ -377,6 +377,10 @@ use crate::intrinsics::{
use crate::mem::{self, MaybeUninit};
+mod alignment;
+#[unstable(feature = "ptr_alignment_type", issue = "102070")]
+pub use alignment::Alignment;
+
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use crate::intrinsics::copy_nonoverlapping;
@@ -390,7 +394,6 @@ pub use crate::intrinsics::copy;
pub use crate::intrinsics::write_bytes;
mod metadata;
-pub(crate) use metadata::PtrRepr;
#[unstable(feature = "ptr_metadata", issue = "81513")]
pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin};
@@ -578,12 +581,21 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// Convert an address back to a pointer, picking up a previously 'exposed' provenance.
///
/// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any*
-/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize`
-/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be
-/// used, the program has undefined behavior. Note that there is no algorithm that decides which
-/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess
-/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined
-/// behavior, then that is the guess that will be taken.
+/// pointer that was previously exposed by passing it to [`expose_addr`][pointer::expose_addr],
+/// or a `ptr as usize` cast. In addition, memory which is outside the control of the Rust abstract
+/// machine (MMIO registers, for example) is always considered to be exposed, so long as this memory
+/// is disjoint from memory that will be used by the abstract machine such as the stack, heap,
+/// and statics.
+///
+/// If there is no 'exposed' provenance that justifies the way this pointer will be used,
+/// the program has undefined behavior. In particular, the aliasing rules still apply: pointers
+/// and references that have been invalidated due to aliasing accesses cannot be used any more,
+/// even if they have been exposed!
+///
+/// Note that there is no algorithm that decides which provenance will be used. You can think of this
+/// as "guessing" the right provenance, and the guess will be "maximally in your favor", in the sense
+/// that if there is any way to avoid undefined behavior (while upholding all aliasing requirements),
+/// then that is the guess that will be taken.
///
/// On platforms with multiple address spaces, it is your responsibility to ensure that the
/// address makes sense in the address space that this pointer will be used with.
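
A minimal round-trip illustrating the exposed-provenance contract described above; a sketch assuming a nightly toolchain of this era, where both functions sit under the `strict_provenance` feature:

```rust
#![feature(strict_provenance)]

use std::ptr;

fn main() {
    let x = 3u8;
    // `expose_addr` returns the address *and* marks the provenance of
    // `&x` as exposed.
    let addr = (&x as *const u8).expose_addr();

    // Later (e.g. after the address round-tripped through FFI), pick
    // the exposed provenance back up.
    let p = ptr::from_exposed_addr::<u8>(addr);
    assert_eq!(unsafe { *p }, 3);
}
```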
@@ -603,6 +615,7 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
@@ -639,6 +652,7 @@ where
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -885,6 +899,9 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
// valid for writes and properly aligned.
unsafe {
assert_unsafe_precondition!(
+ "ptr::swap_nonoverlapping requires that both pointer arguments are aligned and non-null \
+ and the specified memory ranges do not overlap",
+ [T](x: *mut T, y: *mut T, count: usize) =>
is_aligned_and_not_null(x)
&& is_aligned_and_not_null(y)
&& is_nonoverlapping(x, y, count)
@@ -981,7 +998,10 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
// and cannot overlap `src` since `dst` must point to a distinct
// allocated object.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ assert_unsafe_precondition!(
+ "ptr::replace requires that the pointer argument is aligned and non-null",
+ [T](dst: *mut T) => is_aligned_and_not_null(dst)
+ );
mem::swap(&mut *dst, &mut src); // cannot overlap
}
src
@@ -1112,6 +1132,10 @@ pub const unsafe fn read<T>(src: *const T) -> T {
// Also, since we just wrote a valid value into `tmp`, it is guaranteed
// to be properly initialized.
unsafe {
+ assert_unsafe_precondition!(
+ "ptr::read requires that the pointer argument is aligned and non-null",
+ [T](src: *const T) => is_aligned_and_not_null(src)
+ );
copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
tmp.assume_init()
}
@@ -1305,6 +1329,10 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
// `dst` cannot overlap `src` because the caller has mutable access
// to `dst` while `src` is owned by this function.
unsafe {
+ assert_unsafe_precondition!(
+ "ptr::write requires that the pointer argument is aligned and non-null",
+ [T](dst: *mut T) => is_aligned_and_not_null(dst)
+ );
copy_nonoverlapping(&src as *const T, dst, 1);
intrinsics::forget(src);
}
@@ -1468,7 +1496,10 @@ pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
pub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(src));
+ assert_unsafe_precondition!(
+ "ptr::read_volatile requires that the pointer argument is aligned and non-null",
+ [T](src: *const T) => is_aligned_and_not_null(src)
+ );
intrinsics::volatile_load(src)
}
}
@@ -1539,7 +1570,10 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
unsafe {
- assert_unsafe_precondition!(is_aligned_and_not_null(dst));
+ assert_unsafe_precondition!(
+ "ptr::write_volatile requires that the pointer argument is aligned and non-null",
+ [T](dst: *mut T) => is_aligned_and_not_null(dst)
+ );
intrinsics::volatile_store(dst, src);
}
}
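
These checks only fire when `core` is built with debug assertions, but they document the alignment contract. When the source genuinely may be unaligned, `read_unaligned` is the correct API, as this sketch shows:

```rust
use std::ptr;

fn main() {
    let bytes = [1u8, 0, 0, 0, 0];
    // A pointer one byte into the array is not 4-aligned in general, so
    // `ptr::read` would violate the precondition asserted above;
    // `read_unaligned` has no alignment requirement.
    let p = bytes[1..].as_ptr() as *const u32;
    let v = unsafe { ptr::read_unaligned(p) };
    assert_eq!(v, 0);
}
```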
@@ -1726,6 +1760,12 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
+/// When comparing wide pointers, both the address and the metadata are tested for equality.
+/// However, note that comparing trait object pointers (`*const dyn Trait`) is unreliable: pointers
+/// to values of the same underlying type can compare inequal (because vtables are duplicated in
+/// multiple codegen units), and pointers to values of *different* underlying type can compare equal
+/// (since identical vtables can be deduplicated within a codegen unit).
+///
/// # Examples
///
/// ```
@@ -1752,41 +1792,6 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// assert!(!std::ptr::eq(&a[..2], &a[..3]));
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
-///
-/// Traits are also compared by their implementation:
-///
-/// ```
-/// #[repr(transparent)]
-/// struct Wrapper { member: i32 }
-///
-/// trait Trait {}
-/// impl Trait for Wrapper {}
-/// impl Trait for i32 {}
-///
-/// let wrapper = Wrapper { member: 10 };
-///
-/// // Pointers have equal addresses.
-/// assert!(std::ptr::eq(
-/// &wrapper as *const Wrapper as *const u8,
-/// &wrapper.member as *const i32 as *const u8
-/// ));
-///
-/// // Objects have equal addresses, but `Trait` has different implementations.
-/// assert!(!std::ptr::eq(
-/// &wrapper as &dyn Trait,
-/// &wrapper.member as &dyn Trait,
-/// ));
-/// assert!(!std::ptr::eq(
-/// &wrapper as &dyn Trait as *const dyn Trait,
-/// &wrapper.member as &dyn Trait as *const dyn Trait,
-/// ));
-///
-/// // Converting the reference to a `*const u8` compares by address.
-/// assert!(std::ptr::eq(
-/// &wrapper as &dyn Trait as *const dyn Trait as *const u8,
-/// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
-/// ));
-/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
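
A short illustration of the caveat above: when the vtable half of a wide pointer cannot be trusted, comparing only the data pointers is the dependable check (a sketch; `Debug` is just a convenient object-safe trait):

```rust
use std::fmt::Debug;

fn main() {
    let x = 5_i32;
    let a: *const dyn Debug = &x;
    let b: *const dyn Debug = &x;

    // Dropping the metadata compares addresses only, which is reliable
    // even when vtable deduplication is not.
    assert!(std::ptr::eq(a.cast::<i32>(), b.cast::<i32>()));
}
```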
@@ -1834,7 +1839,7 @@ macro_rules! maybe_fnptr_doc {
$item
};
($a:ident @ #[$meta:meta] $item:item) => {
- #[cfg_attr(not(bootstrap), doc(fake_variadic))]
+ #[doc(fake_variadic)]
#[doc = "This trait is implemented for function pointers with up to twelve arguments."]
#[$meta]
$item
@@ -1854,9 +1859,16 @@ macro_rules! maybe_fnptr_doc {
// Impls for function pointers
macro_rules! fnptr_impls_safety_abi {
($FnTy: ty, $($Arg: ident),*) => {
+ fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
+ };
+ (@c_unwind $FnTy: ty, $($Arg: ident),*) => {
+ #[cfg(not(bootstrap))]
+ fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
+ };
+ (#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> PartialEq for $FnTy {
#[inline]
fn eq(&self, other: &Self) -> bool {
@@ -1867,13 +1879,13 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> Eq for $FnTy {}
}
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> PartialOrd for $FnTy {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
@@ -1884,7 +1896,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> Ord for $FnTy {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
@@ -1895,7 +1907,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> hash::Hash for $FnTy {
fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
state.write_usize(*self as usize)
@@ -1905,7 +1917,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::pointer_fmt_inner(*self as usize, f)
@@ -1915,7 +1927,7 @@ macro_rules! fnptr_impls_safety_abi {
maybe_fnptr_doc! {
$($Arg)* @
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ #[$meta]
impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::pointer_fmt_inner(*self as usize, f)
@@ -1930,16 +1942,22 @@ macro_rules! fnptr_impls_args {
fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
};
() => {
// No variadic functions with 0 parameters
fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
+ fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn() -> Ret, }
fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
+ fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn() -> Ret, }
};
}
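
The macro arms above stamp out the same trait impls for every supported ABI, now including `extern "C-unwind"`. A sketch of what they provide for an ordinary `fn` pointer:

```rust
fn double(x: i32) -> i32 { x * 2 }

fn main() {
    let p: fn(i32) -> i32 = double;
    let q: fn(i32) -> i32 = double;

    // PartialEq/Ord/Hash/Debug/Pointer for `fn` pointers all come from
    // this macro; the `@c_unwind` arms extend the same impls to the
    // `extern "C-unwind"` ABI.
    assert_eq!(p, q); // same function item here, so the pointers match
    println!("{:p}", p);
}
```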
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index fc3dd2a9b..6764002bc 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -35,7 +35,10 @@ impl<T: ?Sized> *mut T {
pub const fn is_null(self) -> bool {
// Compare via a cast to a thin pointer, so fat pointers are only
// considering their "data" part for null-ness.
- (self as *mut u8).guaranteed_eq(null_mut())
+ match (self as *mut u8).guaranteed_eq(null_mut()) {
+ None => false,
+ Some(res) => res,
+ }
}
/// Casts to a pointer of another type.
@@ -77,10 +80,14 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, mut val: *mut U) -> *mut U
+ pub fn with_metadata_of<U>(self, val: *const U) -> *mut U
where
U: ?Sized,
{
+ // Prepare in the type system that we will replace the pointer value with a mutable
+ // pointer, taking the mutable provenance from the `self` pointer.
+ let mut val = val as *mut U;
+ // Pointer to the pointer value within the value.
let target = &mut val as *mut *mut U as *mut *mut u8;
// SAFETY: In case of a thin pointer, this operation is identical
// to a simple assignment. In case of a fat pointer, with the current
@@ -100,8 +107,8 @@ impl<T: ?Sized> *mut T {
/// coercion.
///
/// [`cast_mut`]: #method.cast_mut
- #[unstable(feature = "ptr_const_cast", issue = "92675")]
- #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")]
+ #[stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
pub const fn cast_const(self) -> *const T {
self as _
}
@@ -160,7 +167,7 @@ impl<T: ?Sized> *mut T {
/// This is similar to `self as usize`, which semantically discards *provenance* and
/// *address-space* information. However, unlike `self as usize`, casting the returned address
/// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
- /// properly restore the lost information and obtain a dereferencable pointer, use
+ /// properly restore the lost information and obtain a dereferenceable pointer, use
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// If using those APIs is not possible because there is no way to preserve a pointer with the
@@ -255,7 +262,7 @@ impl<T: ?Sized> *mut T {
let offset = dest_addr.wrapping_sub(self_addr);
// This is the canonical desugaring of this operation
- self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+ self.wrapping_byte_offset(offset)
}
/// Creates a new pointer by mapping `self`'s address to a new one.
@@ -575,6 +582,20 @@ impl<T: ?Sized> *mut T {
)
}
+ /// Masks out bits of the pointer according to a mask.
+ ///
+ /// This is a convenience for `ptr.map_addr(|a| a & mask)`.
+ ///
+ /// For non-`Sized` pointees this operation changes only the data pointer,
+ /// leaving the metadata untouched.
+ #[unstable(feature = "ptr_mask", issue = "98290")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline(always)]
+ pub fn mask(self, mask: usize) -> *mut T {
+ let this = intrinsics::ptr_mask(self.cast::<()>(), mask) as *mut ();
+ from_raw_parts_mut::<T>(this, metadata(self))
+ }
+
/// Returns `None` if the pointer is null, or else returns a unique reference to
/// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_mut`]
/// must be used instead.
@@ -682,20 +703,16 @@ impl<T: ?Sized> *mut T {
/// Returns whether two pointers are guaranteed to be equal.
///
- /// At runtime this function behaves like `self == other`.
+ /// At runtime this function behaves like `Some(self == other)`.
/// However, in some contexts (e.g., compile-time evaluation),
/// it is not always possible to determine equality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be equal.
- /// But when it returns `true`, the pointers are guaranteed to be equal.
+ /// spuriously return `None` for pointers that later actually turn out to have their equality known.
+ /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
///
- /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
- ///
- /// [`guaranteed_ne`]: #method.guaranteed_ne
- ///
- /// The return value may change depending on the compiler version and unsafe code might not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+ /// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -704,29 +721,25 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const fn guaranteed_eq(self, other: *mut T) -> bool
+ pub const fn guaranteed_eq(self, other: *mut T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_eq(self as *const _, other as *const _)
+ (self as *const T).guaranteed_eq(other as _)
}
- /// Returns whether two pointers are guaranteed to be unequal.
+ /// Returns whether two pointers are guaranteed to be inequal.
///
- /// At runtime this function behaves like `self != other`.
+ /// At runtime this function behaves like `Some(self != other)`.
/// However, in some contexts (e.g., compile-time evaluation),
- /// it is not always possible to determine the inequality of two pointers, so this function may
- /// spuriously return `false` for pointers that later actually turn out to be unequal.
- /// But when it returns `true`, the pointers are guaranteed to be unequal.
- ///
- /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
- /// comparisons for which both functions return `false`.
+ /// it is not always possible to determine inequality of two pointers, so this function may
+ /// spuriously return `None` for pointers that later actually turn out to have their inequality known.
+ /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
///
- /// [`guaranteed_eq`]: #method.guaranteed_eq
- ///
- /// The return value may change depending on the compiler version and unsafe code might not
+ /// The return value may change from `Some` to `None` and vice versa depending on the compiler
+ /// version, and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
- /// for performance optimizations where spurious `false` return values by this function do not
+ /// for performance optimizations where spurious `None` return values by this function do not
/// affect the outcome, but just the performance.
/// The consequences of using this method to make runtime and compile-time code behave
/// differently have not been explored. This method should not be used to introduce such
@@ -735,11 +748,11 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[inline]
- pub const unsafe fn guaranteed_ne(self, other: *mut T) -> bool
+ pub const fn guaranteed_ne(self, other: *mut T) -> Option<bool>
where
T: Sized,
{
- intrinsics::ptr_guaranteed_ne(self as *const _, other as *const _)
+ (self as *const T).guaranteed_ne(other as _)
}
/// Calculates the distance between two pointers. The returned value is in
@@ -824,7 +837,7 @@ impl<T: ?Sized> *mut T {
/// }
/// ```
#[stable(feature = "ptr_offset_from", since = "1.47.0")]
- #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")]
+ #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn offset_from(self, origin: *const T) -> isize
@@ -858,7 +871,7 @@ impl<T: ?Sized> *mut T {
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
///
/// This computes the same value that [`offset_from`](#method.offset_from)
- /// would compute, but with the added precondition that that the offset is
+ /// would compute, but with the added precondition that the offset is
/// guaranteed to be non-negative. This method is equivalent to
/// `usize::from(self.offset_from(origin)).unwrap_unchecked()`,
/// but it provides slightly more information to the optimizer, which can
@@ -1545,20 +1558,23 @@ impl<T: ?Sized> *mut T {
/// Accessing adjacent `u8` as `u16`
///
/// ```
- /// # fn foo(n: usize) {
- /// # use std::mem::align_of;
+ /// use std::mem::align_of;
+ ///
/// # unsafe {
- /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
- /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let mut x = [5_u8, 6, 7, 8, 9];
+ /// let ptr = x.as_mut_ptr();
/// let offset = ptr.align_offset(align_of::<u16>());
- /// if offset < x.len() - n - 1 {
- /// let u16_ptr = ptr.add(offset) as *const u16;
- /// assert_ne!(*u16_ptr, 500);
+ ///
+ /// if offset < x.len() - 1 {
+ /// let u16_ptr = ptr.add(offset).cast::<u16>();
+ /// *u16_ptr = 0;
+ ///
+ /// assert!(x == [0, 0, 7, 8, 9] || x == [5, 0, 0, 8, 9]);
/// } else {
/// // while the pointer can be aligned via `offset`, it would point
/// // outside the allocation
/// }
- /// # } }
+ /// # }
/// ```
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
@@ -1614,11 +1630,8 @@ impl<T: ?Sized> *mut T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // SAFETY: `is_power_of_two()` will return `false` for zero.
- unsafe { core::intrinsics::assume(align != 0) };
-
// Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() % align == 0
+ self.cast::<u8>().addr() & align - 1 == 0
}
}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index f3ef094cb..c18264d13 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -2,6 +2,7 @@ use crate::cmp::Ordering;
use crate::convert::From;
use crate::fmt;
use crate::hash;
+use crate::intrinsics::assert_unsafe_precondition;
use crate::marker::Unsize;
use crate::mem::{self, MaybeUninit};
use crate::num::NonZeroUsize;
@@ -195,7 +196,10 @@ impl<T: ?Sized> NonNull<T> {
#[inline]
pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
// SAFETY: the caller must guarantee that `ptr` is non-null.
- unsafe { NonNull { pointer: ptr as _ } }
+ unsafe {
+ assert_unsafe_precondition!("NonNull::new_unchecked requires that the pointer is non-null", [T: ?Sized](ptr: *mut T) => !ptr.is_null());
+ NonNull { pointer: ptr as _ }
+ }
}
/// Creates a new `NonNull` if `ptr` is non-null.
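
A sketch contrasting the checked and unchecked constructors; with the new precondition assert, a debug build of `core` turns a null argument to `new_unchecked` into a panic instead of silent undefined behavior:

```rust
use std::ptr::NonNull;

fn main() {
    let mut x = 5_i32;

    // Checked constructor: returns `None` for a null pointer.
    let p = NonNull::new(&mut x as *mut i32).unwrap();

    // Unchecked constructor: the caller promises non-null; the new
    // assertion catches a violation in debug builds of core.
    let q = unsafe { NonNull::new_unchecked(&mut x as *mut i32) };
    assert_eq!(p, q);
}
```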