Diffstat (limited to 'library/core/src/ptr')
-rw-r--r--  library/core/src/ptr/alignment.rs |  24
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 338
-rw-r--r--  library/core/src/ptr/metadata.rs  |   1
-rw-r--r--  library/core/src/ptr/mod.rs       | 111
-rw-r--r--  library/core/src/ptr/mut_ptr.rs   | 350
-rw-r--r--  library/core/src/ptr/non_null.rs  |   8
6 files changed, 683 insertions(+), 149 deletions(-)
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index 1390e09dd..64a5290c3 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -9,7 +9,9 @@ use crate::{cmp, fmt, hash, mem, num};
/// Note that particularly large alignments, while representable in this type,
/// are likely not to be supported by actual allocators and linkers.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(transparent)]
pub struct Alignment(AlignmentEnum);
@@ -167,16 +169,18 @@ impl From<Alignment> for usize {
}
}
+#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl cmp::Ord for Alignment {
+impl const cmp::Ord for Alignment {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
- self.as_nonzero().cmp(&other.as_nonzero())
+ self.as_nonzero().get().cmp(&other.as_nonzero().get())
}
}
+#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl cmp::PartialOrd for Alignment {
+impl const cmp::PartialOrd for Alignment {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
@@ -198,7 +202,9 @@ type AlignmentEnum = AlignmentEnum32;
#[cfg(target_pointer_width = "64")]
type AlignmentEnum = AlignmentEnum64;
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u16)]
enum AlignmentEnum16 {
_Align1Shl0 = 1 << 0,
@@ -219,7 +225,9 @@ enum AlignmentEnum16 {
_Align1Shl15 = 1 << 15,
}
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u32)]
enum AlignmentEnum32 {
_Align1Shl0 = 1 << 0,
@@ -256,7 +264,9 @@ enum AlignmentEnum32 {
_Align1Shl31 = 1 << 31,
}
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u64)]
enum AlignmentEnum64 {
_Align1Shl0 = 1 << 0,
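Why the `.get()` indirection in the new `impl const Ord for Alignment`: `NonZeroUsize`'s `Ord` impl is not const-callable yet, so the comparison drops to the raw `usize` values instead. A minimal sketch of the same pattern on stable Rust (the `nz_max` helper is illustrative, not part of this change):

```
use core::num::NonZeroUsize;

// Compare two NonZeroUsize values inside a const fn by going through the
// raw integers with `.get()`; primitive comparisons are always const.
const fn nz_max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() >= b.get() { a } else { b }
}

const FOUR: NonZeroUsize = match NonZeroUsize::new(4) {
    Some(n) => n,
    None => unreachable!(),
};
const EIGHT: NonZeroUsize = match NonZeroUsize::new(8) {
    Some(n) => n,
    None => unreachable!(),
};
const _: () = assert!(nz_max(FOUR, EIGHT).get() == 8);
```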
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 5a083227b..d34813599 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -45,7 +45,7 @@ impl<T: ?Sized> *const T {
/// Casts to a pointer of another type.
#[stable(feature = "ptr_cast", since = "1.38.0")]
#[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
- #[inline]
+ #[inline(always)]
pub const fn cast<U>(self) -> *const U {
self as _
}
@@ -79,19 +79,14 @@ impl<T: ?Sized> *const T {
/// }
/// ```
#[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, mut val: *const U) -> *const U
+ pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
where
U: ?Sized,
{
- let target = &mut val as *mut *const U as *mut *const u8;
- // SAFETY: In case of a thin pointer, this operations is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *target = self as *const u8 };
- val
+ from_raw_parts::<U>(self as *const (), metadata(meta))
}
/// Changes constness without changing the type.
@@ -100,6 +95,7 @@ impl<T: ?Sized> *const T {
/// refactored.
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[inline(always)]
pub const fn cast_mut(self) -> *mut T {
self as _
}
@@ -117,13 +113,21 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// let array = [13, 42];
/// let p0: *const i32 = &array[0];
/// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
/// let p1: *const i32 = &array[1];
/// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// # }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `exposed_addr` method, or update your code \
+ to follow the strict provenance rules using its APIs"
+ )]
+ #[inline(always)]
pub fn to_bits(self) -> usize
where
T: Sized,
@@ -140,11 +144,20 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// use std::ptr::NonNull;
/// let dangling: *const u8 = NonNull::dangling().as_ptr();
/// assert_eq!(<*const u8>::from_bits(1), dangling);
+ /// # }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `ptr::from_exposed_addr` function, or update \
+ your code to follow the strict provenance rules using its APIs"
+ )]
+ #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
+ #[inline(always)]
pub fn from_bits(bits: usize) -> Self
where
T: Sized,
@@ -176,7 +189,7 @@ impl<T: ?Sized> *const T {
/// might change in the future (including possibly weakening this so it becomes wholly
/// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
@@ -213,7 +226,7 @@ impl<T: ?Sized> *const T {
///
/// [`from_exposed_addr`]: from_exposed_addr
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize
where
@@ -478,8 +491,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
- let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -559,7 +571,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_offset(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
/// Masks out bits of the pointer according to a mask.
@@ -568,12 +580,36 @@ impl<T: ?Sized> *const T {
///
/// For non-`Sized` pointees this operation changes only the data pointer,
/// leaving the metadata untouched.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(ptr_mask, strict_provenance)]
+ /// let v = 17_u32;
+ /// let ptr: *const u32 = &v;
+ ///
+ /// // `u32` is 4-byte aligned,
+ /// // which means that the lower 2 bits are always 0.
+ /// let tag_mask = 0b11;
+ /// let ptr_mask = !tag_mask;
+ ///
+ /// // We can store something in these lower bits
+ /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
+ ///
+ /// // Get the "tag" back
+ /// let tag = tagged_ptr.addr() & tag_mask;
+ /// assert_eq!(tag, 0b10);
+ ///
+ /// // Note that `tagged_ptr` is unaligned; it's UB to read from it.
+ /// // To get the original pointer, `mask` can be used:
+ /// let masked_ptr = tagged_ptr.mask(ptr_mask);
+ /// assert_eq!(unsafe { *masked_ptr }, 17);
+ /// ```
#[unstable(feature = "ptr_mask", issue = "98290")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
pub fn mask(self, mask: usize) -> *const T {
- let this = intrinsics::ptr_mask(self.cast::<()>(), mask);
- from_raw_parts::<T>(this, metadata(self))
+ intrinsics::ptr_mask(self.cast::<()>(), mask).with_metadata_of(self)
}
/// Calculates the distance between two pointers. The returned value is in
@@ -684,7 +720,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
}
@@ -914,8 +950,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
- let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer (convenience for
@@ -1001,8 +1036,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
- let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1082,7 +1116,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1162,7 +1196,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
/// Reads the value from `self` without moving it. This leaves the
@@ -1304,6 +1338,8 @@ impl<T: ?Sized> *const T {
/// }
/// # }
/// ```
+ #[must_use]
+ #[inline]
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
pub const fn align_offset(self, align: usize) -> usize
@@ -1314,32 +1350,149 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- fn rt_impl<T>(p: *const T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
- }
+ #[cfg(bootstrap)]
+ {
+ fn rt_impl<T>(p: *const T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
+ usize::MAX
+ }
- const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
- usize::MAX
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`,
+ // algorithm correctness cannot depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
+ #[cfg(not(bootstrap))]
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) }
+ }
}
/// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned(self) -> bool
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned(self) -> bool
where
T: Sized,
{
- self.is_aligned_to(core::mem::align_of::<T>())
+ self.is_aligned_to(mem::align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@@ -1350,16 +1503,121 @@ impl<T: ?Sized> *const T {
/// # Panics
///
/// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// cannot be more strictly aligned than the reference's underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned_to(self, align: usize) -> bool {
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
- // Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() & align - 1 == 0
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ self.cast::<()>().align_offset(align) == 0
}
}
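The recurring rewrite in this file, collapsing `from_raw_parts::<T>(...cast::<()>(), metadata(self))` into `with_metadata_of`, is easiest to see on a wide pointer. A hedged, nightly-only sketch (feature names as of this commit) of the behavior the helper provides:

```
#![feature(set_ptr_value, slice_ptr_len)]

fn main() {
    let s: &[u8] = &[1, 2, 3, 4];
    let p: *const [u8] = s;

    // Move the data pointer one byte forward, then re-attach the original
    // wide-pointer metadata (here, the slice length). This is what the
    // `byte_add`/`byte_offset` family now does internally.
    let moved: *const [u8] = p.cast::<u8>().wrapping_add(1).with_metadata_of(p);

    assert_eq!(moved.len(), p.len()); // length metadata carried over
    assert_eq!(moved as *const u8 as usize, p as *const u8 as usize + 1);
}
```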
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index caa10f181..a8604843e 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -50,6 +50,7 @@ use crate::hash::{Hash, Hasher};
///
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Pointee {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 565c38d22..48b2e88da 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -35,7 +35,8 @@
//! be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//! underlying object is live and no reference (just raw pointers) is used to
-//! access the same memory.
+//! access the same memory. That is, reference and pointer accesses cannot be
+//! interleaved.
//!
//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
@@ -64,7 +65,6 @@
//! separate allocated object), heap allocations (each allocation created by the global allocator is
//! a separate allocated object), and `static` variables.
//!
-//!
//! # Strict Provenance
//!
//! **The following text is non-normative, insufficiently formal, and is an extremely strict
@@ -613,9 +613,10 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
/// [module documentation][crate::ptr] for details.
#[must_use]
-#[inline]
+#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
@@ -650,9 +651,10 @@ where
/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
/// [module documentation][crate::ptr] for details.
#[must_use]
-#[inline]
+#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -908,21 +910,15 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
);
}
- // NOTE(scottmcm) Miri is disabled here as reading in smaller units is a
- // pessimization for it. Also, if the type contains any unaligned pointers,
- // copying those over multiple reads is difficult to support.
- #[cfg(not(miri))]
+ // Split up the slice into small power-of-two-sized chunks that LLVM is able
+ // to vectorize (unless it's a special type with more-than-pointer alignment,
+ // because we don't want to pessimize things like slices of SIMD vectors.)
+ if mem::align_of::<T>() <= mem::size_of::<usize>()
+ && (!mem::size_of::<T>().is_power_of_two()
+ || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
{
- // Split up the slice into small power-of-two-sized chunks that LLVM is able
- // to vectorize (unless it's a special type with more-than-pointer alignment,
- // because we don't want to pessimize things like slices of SIMD vectors.)
- if mem::align_of::<T>() <= mem::size_of::<usize>()
- && (!mem::size_of::<T>().is_power_of_two()
- || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
- {
- attempt_swap_as_chunks!(usize);
- attempt_swap_as_chunks!(u8);
- }
+ attempt_swap_as_chunks!(usize);
+ attempt_swap_as_chunks!(u8);
}
// SAFETY: Same preconditions as this function
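The condition hoisted out of the `#[cfg(not(miri))]` block decides when `swap_nonoverlapping` falls back to `usize`/`u8` chunks. Restated as a standalone predicate (an illustrative helper, not part of the patch; the concrete assertions assume a typical 64-bit target):

```
use std::mem;

// Chunked swapping applies when `T` is at most pointer-aligned and its size
// is either not a power of two or larger than two words; power-of-two types
// up to two words keep their natural whole-value swaps.
fn swaps_as_chunks<T>() -> bool {
    mem::align_of::<T>() <= mem::size_of::<usize>()
        && (!mem::size_of::<T>().is_power_of_two()
            || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
}

fn main() {
    assert!(!swaps_as_chunks::<u64>()); // 8 bytes, power of two: swapped whole
    assert!(swaps_as_chunks::<[u8; 3]>()); // not a power of two: chunked
    assert!(swaps_as_chunks::<[u8; 32]>()); // power of two, > two words: chunked
}
```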
@@ -1580,10 +1576,14 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
/// Align pointer `p`.
///
-/// Calculate offset (in terms of elements of `stride` stride) that has to be applied
+/// Calculate offset (in terms of elements of `size_of::<T>()` stride) that has to be applied
/// to pointer `p` so that pointer `p` would get aligned to `a`.
///
-/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
+/// # Safety
+/// `a` must be a power of two.
+///
+/// # Notes
+/// This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
@@ -1593,12 +1593,12 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
///
/// Any questions go to @nagisa.
#[lang = "align_offset"]
-pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
+pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
- cttz_nonzero, exact_div, unchecked_rem, unchecked_shl, unchecked_shr, unchecked_sub,
- wrapping_add, wrapping_mul, wrapping_sub,
+ cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl, unchecked_shr,
+ unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
@@ -1610,7 +1610,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
///
/// Implementation of this function shall not panic. Ever.
#[inline]
- unsafe fn mod_inv(x: usize, m: usize) -> usize {
+ const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note, that this table does not contain values where inverse does not exist (i.e., for
@@ -1618,40 +1618,48 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
- /// INV_TABLE_MOD²
- const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
- let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
- if m <= INV_TABLE_MOD {
- table_inverse & m_minus_one
- } else {
- // We iterate "up" using the following formula:
- //
- // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+ let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
+ let mut mod_gate = INV_TABLE_MOD;
+ // We iterate "up" using the following formula:
+ //
+ // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+ //
+ // This transformation needs to be applied at least until `2²ⁿ ≥ m`, at which point we can
+ // finally reduce the computation to our desired `m` by taking `inverse mod m`.
+ //
+ // This computation is `O(log log m)`, which is to say, that on 64-bit machines this loop
+ // will always finish in at most 4 iterations.
+ loop {
+ // y = y * (2 - xy) mod n
//
- // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
- let mut inverse = table_inverse;
- let mut going_mod = INV_TABLE_MOD_SQUARED;
- loop {
- // y = y * (2 - xy) mod n
- //
- // Note, that we use wrapping operations here intentionally – the original formula
- // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
- // usize::MAX` instead, because we take the result `mod n` at the end
- // anyway.
- inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
- if going_mod >= m {
- return inverse & m_minus_one;
- }
- going_mod = wrapping_mul(going_mod, going_mod);
+ // Note, that we use wrapping operations here intentionally – the original formula
+ // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
+ // usize::MAX` instead, because we take the result `mod n` at the end
+ // anyway.
+ if mod_gate >= m {
+ break;
}
+ inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
+ let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
+ if overflow {
+ break;
+ }
+ mod_gate = new_gate;
}
+ inverse & m_minus_one
}
- let addr = p.addr();
let stride = mem::size_of::<T>();
+
+ // SAFETY: This is just an inlined `p.addr()` (which is not
+ // a `const fn` so we cannot call it).
+ // During const eval, we hook this function to ensure that the pointer never
+ // has provenance, making this sound.
+ let addr: usize = unsafe { mem::transmute(p) };
+
// SAFETY: `a` is a power-of-two, therefore non-zero.
let a_minus_one = unsafe { unchecked_sub(a, 1) };
@@ -1761,7 +1769,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// (which is what the `PartialEq for &T` implementation does).
///
/// When comparing wide pointers, both the address and the metadata are tested for equality.
-/// However, note that comparing trait object pointers (`*const dyn Trait`) is unrealiable: pointers
+/// However, note that comparing trait object pointers (`*const dyn Trait`) is unreliable: pointers
/// to values of the same underlying type can compare inequal (because vtables are duplicated in
/// multiple codegen units), and pointers to values of *different* underlying type can compare equal
/// (since identical vtables can be deduplicated within a codegen unit).
@@ -1793,7 +1801,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
-#[inline]
+#[inline(always)]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
}
@@ -1862,7 +1870,6 @@ macro_rules! fnptr_impls_safety_abi {
fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
};
(@c_unwind $FnTy: ty, $($Arg: ident),*) => {
- #[cfg(not(bootstrap))]
fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
};
(#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
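The rewritten `mod_inv` loop is an instance of the quadratic-lifting identity quoted in its comment. A self-contained sketch of the same algorithm (illustrative only; the real code seeds from `INV_TABLE_MOD_16` and uses intrinsics):

```
// If x*y ≡ 1 (mod 2ⁿ), then x*(y*(2 - x*y)) ≡ 1 (mod 2²ⁿ): the number of
// correct low bits doubles each round, so a 64-bit modulus needs few rounds.
fn mod_inv_pow2(x: usize, m: usize) -> usize {
    assert!(m.is_power_of_two() && x % 2 == 1);
    let mut inverse = x; // any odd x is its own inverse modulo 8
    let mut mod_gate = 8usize;
    loop {
        if mod_gate >= m {
            break;
        }
        inverse = inverse.wrapping_mul(2usize.wrapping_sub(x.wrapping_mul(inverse)));
        let (new_gate, overflow) = mod_gate.overflowing_mul(mod_gate);
        if overflow {
            break;
        }
        mod_gate = new_gate;
    }
    inverse & (m - 1)
}

fn main() {
    let m = 1 << 16;
    for x in [1usize, 3, 7, 11, 12345] {
        assert_eq!(x.wrapping_mul(mod_inv_pow2(x, m)) & (m - 1), 1);
    }
}
```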
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 6764002bc..c924a90b1 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -78,23 +78,14 @@ impl<T: ?Sized> *mut T {
/// }
/// ```
#[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, val: *const U) -> *mut U
+ pub const fn with_metadata_of<U>(self, meta: *const U) -> *mut U
where
U: ?Sized,
{
- // Prepare in the type system that we will replace the pointer value with a mutable
- // pointer, taking the mutable provenance from the `self` pointer.
- let mut val = val as *mut U;
- // Pointer to the pointer value within the value.
- let target = &mut val as *mut *mut U as *mut *mut u8;
- // SAFETY: In case of a thin pointer, this operations is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *target = self as *mut u8 };
- val
+ from_raw_parts_mut::<U>(self as *mut (), metadata(meta))
}
/// Changes constness without changing the type.
@@ -109,6 +100,7 @@ impl<T: ?Sized> *mut T {
/// [`cast_mut`]: #method.cast_mut
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[inline(always)]
pub const fn cast_const(self) -> *const T {
self as _
}
@@ -126,14 +118,22 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// let mut array = [13, 42];
/// let mut it = array.iter_mut();
/// let p0: *mut i32 = it.next().unwrap();
/// assert_eq!(<*mut _>::from_bits(p0.to_bits()), p0);
/// let p1: *mut i32 = it.next().unwrap();
/// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `exposed_addr` method, or update your code \
+ to follow the strict provenance rules using its APIs"
+ )]
+ #[inline(always)]
pub fn to_bits(self) -> usize
where
T: Sized,
@@ -150,11 +150,20 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// use std::ptr::NonNull;
/// let dangling: *mut u8 = NonNull::dangling().as_ptr();
/// assert_eq!(<*mut u8>::from_bits(1), dangling);
+ /// }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `ptr::from_exposed_addr_mut` function, or \
+ update your code to follow the strict provenance rules using its APIs"
+ )]
+ #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
+ #[inline(always)]
pub fn from_bits(bits: usize) -> Self
where
T: Sized,
@@ -186,7 +195,7 @@ impl<T: ?Sized> *mut T {
/// might change in the future (including possibly weakening this so it becomes wholly
/// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
@@ -223,7 +232,7 @@ impl<T: ?Sized> *mut T {
///
/// [`from_exposed_addr_mut`]: from_exposed_addr_mut
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize
where
@@ -496,8 +505,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
- let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -576,10 +584,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
- from_raw_parts_mut::<T>(
- self.cast::<u8>().wrapping_offset(count).cast::<()>(),
- metadata(self),
- )
+ self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
/// Masks out bits of the pointer according to a mask.
@@ -588,12 +593,39 @@ impl<T: ?Sized> *mut T {
///
/// For non-`Sized` pointees this operation changes only the data pointer,
/// leaving the metadata untouched.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(ptr_mask, strict_provenance)]
+ /// let mut v = 17_u32;
+ /// let ptr: *mut u32 = &mut v;
+ ///
+ /// // `u32` is 4-byte aligned,
+ /// // which means that the lower 2 bits are always 0.
+ /// let tag_mask = 0b11;
+ /// let ptr_mask = !tag_mask;
+ ///
+ /// // We can store something in these lower bits
+ /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
+ ///
+ /// // Get the "tag" back
+ /// let tag = tagged_ptr.addr() & tag_mask;
+ /// assert_eq!(tag, 0b10);
+ ///
+ /// // Note that `tagged_ptr` is unaligned, it's UB to read from/write to it.
+ /// // To get original pointer `mask` can be used:
+ /// let masked_ptr = tagged_ptr.mask(ptr_mask);
+ /// assert_eq!(unsafe { *masked_ptr }, 17);
+ ///
+ /// unsafe { *masked_ptr = 0 };
+ /// assert_eq!(v, 0);
+ /// ```
#[unstable(feature = "ptr_mask", issue = "98290")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
pub fn mask(self, mask: usize) -> *mut T {
- let this = intrinsics::ptr_mask(self.cast::<()>(), mask) as *mut ();
- from_raw_parts_mut::<T>(this, metadata(self))
+ intrinsics::ptr_mask(self.cast::<()>(), mask).cast_mut().with_metadata_of(self)
}
/// Returns `None` if the pointer is null, or else returns a unique reference to
@@ -861,7 +893,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
}
@@ -1020,8 +1052,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
- let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer (convenience for
@@ -1107,8 +1138,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
- let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1188,7 +1218,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
- from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1268,7 +1298,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
- from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
/// Reads the value from `self` without moving it. This leaves the
@@ -1576,6 +1606,8 @@ impl<T: ?Sized> *mut T {
/// }
/// # }
/// ```
+ #[must_use]
+ #[inline]
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
pub const fn align_offset(self, align: usize) -> usize
@@ -1586,32 +1618,151 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- fn rt_impl<T>(p: *mut T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
+ #[cfg(bootstrap)]
+ {
+ fn rt_impl<T>(p: *mut T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
+ usize::MAX
+ }
+
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`,
+ // algorithm correctness cannot depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
- const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
- usize::MAX
+ #[cfg(not(bootstrap))]
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) }
}
-
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
/// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_mut_refs)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// // Also, note that mutable references are not allowed in the final value of constants.
+ /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *mut AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned(self) -> bool
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned(self) -> bool
where
T: Sized,
{
- self.is_aligned_to(core::mem::align_of::<T>())
+ self.is_aligned_to(mem::align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@@ -1622,16 +1773,123 @@ impl<T: ?Sized> *mut T {
/// # Panics
///
/// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// cannot be more strictly aligned than the reference's underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_mut_refs)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// // Also, note that mutable references are not allowed in the final value of constants.
+ /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *mut u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned_to(self, align: usize) -> bool {
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
- // Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() & align - 1 == 0
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ self.cast::<()>().align_offset(align) == 0
}
}
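Both pointer types also relax `byte_offset_from` to accept an origin with a different pointee type (`origin: *const U`, `U: ?Sized`). A hedged nightly sketch of what the new signature permits (the `Header` struct is illustrative):

```
#![feature(pointer_byte_offsets)]

#[repr(C)]
struct Header {
    tag: u8,
    value: u32, // byte offset 4, assuming `u32` is 4-byte aligned
}

fn main() {
    let h = Header { tag: 1, value: 2 };
    let base: *const Header = &h;
    let field: *const u32 = &h.value;

    // SAFETY: both pointers lie within the same allocation (`h`).
    // Before this change, `origin` had to be `*const u32` as well.
    let offset = unsafe { field.byte_offset_from(base) };
    assert_eq!(offset, 4);
}
```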
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index c18264d13..c4348169c 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -330,7 +330,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const fn as_ptr(self) -> *mut T {
self.pointer as *mut T
}
@@ -378,7 +378,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const unsafe fn as_ref<'a>(&self) -> &'a T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
@@ -429,7 +429,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a mutable reference.
@@ -703,7 +703,7 @@ impl<T> NonNull<[T]> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for NonNull<T> {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}