path: root/library/core/src/ptr
author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit      9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree        3fcebf40ed70e581d776a8a4c65923e8ec20e026 /library/core/src/ptr
parent      Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
download    rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.tar.xz
            rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.zip
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core/src/ptr')
-rw-r--r--  library/core/src/ptr/alignment.rs   16
-rw-r--r--  library/core/src/ptr/const_ptr.rs   27
-rw-r--r--  library/core/src/ptr/mod.rs        305
-rw-r--r--  library/core/src/ptr/mut_ptr.rs     39
-rw-r--r--  library/core/src/ptr/non_null.rs    30
-rw-r--r--  library/core/src/ptr/unique.rs      14
6 files changed, 154 insertions, 277 deletions
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index efe6d4183..bbf7199ff 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -9,8 +9,7 @@ use crate::{cmp, fmt, hash, mem, num};
/// Note that particularly large alignments, while representable in this type,
/// are likely not to be supported by actual allocators and linkers.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-#[derive(Copy, Clone, Eq)]
-#[derive_const(PartialEq)]
+#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(transparent)]
pub struct Alignment(AlignmentEnum);
@@ -170,7 +169,7 @@ impl From<Alignment> for usize {
#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl const cmp::Ord for Alignment {
+impl cmp::Ord for Alignment {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.as_nonzero().get().cmp(&other.as_nonzero().get())
@@ -179,7 +178,7 @@ impl const cmp::Ord for Alignment {
#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl const cmp::PartialOrd for Alignment {
+impl cmp::PartialOrd for Alignment {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
@@ -201,8 +200,7 @@ type AlignmentEnum = AlignmentEnum32;
#[cfg(target_pointer_width = "64")]
type AlignmentEnum = AlignmentEnum64;
-#[derive(Copy, Clone, Eq)]
-#[derive_const(PartialEq)]
+#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(u16)]
enum AlignmentEnum16 {
_Align1Shl0 = 1 << 0,
@@ -223,8 +221,7 @@ enum AlignmentEnum16 {
_Align1Shl15 = 1 << 15,
}
-#[derive(Copy, Clone, Eq)]
-#[derive_const(PartialEq)]
+#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(u32)]
enum AlignmentEnum32 {
_Align1Shl0 = 1 << 0,
@@ -261,8 +258,7 @@ enum AlignmentEnum32 {
_Align1Shl31 = 1 << 31,
}
-#[derive(Copy, Clone, Eq)]
-#[derive_const(PartialEq)]
+#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(u64)]
enum AlignmentEnum64 {
_Align1Shl0 = 1 << 0,
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 839afc57f..6e1e862d3 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -132,8 +132,8 @@ impl<T: ?Sized> *const T {
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
#[deprecated(
- since = "1.67",
- note = "replaced by the `exposed_addr` method, or update your code \
+ since = "1.67.0",
+ note = "replaced by the `expose_addr` method, or update your code \
to follow the strict provenance rules using its APIs"
)]
#[inline(always)]
@@ -161,7 +161,7 @@ impl<T: ?Sized> *const T {
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
#[deprecated(
- since = "1.67",
+ since = "1.67.0",
note = "replaced by the `ptr::from_exposed_addr` function, or update \
your code to follow the strict provenance rules using its APIs"
)]
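
For context, the round-trip these deprecation notes point at looks roughly like this (a sketch for a nightly of this era; `expose_addr` and `from_exposed_addr` sat behind the unstable `strict_provenance` feature and have since been renamed):

#![feature(strict_provenance)]
use core::ptr;

fn main() {
    let x = 42u32;
    let p: *const u32 = &x;
    // `expose_addr` returns the address and marks the provenance as exposed.
    let addr: usize = p.expose_addr();
    // `from_exposed_addr` re-creates a dereferenceable pointer from an
    // address whose provenance was previously exposed.
    let q: *const u32 = ptr::from_exposed_addr(addr);
    assert_eq!(unsafe { *q }, 42);
}
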
@@ -264,7 +264,7 @@ impl<T: ?Sized> *const T {
let dest_addr = addr as isize;
let offset = dest_addr.wrapping_sub(self_addr);
- // This is the canonical desugarring of this operation
+ // This is the canonical desugaring of this operation
self.wrapping_byte_offset(offset)
}
@@ -916,8 +916,16 @@ impl<T: ?Sized> *const T {
where
T: Sized,
{
+ #[cfg(bootstrap)]
// SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe { self.offset(count as isize) }
+ unsafe {
+ self.offset(count as isize)
+ }
+ #[cfg(not(bootstrap))]
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe {
+ intrinsics::offset(self, count)
+ }
}
/// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
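
Both branches above compute the same pointer: `add(count)` is documented as equivalent to `offset(count as isize)`, and the non-bootstrap branch simply hands the unsigned count to the intrinsic directly. A quick behavioral check:

fn main() {
    let xs = [1u16, 2, 3, 4];
    let p = xs.as_ptr();
    // SAFETY: index 3 is in bounds of `xs`.
    unsafe {
        assert_eq!(p.add(3), p.offset(3));
        assert_eq!(*p.add(3), 4);
    }
}
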
@@ -1187,7 +1195,7 @@ impl<T: ?Sized> *const T {
///
/// [`ptr::read`]: crate::ptr::read()
#[stable(feature = "pointer_methods", since = "1.26.0")]
- #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read(self) -> T
@@ -1228,7 +1236,7 @@ impl<T: ?Sized> *const T {
///
/// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
#[stable(feature = "pointer_methods", since = "1.26.0")]
- #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read_unaligned(self) -> T
@@ -1650,11 +1658,10 @@ impl<T> *const [T] {
/// }
/// ```
#[unstable(feature = "slice_ptr_get", issue = "74265")]
- #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
#[inline]
- pub const unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
+ pub unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
where
- I: ~const SliceIndex<[T]>,
+ I: SliceIndex<[T]>,
{
// SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
unsafe { index.get_unchecked(self) }
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 818f1a919..d0cb2f715 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -374,6 +374,7 @@ use crate::hash;
use crate::intrinsics::{
self, assert_unsafe_precondition, is_aligned_and_not_null, is_nonoverlapping,
};
+use crate::marker::FnPtr;
use crate::mem::{self, MaybeUninit};
@@ -440,10 +441,18 @@ mod mut_ptr;
///
/// * `to_drop` must be [valid] for both reads and writes.
///
-/// * `to_drop` must be properly aligned.
+/// * `to_drop` must be properly aligned, even if `T` has size 0.
///
-/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold
-/// additional invariants - this is type-dependent.
+/// * `to_drop` must be nonnull, even if `T` has size 0.
+///
+/// * The value `to_drop` points to must be valid for dropping, which may mean
+/// it must uphold additional invariants. These invariants depend on the type
+/// of the value being dropped. For instance, when dropping a Box, the box's
+/// pointer to the heap must be valid.
+///
+/// * While `drop_in_place` is executing, the only way to access parts of
+/// `to_drop` is through the `&mut self` references supplied to the
+/// `Drop::drop` methods that `drop_in_place` invokes.
///
/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
@@ -451,8 +460,6 @@ mod mut_ptr;
/// again. [`write()`] can be used to overwrite data without causing it to be
/// dropped.
///
-/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
-///
/// [valid]: self#safety
///
/// # Examples
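
The contract spelled out above corresponds to the usual manual-drop pattern (a minimal sketch, assuming an aligned, non-null place holding an initialized value that is never touched again):

use core::mem::MaybeUninit;
use core::ptr;

fn main() {
    // A place managed by hand: write a value in, then drop it in place.
    let mut slot: MaybeUninit<String> = MaybeUninit::uninit();
    slot.write(String::from("hello"));
    // SAFETY: `slot` is aligned, non-null, and holds an initialized String
    // that is not used again after this call.
    unsafe { ptr::drop_in_place(slot.as_mut_ptr()) };
}
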
@@ -1132,7 +1139,8 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
/// [valid]: self#safety
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
+#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read<T>(src: *const T) -> T {
// It would be semantically correct to implement this via `copy_nonoverlapping`
@@ -1167,26 +1175,7 @@ pub const unsafe fn read<T>(src: *const T) -> T {
"ptr::read requires that the pointer argument is aligned and non-null",
[T](src: *const T) => is_aligned_and_not_null(src)
);
-
- #[cfg(bootstrap)]
- {
- // We are calling the intrinsics directly to avoid function calls in the
- // generated code as `intrinsics::copy_nonoverlapping` is a wrapper function.
- extern "rust-intrinsic" {
- #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
- fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
- }
-
- // `src` cannot overlap `tmp` because `tmp` was just allocated on
- // the stack as a separate allocated object.
- let mut tmp = MaybeUninit::<T>::uninit();
- copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
- tmp.assume_init()
- }
- #[cfg(not(bootstrap))]
- {
- crate::intrinsics::read_via_copy(src)
- }
+ crate::intrinsics::read_via_copy(src)
}
}
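
The deleted bootstrap branch spells out the semantics that the `read_via_copy` intrinsic now provides in one step; the same idea expressed in plain user code (a sketch, not the real implementation):

use core::mem::MaybeUninit;
use core::ptr;

/// Semantically what `ptr::read` does: copy the bytes into a fresh stack
/// temporary and assume that temporary is initialized.
unsafe fn read_like<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // `src` cannot overlap `tmp`: `tmp` was just allocated on the stack.
    ptr::copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
    tmp.assume_init()
}

fn main() {
    let x = 7u64;
    assert_eq!(unsafe { read_like(&x) }, 7);
}
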
@@ -1267,7 +1256,8 @@ pub const unsafe fn read<T>(src: *const T) -> T {
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
-#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
+#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
@@ -1367,13 +1357,13 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn write<T>(dst: *mut T, src: T) {
- // We are calling the intrinsics directly to avoid function calls in the generated code
- // as `intrinsics::copy_nonoverlapping` is a wrapper function.
- extern "rust-intrinsic" {
- #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
- #[rustc_nounwind]
- fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
- }
+ // Semantically, it would be fine for this to be implemented as a
+ // `copy_nonoverlapping` and appropriate drop suppression of `src`.
+
+ // However, implementing via that currently produces more MIR than is ideal.
+ // Using an intrinsic keeps it down to just the simple `*dst = move src` in
+ // MIR (11 statements shorter, at the time of writing), and also allows
+ // `src` to stay an SSA value in codegen_ssa, rather than a memory one.
// SAFETY: the caller must guarantee that `dst` is valid for writes.
// `dst` cannot overlap `src` because the caller has mutable access
@@ -1383,8 +1373,7 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
"ptr::write requires that the pointer argument is aligned and non-null",
[T](dst: *mut T) => is_aligned_and_not_null(dst)
);
- copy_nonoverlapping(&src as *const T, dst, 1);
- intrinsics::forget(src);
+ intrinsics::write_via_move(dst, src)
}
}
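
The new comment explains why the intrinsic wins on MIR size; the removed lines remain a faithful semantic model in plain code (a sketch, not the real implementation):

use core::{mem, ptr};

/// Semantically what `ptr::write` does: move `src` into `*dst` without
/// reading or dropping whatever `*dst` previously held.
unsafe fn write_like<T>(dst: *mut T, src: T) {
    ptr::copy_nonoverlapping(&src as *const T, dst, 1);
    // Suppress `src`'s destructor: ownership has moved into `*dst`.
    mem::forget(src);
}

fn main() {
    let mut slot = 0u32;
    unsafe { write_like(&mut slot, 9) };
    assert_eq!(slot, 9);
}
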
@@ -1651,8 +1640,8 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
- cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl, unchecked_shr,
- unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
+ assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl,
+ unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
@@ -1743,12 +1732,18 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
// computation produces.
+ let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
+ let byte_offset = wrapping_sub(aligned_address, addr);
+ // FIXME: Remove the assume after <https://github.com/llvm/llvm-project/issues/62502>
+ // SAFETY: Masking by `-a` can only affect the low bits, and thus cannot have reduced
+ // the value by more than `a-1`, so even though the intermediate values might have
+ // wrapped, the byte_offset is always in `[0, a)`.
+ unsafe { assume(byte_offset < a) };
+
// SAFETY: `stride == 0` case has been handled by the special case above.
let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
return if addr_mod_stride == 0 {
- let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
- let byte_offset = wrapping_sub(aligned_address, addr);
// SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
// addr has been verified to be aligned to the original type’s alignment requirements.
unsafe { exact_div(byte_offset, stride) }
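
A worked instance of the hoisted computation, with addr = 13 and a = 8 (stable wrapping ops standing in for the intrinsics):

fn main() {
    let (addr, a): (usize, usize) = (13, 8); // `a` is a power of two
    let a_minus_one = a - 1;
    // Round up to the next multiple of `a`: (13 + 7) & !7 == 16.
    let aligned_address = addr.wrapping_add(a_minus_one) & 0usize.wrapping_sub(a);
    let byte_offset = aligned_address.wrapping_sub(addr);
    assert_eq!((aligned_address, byte_offset), (16, 3));
    // Masking with `-a` clears only low bits, i.e. subtracts at most a-1,
    // so the offset always lands in [0, a).
    assert!(byte_offset < a);
}
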
@@ -1764,7 +1759,12 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// miracles, given the situations this case has to deal with.
// SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
- let gcdpow = unsafe { cttz_nonzero(stride).min(cttz_nonzero(a)) };
+ // FIXME(const-hack) replace with min
+ let gcdpow = unsafe {
+ let x = cttz_nonzero(stride);
+ let y = cttz_nonzero(a);
+ if x < y { x } else { y }
+ };
// SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a usize.
let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
// SAFETY: gcd is always greater or equal to 1.
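
Because `a` is a power of two, the gcd reduces to a shift by the smaller trailing-zero count, which is what the const-hack `min` computes. For example, with stride = 12 and a = 8 (stable `trailing_zeros` standing in for `cttz_nonzero`):

fn main() {
    let (stride, a): (usize, usize) = (12, 8);
    let gcdpow = stride.trailing_zeros().min(a.trailing_zeros());
    let gcd = 1usize << gcdpow;
    assert_eq!(gcdpow, 2); // 12 = 0b1100, 8 = 0b1000
    assert_eq!(gcd, 4);    // gcd(12, 8) == 4
}
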
@@ -1892,205 +1892,52 @@ pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
hashee.hash(into);
}
-#[cfg(bootstrap)]
-mod old_fn_ptr_impl {
- use super::*;
- // If this is a unary fn pointer, it adds a doc comment.
- // Otherwise, it hides the docs entirely.
- macro_rules! maybe_fnptr_doc {
- (@ #[$meta:meta] $item:item) => {
- #[doc(hidden)]
- #[$meta]
- $item
- };
- ($a:ident @ #[$meta:meta] $item:item) => {
- #[doc(fake_variadic)]
- #[doc = "This trait is implemented for function pointers with up to twelve arguments."]
- #[$meta]
- $item
- };
- ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
- #[doc(hidden)]
- #[$meta]
- $item
- };
- }
-
- // FIXME(strict_provenance_magic): function pointers have buggy codegen that
- // necessitates casting to a usize to get the backend to do the right thing.
- // for now I will break AVR to silence *a billion* lints. We should probably
- // have a proper "opaque function pointer type" to handle this kind of thing.
-
- // Impls for function pointers
- macro_rules! fnptr_impls_safety_abi {
- ($FnTy: ty, $($Arg: ident),*) => {
- fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
- };
- (@c_unwind $FnTy: ty, $($Arg: ident),*) => {
- fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
- };
- (#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> PartialEq for $FnTy {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- *self as usize == *other as usize
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> Eq for $FnTy {}
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> PartialOrd for $FnTy {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- (*self as usize).partial_cmp(&(*other as usize))
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> Ord for $FnTy {
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- (*self as usize).cmp(&(*other as usize))
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> hash::Hash for $FnTy {
- fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
- state.write_usize(*self as usize)
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(*self as usize, f)
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(*self as usize, f)
- }
- }
- }
- }
- }
-
- macro_rules! fnptr_impls_args {
- ($($Arg: ident),+) => {
- fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- };
- () => {
- // No variadic functions with 0 parameters
- fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
- fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
- fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn() -> Ret, }
- fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
- fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
- fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn() -> Ret, }
- };
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> PartialEq for F {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.addr() == other.addr()
}
-
- fnptr_impls_args! {}
- fnptr_impls_args! { T }
- fnptr_impls_args! { A, B }
- fnptr_impls_args! { A, B, C }
- fnptr_impls_args! { A, B, C, D }
- fnptr_impls_args! { A, B, C, D, E }
- fnptr_impls_args! { A, B, C, D, E, F }
- fnptr_impls_args! { A, B, C, D, E, F, G }
- fnptr_impls_args! { A, B, C, D, E, F, G, H }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
}
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> Eq for F {}
-#[cfg(not(bootstrap))]
-mod new_fn_ptr_impl {
- use super::*;
- use crate::marker::FnPtr;
-
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> PartialEq for F {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- self.addr() == other.addr()
- }
- }
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> Eq for F {}
-
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> PartialOrd for F {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.addr().partial_cmp(&other.addr())
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> PartialOrd for F {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.addr().partial_cmp(&other.addr())
}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> Ord for F {
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- self.addr().cmp(&other.addr())
- }
+}
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> Ord for F {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.addr().cmp(&other.addr())
}
+}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> hash::Hash for F {
- fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
- state.write_usize(self.addr() as _)
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> hash::Hash for F {
+ fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
+ state.write_usize(self.addr() as _)
}
+}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> fmt::Pointer for F {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(self.addr() as _, f)
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> fmt::Pointer for F {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(self.addr() as _, f)
}
+}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> fmt::Debug for F {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(self.addr() as _, f)
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> fmt::Debug for F {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(self.addr() as _, f)
}
}
+
/// Create a `const` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
@@ -2121,7 +1968,7 @@ mod new_fn_ptr_impl {
/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2);
/// ```
///
-/// See [`addr_of_mut`] for how to create a pointer to unininitialized data.
+/// See [`addr_of_mut`] for how to create a pointer to uninitialized data.
/// Doing that with `addr_of` would not make much sense since one could only
/// read the data, and that would be Undefined Behavior.
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index ece5244e9..2fe5164c3 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -138,8 +138,8 @@ impl<T: ?Sized> *mut T {
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
#[deprecated(
- since = "1.67",
- note = "replaced by the `exposed_addr` method, or update your code \
+ since = "1.67.0",
+ note = "replaced by the `expose_addr` method, or update your code \
to follow the strict provenance rules using its APIs"
)]
#[inline(always)]
@@ -167,7 +167,7 @@ impl<T: ?Sized> *mut T {
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
#[deprecated(
- since = "1.67",
+ since = "1.67.0",
note = "replaced by the `ptr::from_exposed_addr_mut` function, or \
update your code to follow the strict provenance rules using its APIs"
)]
@@ -270,7 +270,7 @@ impl<T: ?Sized> *mut T {
let dest_addr = addr as isize;
let offset = dest_addr.wrapping_sub(self_addr);
- // This is the canonical desugarring of this operation
+ // This is the canonical desugaring of this operation
self.wrapping_byte_offset(offset)
}
@@ -473,10 +473,20 @@ impl<T: ?Sized> *mut T {
where
T: Sized,
{
+ #[cfg(bootstrap)]
// SAFETY: the caller must uphold the safety contract for `offset`.
// The obtained pointer is valid for writes since the caller must
// guarantee that it points to the same allocated object as `self`.
- unsafe { intrinsics::offset(self, count) as *mut T }
+ unsafe {
+ intrinsics::offset(self, count) as *mut T
+ }
+ #[cfg(not(bootstrap))]
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // The obtained pointer is valid for writes since the caller must
+ // guarantee that it points to the same allocated object as `self`.
+ unsafe {
+ intrinsics::offset(self, count)
+ }
}
/// Calculates the offset from a pointer in bytes.
@@ -1016,8 +1026,16 @@ impl<T: ?Sized> *mut T {
where
T: Sized,
{
+ #[cfg(bootstrap)]
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe {
+ self.offset(count as isize)
+ }
+ #[cfg(not(bootstrap))]
// SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe { self.offset(count as isize) }
+ unsafe {
+ intrinsics::offset(self, count)
+ }
}
/// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
@@ -1287,7 +1305,7 @@ impl<T: ?Sized> *mut T {
///
/// [`ptr::read`]: crate::ptr::read()
#[stable(feature = "pointer_methods", since = "1.26.0")]
- #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read(self) -> T
@@ -1328,7 +1346,7 @@ impl<T: ?Sized> *mut T {
///
/// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
#[stable(feature = "pointer_methods", since = "1.26.0")]
- #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read_unaligned(self) -> T
@@ -2036,11 +2054,10 @@ impl<T> *mut [T] {
/// }
/// ```
#[unstable(feature = "slice_ptr_get", issue = "74265")]
- #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
#[inline(always)]
- pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> *mut I::Output
+ pub unsafe fn get_unchecked_mut<I>(self, index: I) -> *mut I::Output
where
- I: ~const SliceIndex<[T]>,
+ I: SliceIndex<[T]>,
{
// SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
unsafe { index.get_unchecked_mut(self) }
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index 13f56c0ce..b492d2f07 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -449,6 +449,19 @@ impl<T: ?Sized> NonNull<T> {
// SAFETY: `self` is a `NonNull` pointer which is necessarily non-null
unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) }
}
+
+ /// See [`pointer::add`] for semantics and safety requirements.
+ #[inline]
+ pub(crate) const unsafe fn add(self, delta: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: We require that the delta stays in-bounds of the object, and
+ // thus it cannot become null, as that would require wrapping the
+ // address space, which no legal objects are allowed to do.
+ // And the caller promised the `delta` is sound to add.
+ unsafe { NonNull { pointer: self.pointer.add(delta) } }
+ }
}
impl<T> NonNull<[T]> {
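
The new `add` helper above is crate-private, but its justification carries over to code built from public APIs (a sketch: an in-bounds `add` can never turn a non-null pointer into null):

use core::ptr::NonNull;

fn main() {
    let mut buf = [10u8, 20, 30];
    let first = NonNull::from(&mut buf[0]);
    // SAFETY: offset 2 stays inside `buf`, and an in-bounds pointer cannot
    // wrap around the address space to become null.
    let third = unsafe { NonNull::new_unchecked(first.as_ptr().add(2)) };
    assert_eq!(unsafe { *third.as_ref() }, 30);
}
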
@@ -676,11 +689,10 @@ impl<T> NonNull<[T]> {
/// }
/// ```
#[unstable(feature = "slice_ptr_get", issue = "74265")]
- #[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
#[inline]
- pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> NonNull<I::Output>
+ pub unsafe fn get_unchecked_mut<I>(self, index: I) -> NonNull<I::Output>
where
- I: ~const SliceIndex<[T]>,
+ I: SliceIndex<[T]>,
{
// SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
// As a consequence, the resulting pointer cannot be null.
@@ -689,8 +701,7 @@ impl<T> NonNull<[T]> {
}
#[stable(feature = "nonnull", since = "1.25.0")]
-#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
-impl<T: ?Sized> const Clone for NonNull<T> {
+impl<T: ?Sized> Clone for NonNull<T> {
#[inline(always)]
fn clone(&self) -> Self {
*self
@@ -756,8 +767,7 @@ impl<T: ?Sized> hash::Hash for NonNull<T> {
}
#[unstable(feature = "ptr_internals", issue = "none")]
-#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
-impl<T: ?Sized> const From<Unique<T>> for NonNull<T> {
+impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
#[inline]
fn from(unique: Unique<T>) -> Self {
// SAFETY: A Unique pointer cannot be null, so the conditions for
@@ -767,8 +777,7 @@ impl<T: ?Sized> const From<Unique<T>> for NonNull<T> {
}
#[stable(feature = "nonnull", since = "1.25.0")]
-#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
-impl<T: ?Sized> const From<&mut T> for NonNull<T> {
+impl<T: ?Sized> From<&mut T> for NonNull<T> {
/// Converts a `&mut T` to a `NonNull<T>`.
///
/// This conversion is safe and infallible since references cannot be null.
@@ -780,8 +789,7 @@ impl<T: ?Sized> const From<&mut T> for NonNull<T> {
}
#[stable(feature = "nonnull", since = "1.25.0")]
-#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
-impl<T: ?Sized> const From<&T> for NonNull<T> {
+impl<T: ?Sized> From<&T> for NonNull<T> {
/// Converts a `&T` to a `NonNull<T>`.
///
/// This conversion is safe and infallible since references cannot be null.
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index 64616142b..a853f15ed 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -70,7 +70,8 @@ impl<T: Sized> Unique<T> {
#[must_use]
#[inline]
pub const fn dangling() -> Self {
- Self::from(NonNull::dangling())
+ // FIXME(const-hack) replace with `From`
+ Unique { pointer: NonNull::dangling(), _marker: PhantomData }
}
}
@@ -134,13 +135,14 @@ impl<T: ?Sized> Unique<T> {
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
pub const fn cast<U>(self) -> Unique<U> {
- Unique::from(self.pointer.cast())
+ // FIXME(const-hack): replace with `From`
+ // SAFETY: is `NonNull`
+ unsafe { Unique::new_unchecked(self.pointer.cast().as_ptr()) }
}
}
#[unstable(feature = "ptr_internals", issue = "none")]
-#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
-impl<T: ?Sized> const Clone for Unique<T> {
+impl<T: ?Sized> Clone for Unique<T> {
#[inline]
fn clone(&self) -> Self {
*self
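
The FIXME(const-hack) markers in this file (and the hand-written `min` in `align_offset`) share one cause: with the const-trait machinery removed, trait calls such as `From::from` and `Ord::min` can no longer appear in a `const fn`, so trait-free equivalents are inlined. A stable-Rust illustration of the pattern:

// `x.min(y)` goes through `Ord::min`, a trait call that is not const-callable,
// so the comparison is written out by hand.
const fn min_const(x: usize, y: usize) -> usize {
    if x < y { x } else { y }
}

fn main() {
    assert_eq!(min_const(3, 5), 3);
}
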
@@ -171,7 +173,7 @@ impl<T: ?Sized> fmt::Pointer for Unique<T> {
}
#[unstable(feature = "ptr_internals", issue = "none")]
-impl<T: ?Sized> const From<&mut T> for Unique<T> {
+impl<T: ?Sized> From<&mut T> for Unique<T> {
/// Converts a `&mut T` to a `Unique<T>`.
///
/// This conversion is infallible since references cannot be null.
@@ -182,7 +184,7 @@ impl<T: ?Sized> const From<&mut T> for Unique<T> {
}
#[unstable(feature = "ptr_internals", issue = "none")]
-impl<T: ?Sized> const From<NonNull<T>> for Unique<T> {
+impl<T: ?Sized> From<NonNull<T>> for Unique<T> {
/// Converts a `NonNull<T>` to a `Unique<T>`.
///
/// This conversion is infallible since `NonNull` cannot be null.