author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit     9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree       3fcebf40ed70e581d776a8a4c65923e8ec20e026  /library/core/src/ptr/mod.rs
parent     Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
download   rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.tar.xz
           rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.zip
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core/src/ptr/mod.rs')
-rw-r--r--  library/core/src/ptr/mod.rs  305
1 file changed, 76 insertions(+), 229 deletions(-)
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 818f1a919..d0cb2f715 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -374,6 +374,7 @@ use crate::hash;
use crate::intrinsics::{
self, assert_unsafe_precondition, is_aligned_and_not_null, is_nonoverlapping,
};
+use crate::marker::FnPtr;
use crate::mem::{self, MaybeUninit};
@@ -440,10 +441,18 @@ mod mut_ptr;
///
/// * `to_drop` must be [valid] for both reads and writes.
///
-/// * `to_drop` must be properly aligned.
+/// * `to_drop` must be properly aligned, even if `T` has size 0.
///
-/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold
-/// additional invariants - this is type-dependent.
+/// * `to_drop` must be nonnull, even if `T` has size 0.
+///
+/// * The value `to_drop` points to must be valid for dropping, which may mean
+/// it must uphold additional invariants. These invariants depend on the type
+/// of the value being dropped. For instance, when dropping a Box, the box's
+/// pointer to the heap must be valid.
+///
+/// * While `drop_in_place` is executing, the only way to access parts of
+/// `to_drop` is through the `&mut self` references supplied to the
+/// `Drop::drop` methods that `drop_in_place` invokes.
///
/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
@@ -451,8 +460,6 @@ mod mut_ptr;
/// again. [`write()`] can be used to overwrite data without causing it to be
/// dropped.
///
-/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned.
-///
/// [valid]: self#safety
///
/// # Examples
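A minimal sketch of satisfying the requirements listed above (using `ManuallyDrop` so the value is not dropped a second time when the binding goes out of scope):

```rust
use core::mem::ManuallyDrop;
use core::ptr;

fn main() {
    let mut slot = ManuallyDrop::new(String::from("hello"));
    // `p` is valid for reads and writes, properly aligned, and non-null.
    let p: *mut String = &mut *slot;
    // SAFETY: the String is valid for dropping and is never used or dropped again,
    // because `ManuallyDrop` suppresses the automatic drop.
    unsafe { ptr::drop_in_place(p) };
}
```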
@@ -1132,7 +1139,8 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
/// [valid]: self#safety
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
+#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read<T>(src: *const T) -> T {
// It would be semantically correct to implement this via `copy_nonoverlapping`
@@ -1167,26 +1175,7 @@ pub const unsafe fn read<T>(src: *const T) -> T {
"ptr::read requires that the pointer argument is aligned and non-null",
[T](src: *const T) => is_aligned_and_not_null(src)
);
-
- #[cfg(bootstrap)]
- {
- // We are calling the intrinsics directly to avoid function calls in the
- // generated code as `intrinsics::copy_nonoverlapping` is a wrapper function.
- extern "rust-intrinsic" {
- #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
- fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
- }
-
- // `src` cannot overlap `tmp` because `tmp` was just allocated on
- // the stack as a separate allocated object.
- let mut tmp = MaybeUninit::<T>::uninit();
- copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
- tmp.assume_init()
- }
- #[cfg(not(bootstrap))]
- {
- crate::intrinsics::read_via_copy(src)
- }
+ crate::intrinsics::read_via_copy(src)
}
}
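A minimal sketch of the newly const-stable `ptr::read` used in a const context (assuming a toolchain of 1.71.0 or later, where `const_ptr_read` is stable):

```rust
const VALUE: u32 = {
    let x = 42u32;
    // SAFETY: `&x` is a valid, aligned, non-null pointer to an initialized u32.
    unsafe { core::ptr::read(&x) }
};

fn main() {
    assert_eq!(VALUE, 42);
}
```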
@@ -1267,7 +1256,8 @@ pub const unsafe fn read<T>(src: *const T) -> T {
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
-#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+#[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
+#[rustc_allow_const_fn_unstable(const_mut_refs, const_maybe_uninit_as_mut_ptr)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
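A minimal sketch of `read_unaligned`, which this hunk const-stabilizes in the same way, reading from a deliberately misaligned position in a byte buffer:

```rust
fn main() {
    let bytes = [0u8, 1, 2, 3, 4];
    // Points one byte into the buffer, so it is almost certainly not 4-aligned.
    let p = bytes[1..].as_ptr() as *const u32;
    // SAFETY: `p` points to 4 initialized bytes; alignment is not required here.
    let v = unsafe { core::ptr::read_unaligned(p) };
    assert_eq!(v, u32::from_ne_bytes([1, 2, 3, 4]));
}
```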
@@ -1367,13 +1357,13 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn write<T>(dst: *mut T, src: T) {
- // We are calling the intrinsics directly to avoid function calls in the generated code
- // as `intrinsics::copy_nonoverlapping` is a wrapper function.
- extern "rust-intrinsic" {
- #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
- #[rustc_nounwind]
- fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
- }
+ // Semantically, it would be fine for this to be implemented as a
+ // `copy_nonoverlapping` and appropriate drop suppression of `src`.
+
+ // However, implementing via that currently produces more MIR than is ideal.
+ // Using an intrinsic keeps it down to just the simple `*dst = move src` in
+ // MIR (11 statements shorter, at the time of writing), and also allows
+ // `src` to stay an SSA value in codegen_ssa, rather than a memory one.
// SAFETY: the caller must guarantee that `dst` is valid for writes.
// `dst` cannot overlap `src` because the caller has mutable access
@@ -1383,8 +1373,7 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
"ptr::write requires that the pointer argument is aligned and non-null",
[T](dst: *mut T) => is_aligned_and_not_null(dst)
);
- copy_nonoverlapping(&src as *const T, dst, 1);
- intrinsics::forget(src);
+ intrinsics::write_via_move(dst, src)
}
}
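A minimal sketch of the `ptr::write` contract the new comment describes: `src` is moved into `*dst`, and whatever was previously at `*dst` is not dropped, which is exactly what initializing a `MaybeUninit` slot needs:

```rust
use core::mem::MaybeUninit;
use core::ptr;

fn main() {
    let mut slot = MaybeUninit::<String>::uninit();
    unsafe {
        // SAFETY: `slot.as_mut_ptr()` is valid for writes, aligned, and non-null.
        // The uninitialized old contents must not be dropped, and `write` does not drop them.
        ptr::write(slot.as_mut_ptr(), String::from("initialized"));
        // SAFETY: the slot was initialized by the write above.
        let s = slot.assume_init();
        assert_eq!(s, "initialized");
    }
}
```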
@@ -1651,8 +1640,8 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
- cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl, unchecked_shr,
- unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
+ assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl,
+ unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
@@ -1743,12 +1732,18 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
// computation produces.
+ let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
+ let byte_offset = wrapping_sub(aligned_address, addr);
+ // FIXME: Remove the assume after <https://github.com/llvm/llvm-project/issues/62502>
+ // SAFETY: Masking by `-a` can only affect the low bits, and thus cannot have reduced
+ // the value by more than `a-1`, so even though the intermediate values might have
+ // wrapped, the byte_offset is always in `[0, a)`.
+ unsafe { assume(byte_offset < a) };
+
// SAFETY: `stride == 0` case has been handled by the special case above.
let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
return if addr_mod_stride == 0 {
- let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
- let byte_offset = wrapping_sub(aligned_address, addr);
// SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
// addr has been verified to be aligned to the original type’s alignment requirements.
unsafe { exact_div(byte_offset, stride) }
@@ -1764,7 +1759,12 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// miracles, given the situations this case has to deal with.
// SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
- let gcdpow = unsafe { cttz_nonzero(stride).min(cttz_nonzero(a)) };
+ // FIXME(const-hack) replace with min
+ let gcdpow = unsafe {
+ let x = cttz_nonzero(stride);
+ let y = cttz_nonzero(a);
+ if x < y { x } else { y }
+ };
// SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a usize.
let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
// SAFETY: gcd is always greater or equal to 1.
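A minimal sketch of the rounding arithmetic the new SAFETY comment justifies: adding `a - 1` and masking with `-a` rounds the address up to a multiple of the power-of-two `a`, so the resulting byte offset always lies in `[0, a)` even when intermediate values wrap:

```rust
fn main() {
    let a: usize = 8; // must be a power of two, as in `align_offset`
    for addr in 0usize..32 {
        let aligned_address = addr.wrapping_add(a - 1) & a.wrapping_neg();
        let byte_offset = aligned_address.wrapping_sub(addr);
        // Masking by `-a` only clears low bits, so the offset stays below `a`.
        assert!(byte_offset < a);
        // And the adjusted address is indeed aligned to `a`.
        assert_eq!((addr + byte_offset) % a, 0);
    }
}
```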
@@ -1892,205 +1892,52 @@ pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
hashee.hash(into);
}
-#[cfg(bootstrap)]
-mod old_fn_ptr_impl {
- use super::*;
- // If this is a unary fn pointer, it adds a doc comment.
- // Otherwise, it hides the docs entirely.
- macro_rules! maybe_fnptr_doc {
- (@ #[$meta:meta] $item:item) => {
- #[doc(hidden)]
- #[$meta]
- $item
- };
- ($a:ident @ #[$meta:meta] $item:item) => {
- #[doc(fake_variadic)]
- #[doc = "This trait is implemented for function pointers with up to twelve arguments."]
- #[$meta]
- $item
- };
- ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
- #[doc(hidden)]
- #[$meta]
- $item
- };
- }
-
- // FIXME(strict_provenance_magic): function pointers have buggy codegen that
- // necessitates casting to a usize to get the backend to do the right thing.
- // for now I will break AVR to silence *a billion* lints. We should probably
- // have a proper "opaque function pointer type" to handle this kind of thing.
-
- // Impls for function pointers
- macro_rules! fnptr_impls_safety_abi {
- ($FnTy: ty, $($Arg: ident),*) => {
- fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
- };
- (@c_unwind $FnTy: ty, $($Arg: ident),*) => {
- fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
- };
- (#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> PartialEq for $FnTy {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- *self as usize == *other as usize
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> Eq for $FnTy {}
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> PartialOrd for $FnTy {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- (*self as usize).partial_cmp(&(*other as usize))
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> Ord for $FnTy {
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- (*self as usize).cmp(&(*other as usize))
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> hash::Hash for $FnTy {
- fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
- state.write_usize(*self as usize)
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(*self as usize, f)
- }
- }
- }
-
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(*self as usize, f)
- }
- }
- }
- }
- }
-
- macro_rules! fnptr_impls_args {
- ($($Arg: ident),+) => {
- fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- };
- () => {
- // No variadic functions with 0 parameters
- fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
- fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
- fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn() -> Ret, }
- fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
- fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
- fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn() -> Ret, }
- };
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> PartialEq for F {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.addr() == other.addr()
}
-
- fnptr_impls_args! {}
- fnptr_impls_args! { T }
- fnptr_impls_args! { A, B }
- fnptr_impls_args! { A, B, C }
- fnptr_impls_args! { A, B, C, D }
- fnptr_impls_args! { A, B, C, D, E }
- fnptr_impls_args! { A, B, C, D, E, F }
- fnptr_impls_args! { A, B, C, D, E, F, G }
- fnptr_impls_args! { A, B, C, D, E, F, G, H }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
- fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
}
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> Eq for F {}
-#[cfg(not(bootstrap))]
-mod new_fn_ptr_impl {
- use super::*;
- use crate::marker::FnPtr;
-
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> PartialEq for F {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- self.addr() == other.addr()
- }
- }
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> Eq for F {}
-
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> PartialOrd for F {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.addr().partial_cmp(&other.addr())
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> PartialOrd for F {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.addr().partial_cmp(&other.addr())
}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> Ord for F {
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- self.addr().cmp(&other.addr())
- }
+}
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> Ord for F {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.addr().cmp(&other.addr())
}
+}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> hash::Hash for F {
- fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
- state.write_usize(self.addr() as _)
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> hash::Hash for F {
+ fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
+ state.write_usize(self.addr() as _)
}
+}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> fmt::Pointer for F {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(self.addr() as _, f)
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> fmt::Pointer for F {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(self.addr() as _, f)
}
+}
- #[stable(feature = "fnptr_impls", since = "1.4.0")]
- impl<F: FnPtr> fmt::Debug for F {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(self.addr() as _, f)
- }
+#[stable(feature = "fnptr_impls", since = "1.4.0")]
+impl<F: FnPtr> fmt::Debug for F {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(self.addr() as _, f)
}
}
+
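A minimal sketch of what the blanket `impl<F: FnPtr>` blocks above provide to user code, replacing the per-arity macro expansion: comparison, ordering, hashing, and pointer-style formatting for function pointer types.

```rust
fn add_one(x: i32) -> i32 { x + 1 }
fn double(x: i32) -> i32 { x * 2 }

fn main() {
    let f: fn(i32) -> i32 = add_one;
    let g: fn(i32) -> i32 = double;

    // PartialEq/Eq compare the addresses the pointers carry.
    assert_eq!(f, f);
    assert_ne!(f, g);

    // fmt::Pointer and fmt::Debug both print the address.
    println!("{:p} {:?}", f, g);
}
```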
/// Create a `const` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
@@ -2121,7 +1968,7 @@ mod new_fn_ptr_impl {
/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2);
/// ```
///
-/// See [`addr_of_mut`] for how to create a pointer to unininitialized data.
+/// See [`addr_of_mut`] for how to create a pointer to uninitialized data.
/// Doing that with `addr_of` would not make much sense since one could only
/// read the data, and that would be Undefined Behavior.
#[stable(feature = "raw_ref_macros", since = "1.51.0")]
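A minimal sketch of the pattern the corrected sentence points to: using `addr_of_mut!` to obtain field pointers into uninitialized memory without ever creating a reference to uninitialized data:

```rust
use core::mem::MaybeUninit;
use core::ptr::addr_of_mut;

struct Demo { x: u32, y: u64 }

fn main() {
    let mut slot = MaybeUninit::<Demo>::uninit();
    let base = slot.as_mut_ptr();
    unsafe {
        // SAFETY: `addr_of_mut!` creates raw field pointers directly, so no
        // reference to the uninitialized fields is ever formed.
        addr_of_mut!((*base).x).write(1);
        addr_of_mut!((*base).y).write(2);
        // SAFETY: every field has been initialized above.
        let demo = slot.assume_init();
        assert_eq!(demo.x, 1);
        assert_eq!(demo.y, 2);
    }
}
```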