From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:02:58 +0200
Subject: Adding upstream version 1.64.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 library/core/src/ptr/const_ptr.rs | 1525 +++++++++++++++++++++
 library/core/src/ptr/metadata.rs  |  290 ++++++
 library/core/src/ptr/mod.rs       | 2054 +++++++++++++++++++++++++++
 library/core/src/ptr/mut_ptr.rs   | 1973 ++++++++++++++++++++++++++
 library/core/src/ptr/non_null.rs  |  802 +++++++++++++++
 library/core/src/ptr/unique.rs    |  193 ++++
 6 files changed, 6837 insertions(+)
 create mode 100644 library/core/src/ptr/const_ptr.rs
 create mode 100644 library/core/src/ptr/metadata.rs
 create mode 100644 library/core/src/ptr/mod.rs
 create mode 100644 library/core/src/ptr/mut_ptr.rs
 create mode 100644 library/core/src/ptr/non_null.rs
 create mode 100644 library/core/src/ptr/unique.rs

diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
new file mode 100644
index 000000000..e0655d68d
--- /dev/null
+++ b/library/core/src/ptr/const_ptr.rs
@@ -0,0 +1,1525 @@
+use super::*;
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
+use crate::intrinsics;
+use crate::mem;
+use crate::slice::{self, SliceIndex};
+
+impl<T: ?Sized> *const T {
+    /// Returns `true` if the pointer is null.
+    ///
+    /// Note that unsized types have many possible null pointers, as only the
+    /// raw data pointer is considered, not their length, vtable, etc.
+    /// Therefore, two pointers that are null may still not compare equal to
+    /// each other.
+    ///
+    /// ## Behavior during const evaluation
+    ///
+    /// When this function is used during const evaluation, it may return `false` for pointers
+    /// that turn out to be null at runtime. Specifically, when a pointer to some memory
+    /// is offset beyond its bounds in such a way that the resulting pointer is null,
+    /// the function will still return `false`. There is no way for CTFE to know
+    /// the absolute position of that memory, so we cannot tell if the pointer is
+    /// null or not.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s: &str = "Follow the rabbit";
+    /// let ptr: *const u8 = s.as_ptr();
+    /// assert!(!ptr.is_null());
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        // Compare via a cast to a thin pointer, so fat pointers have only
+        // their "data" part considered for null-ness.
+        (self as *const u8).guaranteed_eq(null())
+    }
+
+    /// Casts to a pointer of another type.
+    #[stable(feature = "ptr_cast", since = "1.38.0")]
+    #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
+    #[inline]
+    pub const fn cast<U>(self) -> *const U {
+        self as _
+    }
+
+    /// Use the pointer value in a new pointer of another type.
+    ///
+    /// In case `val` is a (fat) pointer to an unsized type, this operation
+    /// will ignore the pointer part, whereas for (thin) pointers to sized
+    /// types, this has the same effect as a simple cast.
+    ///
+    /// The resulting pointer will have provenance of `self`, i.e., for a fat
+    /// pointer, this operation is semantically the same as creating a new
+    /// fat pointer with the data pointer value of `self` but the metadata of
+    /// `val`.
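// A minimal sketch of the `cast` method documented above, assuming plain stable
// Rust; `first_byte` is a hypothetical helper added for illustration only.
fn first_byte(values: &[u32]) -> u8 {
    assert!(!values.is_empty());
    let p: *const u32 = values.as_ptr();
    let b: *const u8 = p.cast::<u8>();
    // SAFETY: `b` points at the first byte of an initialized `u32` element.
    unsafe { *b }
}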
+ /// + /// # Examples + /// + /// This function is primarily useful for allowing byte-wise pointer + /// arithmetic on potentially fat pointers: + /// + /// ``` + /// #![feature(set_ptr_value)] + /// # use core::fmt::Debug; + /// let arr: [i32; 3] = [1, 2, 3]; + /// let mut ptr = arr.as_ptr() as *const dyn Debug; + /// let thin = ptr as *const u8; + /// unsafe { + /// ptr = thin.add(8).with_metadata_of(ptr); + /// # assert_eq!(*(ptr as *const i32), 3); + /// println!("{:?}", &*ptr); // will print "3" + /// } + /// ``` + #[unstable(feature = "set_ptr_value", issue = "75091")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[inline] + pub fn with_metadata_of(self, mut val: *const U) -> *const U + where + U: ?Sized, + { + let target = &mut val as *mut *const U as *mut *const u8; + // SAFETY: In case of a thin pointer, this operations is identical + // to a simple assignment. In case of a fat pointer, with the current + // fat pointer layout implementation, the first field of such a + // pointer is always the data pointer, which is likewise assigned. + unsafe { *target = self as *const u8 }; + val + } + + /// Changes constness without changing the type. + /// + /// This is a bit safer than `as` because it wouldn't silently change the type if the code is + /// refactored. + #[unstable(feature = "ptr_const_cast", issue = "92675")] + #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")] + pub const fn cast_mut(self) -> *mut T { + self as _ + } + + /// Casts a pointer to its raw bits. + /// + /// This is equivalent to `as usize`, but is more specific to enhance readability. + /// The inverse method is [`from_bits`](#method.from_bits). + /// + /// In particular, `*p as usize` and `p as usize` will both compile for + /// pointers to numeric types but do very different things, so using this + /// helps emphasize that reading the bits was intentional. + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_to_from_bits)] + /// let array = [13, 42]; + /// let p0: *const i32 = &array[0]; + /// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0); + /// let p1: *const i32 = &array[1]; + /// assert_eq!(p1.to_bits() - p0.to_bits(), 4); + /// ``` + #[unstable(feature = "ptr_to_from_bits", issue = "91126")] + pub fn to_bits(self) -> usize + where + T: Sized, + { + self as usize + } + + /// Creates a pointer from its raw bits. + /// + /// This is equivalent to `as *const T`, but is more specific to enhance readability. + /// The inverse method is [`to_bits`](#method.to_bits). + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_to_from_bits)] + /// use std::ptr::NonNull; + /// let dangling: *const u8 = NonNull::dangling().as_ptr(); + /// assert_eq!(<*const u8>::from_bits(1), dangling); + /// ``` + #[unstable(feature = "ptr_to_from_bits", issue = "91126")] + pub fn from_bits(bits: usize) -> Self + where + T: Sized, + { + bits as Self + } + + /// Gets the "address" portion of the pointer. + /// + /// This is similar to `self as usize`, which semantically discards *provenance* and + /// *address-space* information. However, unlike `self as usize`, casting the returned address + /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To + /// properly restore the lost information and obtain a dereferencable pointer, use + /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr]. 
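// A small sketch of the `addr` method described above, assuming the nightly
// `strict_provenance` feature is enabled in the enclosing crate; the helper
// name is hypothetical.
fn addr_sketch() {
    let a = [1u8, 2, 3, 4];
    let p: *const u8 = a.as_ptr();
    // Consecutive `u8` elements are one address unit apart.
    assert_eq!(p.wrapping_add(1).addr(), p.addr() + 1);
}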
+ /// + /// If using those APIs is not possible because there is no way to preserve a pointer with the + /// required provenance, use [`expose_addr`][pointer::expose_addr] and + /// [`from_exposed_addr`][from_exposed_addr] instead. However, note that this makes + /// your code less portable and less amenable to tools that check for compliance with the Rust + /// memory model. + /// + /// On most platforms this will produce a value with the same bytes as the original + /// pointer, because all the bytes are dedicated to describing the address. + /// Platforms which need to store additional information in the pointer may + /// perform a change of representation to produce a value containing only the address + /// portion of the pointer. What that means is up to the platform to define. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such + /// might change in the future (including possibly weakening this so it becomes wholly + /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn addr(self) -> usize + where + T: Sized, + { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the + // provenance). + unsafe { mem::transmute(self) } + } + + /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future + /// use in [`from_exposed_addr`][]. + /// + /// This is equivalent to `self as usize`, which semantically discards *provenance* and + /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit + /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can + /// later call [`from_exposed_addr`][] to reconstitute the original pointer including its + /// provenance. (Reconstructing address space information, if required, is your responsibility.) + /// + /// Using this method means that code is *not* following Strict Provenance rules. Supporting + /// [`from_exposed_addr`][] complicates specification and reasoning and may not be supported by + /// tools that help you to stay conformant with the Rust memory model, so it is recommended to + /// use [`addr`][pointer::addr] wherever possible. + /// + /// On most platforms this will produce a value with the same bytes as the original pointer, + /// because all the bytes are dedicated to describing the address. Platforms which need to store + /// additional information in the pointer may not support this operation, since the 'expose' + /// side-effect which is required for [`from_exposed_addr`][] to work is typically not + /// available. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, see the + /// [module documentation][crate::ptr] for details. + /// + /// [`from_exposed_addr`]: from_exposed_addr + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn expose_addr(self) -> usize + where + T: Sized, + { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + self as usize + } + + /// Creates a new pointer with the given address. + /// + /// This performs the same operation as an `addr as ptr` cast, but copies + /// the *address-space* and *provenance* of `self` to the new pointer. 
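// A sketch of the `expose_addr` / `from_exposed_addr` round trip discussed
// above, again assuming the nightly `strict_provenance` feature.
fn expose_sketch() {
    let x = 7i32;
    let p: *const i32 = &x;
    let a: usize = p.expose_addr();                       // marks the provenance as exposed
    let q: *const i32 = core::ptr::from_exposed_addr(a);  // reconstitutes a usable pointer
    // SAFETY: `q` has the same address as `p`, whose provenance was exposed above.
    assert_eq!(unsafe { *q }, 7);
}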
+ /// This allows us to dynamically preserve and propagate this important + /// information in a way that is otherwise impossible with a unary cast. + /// + /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset + /// `self` to the given address, and therefore has all the same capabilities and restrictions. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, + /// see the [module documentation][crate::ptr] for details. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn with_addr(self, addr: usize) -> Self + where + T: Sized, + { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + // + // In the mean-time, this operation is defined to be "as if" it was + // a wrapping_offset, so we can emulate it as such. This should properly + // restore pointer provenance even under today's compiler. + let self_addr = self.addr() as isize; + let dest_addr = addr as isize; + let offset = dest_addr.wrapping_sub(self_addr); + + // This is the canonical desugarring of this operation + self.cast::().wrapping_offset(offset).cast::() + } + + /// Creates a new pointer by mapping `self`'s address to a new one. + /// + /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, + /// see the [module documentation][crate::ptr] for details. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self + where + T: Sized, + { + self.with_addr(f(self.addr())) + } + + /// Decompose a (possibly wide) pointer into its address and metadata components. + /// + /// The pointer can be later reconstructed with [`from_raw_parts`]. + #[unstable(feature = "ptr_metadata", issue = "81513")] + #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] + #[inline] + pub const fn to_raw_parts(self) -> (*const (), ::Metadata) { + (self.cast(), metadata(self)) + } + + /// Returns `None` if the pointer is null, or else returns a shared reference to + /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`] + /// must be used instead. + /// + /// [`as_uninit_ref`]: #method.as_uninit_ref + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * The pointer must point to an initialized instance of `T`. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! + /// (The part about being initialized is not yet fully decided, but until + /// it is, the only safe approach is to ensure that they are indeed initialized.) 
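// A sketch of `map_addr` (documented just above), using it to round an address
// down to an alignment boundary while keeping the pointer's provenance; assumes
// the nightly `strict_provenance` feature, and `align_down` is a hypothetical name.
fn align_down(p: *const u8, align: usize) -> *const u8 {
    assert!(align.is_power_of_two());
    // Keep `p`'s provenance, but round its address down to a multiple of `align`.
    p.map_addr(|a| a & !(align - 1))
}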
+ /// + /// [the module documentation]: crate::ptr#safety + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let ptr: *const u8 = &10u8 as *const u8; + /// + /// unsafe { + /// if let Some(val_back) = ptr.as_ref() { + /// println!("We got back the value: {val_back}!"); + /// } + /// } + /// ``` + /// + /// # Null-unchecked version + /// + /// If you are sure the pointer can never be null and are looking for some kind of + /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can + /// dereference the pointer directly. + /// + /// ``` + /// let ptr: *const u8 = &10u8 as *const u8; + /// + /// unsafe { + /// let val_back = &*ptr; + /// println!("We got back the value: {val_back}!"); + /// } + /// ``` + #[stable(feature = "ptr_as_ref", since = "1.9.0")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + #[inline] + pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> { + // SAFETY: the caller must guarantee that `self` is valid + // for a reference if it isn't null. + if self.is_null() { None } else { unsafe { Some(&*self) } } + } + + /// Returns `None` if the pointer is null, or else returns a shared reference to + /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require + /// that the value has to be initialized. + /// + /// [`as_ref`]: #method.as_ref + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! + /// + /// [the module documentation]: crate::ptr#safety + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(ptr_as_uninit)] + /// + /// let ptr: *const u8 = &10u8 as *const u8; + /// + /// unsafe { + /// if let Some(val_back) = ptr.as_uninit_ref() { + /// println!("We got back the value: {}!", val_back.assume_init()); + /// } + /// } + /// ``` + #[inline] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit> + where + T: Sized, + { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit) }) } + } + + /// Calculates the offset from a pointer. + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of the same [allocated object]. + /// + /// * The computed offset, **in bytes**, cannot overflow an `isize`. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize. 
+ /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_offset`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// [`wrapping_offset`]: #method.wrapping_offset + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s: &str = "123"; + /// let ptr: *const u8 = s.as_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.offset(1) as char); + /// println!("{}", *ptr.offset(2) as char); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn offset(self, count: isize) -> *const T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `offset`. + unsafe { intrinsics::offset(self, count) } + } + + /// Calculates the offset from a pointer in bytes. + /// + /// `count` is in units of **bytes**. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [offset][pointer::offset] on it. See that method for documentation + /// and safety requirements. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_offset(self, count: isize) -> Self { + // SAFETY: the caller must uphold the safety contract for `offset`. + let this = unsafe { self.cast::().offset(count).cast::<()>() }; + from_raw_parts::(this, metadata(self)) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// This operation itself is always safe, but using the resulting pointer is not. + /// + /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not + /// be used to read or write other allocated objects. 
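// A sketch of `byte_offset` (defined earlier in this hunk), assuming the nightly
// `pointer_byte_offsets` feature: stepping a `*const u16` forward by two *bytes*
// lands on the next element.
fn byte_offset_sketch() {
    let a = [10u16, 20, 30];
    let p: *const u16 = a.as_ptr();
    // SAFETY: two bytes forward is one `u16` element, still inside the array.
    let q = unsafe { p.byte_offset(2) };
    assert_eq!(unsafe { *q }, 20);
}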
+ /// + /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z` + /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still + /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless + /// `x` and `y` point into the same allocated object. + /// + /// Compared to [`offset`], this method basically delays the requirement of staying within the + /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object + /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a + /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`] + /// can be optimized better and is thus preferable in performance-sensitive code. + /// + /// The delayed check only considers the value of the pointer that was dereferenced, not the + /// intermediate values used during the computation of the final result. For example, + /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other + /// words, leaving the allocated object and then re-entering it later is permitted. + /// + /// [`offset`]: #method.offset + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // Iterate using a raw pointer in increments of two elements + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_offset(6); + /// + /// // This loop prints "1, 3, 5, " + /// while ptr != end_rounded_up { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_offset(step); + /// } + /// ``` + #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + pub const fn wrapping_offset(self, count: isize) -> *const T + where + T: Sized, + { + // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called. + unsafe { intrinsics::arith_offset(self, count) } + } + + /// Calculates the offset from a pointer in bytes using wrapping arithmetic. + /// + /// `count` is in units of **bytes**. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method + /// for documentation. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + pub const fn wrapping_byte_offset(self, count: isize) -> Self { + from_raw_parts::(self.cast::().wrapping_offset(count).cast::<()>(), metadata(self)) + } + + /// Calculates the distance between two pointers. The returned value is in + /// units of T: the distance in bytes divided by `mem::size_of::()`. + /// + /// This function is the inverse of [`offset`]. + /// + /// [`offset`]: #method.offset + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and other pointer must be either in bounds or one + /// byte past the end of the same [allocated object]. 
+ /// + /// * Both pointers must be *derived from* a pointer to the same object. + /// (See below for an example.) + /// + /// * The distance between the pointers, in bytes, must be an exact multiple + /// of the size of `T`. + /// + /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. + /// + /// * The distance being in bounds cannot rely on "wrapping around" the address space. + /// + /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the + /// address space, so two pointers within some value of any Rust type `T` will always satisfy + /// the last two conditions. The standard library also generally ensures that allocations + /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they + /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())` + /// always satisfies the last two conditions. + /// + /// Most platforms fundamentally can't even construct such a large allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on + /// such large allocations either.) + /// + /// [`add`]: #method.add + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [0; 5]; + /// let ptr1: *const i32 = &a[1]; + /// let ptr2: *const i32 = &a[3]; + /// unsafe { + /// assert_eq!(ptr2.offset_from(ptr1), 2); + /// assert_eq!(ptr1.offset_from(ptr2), -2); + /// assert_eq!(ptr1.offset(2), ptr2); + /// assert_eq!(ptr2.offset(-2), ptr1); + /// } + /// ``` + /// + /// *Incorrect* usage: + /// + /// ```rust,no_run + /// let ptr1 = Box::into_raw(Box::new(0u8)) as *const u8; + /// let ptr2 = Box::into_raw(Box::new(1u8)) as *const u8; + /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize); + /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1. + /// let ptr2_other = (ptr1 as *const u8).wrapping_offset(diff); + /// assert_eq!(ptr2 as usize, ptr2_other as usize); + /// // Since ptr2_other and ptr2 are derived from pointers to different objects, + /// // computing their offset is undefined behavior, even though + /// // they point to the same address! + /// unsafe { + /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior + /// } + /// ``` + #[stable(feature = "ptr_offset_from", since = "1.47.0")] + #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn offset_from(self, origin: *const T) -> isize + where + T: Sized, + { + let pointee_size = mem::size_of::(); + assert!(0 < pointee_size && pointee_size <= isize::MAX as usize); + // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`. + unsafe { intrinsics::ptr_offset_from(self, origin) } + } + + /// Calculates the distance between two pointers. 
The returned value is in + /// units of **bytes**. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [offset_from][pointer::offset_from] on it. See that method for + /// documentation and safety requirements. + /// + /// For non-`Sized` pointees this operation considers only the data pointers, + /// ignoring the metadata. + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize { + // SAFETY: the caller must uphold the safety contract for `offset_from`. + unsafe { self.cast::().offset_from(origin.cast::()) } + } + + /// Calculates the distance between two pointers, *where it's known that + /// `self` is equal to or greater than `origin`*. The returned value is in + /// units of T: the distance in bytes is divided by `mem::size_of::()`. + /// + /// This computes the same value that [`offset_from`](#method.offset_from) + /// would compute, but with the added precondition that that the offset is + /// guaranteed to be non-negative. This method is equivalent to + /// `usize::from(self.offset_from(origin)).unwrap_unchecked()`, + /// but it provides slightly more information to the optimizer, which can + /// sometimes allow it to optimize slightly better with some backends. + /// + /// This method can be though of as recovering the `count` that was passed + /// to [`add`](#method.add) (or, with the parameters in the other order, + /// to [`sub`](#method.sub)). The following are all equivalent, assuming + /// that their safety preconditions are met: + /// ```rust + /// # #![feature(ptr_sub_ptr)] + /// # unsafe fn blah(ptr: *const i32, origin: *const i32, count: usize) -> bool { + /// ptr.sub_ptr(origin) == count + /// # && + /// origin.add(count) == ptr + /// # && + /// ptr.sub(count) == origin + /// # } + /// ``` + /// + /// # Safety + /// + /// - The distance between the pointers must be non-negative (`self >= origin`) + /// + /// - *All* the safety conditions of [`offset_from`](#method.offset_from) + /// apply to this method as well; see it for the full details. + /// + /// Importantly, despite the return type of this method being able to represent + /// a larger offset, it's still *not permitted* to pass pointers which differ + /// by more than `isize::MAX` *bytes*. As such, the result of this method will + /// always be less than or equal to `isize::MAX as usize`. + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). 
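// A sketch of `byte_offset_from` (defined just above), assuming the nightly
// `pointer_byte_offsets` feature: the distance between two elements of a `u32`
// array, measured in bytes rather than elements.
fn byte_distance_sketch() {
    let a = [0u32; 4];
    let p0: *const u32 = &a[0];
    let p2: *const u32 = &a[2];
    // SAFETY: both pointers are derived from the same array.
    assert_eq!(unsafe { p2.byte_offset_from(p0) }, 8); // 2 elements * 4 bytes each
}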
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_sub_ptr)] + /// + /// let a = [0; 5]; + /// let ptr1: *const i32 = &a[1]; + /// let ptr2: *const i32 = &a[3]; + /// unsafe { + /// assert_eq!(ptr2.sub_ptr(ptr1), 2); + /// assert_eq!(ptr1.add(2), ptr2); + /// assert_eq!(ptr2.sub(2), ptr1); + /// assert_eq!(ptr2.sub_ptr(ptr2), 0); + /// } + /// + /// // This would be incorrect, as the pointers are not correctly ordered: + /// // ptr1.sub_ptr(ptr2) + /// ``` + #[unstable(feature = "ptr_sub_ptr", issue = "95892")] + #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn sub_ptr(self, origin: *const T) -> usize + where + T: Sized, + { + // SAFETY: The comparison has no side-effects, and the intrinsic + // does this check internally in the CTFE implementation. + unsafe { assert_unsafe_precondition!(self >= origin) }; + + let pointee_size = mem::size_of::(); + assert!(0 < pointee_size && pointee_size <= isize::MAX as usize); + // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`. + unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) } + } + + /// Returns whether two pointers are guaranteed to be equal. + /// + /// At runtime this function behaves like `self == other`. + /// However, in some contexts (e.g., compile-time evaluation), + /// it is not always possible to determine equality of two pointers, so this function may + /// spuriously return `false` for pointers that later actually turn out to be equal. + /// But when it returns `true`, the pointers are guaranteed to be equal. + /// + /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer + /// comparisons for which both functions return `false`. + /// + /// [`guaranteed_ne`]: #method.guaranteed_ne + /// + /// The return value may change depending on the compiler version and unsafe code must not + /// rely on the result of this function for soundness. It is suggested to only use this function + /// for performance optimizations where spurious `false` return values by this function do not + /// affect the outcome, but just the performance. + /// The consequences of using this method to make runtime and compile-time code behave + /// differently have not been explored. This method should not be used to introduce such + /// differences, and it should also not be stabilized before we have a better understanding + /// of this issue. + #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[inline] + pub const fn guaranteed_eq(self, other: *const T) -> bool + where + T: Sized, + { + intrinsics::ptr_guaranteed_eq(self, other) + } + + /// Returns whether two pointers are guaranteed to be unequal. + /// + /// At runtime this function behaves like `self != other`. + /// However, in some contexts (e.g., compile-time evaluation), + /// it is not always possible to determine the inequality of two pointers, so this function may + /// spuriously return `false` for pointers that later actually turn out to be unequal. + /// But when it returns `true`, the pointers are guaranteed to be unequal. + /// + /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer + /// comparisons for which both functions return `false`. 
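// A sketch of how `guaranteed_eq` (documented above, nightly
// `const_raw_ptr_comparison`) is meant to be used: only as an optimization
// hint, since a `false` result may be spurious. `maybe_same_slot` is a
// hypothetical helper.
fn maybe_same_slot(a: *const u32, b: *const u32) -> bool {
    if a.guaranteed_eq(b) {
        true // definitely equal; a fast path could branch here
    } else {
        a == b // `false` may be spurious, so fall back to a plain comparison
    }
}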
+ /// + /// [`guaranteed_eq`]: #method.guaranteed_eq + /// + /// The return value may change depending on the compiler version and unsafe code must not + /// rely on the result of this function for soundness. It is suggested to only use this function + /// for performance optimizations where spurious `false` return values by this function do not + /// affect the outcome, but just the performance. + /// The consequences of using this method to make runtime and compile-time code behave + /// differently have not been explored. This method should not be used to introduce such + /// differences, and it should also not be stabilized before we have a better understanding + /// of this issue. + #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[inline] + pub const fn guaranteed_ne(self, other: *const T) -> bool + where + T: Sized, + { + intrinsics::ptr_guaranteed_ne(self, other) + } + + /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`). + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of the same [allocated object]. + /// + /// * The computed offset, **in bytes**, cannot overflow an `isize`. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum must fit in a `usize`. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_add`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// [`wrapping_add`]: #method.wrapping_add + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s: &str = "123"; + /// let ptr: *const u8 = s.as_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.add(1) as char); + /// println!("{}", *ptr.add(2) as char); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn add(self, count: usize) -> Self + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `offset`. 
+ unsafe { self.offset(count as isize) } + } + + /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`). + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [add][pointer::add] on it. See that method for documentation + /// and safety requirements. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_add(self, count: usize) -> Self { + // SAFETY: the caller must uphold the safety contract for `add`. + let this = unsafe { self.cast::().add(count).cast::<()>() }; + from_raw_parts::(this, metadata(self)) + } + + /// Calculates the offset from a pointer (convenience for + /// `.offset((count as isize).wrapping_neg())`). + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of the same [allocated object]. + /// + /// * The computed offset cannot exceed `isize::MAX` **bytes**. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum must fit in a usize. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_sub`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. 
+ /// + /// [`wrapping_sub`]: #method.wrapping_sub + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s: &str = "123"; + /// + /// unsafe { + /// let end: *const u8 = s.as_ptr().add(3); + /// println!("{}", *end.sub(1) as char); + /// println!("{}", *end.sub(2) as char); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn sub(self, count: usize) -> Self + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `offset`. + unsafe { self.offset((count as isize).wrapping_neg()) } + } + + /// Calculates the offset from a pointer in bytes (convenience for + /// `.byte_offset((count as isize).wrapping_neg())`). + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [sub][pointer::sub] on it. See that method for documentation + /// and safety requirements. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_sub(self, count: usize) -> Self { + // SAFETY: the caller must uphold the safety contract for `sub`. + let this = unsafe { self.cast::().sub(count).cast::<()>() }; + from_raw_parts::(this, metadata(self)) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset(count as isize)`) + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// This operation itself is always safe, but using the resulting pointer is not. + /// + /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not + /// be used to read or write other allocated objects. + /// + /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z` + /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still + /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless + /// `x` and `y` point into the same allocated object. + /// + /// Compared to [`add`], this method basically delays the requirement of staying within the + /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object + /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a + /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`] + /// can be optimized better and is thus preferable in performance-sensitive code. + /// + /// The delayed check only considers the value of the pointer that was dereferenced, not the + /// intermediate values used during the computation of the final result. For example, + /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the + /// allocated object and then re-entering it later is permitted. 
+ /// + /// [`add`]: #method.add + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // Iterate using a raw pointer in increments of two elements + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_add(6); + /// + /// // This loop prints "1, 3, 5, " + /// while ptr != end_rounded_up { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_add(step); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + pub const fn wrapping_add(self, count: usize) -> Self + where + T: Sized, + { + self.wrapping_offset(count as isize) + } + + /// Calculates the offset from a pointer in bytes using wrapping arithmetic. + /// (convenience for `.wrapping_byte_offset(count as isize)`) + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + pub const fn wrapping_byte_add(self, count: usize) -> Self { + from_raw_parts::(self.cast::().wrapping_add(count).cast::<()>(), metadata(self)) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`) + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// This operation itself is always safe, but using the resulting pointer is not. + /// + /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not + /// be used to read or write other allocated objects. + /// + /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z` + /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still + /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless + /// `x` and `y` point into the same allocated object. + /// + /// Compared to [`sub`], this method basically delays the requirement of staying within the + /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object + /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a + /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`] + /// can be optimized better and is thus preferable in performance-sensitive code. + /// + /// The delayed check only considers the value of the pointer that was dereferenced, not the + /// intermediate values used during the computation of the final result. For example, + /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the + /// allocated object and then re-entering it later is permitted. 
+ /// + /// [`sub`]: #method.sub + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // Iterate using a raw pointer in increments of two elements (backwards) + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let start_rounded_down = ptr.wrapping_sub(2); + /// ptr = ptr.wrapping_add(4); + /// let step = 2; + /// // This loop prints "5, 3, 1, " + /// while ptr != start_rounded_down { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_sub(step); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline] + pub const fn wrapping_sub(self, count: usize) -> Self + where + T: Sized, + { + self.wrapping_offset((count as isize).wrapping_neg()) + } + + /// Calculates the offset from a pointer in bytes using wrapping arithmetic. + /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`) + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + pub const fn wrapping_byte_sub(self, count: usize) -> Self { + from_raw_parts::(self.cast::().wrapping_sub(count).cast::<()>(), metadata(self)) + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// See [`ptr::read`] for safety concerns and examples. + /// + /// [`ptr::read`]: crate::ptr::read() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn read(self) -> T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `read`. + unsafe { read(self) } + } + + /// Performs a volatile read of the value from `self` without moving it. This + /// leaves the memory in `self` unchanged. + /// + /// Volatile operations are intended to act on I/O memory, and are guaranteed + /// to not be elided or reordered by the compiler across other volatile + /// operations. + /// + /// See [`ptr::read_volatile`] for safety concerns and examples. + /// + /// [`ptr::read_volatile`]: crate::ptr::read_volatile() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub unsafe fn read_volatile(self) -> T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `read_volatile`. + unsafe { read_volatile(self) } + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// Unlike `read`, the pointer may be unaligned. + /// + /// See [`ptr::read_unaligned`] for safety concerns and examples. 
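// A sketch of `read_unaligned` (documented above), reading a `u32` from an
// arbitrary, possibly unaligned offset inside a byte buffer; the result is in
// native endianness, and `read_u32_at` is a hypothetical helper.
fn read_u32_at(buf: &[u8], pos: usize) -> u32 {
    let end = pos.checked_add(4).expect("offset overflow");
    assert!(end <= buf.len());
    // SAFETY: the four bytes at `pos..end` lie inside `buf`, and
    // `read_unaligned` imposes no alignment requirement on the pointer.
    unsafe { buf.as_ptr().add(pos).cast::<u32>().read_unaligned() }
}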
+ /// + /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn read_unaligned(self) -> T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `read_unaligned`. + unsafe { read_unaligned(self) } + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may overlap. + /// + /// NOTE: this has the *same* argument order as [`ptr::copy`]. + /// + /// See [`ptr::copy`] for safety concerns and examples. + /// + /// [`ptr::copy`]: crate::ptr::copy() + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn copy_to(self, dest: *mut T, count: usize) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `copy`. + unsafe { copy(self, dest, count) } + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may *not* overlap. + /// + /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. + /// + /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. + /// + /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping() + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`. + unsafe { copy_nonoverlapping(self, dest, count) } + } + + /// Computes the offset that needs to be applied to the pointer in order to make it aligned to + /// `align`. + /// + /// If it is not possible to align the pointer, the implementation returns + /// `usize::MAX`. It is permissible for the implementation to *always* + /// return `usize::MAX`. Only your algorithm's performance can depend + /// on getting a usable offset here, not its correctness. + /// + /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be + /// used with the `wrapping_add` method. + /// + /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go + /// beyond the allocation that the pointer points into. It is up to the caller to ensure that + /// the returned offset is correct in all terms other than alignment. + /// + /// # Panics + /// + /// The function panics if `align` is not a power-of-two. 
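// A sketch of `copy_to_nonoverlapping` (documented above): note that it copies
// *from* `self` *to* the destination, the same argument order as
// `ptr::copy_nonoverlapping`.
fn copy_sketch() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    // SAFETY: both regions are valid for four `u8`s and do not overlap.
    unsafe { src.as_ptr().copy_to_nonoverlapping(dst.as_mut_ptr(), 4) };
    assert_eq!(dst, src);
}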
+ /// + /// # Examples + /// + /// Accessing adjacent `u8` as `u16` + /// + /// ``` + /// # fn foo(n: usize) { + /// # use std::mem::align_of; + /// # unsafe { + /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; + /// let ptr = x.as_ptr().add(n) as *const u8; + /// let offset = ptr.align_offset(align_of::()); + /// if offset < x.len() - n - 1 { + /// let u16_ptr = ptr.add(offset) as *const u16; + /// assert_ne!(*u16_ptr, 500); + /// } else { + /// // while the pointer can be aligned via `offset`, it would point + /// // outside the allocation + /// } + /// # } } + /// ``` + #[stable(feature = "align_offset", since = "1.36.0")] + #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")] + pub const fn align_offset(self, align: usize) -> usize + where + T: Sized, + { + if !align.is_power_of_two() { + panic!("align_offset: align is not a power-of-two"); + } + + fn rt_impl(p: *const T, align: usize) -> usize { + // SAFETY: `align` has been checked to be a power of 2 above + unsafe { align_offset(p, align) } + } + + const fn ctfe_impl(_: *const T, _: usize) -> usize { + usize::MAX + } + + // SAFETY: + // It is permissible for `align_offset` to always return `usize::MAX`, + // algorithm correctness can not depend on `align_offset` returning non-max values. + // + // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can. + unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) } + } + + /// Returns whether the pointer is properly aligned for `T`. + #[must_use] + #[inline] + #[unstable(feature = "pointer_is_aligned", issue = "96284")] + pub fn is_aligned(self) -> bool + where + T: Sized, + { + self.is_aligned_to(core::mem::align_of::()) + } + + /// Returns whether the pointer is aligned to `align`. + /// + /// For non-`Sized` pointees this operation considers only the data pointer, + /// ignoring the metadata. + /// + /// # Panics + /// + /// The function panics if `align` is not a power-of-two (this includes 0). + #[must_use] + #[inline] + #[unstable(feature = "pointer_is_aligned", issue = "96284")] + pub fn is_aligned_to(self, align: usize) -> bool { + if !align.is_power_of_two() { + panic!("is_aligned_to: align is not a power-of-two"); + } + + // SAFETY: `is_power_of_two()` will return `false` for zero. + unsafe { core::intrinsics::assume(align != 0) }; + + // Cast is needed for `T: !Sized` + self.cast::().addr() % align == 0 + } +} + +impl *const [T] { + /// Returns the length of a raw slice. + /// + /// The returned value is the number of **elements**, not the number of bytes. + /// + /// This function is safe, even when the raw slice cannot be cast to a slice + /// reference because the pointer is null or unaligned. + /// + /// # Examples + /// + /// ```rust + /// #![feature(slice_ptr_len)] + /// + /// use std::ptr; + /// + /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3); + /// assert_eq!(slice.len(), 3); + /// ``` + #[inline] + #[unstable(feature = "slice_ptr_len", issue = "71146")] + #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")] + pub const fn len(self) -> usize { + metadata(self) + } + + /// Returns a raw pointer to the slice's buffer. + /// + /// This is equivalent to casting `self` to `*const T`, but more type-safe. 
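// A sketch of `is_aligned` / `is_aligned_to` (documented earlier in this hunk),
// assuming the nightly `pointer_is_aligned` feature.
fn alignment_sketch() {
    let x = 0u64;
    let p: *const u64 = &x;
    assert!(p.is_aligned());     // aligned for its own type
    assert!(p.is_aligned_to(1)); // every pointer is 1-aligned
    assert!(p.cast::<u8>().is_aligned_to(core::mem::align_of::<u64>()));
}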
+ /// + /// # Examples + /// + /// ```rust + /// #![feature(slice_ptr_get)] + /// use std::ptr; + /// + /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3); + /// assert_eq!(slice.as_ptr(), ptr::null()); + /// ``` + #[inline] + #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")] + pub const fn as_ptr(self) -> *const T { + self as *const T + } + + /// Returns a raw pointer to an element or subslice, without doing bounds + /// checking. + /// + /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable + /// is *[undefined behavior]* even if the resulting pointer is not used. + /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// #![feature(slice_ptr_get)] + /// + /// let x = &[1, 2, 4] as *const [i32]; + /// + /// unsafe { + /// assert_eq!(x.get_unchecked(1), x.as_ptr().add(1)); + /// } + /// ``` + #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "const_slice_index", issue = "none")] + #[inline] + pub const unsafe fn get_unchecked(self, index: I) -> *const I::Output + where + I: ~const SliceIndex<[T]>, + { + // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds. + unsafe { index.get_unchecked(self) } + } + + /// Returns `None` if the pointer is null, or else returns a shared slice to + /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require + /// that the value has to be initialized. + /// + /// [`as_ref`]: #method.as_ref + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::()` many bytes, + /// and it must be properly aligned. This means in particular: + /// + /// * The entire memory range of this slice must be contained within a single [allocated object]! + /// Slices can never span across multiple allocated objects. + /// + /// * The pointer must be aligned even for zero-length slices. One + /// reason for this is that enum layout optimizations may rely on references + /// (including slices of any length) being aligned and non-null to distinguish + /// them from other data. You can obtain a pointer that is usable as `data` + /// for zero-length slices using [`NonNull::dangling()`]. + /// + /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// See the safety documentation of [`pointer::offset`]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! + /// + /// See also [`slice::from_raw_parts`][]. 
+    ///
+    /// [valid]: crate::ptr#safety
+    /// [allocated object]: crate::ptr#allocated-object
+    #[inline]
+    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
+        if self.is_null() {
+            None
+        } else {
+            // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
+            Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
+        }
+    }
+}
+
+// Equality for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialEq for *const T {
+    #[inline]
+    fn eq(&self, other: &*const T) -> bool {
+        *self == *other
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Eq for *const T {}
+
+// Comparison for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Ord for *const T {
+    #[inline]
+    fn cmp(&self, other: &*const T) -> Ordering {
+        if self < other {
+            Less
+        } else if self == other {
+            Equal
+        } else {
+            Greater
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialOrd for *const T {
+    #[inline]
+    fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+
+    #[inline]
+    fn lt(&self, other: &*const T) -> bool {
+        *self < *other
+    }
+
+    #[inline]
+    fn le(&self, other: &*const T) -> bool {
+        *self <= *other
+    }
+
+    #[inline]
+    fn gt(&self, other: &*const T) -> bool {
+        *self > *other
+    }
+
+    #[inline]
+    fn ge(&self, other: &*const T) -> bool {
+        *self >= *other
+    }
+}
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
new file mode 100644
index 000000000..cd5edee04
--- /dev/null
+++ b/library/core/src/ptr/metadata.rs
@@ -0,0 +1,290 @@
+#![unstable(feature = "ptr_metadata", issue = "81513")]
+
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+
+/// Provides the pointer metadata type of any pointed-to type.
+///
+/// # Pointer metadata
+///
+/// Raw pointer types and reference types in Rust can be thought of as made of two parts:
+/// a data pointer that contains the memory address of the value, and some metadata.
+///
+/// For statically-sized types (that implement the `Sized` trait)
+/// as well as for `extern` types,
+/// pointers are said to be “thin”: metadata is zero-sized and its type is `()`.
+///
+/// Pointers to [dynamically-sized types][dst] are said to be “wide” or “fat”,
+/// they have non-zero-sized metadata:
+///
+/// * For structs whose last field is a DST, metadata is the metadata for the last field
+/// * For the `str` type, metadata is the length in bytes as `usize`
+/// * For slice types like `[T]`, metadata is the length in items as `usize`
+/// * For trait objects like `dyn SomeTrait`, metadata is [`DynMetadata`][DynMetadata]
+///   (e.g. `DynMetadata<dyn SomeTrait>`)
+///
+/// In the future, the Rust language may gain new kinds of types
+/// that have different pointer metadata.
+///
+/// [dst]: https://doc.rust-lang.org/nomicon/exotic-sizes.html#dynamically-sized-types-dsts
+///
+///
+/// # The `Pointee` trait
+///
+/// The point of this trait is its `Metadata` associated type,
+/// which is `()` or `usize` or `DynMetadata<_>` as described above.
+/// It is automatically implemented for every type.
+/// It can be assumed to be implemented in a generic context, even without a corresponding bound.
+///
+///
+/// # Usage
+///
+/// Raw pointers can be decomposed into the data address and metadata components
+/// with their [`to_raw_parts`] method.
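A small sketch of decomposing and reassembling a wide pointer with these APIs, assuming a nightly toolchain with the unstable `ptr_metadata` feature enabled:

```rust
#![feature(ptr_metadata)]
use std::ptr;

fn main() {
    let v = [1u8, 2, 3];
    let wide: *const [u8] = &v[..];
    // A slice pointer splits into a thin data address and its length metadata.
    let (data, len) = wide.to_raw_parts();
    assert_eq!(len, 3);
    // The wide pointer can be rebuilt from the same parts.
    let rebuilt: *const [u8] = ptr::from_raw_parts(data, len);
    assert_eq!(unsafe { &*rebuilt }, &v[..]);
}
```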
+/// +/// Alternatively, metadata alone can be extracted with the [`metadata`] function. +/// A reference can be passed to [`metadata`] and implicitly coerced. +/// +/// A (possibly-wide) pointer can be put back together from its address and metadata +/// with [`from_raw_parts`] or [`from_raw_parts_mut`]. +/// +/// [`to_raw_parts`]: *const::to_raw_parts +#[lang = "pointee_trait"] +pub trait Pointee { + /// The type for metadata in pointers and references to `Self`. + #[lang = "metadata_type"] + // NOTE: Keep trait bounds in `static_assert_expected_bounds_for_metadata` + // in `library/core/src/ptr/metadata.rs` + // in sync with those here: + type Metadata: Copy + Send + Sync + Ord + Hash + Unpin; +} + +/// Pointers to types implementing this trait alias are “thin”. +/// +/// This includes statically-`Sized` types and `extern` types. +/// +/// # Example +/// +/// ```rust +/// #![feature(ptr_metadata)] +/// +/// fn this_never_panics() { +/// assert_eq!(std::mem::size_of::<&T>(), std::mem::size_of::()) +/// } +/// ``` +#[unstable(feature = "ptr_metadata", issue = "81513")] +// NOTE: don’t stabilize this before trait aliases are stable in the language? +pub trait Thin = Pointee; + +/// Extract the metadata component of a pointer. +/// +/// Values of type `*mut T`, `&T`, or `&mut T` can be passed directly to this function +/// as they implicitly coerce to `*const T`. +/// +/// # Example +/// +/// ``` +/// #![feature(ptr_metadata)] +/// +/// assert_eq!(std::ptr::metadata("foo"), 3_usize); +/// ``` +#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] +#[inline] +pub const fn metadata(ptr: *const T) -> ::Metadata { + // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T + // and PtrComponents have the same memory layouts. Only std can make this + // guarantee. + unsafe { PtrRepr { const_ptr: ptr }.components.metadata } +} + +/// Forms a (possibly-wide) raw pointer from a data address and metadata. +/// +/// This function is safe but the returned pointer is not necessarily safe to dereference. +/// For slices, see the documentation of [`slice::from_raw_parts`] for safety requirements. +/// For trait objects, the metadata must come from a pointer to the same underlying erased type. +/// +/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts +#[unstable(feature = "ptr_metadata", issue = "81513")] +#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] +#[inline] +pub const fn from_raw_parts( + data_address: *const (), + metadata: ::Metadata, +) -> *const T { + // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T + // and PtrComponents have the same memory layouts. Only std can make this + // guarantee. + unsafe { PtrRepr { components: PtrComponents { data_address, metadata } }.const_ptr } +} + +/// Performs the same functionality as [`from_raw_parts`], except that a +/// raw `*mut` pointer is returned, as opposed to a raw `*const` pointer. +/// +/// See the documentation of [`from_raw_parts`] for more details. +#[unstable(feature = "ptr_metadata", issue = "81513")] +#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] +#[inline] +pub const fn from_raw_parts_mut( + data_address: *mut (), + metadata: ::Metadata, +) -> *mut T { + // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T + // and PtrComponents have the same memory layouts. Only std can make this + // guarantee. 
+ unsafe { PtrRepr { components: PtrComponents { data_address, metadata } }.mut_ptr } +} + +#[repr(C)] +pub(crate) union PtrRepr { + pub(crate) const_ptr: *const T, + pub(crate) mut_ptr: *mut T, + pub(crate) components: PtrComponents, +} + +#[repr(C)] +pub(crate) struct PtrComponents { + pub(crate) data_address: *const (), + pub(crate) metadata: ::Metadata, +} + +// Manual impl needed to avoid `T: Copy` bound. +impl Copy for PtrComponents {} + +// Manual impl needed to avoid `T: Clone` bound. +impl Clone for PtrComponents { + fn clone(&self) -> Self { + *self + } +} + +/// The metadata for a `Dyn = dyn SomeTrait` trait object type. +/// +/// It is a pointer to a vtable (virtual call table) +/// that represents all the necessary information +/// to manipulate the concrete type stored inside a trait object. +/// The vtable notably it contains: +/// +/// * type size +/// * type alignment +/// * a pointer to the type’s `drop_in_place` impl (may be a no-op for plain-old-data) +/// * pointers to all the methods for the type’s implementation of the trait +/// +/// Note that the first three are special because they’re necessary to allocate, drop, +/// and deallocate any trait object. +/// +/// It is possible to name this struct with a type parameter that is not a `dyn` trait object +/// (for example `DynMetadata`) but not to obtain a meaningful value of that struct. +#[lang = "dyn_metadata"] +pub struct DynMetadata { + vtable_ptr: &'static VTable, + phantom: crate::marker::PhantomData, +} + +#[cfg(not(bootstrap))] +extern "C" { + /// Opaque type for accessing vtables. + /// + /// Private implementation detail of `DynMetadata::size_of` etc. + /// There is conceptually not actually any Abstract Machine memory behind this pointer. + type VTable; +} + +/// The common prefix of all vtables. It is followed by function pointers for trait methods. +/// +/// Private implementation detail of `DynMetadata::size_of` etc. +#[repr(C)] +#[cfg(bootstrap)] +struct VTable { + drop_in_place: fn(*mut ()), + size_of: usize, + align_of: usize, +} + +impl DynMetadata { + /// Returns the size of the type associated with this vtable. + #[inline] + pub fn size_of(self) -> usize { + // Note that "size stored in vtable" is *not* the same as "result of size_of_val_raw". + // Consider a reference like `&(i32, dyn Send)`: the vtable will only store the size of the + // `Send` part! + #[cfg(bootstrap)] + return self.vtable_ptr.size_of; + #[cfg(not(bootstrap))] + // SAFETY: DynMetadata always contains a valid vtable pointer + return unsafe { + crate::intrinsics::vtable_size(self.vtable_ptr as *const VTable as *const ()) + }; + } + + /// Returns the alignment of the type associated with this vtable. + #[inline] + pub fn align_of(self) -> usize { + #[cfg(bootstrap)] + return self.vtable_ptr.align_of; + #[cfg(not(bootstrap))] + // SAFETY: DynMetadata always contains a valid vtable pointer + return unsafe { + crate::intrinsics::vtable_align(self.vtable_ptr as *const VTable as *const ()) + }; + } + + /// Returns the size and alignment together as a `Layout` + #[inline] + pub fn layout(self) -> crate::alloc::Layout { + // SAFETY: the compiler emitted this vtable for a concrete Rust type which + // is known to have a valid layout. Same rationale as in `Layout::for_value`. 
+ unsafe { crate::alloc::Layout::from_size_align_unchecked(self.size_of(), self.align_of()) } + } +} + +unsafe impl Send for DynMetadata {} +unsafe impl Sync for DynMetadata {} + +impl fmt::Debug for DynMetadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("DynMetadata").field(&(self.vtable_ptr as *const VTable)).finish() + } +} + +// Manual impls needed to avoid `Dyn: $Trait` bounds. + +impl Unpin for DynMetadata {} + +impl Copy for DynMetadata {} + +impl Clone for DynMetadata { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl Eq for DynMetadata {} + +impl PartialEq for DynMetadata { + #[inline] + fn eq(&self, other: &Self) -> bool { + crate::ptr::eq::(self.vtable_ptr, other.vtable_ptr) + } +} + +impl Ord for DynMetadata { + #[inline] + fn cmp(&self, other: &Self) -> crate::cmp::Ordering { + (self.vtable_ptr as *const VTable).cmp(&(other.vtable_ptr as *const VTable)) + } +} + +impl PartialOrd for DynMetadata { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Hash for DynMetadata { + #[inline] + fn hash(&self, hasher: &mut H) { + crate::ptr::hash::(self.vtable_ptr, hasher) + } +} diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs new file mode 100644 index 000000000..40e28e636 --- /dev/null +++ b/library/core/src/ptr/mod.rs @@ -0,0 +1,2054 @@ +//! Manually manage memory through raw pointers. +//! +//! *[See also the pointer primitive types](pointer).* +//! +//! # Safety +//! +//! Many functions in this module take raw pointers as arguments and read from +//! or write to them. For this to be safe, these pointers must be *valid*. +//! Whether a pointer is valid depends on the operation it is used for +//! (read or write), and the extent of the memory that is accessed (i.e., +//! how many bytes are read/written). Most functions use `*mut T` and `*const T` +//! to access only a single value, in which case the documentation omits the size +//! and implicitly assumes it to be `size_of::()` bytes. +//! +//! The precise rules for validity are not determined yet. The guarantees that are +//! provided at this point are very minimal: +//! +//! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst]. +//! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer +//! be *dereferenceable*: the memory range of the given size starting at the pointer must all be +//! within the bounds of a single allocated object. Note that in Rust, +//! every (stack-allocated) variable is considered a separate allocated object. +//! * Even for operations of [size zero][zst], the pointer must not be pointing to deallocated +//! memory, i.e., deallocation makes pointers invalid even for zero-sized operations. However, +//! casting any non-zero integer *literal* to a pointer is valid for zero-sized accesses, even if +//! some memory happens to exist at that address and gets deallocated. This corresponds to writing +//! your own allocator: allocating zero-sized objects is not very hard. The canonical way to +//! obtain a pointer that is valid for zero-sized accesses is [`NonNull::dangling`]. +//FIXME: mention `ptr::invalid` above, once it is stable. +//! * All accesses performed by functions in this module are *non-atomic* in the sense +//! of [atomic operations] used to synchronize between threads. This means it is +//! undefined behavior to perform two concurrent accesses to the same location from different +//! 
threads unless both accesses only read from memory. Notice that this explicitly +//! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot +//! be used for inter-thread synchronization. +//! * The result of casting a reference to a pointer is valid for as long as the +//! underlying object is live and no reference (just raw pointers) is used to +//! access the same memory. +//! +//! These axioms, along with careful use of [`offset`] for pointer arithmetic, +//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees +//! will be provided eventually, as the [aliasing] rules are being determined. For more +//! information, see the [book] as well as the section in the reference devoted +//! to [undefined behavior][ub]. +//! +//! ## Alignment +//! +//! Valid raw pointers as defined above are not necessarily properly aligned (where +//! "proper" alignment is defined by the pointee type, i.e., `*const T` must be +//! aligned to `mem::align_of::()`). However, most functions require their +//! arguments to be properly aligned, and will explicitly state +//! this requirement in their documentation. Notable exceptions to this are +//! [`read_unaligned`] and [`write_unaligned`]. +//! +//! When a function requires proper alignment, it does so even if the access +//! has size 0, i.e., even if memory is not actually touched. Consider using +//! [`NonNull::dangling`] in such cases. +//! +//! ## Allocated object +//! +//! For several operations, such as [`offset`] or field projections (`expr.field`), the notion of an +//! "allocated object" becomes relevant. An allocated object is a contiguous region of memory. +//! Common examples of allocated objects include stack-allocated variables (each variable is a +//! separate allocated object), heap allocations (each allocation created by the global allocator is +//! a separate allocated object), and `static` variables. +//! +//! +//! # Strict Provenance +//! +//! **The following text is non-normative, insufficiently formal, and is an extremely strict +//! interpretation of provenance. It's ok if your code doesn't strictly conform to it.** +//! +//! [Strict Provenance][] is an experimental set of APIs that help tools that try +//! to validate the memory-safety of your program's execution. Notably this includes [Miri][] +//! and [CHERI][], which can detect when you access out of bounds memory or otherwise violate +//! Rust's memory model. +//! +//! Provenance must exist in some form for any programming +//! language compiled for modern computer architectures, but specifying a model for provenance +//! in a way that is useful to both compilers and programmers is an ongoing challenge. +//! The [Strict Provenance][] experiment seeks to explore the question: *what if we just said you +//! couldn't do all the nasty operations that make provenance so messy?* +//! +//! What APIs would have to be removed? What APIs would have to be added? How much would code +//! have to change, and is it worse or better now? Would any patterns become truly inexpressible? +//! Could we carve out special exceptions for those patterns? Should we? +//! +//! A secondary goal of this project is to see if we can disambiguate the many functions of +//! pointer<->integer casts enough for the definition of `usize` to be loosened so that it +//! isn't *pointer*-sized but address-space/offset/allocation-sized (we'll probably continue +//! to conflate these notions). This would potentially make it possible to more efficiently +//! 
target platforms where pointers are larger than offsets, such as CHERI and maybe some +//! segmented architecures. +//! +//! ## Provenance +//! +//! **This section is *non-normative* and is part of the [Strict Provenance][] experiment.** +//! +//! Pointers are not *simply* an "integer" or "address". For instance, it's uncontroversial +//! to say that a Use After Free is clearly Undefined Behaviour, even if you "get lucky" +//! and the freed memory gets reallocated before your read/write (in fact this is the +//! worst-case scenario, UAFs would be much less concerning if this didn't happen!). +//! To rationalize this claim, pointers need to somehow be *more* than just their addresses: +//! they must have provenance. +//! +//! When an allocation is created, that allocation has a unique Original Pointer. For alloc +//! APIs this is literally the pointer the call returns, and for local variables and statics, +//! this is the name of the variable/static. This is mildly overloading the term "pointer" +//! for the sake of brevity/exposition. +//! +//! The Original Pointer for an allocation is guaranteed to have unique access to the entire +//! allocation and *only* that allocation. In this sense, an allocation can be thought of +//! as a "sandbox" that cannot be broken into or out of. *Provenance* is the permission +//! to access an allocation's sandbox and has both a *spatial* and *temporal* component: +//! +//! * Spatial: A range of bytes that the pointer is allowed to access. +//! * Temporal: The lifetime (of the allocation) that access to these bytes is tied to. +//! +//! Spatial provenance makes sure you don't go beyond your sandbox, while temporal provenance +//! makes sure that you can't "get lucky" after your permission to access some memory +//! has been revoked (either through deallocations or borrows expiring). +//! +//! Provenance is implicitly shared with all pointers transitively derived from +//! The Original Pointer through operations like [`offset`], borrowing, and pointer casts. +//! Some operations may *shrink* the derived provenance, limiting how much memory it can +//! access or how long it's valid for (i.e. borrowing a subfield and subslicing). +//! +//! Shrinking provenance cannot be undone: even if you "know" there is a larger allocation, you +//! can't derive a pointer with a larger provenance. Similarly, you cannot "recombine" +//! two contiguous provenances back into one (i.e. with a `fn merge(&[T], &[T]) -> &[T]`). +//! +//! A reference to a value always has provenance over exactly the memory that field occupies. +//! A reference to a slice always has provenance over exactly the range that slice describes. +//! +//! If an allocation is deallocated, all pointers with provenance to that allocation become +//! invalidated, and effectively lose their provenance. +//! +//! The strict provenance experiment is mostly only interested in exploring stricter *spatial* +//! provenance. In this sense it can be thought of as a subset of the more ambitious and +//! formal [Stacked Borrows][] research project, which is what tools like [Miri][] are based on. +//! In particular, Stacked Borrows is necessary to properly describe what borrows are allowed +//! to do and when they become invalidated. This necessarily involves much more complex +//! *temporal* reasoning than simply identifying allocations. Adjusting APIs and code +//! for the strict provenance experiment will also greatly help Stacked Borrows. +//! +//! +//! ## Pointer Vs Addresses +//! +//! 
**This section is *non-normative* and is part of the [Strict Provenance][] experiment.** +//! +//! One of the largest historical issues with trying to define provenance is that programmers +//! freely convert between pointers and integers. Once you allow for this, it generally becomes +//! impossible to accurately track and preserve provenance information, and you need to appeal +//! to very complex and unreliable heuristics. But of course, converting between pointers and +//! integers is very useful, so what can we do? +//! +//! Also did you know WASM is actually a "Harvard Architecture"? As in function pointers are +//! handled completely differently from data pointers? And we kind of just shipped Rust on WASM +//! without really addressing the fact that we let you freely convert between function pointers +//! and data pointers, because it mostly Just Works? Let's just put that on the "pointer casts +//! are dubious" pile. +//! +//! Strict Provenance attempts to square these circles by decoupling Rust's traditional conflation +//! of pointers and `usize` (and `isize`), and defining a pointer to semantically contain the +//! following information: +//! +//! * The **address-space** it is part of (e.g. "data" vs "code" in WASM). +//! * The **address** it points to, which can be represented by a `usize`. +//! * The **provenance** it has, defining the memory it has permission to access. +//! +//! Under Strict Provenance, a usize *cannot* accurately represent a pointer, and converting from +//! a pointer to a usize is generally an operation which *only* extracts the address. It is +//! therefore *impossible* to construct a valid pointer from a usize because there is no way +//! to restore the address-space and provenance. In other words, pointer-integer-pointer +//! roundtrips are not possible (in the sense that the resulting pointer is not dereferencable). +//! +//! The key insight to making this model *at all* viable is the [`with_addr`][] method: +//! +//! ```text +//! /// Creates a new pointer with the given address. +//! /// +//! /// This performs the same operation as an `addr as ptr` cast, but copies +//! /// the *address-space* and *provenance* of `self` to the new pointer. +//! /// This allows us to dynamically preserve and propagate this important +//! /// information in a way that is otherwise impossible with a unary cast. +//! /// +//! /// This is equivalent to using `wrapping_offset` to offset `self` to the +//! /// given address, and therefore has all the same capabilities and restrictions. +//! pub fn with_addr(self, addr: usize) -> Self; +//! ``` +//! +//! So you're still able to drop down to the address representation and do whatever +//! clever bit tricks you want *as long as* you're able to keep around a pointer +//! into the allocation you care about that can "reconstitute" the other parts of the pointer. +//! Usually this is very easy, because you only are taking a pointer, messing with the address, +//! and then immediately converting back to a pointer. To make this use case more ergonomic, +//! we provide the [`map_addr`][] method. +//! +//! To help make it clear that code is "following" Strict Provenance semantics, we also provide an +//! [`addr`][] method which promises that the returned address is not part of a +//! pointer-usize-pointer roundtrip. In the future we may provide a lint for pointer<->integer +//! casts to help you audit if your code conforms to strict provenance. +//! +//! +//! ## Using Strict Provenance +//! +//! 
Most code needs no changes to conform to strict provenance, as the only really concerning +//! operation that *wasn't* obviously already Undefined Behaviour is casts from usize to a +//! pointer. For code which *does* cast a usize to a pointer, the scope of the change depends +//! on exactly what you're doing. +//! +//! In general you just need to make sure that if you want to convert a usize address to a +//! pointer and then use that pointer to read/write memory, you need to keep around a pointer +//! that has sufficient provenance to perform that read/write itself. In this way all of your +//! casts from an address to a pointer are essentially just applying offsets/indexing. +//! +//! This is generally trivial to do for simple cases like tagged pointers *as long as you +//! represent the tagged pointer as an actual pointer and not a usize*. For instance: +//! +//! ``` +//! #![feature(strict_provenance)] +//! +//! unsafe { +//! // A flag we want to pack into our pointer +//! static HAS_DATA: usize = 0x1; +//! static FLAG_MASK: usize = !HAS_DATA; +//! +//! // Our value, which must have enough alignment to have spare least-significant-bits. +//! let my_precious_data: u32 = 17; +//! assert!(core::mem::align_of::() > 1); +//! +//! // Create a tagged pointer +//! let ptr = &my_precious_data as *const u32; +//! let tagged = ptr.map_addr(|addr| addr | HAS_DATA); +//! +//! // Check the flag: +//! if tagged.addr() & HAS_DATA != 0 { +//! // Untag and read the pointer +//! let data = *tagged.map_addr(|addr| addr & FLAG_MASK); +//! assert_eq!(data, 17); +//! } else { +//! unreachable!() +//! } +//! } +//! ``` +//! +//! (Yes, if you've been using AtomicUsize for pointers in concurrent datastructures, you should +//! be using AtomicPtr instead. If that messes up the way you atomically manipulate pointers, +//! we would like to know why, and what needs to be done to fix it.) +//! +//! Something more complicated and just generally *evil* like an XOR-List requires more significant +//! changes like allocating all nodes in a pre-allocated Vec or Arena and using a pointer +//! to the whole allocation to reconstitute the XORed addresses. +//! +//! Situations where a valid pointer *must* be created from just an address, such as baremetal code +//! accessing a memory-mapped interface at a fixed address, are an open question on how to support. +//! These situations *will* still be allowed, but we might require some kind of "I know what I'm +//! doing" annotation to explain the situation to the compiler. It's also possible they need no +//! special attention at all, because they're generally accessing memory outside the scope of +//! "the abstract machine", or already using "I know what I'm doing" annotations like "volatile". +//! +//! Under [Strict Provenance] it is Undefined Behaviour to: +//! +//! * Access memory through a pointer that does not have provenance over that memory. +//! +//! * [`offset`] a pointer to or from an address it doesn't have provenance over. +//! This means it's always UB to offset a pointer derived from something deallocated, +//! even if the offset is 0. Note that a pointer "one past the end" of its provenance +//! is not actually outside its provenance, it just has 0 bytes it can load/store. +//! +//! But it *is* still sound to: +//! +//! * Create an invalid pointer from just an address (see [`ptr::invalid`][]). This can +//! be used for sentinel values like `null` *or* to represent a tagged pointer that will +//! never be dereferencable. 
In general, it is always sound for an integer to pretend +//! to be a pointer "for fun" as long as you don't use operations on it which require +//! it to be valid (offset, read, write, etc). +//! +//! * Forge an allocation of size zero at any sufficiently aligned non-null address. +//! i.e. the usual "ZSTs are fake, do what you want" rules apply *but* this only applies +//! for actual forgery (integers cast to pointers). If you borrow some struct's field +//! that *happens* to be zero-sized, the resulting pointer will have provenance tied to +//! that allocation and it will still get invalidated if the allocation gets deallocated. +//! In the future we may introduce an API to make such a forged allocation explicit. +//! +//! * [`wrapping_offset`][] a pointer outside its provenance. This includes invalid pointers +//! which have "no" provenance. Unfortunately there may be practical limits on this for a +//! particular platform, and it's an open question as to how to specify this (if at all). +//! Notably, [CHERI][] relies on a compression scheme that can't handle a +//! pointer getting offset "too far" out of bounds. If this happens, the address +//! returned by `addr` will be the value you expect, but the provenance will get invalidated +//! and using it to read/write will fault. The details of this are architecture-specific +//! and based on alignment, but the buffer on either side of the pointer's range is pretty +//! generous (think kilobytes, not bytes). +//! +//! * Compare arbitrary pointers by address. Addresses *are* just integers and so there is +//! always a coherent answer, even if the pointers are invalid or from different +//! address-spaces/provenances. Of course, comparing addresses from different address-spaces +//! is generally going to be *meaningless*, but so is comparing Kilograms to Meters, and Rust +//! doesn't prevent that either. Similarly, if you get "lucky" and notice that a pointer +//! one-past-the-end is the "same" address as the start of an unrelated allocation, anything +//! you do with that fact is *probably* going to be gibberish. The scope of that gibberish +//! is kept under control by the fact that the two pointers *still* aren't allowed to access +//! the other's allocation (bytes), because they still have different provenance. +//! +//! * Perform pointer tagging tricks. This falls out of [`wrapping_offset`] but is worth +//! mentioning in more detail because of the limitations of [CHERI][]. Low-bit tagging +//! is very robust, and often doesn't even go out of bounds because types ensure +//! size >= align (and over-aligning actually gives CHERI more flexibility). Anything +//! more complex than this rapidly enters "extremely platform-specific" territory as +//! certain things may or may not be allowed based on specific supported operations. +//! For instance, ARM explicitly supports high-bit tagging, and so CHERI on ARM inherits +//! that and should support it. +//! +//! ## Pointer-usize-pointer roundtrips and 'exposed' provenance +//! +//! **This section is *non-normative* and is part of the [Strict Provenance] experiment.** +//! +//! As discussed above, pointer-usize-pointer roundtrips are not possible under [Strict Provenance]. +//! However, there exists legacy Rust code that is full of such roundtrips, and legacy platform APIs +//! regularly assume that `usize` can capture all the information that makes up a pointer. There +//! also might be code that cannot be ported to Strict Provenance (which is something we would [like +//! 
to hear about][Strict Provenance]). +//! +//! For situations like this, there is a fallback plan, a way to 'opt out' of Strict Provenance. +//! However, note that this makes your code a lot harder to specify, and the code will not work +//! (well) with tools like [Miri] and [CHERI]. +//! +//! This fallback plan is provided by the [`expose_addr`] and [`from_exposed_addr`] methods (which +//! are equivalent to `as` casts between pointers and integers). [`expose_addr`] is a lot like +//! [`addr`], but additionally adds the provenance of the pointer to a global list of 'exposed' +//! provenances. (This list is purely conceptual, it exists for the purpose of specifying Rust but +//! is not materialized in actual executions, except in tools like [Miri].) [`from_exposed_addr`] +//! can be used to construct a pointer with one of these previously 'exposed' provenances. +//! [`from_exposed_addr`] takes only `addr: usize` as arguments, so unlike in [`with_addr`] there is +//! no indication of what the correct provenance for the returned pointer is -- and that is exactly +//! what makes pointer-usize-pointer roundtrips so tricky to rigorously specify! There is no +//! algorithm that decides which provenance will be used. You can think of this as "guessing" the +//! right provenance, and the guess will be "maximally in your favor", in the sense that if there is +//! any way to avoid undefined behavior, then that is the guess that will be taken. However, if +//! there is *no* previously 'exposed' provenance that justifies the way the returned pointer will +//! be used, the program has undefined behavior. +//! +//! Using [`expose_addr`] or [`from_exposed_addr`] (or the equivalent `as` casts) means that code is +//! *not* following Strict Provenance rules. The goal of the Strict Provenance experiment is to +//! determine whether it is possible to use Rust without [`expose_addr`] and [`from_exposed_addr`]. +//! If this is successful, it would be a major win for avoiding specification complexity and to +//! facilitate adoption of tools like [CHERI] and [Miri] that can be a big help in increasing the +//! confidence in (unsafe) Rust code. +//! +//! [aliasing]: ../../nomicon/aliasing.html +//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer +//! [ub]: ../../reference/behavior-considered-undefined.html +//! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts +//! [atomic operations]: crate::sync::atomic +//! [`offset`]: pointer::offset +//! [`wrapping_offset`]: pointer::wrapping_offset +//! [`with_addr`]: pointer::with_addr +//! [`map_addr`]: pointer::map_addr +//! [`addr`]: pointer::addr +//! [`ptr::invalid`]: core::ptr::invalid +//! [`expose_addr`]: pointer::expose_addr +//! [`from_exposed_addr`]: from_exposed_addr +//! [Miri]: https://github.com/rust-lang/miri +//! [CHERI]: https://www.cl.cam.ac.uk/research/security/ctsrd/cheri/ +//! [Strict Provenance]: https://github.com/rust-lang/rust/issues/95228 +//! 
[Stacked Borrows]: https://plv.mpi-sws.org/rustbelt/stacked-borrows/ + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::cmp::Ordering; +use crate::fmt; +use crate::hash; +use crate::intrinsics::{ + self, assert_unsafe_precondition, is_aligned_and_not_null, is_nonoverlapping, +}; + +use crate::mem::{self, MaybeUninit}; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(inline)] +pub use crate::intrinsics::copy_nonoverlapping; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(inline)] +pub use crate::intrinsics::copy; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(inline)] +pub use crate::intrinsics::write_bytes; + +mod metadata; +pub(crate) use metadata::PtrRepr; +#[unstable(feature = "ptr_metadata", issue = "81513")] +pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin}; + +mod non_null; +#[stable(feature = "nonnull", since = "1.25.0")] +pub use non_null::NonNull; + +mod unique; +#[unstable(feature = "ptr_internals", issue = "none")] +pub use unique::Unique; + +mod const_ptr; +mod mut_ptr; + +/// Executes the destructor (if any) of the pointed-to value. +/// +/// This is semantically equivalent to calling [`ptr::read`] and discarding +/// the result, but has the following advantages: +/// +/// * It is *required* to use `drop_in_place` to drop unsized types like +/// trait objects, because they can't be read out onto the stack and +/// dropped normally. +/// +/// * It is friendlier to the optimizer to do this over [`ptr::read`] when +/// dropping manually allocated memory (e.g., in the implementations of +/// `Box`/`Rc`/`Vec`), as the compiler doesn't need to prove that it's +/// sound to elide the copy. +/// +/// * It can be used to drop [pinned] data when `T` is not `repr(packed)` +/// (pinned data must not be moved before it is dropped). +/// +/// Unaligned values cannot be dropped in place, they must be copied to an aligned +/// location first using [`ptr::read_unaligned`]. For packed structs, this move is +/// done automatically by the compiler. This means the fields of packed structs +/// are not dropped in-place. +/// +/// [`ptr::read`]: self::read +/// [`ptr::read_unaligned`]: self::read_unaligned +/// [pinned]: crate::pin +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `to_drop` must be [valid] for both reads and writes. +/// +/// * `to_drop` must be properly aligned. +/// +/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold +/// additional invariants - this is type-dependent. +/// +/// Additionally, if `T` is not [`Copy`], using the pointed-to value after +/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop = +/// foo` counts as a use because it will cause the value to be dropped +/// again. [`write()`] can be used to overwrite data without causing it to be +/// dropped. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned. +/// +/// [valid]: self#safety +/// +/// # Examples +/// +/// Manually remove the last item from a vector: +/// +/// ``` +/// use std::ptr; +/// use std::rc::Rc; +/// +/// let last = Rc::new(1); +/// let weak = Rc::downgrade(&last); +/// +/// let mut v = vec![Rc::new(0), last]; +/// +/// unsafe { +/// // Get a raw pointer to the last element in `v`. +/// let ptr = &mut v[1] as *mut _; +/// // Shorten `v` to prevent the last item from being dropped. 
We do that first, +/// // to prevent issues if the `drop_in_place` below panics. +/// v.set_len(1); +/// // Without a call `drop_in_place`, the last item would never be dropped, +/// // and the memory it manages would be leaked. +/// ptr::drop_in_place(ptr); +/// } +/// +/// assert_eq!(v, &[0.into()]); +/// +/// // Ensure that the last item was dropped. +/// assert!(weak.upgrade().is_none()); +/// ``` +#[stable(feature = "drop_in_place", since = "1.8.0")] +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + + // SAFETY: see comment above + unsafe { drop_in_place(to_drop) } +} + +/// Creates a null raw pointer. +/// +/// # Examples +/// +/// ``` +/// use std::ptr; +/// +/// let p: *const i32 = ptr::null(); +/// assert!(p.is_null()); +/// ``` +#[inline(always)] +#[must_use] +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_promotable] +#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")] +#[rustc_allow_const_fn_unstable(ptr_metadata)] +#[rustc_diagnostic_item = "ptr_null"] +pub const fn null() -> *const T { + from_raw_parts(invalid(0), ()) +} + +/// Creates an invalid pointer with the given address. +/// +/// This is different from `addr as *const T`, which creates a pointer that picks up a previously +/// exposed provenance. See [`from_exposed_addr`] for more details on that operation. +/// +/// The module's top-level documentation discusses the precise meaning of an "invalid" +/// pointer but essentially this expresses that the pointer is not associated +/// with any actual allocation and is little more than a usize address in disguise. +/// +/// This pointer will have no provenance associated with it and is therefore +/// UB to read/write/offset. This mostly exists to facilitate things +/// like `ptr::null` and `NonNull::dangling` which make invalid pointers. +/// +/// (Standard "Zero-Sized-Types get to cheat and lie" caveats apply, although it +/// may be desirable to give them their own API just to make that 100% clear.) +/// +/// This API and its claimed semantics are part of the Strict Provenance experiment, +/// see the [module documentation][crate::ptr] for details. +#[inline(always)] +#[must_use] +#[rustc_const_stable(feature = "stable_things_using_strict_provenance", since = "1.61.0")] +#[unstable(feature = "strict_provenance", issue = "95228")] +pub const fn invalid(addr: usize) -> *const T { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + // We use transmute rather than a cast so tools like Miri can tell that this + // is *not* the same as from_exposed_addr. + // SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that + // pointer). + unsafe { mem::transmute(addr) } +} + +/// Creates an invalid mutable pointer with the given address. +/// +/// This is different from `addr as *mut T`, which creates a pointer that picks up a previously +/// exposed provenance. See [`from_exposed_addr_mut`] for more details on that operation. +/// +/// The module's top-level documentation discusses the precise meaning of an "invalid" +/// pointer but essentially this expresses that the pointer is not associated +/// with any actual allocation and is little more than a usize address in disguise. +/// +/// This pointer will have no provenance associated with it and is therefore +/// UB to read/write/offset. 
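A small sketch of the intended use, assuming the unstable `strict_provenance` feature: the resulting pointer carries an address but no provenance, so it is only ever compared or stored, never dereferenced.

```rust
#![feature(strict_provenance)]
use std::ptr;

fn main() {
    // A sentinel value distinguishable from null, usable as a marker only.
    let sentinel: *mut u8 = ptr::invalid_mut(0x1000);
    assert!(!sentinel.is_null());
    assert_eq!(sentinel.addr(), 0x1000);
    // Reading, writing, or offsetting `sentinel` would be undefined behavior.
}
```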
This mostly exists to facilitate things +/// like `ptr::null` and `NonNull::dangling` which make invalid pointers. +/// +/// (Standard "Zero-Sized-Types get to cheat and lie" caveats apply, although it +/// may be desirable to give them their own API just to make that 100% clear.) +/// +/// This API and its claimed semantics are part of the Strict Provenance experiment, +/// see the [module documentation][crate::ptr] for details. +#[inline(always)] +#[must_use] +#[rustc_const_stable(feature = "stable_things_using_strict_provenance", since = "1.61.0")] +#[unstable(feature = "strict_provenance", issue = "95228")] +pub const fn invalid_mut(addr: usize) -> *mut T { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + // We use transmute rather than a cast so tools like Miri can tell that this + // is *not* the same as from_exposed_addr. + // SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that + // pointer). + unsafe { mem::transmute(addr) } +} + +/// Convert an address back to a pointer, picking up a previously 'exposed' provenance. +/// +/// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any* +/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize` +/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be +/// used, the program has undefined behavior. Note that there is no algorithm that decides which +/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess +/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined +/// behavior, then that is the guess that will be taken. +/// +/// On platforms with multiple address spaces, it is your responsibility to ensure that the +/// address makes sense in the address space that this pointer will be used with. +/// +/// Using this method means that code is *not* following strict provenance rules. "Guessing" a +/// suitable provenance complicates specification and reasoning and may not be supported by +/// tools that help you to stay conformant with the Rust memory model, so it is recommended to +/// use [`with_addr`][pointer::with_addr] wherever possible. +/// +/// On most platforms this will produce a value with the same bytes as the address. Platforms +/// which need to store additional information in a pointer may not support this operation, +/// since it is generally not possible to actually *compute* which provenance the returned +/// pointer has to pick up. +/// +/// This API and its claimed semantics are part of the Strict Provenance experiment, see the +/// [module documentation][crate::ptr] for details. +#[must_use] +#[inline] +#[unstable(feature = "strict_provenance", issue = "95228")] +pub fn from_exposed_addr(addr: usize) -> *const T +where + T: Sized, +{ + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + addr as *const T +} + +/// Convert an address back to a mutable pointer, picking up a previously 'exposed' provenance. +/// +/// This is equivalent to `addr as *mut T`. The provenance of the returned pointer is that of *any* +/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize` +/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be +/// used, the program has undefined behavior. 
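A minimal round-trip sketch using these 'exposed provenance' escape hatches, assuming the unstable `strict_provenance` feature; code that can use `with_addr` instead should prefer it:

```rust
#![feature(strict_provenance)]
use std::ptr;

fn main() {
    let value = 42u32;
    let p: *const u32 = &value;
    // `expose_addr` returns the address and marks the provenance as exposed.
    let addr: usize = p.expose_addr();
    // Later, a bare address can be turned back into a usable pointer,
    // picking up a previously exposed provenance.
    let q: *const u32 = ptr::from_exposed_addr(addr);
    assert_eq!(unsafe { *q }, 42);
}
```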
Note that there is no algorithm that decides which +/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess +/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined +/// behavior, then that is the guess that will be taken. +/// +/// On platforms with multiple address spaces, it is your responsibility to ensure that the +/// address makes sense in the address space that this pointer will be used with. +/// +/// Using this method means that code is *not* following strict provenance rules. "Guessing" a +/// suitable provenance complicates specification and reasoning and may not be supported by +/// tools that help you to stay conformant with the Rust memory model, so it is recommended to +/// use [`with_addr`][pointer::with_addr] wherever possible. +/// +/// On most platforms this will produce a value with the same bytes as the address. Platforms +/// which need to store additional information in a pointer may not support this operation, +/// since it is generally not possible to actually *compute* which provenance the returned +/// pointer has to pick up. +/// +/// This API and its claimed semantics are part of the Strict Provenance experiment, see the +/// [module documentation][crate::ptr] for details. +#[must_use] +#[inline] +#[unstable(feature = "strict_provenance", issue = "95228")] +pub fn from_exposed_addr_mut(addr: usize) -> *mut T +where + T: Sized, +{ + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + addr as *mut T +} + +/// Creates a null mutable raw pointer. +/// +/// # Examples +/// +/// ``` +/// use std::ptr; +/// +/// let p: *mut i32 = ptr::null_mut(); +/// assert!(p.is_null()); +/// ``` +#[inline(always)] +#[must_use] +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_promotable] +#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")] +#[rustc_allow_const_fn_unstable(ptr_metadata)] +#[rustc_diagnostic_item = "ptr_null_mut"] +pub const fn null_mut() -> *mut T { + from_raw_parts_mut(invalid_mut(0), ()) +} + +/// Forms a raw slice from a pointer and a length. +/// +/// The `len` argument is the number of **elements**, not the number of bytes. +/// +/// This function is safe, but actually using the return value is unsafe. +/// See the documentation of [`slice::from_raw_parts`] for slice safety requirements. +/// +/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts +/// +/// # Examples +/// +/// ```rust +/// use std::ptr; +/// +/// // create a slice pointer when starting out with a pointer to the first element +/// let x = [5, 6, 7]; +/// let raw_pointer = x.as_ptr(); +/// let slice = ptr::slice_from_raw_parts(raw_pointer, 3); +/// assert_eq!(unsafe { &*slice }[2], 7); +/// ``` +#[inline] +#[stable(feature = "slice_from_raw_parts", since = "1.42.0")] +#[rustc_const_stable(feature = "const_slice_from_raw_parts", since = "1.64.0")] +#[rustc_allow_const_fn_unstable(ptr_metadata)] +pub const fn slice_from_raw_parts(data: *const T, len: usize) -> *const [T] { + from_raw_parts(data.cast(), len) +} + +/// Performs the same functionality as [`slice_from_raw_parts`], except that a +/// raw mutable slice is returned, as opposed to a raw immutable slice. +/// +/// See the documentation of [`slice_from_raw_parts`] for more details. +/// +/// This function is safe, but actually using the return value is unsafe. +/// See the documentation of [`slice::from_raw_parts_mut`] for slice safety requirements. 
+/// +/// [`slice::from_raw_parts_mut`]: crate::slice::from_raw_parts_mut +/// +/// # Examples +/// +/// ```rust +/// use std::ptr; +/// +/// let x = &mut [5, 6, 7]; +/// let raw_pointer = x.as_mut_ptr(); +/// let slice = ptr::slice_from_raw_parts_mut(raw_pointer, 3); +/// +/// unsafe { +/// (*slice)[2] = 99; // assign a value at an index in the slice +/// }; +/// +/// assert_eq!(unsafe { &*slice }[2], 99); +/// ``` +#[inline] +#[stable(feature = "slice_from_raw_parts", since = "1.42.0")] +#[rustc_const_unstable(feature = "const_slice_from_raw_parts_mut", issue = "67456")] +pub const fn slice_from_raw_parts_mut(data: *mut T, len: usize) -> *mut [T] { + from_raw_parts_mut(data.cast(), len) +} + +/// Swaps the values at two mutable locations of the same type, without +/// deinitializing either. +/// +/// But for the following exceptions, this function is semantically +/// equivalent to [`mem::swap`]: +/// +/// * It operates on raw pointers instead of references. When references are +/// available, [`mem::swap`] should be preferred. +/// +/// * The two pointed-to values may overlap. If the values do overlap, then the +/// overlapping region of memory from `x` will be used. This is demonstrated +/// in the second example below. +/// +/// * The operation is "untyped" in the sense that data may be uninitialized or otherwise violate +/// the requirements of `T`. The initialization state is preserved exactly. +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * Both `x` and `y` must be [valid] for both reads and writes. +/// +/// * Both `x` and `y` must be properly aligned. +/// +/// Note that even if `T` has size `0`, the pointers must be non-null and properly aligned. +/// +/// [valid]: self#safety +/// +/// # Examples +/// +/// Swapping two non-overlapping regions: +/// +/// ``` +/// use std::ptr; +/// +/// let mut array = [0, 1, 2, 3]; +/// +/// let (x, y) = array.split_at_mut(2); +/// let x = x.as_mut_ptr().cast::<[u32; 2]>(); // this is `array[0..2]` +/// let y = y.as_mut_ptr().cast::<[u32; 2]>(); // this is `array[2..4]` +/// +/// unsafe { +/// ptr::swap(x, y); +/// assert_eq!([2, 3, 0, 1], array); +/// } +/// ``` +/// +/// Swapping two overlapping regions: +/// +/// ``` +/// use std::ptr; +/// +/// let mut array: [i32; 4] = [0, 1, 2, 3]; +/// +/// let array_ptr: *mut i32 = array.as_mut_ptr(); +/// +/// let x = array_ptr as *mut [i32; 3]; // this is `array[0..3]` +/// let y = unsafe { array_ptr.add(1) } as *mut [i32; 3]; // this is `array[1..4]` +/// +/// unsafe { +/// ptr::swap(x, y); +/// // The indices `1..3` of the slice overlap between `x` and `y`. +/// // Reasonable results would be for to them be `[2, 3]`, so that indices `0..3` are +/// // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]` +/// // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`). +/// // This implementation is defined to make the latter choice. +/// assert_eq!([1, 0, 1, 2], array); +/// } +/// ``` +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_const_unstable(feature = "const_swap", issue = "83163")] +pub const unsafe fn swap(x: *mut T, y: *mut T) { + // Give ourselves some scratch space to work with. + // We do not have to worry about drops: `MaybeUninit` does nothing when dropped. + let mut tmp = MaybeUninit::::uninit(); + + // Perform the swap + // SAFETY: the caller must guarantee that `x` and `y` are + // valid for writes and properly aligned. 
`tmp` cannot be + // overlapping either `x` or `y` because `tmp` was just allocated + // on the stack as a separate allocated object. + unsafe { + copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); + copy(y, x, 1); // `x` and `y` may overlap + copy_nonoverlapping(tmp.as_ptr(), y, 1); + } +} + +/// Swaps `count * size_of::()` bytes between the two regions of memory +/// beginning at `x` and `y`. The two regions must *not* overlap. +/// +/// The operation is "untyped" in the sense that data may be uninitialized or otherwise violate the +/// requirements of `T`. The initialization state is preserved exactly. +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * Both `x` and `y` must be [valid] for both reads and writes of `count * +/// size_of::()` bytes. +/// +/// * Both `x` and `y` must be properly aligned. +/// +/// * The region of memory beginning at `x` with a size of `count * +/// size_of::()` bytes must *not* overlap with the region of memory +/// beginning at `y` with the same size. +/// +/// Note that even if the effectively copied size (`count * size_of::()`) is `0`, +/// the pointers must be non-null and properly aligned. +/// +/// [valid]: self#safety +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::ptr; +/// +/// let mut x = [1, 2, 3, 4]; +/// let mut y = [7, 8, 9]; +/// +/// unsafe { +/// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2); +/// } +/// +/// assert_eq!(x, [7, 8, 3, 4]); +/// assert_eq!(y, [1, 2, 9]); +/// ``` +#[inline] +#[stable(feature = "swap_nonoverlapping", since = "1.27.0")] +#[rustc_const_unstable(feature = "const_swap", issue = "83163")] +pub const unsafe fn swap_nonoverlapping(x: *mut T, y: *mut T, count: usize) { + #[allow(unused)] + macro_rules! attempt_swap_as_chunks { + ($ChunkTy:ty) => { + if mem::align_of::() >= mem::align_of::<$ChunkTy>() + && mem::size_of::() % mem::size_of::<$ChunkTy>() == 0 + { + let x: *mut $ChunkTy = x.cast(); + let y: *mut $ChunkTy = y.cast(); + let count = count * (mem::size_of::() / mem::size_of::<$ChunkTy>()); + // SAFETY: these are the same bytes that the caller promised were + // ok, just typed as `MaybeUninit`s instead of as `T`s. + // The `if` condition above ensures that we're not violating + // alignment requirements, and that the division is exact so + // that we don't lose any bytes off the end. + return unsafe { swap_nonoverlapping_simple_untyped(x, y, count) }; + } + }; + } + + // SAFETY: the caller must guarantee that `x` and `y` are + // valid for writes and properly aligned. + unsafe { + assert_unsafe_precondition!( + is_aligned_and_not_null(x) + && is_aligned_and_not_null(y) + && is_nonoverlapping(x, y, count) + ); + } + + // NOTE(scottmcm) Miri is disabled here as reading in smaller units is a + // pessimization for it. Also, if the type contains any unaligned pointers, + // copying those over multiple reads is difficult to support. + #[cfg(not(miri))] + { + // Split up the slice into small power-of-two-sized chunks that LLVM is able + // to vectorize (unless it's a special type with more-than-pointer alignment, + // because we don't want to pessimize things like slices of SIMD vectors.) 
+ if mem::align_of::() <= mem::size_of::() + && (!mem::size_of::().is_power_of_two() + || mem::size_of::() > mem::size_of::() * 2) + { + attempt_swap_as_chunks!(usize); + attempt_swap_as_chunks!(u8); + } + } + + // SAFETY: Same preconditions as this function + unsafe { swap_nonoverlapping_simple_untyped(x, y, count) } +} + +/// Same behaviour and safety conditions as [`swap_nonoverlapping`] +/// +/// LLVM can vectorize this (at least it can for the power-of-two-sized types +/// `swap_nonoverlapping` tries to use) so no need to manually SIMD it. +#[inline] +#[rustc_const_unstable(feature = "const_swap", issue = "83163")] +const unsafe fn swap_nonoverlapping_simple_untyped(x: *mut T, y: *mut T, count: usize) { + let x = x.cast::>(); + let y = y.cast::>(); + let mut i = 0; + while i < count { + // SAFETY: By precondition, `i` is in-bounds because it's below `n` + let x = unsafe { &mut *x.add(i) }; + // SAFETY: By precondition, `i` is in-bounds because it's below `n` + // and it's distinct from `x` since the ranges are non-overlapping + let y = unsafe { &mut *y.add(i) }; + mem::swap_simple::>(x, y); + + i += 1; + } +} + +/// Moves `src` into the pointed `dst`, returning the previous `dst` value. +/// +/// Neither value is dropped. +/// +/// This function is semantically equivalent to [`mem::replace`] except that it +/// operates on raw pointers instead of references. When references are +/// available, [`mem::replace`] should be preferred. +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `dst` must be [valid] for both reads and writes. +/// +/// * `dst` must be properly aligned. +/// +/// * `dst` must point to a properly initialized value of type `T`. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned. +/// +/// [valid]: self#safety +/// +/// # Examples +/// +/// ``` +/// use std::ptr; +/// +/// let mut rust = vec!['b', 'u', 's', 't']; +/// +/// // `mem::replace` would have the same effect without requiring the unsafe +/// // block. +/// let b = unsafe { +/// ptr::replace(&mut rust[0], 'r') +/// }; +/// +/// assert_eq!(b, 'b'); +/// assert_eq!(rust, &['r', 'u', 's', 't']); +/// ``` +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_const_unstable(feature = "const_replace", issue = "83164")] +pub const unsafe fn replace(dst: *mut T, mut src: T) -> T { + // SAFETY: the caller must guarantee that `dst` is valid to be + // cast to a mutable reference (valid for writes, aligned, initialized), + // and cannot overlap `src` since `dst` must point to a distinct + // allocated object. + unsafe { + assert_unsafe_precondition!(is_aligned_and_not_null(dst)); + mem::swap(&mut *dst, &mut src); // cannot overlap + } + src +} + +/// Reads the value from `src` without moving it. This leaves the +/// memory in `src` unchanged. +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `src` must be [valid] for reads. +/// +/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the +/// case. +/// +/// * `src` must point to a properly initialized value of type `T`. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned. 
+/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let x = 12; +/// let y = &x as *const i32; +/// +/// unsafe { +/// assert_eq!(std::ptr::read(y), 12); +/// } +/// ``` +/// +/// Manually implement [`mem::swap`]: +/// +/// ``` +/// use std::ptr; +/// +/// fn swap(a: &mut T, b: &mut T) { +/// unsafe { +/// // Create a bitwise copy of the value at `a` in `tmp`. +/// let tmp = ptr::read(a); +/// +/// // Exiting at this point (either by explicitly returning or by +/// // calling a function which panics) would cause the value in `tmp` to +/// // be dropped while the same value is still referenced by `a`. This +/// // could trigger undefined behavior if `T` is not `Copy`. +/// +/// // Create a bitwise copy of the value at `b` in `a`. +/// // This is safe because mutable references cannot alias. +/// ptr::copy_nonoverlapping(b, a, 1); +/// +/// // As above, exiting here could trigger undefined behavior because +/// // the same value is referenced by `a` and `b`. +/// +/// // Move `tmp` into `b`. +/// ptr::write(b, tmp); +/// +/// // `tmp` has been moved (`write` takes ownership of its second argument), +/// // so nothing is dropped implicitly here. +/// } +/// } +/// +/// let mut foo = "foo".to_owned(); +/// let mut bar = "bar".to_owned(); +/// +/// swap(&mut foo, &mut bar); +/// +/// assert_eq!(foo, "bar"); +/// assert_eq!(bar, "foo"); +/// ``` +/// +/// ## Ownership of the Returned Value +/// +/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`]. +/// If `T` is not [`Copy`], using both the returned value and the value at +/// `*src` can violate memory safety. Note that assigning to `*src` counts as a +/// use because it will attempt to drop the value at `*src`. +/// +/// [`write()`] can be used to overwrite data without causing it to be dropped. +/// +/// ``` +/// use std::ptr; +/// +/// let mut s = String::from("foo"); +/// unsafe { +/// // `s2` now points to the same underlying memory as `s`. +/// let mut s2: String = ptr::read(&s); +/// +/// assert_eq!(s2, "foo"); +/// +/// // Assigning to `s2` causes its original value to be dropped. Beyond +/// // this point, `s` must no longer be used, as the underlying memory has +/// // been freed. +/// s2 = String::default(); +/// assert_eq!(s2, ""); +/// +/// // Assigning to `s` would cause the old value to be dropped again, +/// // resulting in undefined behavior. +/// // s = String::from("bar"); // ERROR +/// +/// // `ptr::write` can be used to overwrite a value without dropping it. +/// ptr::write(&mut s, String::from("bar")); +/// } +/// +/// assert_eq!(s, "bar"); +/// ``` +/// +/// [valid]: self#safety +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")] +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub const unsafe fn read(src: *const T) -> T { + // We are calling the intrinsics directly to avoid function calls in the generated code + // as `intrinsics::copy_nonoverlapping` is a wrapper function. + extern "rust-intrinsic" { + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize); + } + + let mut tmp = MaybeUninit::::uninit(); + // SAFETY: the caller must guarantee that `src` is valid for reads. + // `src` cannot overlap `tmp` because `tmp` was just allocated on + // the stack as a separate allocated object. 
+ // + // Also, since we just wrote a valid value into `tmp`, it is guaranteed + // to be properly initialized. + unsafe { + copy_nonoverlapping(src, tmp.as_mut_ptr(), 1); + tmp.assume_init() + } +} + +/// Reads the value from `src` without moving it. This leaves the +/// memory in `src` unchanged. +/// +/// Unlike [`read`], `read_unaligned` works with unaligned pointers. +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `src` must be [valid] for reads. +/// +/// * `src` must point to a properly initialized value of type `T`. +/// +/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of +/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned +/// value and the value at `*src` can [violate memory safety][read-ownership]. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null. +/// +/// [read-ownership]: read#ownership-of-the-returned-value +/// [valid]: self#safety +/// +/// ## On `packed` structs +/// +/// Attempting to create a raw pointer to an `unaligned` struct field with +/// an expression such as `&packed.unaligned as *const FieldType` creates an +/// intermediate unaligned reference before converting that to a raw pointer. +/// That this reference is temporary and immediately cast is inconsequential +/// as the compiler always expects references to be properly aligned. +/// As a result, using `&packed.unaligned as *const FieldType` causes immediate +/// *undefined behavior* in your program. +/// +/// Instead you must use the [`ptr::addr_of!`](addr_of) macro to +/// create the pointer. You may use that returned pointer together with this +/// function. +/// +/// An example of what not to do and how this relates to `read_unaligned` is: +/// +/// ``` +/// #[repr(packed, C)] +/// struct Packed { +/// _padding: u8, +/// unaligned: u32, +/// } +/// +/// let packed = Packed { +/// _padding: 0x00, +/// unaligned: 0x01020304, +/// }; +/// +/// // Take the address of a 32-bit integer which is not aligned. +/// // In contrast to `&packed.unaligned as *const _`, this has no undefined behavior. +/// let unaligned = std::ptr::addr_of!(packed.unaligned); +/// +/// let v = unsafe { std::ptr::read_unaligned(unaligned) }; +/// assert_eq!(v, 0x01020304); +/// ``` +/// +/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however. +/// +/// # Examples +/// +/// Read a usize value from a byte buffer: +/// +/// ``` +/// use std::mem; +/// +/// fn read_usize(x: &[u8]) -> usize { +/// assert!(x.len() >= mem::size_of::()); +/// +/// let ptr = x.as_ptr() as *const usize; +/// +/// unsafe { ptr.read_unaligned() } +/// } +/// ``` +#[inline] +#[stable(feature = "ptr_unaligned", since = "1.17.0")] +#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")] +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub const unsafe fn read_unaligned(src: *const T) -> T { + let mut tmp = MaybeUninit::::uninit(); + // SAFETY: the caller must guarantee that `src` is valid for reads. + // `src` cannot overlap `tmp` because `tmp` was just allocated on + // the stack as a separate allocated object. + // + // Also, since we just wrote a valid value into `tmp`, it is guaranteed + // to be properly initialized. 
+ unsafe { + copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::()); + tmp.assume_init() + } +} + +/// Overwrites a memory location with the given value without reading or +/// dropping the old value. +/// +/// `write` does not drop the contents of `dst`. This is safe, but it could leak +/// allocations or resources, so care should be taken not to overwrite an object +/// that should be dropped. +/// +/// Additionally, it does not drop `src`. Semantically, `src` is moved into the +/// location pointed to by `dst`. +/// +/// This is appropriate for initializing uninitialized memory, or overwriting +/// memory that has previously been [`read`] from. +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `dst` must be [valid] for writes. +/// +/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the +/// case. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned. +/// +/// [valid]: self#safety +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let mut x = 0; +/// let y = &mut x as *mut i32; +/// let z = 12; +/// +/// unsafe { +/// std::ptr::write(y, z); +/// assert_eq!(std::ptr::read(y), 12); +/// } +/// ``` +/// +/// Manually implement [`mem::swap`]: +/// +/// ``` +/// use std::ptr; +/// +/// fn swap(a: &mut T, b: &mut T) { +/// unsafe { +/// // Create a bitwise copy of the value at `a` in `tmp`. +/// let tmp = ptr::read(a); +/// +/// // Exiting at this point (either by explicitly returning or by +/// // calling a function which panics) would cause the value in `tmp` to +/// // be dropped while the same value is still referenced by `a`. This +/// // could trigger undefined behavior if `T` is not `Copy`. +/// +/// // Create a bitwise copy of the value at `b` in `a`. +/// // This is safe because mutable references cannot alias. +/// ptr::copy_nonoverlapping(b, a, 1); +/// +/// // As above, exiting here could trigger undefined behavior because +/// // the same value is referenced by `a` and `b`. +/// +/// // Move `tmp` into `b`. +/// ptr::write(b, tmp); +/// +/// // `tmp` has been moved (`write` takes ownership of its second argument), +/// // so nothing is dropped implicitly here. +/// } +/// } +/// +/// let mut foo = "foo".to_owned(); +/// let mut bar = "bar".to_owned(); +/// +/// swap(&mut foo, &mut bar); +/// +/// assert_eq!(foo, "bar"); +/// assert_eq!(bar, "foo"); +/// ``` +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")] +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub const unsafe fn write(dst: *mut T, src: T) { + // We are calling the intrinsics directly to avoid function calls in the generated code + // as `intrinsics::copy_nonoverlapping` is a wrapper function. + extern "rust-intrinsic" { + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize); + } + + // SAFETY: the caller must guarantee that `dst` is valid for writes. + // `dst` cannot overlap `src` because the caller has mutable access + // to `dst` while `src` is owned by this function. + unsafe { + copy_nonoverlapping(&src as *const T, dst, 1); + intrinsics::forget(src); + } +} + +/// Overwrites a memory location with the given value without reading or +/// dropping the old value. +/// +/// Unlike [`write()`], the pointer may be unaligned. 
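Complementing the swap example, here is a small sketch (standard library only) of the most common reason to reach for `write`: moving a value into memory that has never held a valid `T`, so there is nothing to drop:

```
use std::mem::MaybeUninit;
use std::ptr;

let mut slot = MaybeUninit::<String>::uninit();

// `write` moves the value in without reading or dropping the old, uninitialized contents.
unsafe { ptr::write(slot.as_mut_ptr(), String::from("initialized")) };

// Only after the write is it sound to treat the slot's contents as a `String`.
let s = unsafe { slot.assume_init() };
assert_eq!(s, "initialized");
```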
+/// +/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it +/// could leak allocations or resources, so care should be taken not to overwrite +/// an object that should be dropped. +/// +/// Additionally, it does not drop `src`. Semantically, `src` is moved into the +/// location pointed to by `dst`. +/// +/// This is appropriate for initializing uninitialized memory, or overwriting +/// memory that has previously been read with [`read_unaligned`]. +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `dst` must be [valid] for writes. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null. +/// +/// [valid]: self#safety +/// +/// ## On `packed` structs +/// +/// Attempting to create a raw pointer to an `unaligned` struct field with +/// an expression such as `&packed.unaligned as *const FieldType` creates an +/// intermediate unaligned reference before converting that to a raw pointer. +/// That this reference is temporary and immediately cast is inconsequential +/// as the compiler always expects references to be properly aligned. +/// As a result, using `&packed.unaligned as *const FieldType` causes immediate +/// *undefined behavior* in your program. +/// +/// Instead you must use the [`ptr::addr_of_mut!`](addr_of_mut) +/// macro to create the pointer. You may use that returned pointer together with +/// this function. +/// +/// An example of how to do it and how this relates to `write_unaligned` is: +/// +/// ``` +/// #[repr(packed, C)] +/// struct Packed { +/// _padding: u8, +/// unaligned: u32, +/// } +/// +/// let mut packed: Packed = unsafe { std::mem::zeroed() }; +/// +/// // Take the address of a 32-bit integer which is not aligned. +/// // In contrast to `&packed.unaligned as *mut _`, this has no undefined behavior. +/// let unaligned = std::ptr::addr_of_mut!(packed.unaligned); +/// +/// unsafe { std::ptr::write_unaligned(unaligned, 42) }; +/// +/// assert_eq!({packed.unaligned}, 42); // `{...}` forces copying the field instead of creating a reference. +/// ``` +/// +/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however +/// (as can be seen in the `assert_eq!` above). +/// +/// # Examples +/// +/// Write a usize value to a byte buffer: +/// +/// ``` +/// use std::mem; +/// +/// fn write_usize(x: &mut [u8], val: usize) { +/// assert!(x.len() >= mem::size_of::()); +/// +/// let ptr = x.as_mut_ptr() as *mut usize; +/// +/// unsafe { ptr.write_unaligned(val) } +/// } +/// ``` +#[inline] +#[stable(feature = "ptr_unaligned", since = "1.17.0")] +#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")] +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub const unsafe fn write_unaligned(dst: *mut T, src: T) { + // SAFETY: the caller must guarantee that `dst` is valid for writes. + // `dst` cannot overlap `src` because the caller has mutable access + // to `dst` while `src` is owned by this function. + unsafe { + copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::()); + // We are calling the intrinsic directly to avoid function calls in the generated code. + intrinsics::forget(src); + } +} + +/// Performs a volatile read of the value from `src` without moving it. This +/// leaves the memory in `src` unchanged. 
+/// +/// Volatile operations are intended to act on I/O memory, and are guaranteed +/// to not be elided or reordered by the compiler across other volatile +/// operations. +/// +/// # Notes +/// +/// Rust does not currently have a rigorously and formally defined memory model, +/// so the precise semantics of what "volatile" means here is subject to change +/// over time. That being said, the semantics will almost always end up pretty +/// similar to [C11's definition of volatile][c11]. +/// +/// The compiler shouldn't change the relative order or number of volatile +/// memory operations. However, volatile memory operations on zero-sized types +/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops +/// and may be ignored. +/// +/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `src` must be [valid] for reads. +/// +/// * `src` must be properly aligned. +/// +/// * `src` must point to a properly initialized value of type `T`. +/// +/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of +/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned +/// value and the value at `*src` can [violate memory safety][read-ownership]. +/// However, storing non-[`Copy`] types in volatile memory is almost certainly +/// incorrect. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned. +/// +/// [valid]: self#safety +/// [read-ownership]: read#ownership-of-the-returned-value +/// +/// Just like in C, whether an operation is volatile has no bearing whatsoever +/// on questions involving concurrent access from multiple threads. Volatile +/// accesses behave exactly like non-atomic accesses in that regard. In particular, +/// a race between a `read_volatile` and any write operation to the same location +/// is undefined behavior. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let x = 12; +/// let y = &x as *const i32; +/// +/// unsafe { +/// assert_eq!(std::ptr::read_volatile(y), 12); +/// } +/// ``` +#[inline] +#[stable(feature = "volatile", since = "1.9.0")] +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub unsafe fn read_volatile(src: *const T) -> T { + // SAFETY: the caller must uphold the safety contract for `volatile_load`. + unsafe { + assert_unsafe_precondition!(is_aligned_and_not_null(src)); + intrinsics::volatile_load(src) + } +} + +/// Performs a volatile write of a memory location with the given value without +/// reading or dropping the old value. +/// +/// Volatile operations are intended to act on I/O memory, and are guaranteed +/// to not be elided or reordered by the compiler across other volatile +/// operations. +/// +/// `write_volatile` does not drop the contents of `dst`. This is safe, but it +/// could leak allocations or resources, so care should be taken not to overwrite +/// an object that should be dropped. +/// +/// Additionally, it does not drop `src`. Semantically, `src` is moved into the +/// location pointed to by `dst`. +/// +/// # Notes +/// +/// Rust does not currently have a rigorously and formally defined memory model, +/// so the precise semantics of what "volatile" means here is subject to change +/// over time. That being said, the semantics will almost always end up pretty +/// similar to [C11's definition of volatile][c11]. 
+/// +/// The compiler shouldn't change the relative order or number of volatile +/// memory operations. However, volatile memory operations on zero-sized types +/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops +/// and may be ignored. +/// +/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `dst` must be [valid] for writes. +/// +/// * `dst` must be properly aligned. +/// +/// Note that even if `T` has size `0`, the pointer must be non-null and properly aligned. +/// +/// [valid]: self#safety +/// +/// Just like in C, whether an operation is volatile has no bearing whatsoever +/// on questions involving concurrent access from multiple threads. Volatile +/// accesses behave exactly like non-atomic accesses in that regard. In particular, +/// a race between a `write_volatile` and any other operation (reading or writing) +/// on the same location is undefined behavior. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let mut x = 0; +/// let y = &mut x as *mut i32; +/// let z = 12; +/// +/// unsafe { +/// std::ptr::write_volatile(y, z); +/// assert_eq!(std::ptr::read_volatile(y), 12); +/// } +/// ``` +#[inline] +#[stable(feature = "volatile", since = "1.9.0")] +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub unsafe fn write_volatile(dst: *mut T, src: T) { + // SAFETY: the caller must uphold the safety contract for `volatile_store`. + unsafe { + assert_unsafe_precondition!(is_aligned_and_not_null(dst)); + intrinsics::volatile_store(dst, src); + } +} + +/// Align pointer `p`. +/// +/// Calculate offset (in terms of elements of `stride` stride) that has to be applied +/// to pointer `p` so that pointer `p` would get aligned to `a`. +/// +/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic. +/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated +/// constants. +/// +/// If we ever decide to make it possible to call the intrinsic with `a` that is not a +/// power-of-two, it will probably be more prudent to just change to a naive implementation rather +/// than trying to adapt this to accommodate that change. +/// +/// Any questions go to @nagisa. +#[lang = "align_offset"] +pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { + // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <= + // 1, where the method versions of these operations are not inlined. + use intrinsics::{ + cttz_nonzero, exact_div, unchecked_rem, unchecked_shl, unchecked_shr, unchecked_sub, + wrapping_add, wrapping_mul, wrapping_sub, + }; + + /// Calculate multiplicative modular inverse of `x` modulo `m`. + /// + /// This implementation is tailored for `align_offset` and has following preconditions: + /// + /// * `m` is a power-of-two; + /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead) + /// + /// Implementation of this function shall not panic. Ever. + #[inline] + unsafe fn mod_inv(x: usize, m: usize) -> usize { + /// Multiplicative modular inverse table modulo 2⁴ = 16. + /// + /// Note, that this table does not contain values where inverse does not exist (i.e., for + /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.) + const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15]; + /// Modulo for which the `INV_TABLE_MOD_16` is intended. 
+        const INV_TABLE_MOD: usize = 16;
+        /// INV_TABLE_MOD²
+        const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
+
+        let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
+        // SAFETY: `m` is required to be a power-of-two, hence non-zero.
+        let m_minus_one = unsafe { unchecked_sub(m, 1) };
+        if m <= INV_TABLE_MOD {
+            table_inverse & m_minus_one
+        } else {
+            // We iterate "up" using the following formula:
+            //
+            // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+            //
+            // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
+            let mut inverse = table_inverse;
+            let mut going_mod = INV_TABLE_MOD_SQUARED;
+            loop {
+                // y = y * (2 - xy) mod n
+                //
+                // Note, that we use wrapping operations here intentionally – the original formula
+                // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
+                // usize::MAX` instead, because we take the result `mod n` at the end
+                // anyway.
+                inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
+                if going_mod >= m {
+                    return inverse & m_minus_one;
+                }
+                going_mod = wrapping_mul(going_mod, going_mod);
+            }
+        }
+    }
+
+    let addr = p.addr();
+    let stride = mem::size_of::<T>();
+    // SAFETY: `a` is a power-of-two, therefore non-zero.
+    let a_minus_one = unsafe { unchecked_sub(a, 1) };
+
+    if stride == 0 {
+        // SPECIAL_CASE: handle 0-sized types. No matter how many times we step, the address will
+        // stay the same, so no offset will be able to align the pointer unless it is already
+        // aligned. This branch _will_ be optimized out as `stride` is known at compile-time.
+        let p_mod_a = addr & a_minus_one;
+        return if p_mod_a == 0 { 0 } else { usize::MAX };
+    }
+
+    // SAFETY: `stride == 0` case has been handled by the special case above.
+    let a_mod_stride = unsafe { unchecked_rem(a, stride) };
+    if a_mod_stride == 0 {
+        // SPECIAL_CASE: In cases where the `a` is divisible by `stride`, byte offset to align a
+        // pointer can be computed more simply through `-p (mod a)`. In the off-chance the byte
+        // offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
+        // offset will be able to produce a `p` aligned to the specified `a`.
+        //
+        // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
+        // like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
+        // redistributes operations around the load-bearing, but pessimizing `and` instruction
+        // sufficiently for LLVM to be able to utilize the various optimizations it knows about.
+        //
+        // LLVM handles the branch here particularly nicely. If this branch needs to be evaluated
+        // at runtime, it will produce a mask `if addr_mod_stride == 0 { 0 } else { usize::MAX }`
+        // in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
+        // computation produces.
+
+        // SAFETY: `stride == 0` case has been handled by the special case above.
+        let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
+
+        return if addr_mod_stride == 0 {
+            let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
+            let byte_offset = wrapping_sub(aligned_address, addr);
+            // SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
+            // addr has been verified to be aligned to the original type’s alignment requirements.
+ unsafe { exact_div(byte_offset, stride) } + } else { + usize::MAX + }; + } + + // GENERAL_CASE: From here on we’re handling the very general case where `addr` may be + // misaligned, there isn’t an obvious relationship between `stride` and `a` that we can take an + // advantage of, etc. This case produces machine code that isn’t particularly high quality, + // compared to the special cases above. The code produced here is still within the realm of + // miracles, given the situations this case has to deal with. + + // SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above. + let gcdpow = unsafe { cttz_nonzero(stride).min(cttz_nonzero(a)) }; + // SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a usize. + let gcd = unsafe { unchecked_shl(1usize, gcdpow) }; + // SAFETY: gcd is always greater or equal to 1. + if addr & unsafe { unchecked_sub(gcd, 1) } == 0 { + // This branch solves for the following linear congruence equation: + // + // ` p + so = 0 mod a ` + // + // `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the + // requested alignment. + // + // With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by + // `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to: + // + // ` p' + s'o = 0 mod a' ` + // ` o = (a' - (p' mod a')) * (s'^-1 mod a') ` + // + // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the + // second term is "how does incrementing `p` by `s` bytes change the relative alignment of + // `p`" (again divided by `g`). Division by `g` is necessary to make the inverse well + // formed if `a` and `s` are not co-prime. + // + // Furthermore, the result produced by this solution is not "minimal", so it is necessary + // to take the result `o mod lcm(s, a)`. This `lcm(s, a)` is the same as `a'`. + + // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in + // `a`. + let a2 = unsafe { unchecked_shr(a, gcdpow) }; + // SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits + // in `a` (of which it has exactly one). + let a2minus1 = unsafe { unchecked_sub(a2, 1) }; + // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in + // `a`. + let s2 = unsafe { unchecked_shr(stride & a_minus_one, gcdpow) }; + // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in + // `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will + // always be strictly greater than `(p % a) >> gcdpow`. + let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(addr & a_minus_one, gcdpow)) }; + // SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2` + // because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`. + return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1; + } + + // Cannot be aligned at all. + usize::MAX +} + +/// Compares raw pointers for equality. +/// +/// This is the same as using the `==` operator, but less generic: +/// the arguments have to be `*const T` raw pointers, +/// not anything that implements `PartialEq`. +/// +/// This can be used to compare `&T` references (which coerce to `*const T` implicitly) +/// by their address rather than comparing the values they point to +/// (which is what the `PartialEq for &T` implementation does). 
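To make the congruence `p + s·o ≡ 0 (mod a)` solved by the general case above concrete, here is a small check written against the public `align_offset` method, which is built on this helper. The buffer and requested alignment are arbitrary, illustrative choices:

```
use std::mem;

let buf = [0u16; 16];
let p = unsafe { buf.as_ptr().add(1) }; // stride s = size_of::<u16>() = 2
let o = p.align_offset(8); // requested alignment a = 8

// `align_offset` is allowed to give up and return usize::MAX, but when it does
// return an offset, stepping by `o` elements lands on an 8-byte-aligned address.
if o != usize::MAX {
    assert_eq!((p as usize + o * mem::size_of::<u16>()) % 8, 0);
}
```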
+/// +/// # Examples +/// +/// ``` +/// use std::ptr; +/// +/// let five = 5; +/// let other_five = 5; +/// let five_ref = &five; +/// let same_five_ref = &five; +/// let other_five_ref = &other_five; +/// +/// assert!(five_ref == same_five_ref); +/// assert!(ptr::eq(five_ref, same_five_ref)); +/// +/// assert!(five_ref == other_five_ref); +/// assert!(!ptr::eq(five_ref, other_five_ref)); +/// ``` +/// +/// Slices are also compared by their length (fat pointers): +/// +/// ``` +/// let a = [1, 2, 3]; +/// assert!(std::ptr::eq(&a[..3], &a[..3])); +/// assert!(!std::ptr::eq(&a[..2], &a[..3])); +/// assert!(!std::ptr::eq(&a[0..2], &a[1..3])); +/// ``` +/// +/// Traits are also compared by their implementation: +/// +/// ``` +/// #[repr(transparent)] +/// struct Wrapper { member: i32 } +/// +/// trait Trait {} +/// impl Trait for Wrapper {} +/// impl Trait for i32 {} +/// +/// let wrapper = Wrapper { member: 10 }; +/// +/// // Pointers have equal addresses. +/// assert!(std::ptr::eq( +/// &wrapper as *const Wrapper as *const u8, +/// &wrapper.member as *const i32 as *const u8 +/// )); +/// +/// // Objects have equal addresses, but `Trait` has different implementations. +/// assert!(!std::ptr::eq( +/// &wrapper as &dyn Trait, +/// &wrapper.member as &dyn Trait, +/// )); +/// assert!(!std::ptr::eq( +/// &wrapper as &dyn Trait as *const dyn Trait, +/// &wrapper.member as &dyn Trait as *const dyn Trait, +/// )); +/// +/// // Converting the reference to a `*const u8` compares by address. +/// assert!(std::ptr::eq( +/// &wrapper as &dyn Trait as *const dyn Trait as *const u8, +/// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8, +/// )); +/// ``` +#[stable(feature = "ptr_eq", since = "1.17.0")] +#[inline] +pub fn eq(a: *const T, b: *const T) -> bool { + a == b +} + +/// Hash a raw pointer. +/// +/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly) +/// by its address rather than the value it points to +/// (which is what the `Hash for &T` implementation does). +/// +/// # Examples +/// +/// ``` +/// use std::collections::hash_map::DefaultHasher; +/// use std::hash::{Hash, Hasher}; +/// use std::ptr; +/// +/// let five = 5; +/// let five_ref = &five; +/// +/// let mut hasher = DefaultHasher::new(); +/// ptr::hash(five_ref, &mut hasher); +/// let actual = hasher.finish(); +/// +/// let mut hasher = DefaultHasher::new(); +/// (five_ref as *const i32).hash(&mut hasher); +/// let expected = hasher.finish(); +/// +/// assert_eq!(actual, expected); +/// ``` +#[stable(feature = "ptr_hash", since = "1.35.0")] +pub fn hash(hashee: *const T, into: &mut S) { + use crate::hash::Hash; + hashee.hash(into); +} + +// If this is a unary fn pointer, it adds a doc comment. +// Otherwise, it hides the docs entirely. +macro_rules! maybe_fnptr_doc { + (@ #[$meta:meta] $item:item) => { + #[doc(hidden)] + #[$meta] + $item + }; + ($a:ident @ #[$meta:meta] $item:item) => { + #[cfg_attr(not(bootstrap), doc(fake_variadic))] + #[doc = "This trait is implemented for function pointers with up to twelve arguments."] + #[$meta] + $item + }; + ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => { + #[doc(hidden)] + #[$meta] + $item + }; +} + +// FIXME(strict_provenance_magic): function pointers have buggy codegen that +// necessitates casting to a usize to get the backend to do the right thing. +// for now I will break AVR to silence *a billion* lints. We should probably +// have a proper "opaque function pointer type" to handle this kind of thing. 
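A short usage sketch (standard library only) of how `eq` and `hash` give identity-based rather than value-based semantics, which is what makes them suitable for keying caches by reference identity:

```
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::ptr;

let a = String::from("same contents");
let b = String::from("same contents");

// Value equality holds, identity equality does not.
assert_eq!(a, b);
assert!(ptr::eq(&a, &a));
assert!(!ptr::eq(&a, &b));

// `ptr::hash` is consistent with `ptr::eq`: hashing the same address twice
// produces the same hash, regardless of the pointed-to value.
let mut h1 = DefaultHasher::new();
let mut h2 = DefaultHasher::new();
ptr::hash(&a, &mut h1);
ptr::hash(&a, &mut h2);
assert_eq!(h1.finish(), h2.finish());
```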
+ +// Impls for function pointers +macro_rules! fnptr_impls_safety_abi { + ($FnTy: ty, $($Arg: ident),*) => { + maybe_fnptr_doc! { + $($Arg)* @ + #[stable(feature = "fnptr_impls", since = "1.4.0")] + impl PartialEq for $FnTy { + #[inline] + fn eq(&self, other: &Self) -> bool { + *self as usize == *other as usize + } + } + } + + maybe_fnptr_doc! { + $($Arg)* @ + #[stable(feature = "fnptr_impls", since = "1.4.0")] + impl Eq for $FnTy {} + } + + maybe_fnptr_doc! { + $($Arg)* @ + #[stable(feature = "fnptr_impls", since = "1.4.0")] + impl PartialOrd for $FnTy { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + (*self as usize).partial_cmp(&(*other as usize)) + } + } + } + + maybe_fnptr_doc! { + $($Arg)* @ + #[stable(feature = "fnptr_impls", since = "1.4.0")] + impl Ord for $FnTy { + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + (*self as usize).cmp(&(*other as usize)) + } + } + } + + maybe_fnptr_doc! { + $($Arg)* @ + #[stable(feature = "fnptr_impls", since = "1.4.0")] + impl hash::Hash for $FnTy { + fn hash(&self, state: &mut HH) { + state.write_usize(*self as usize) + } + } + } + + maybe_fnptr_doc! { + $($Arg)* @ + #[stable(feature = "fnptr_impls", since = "1.4.0")] + impl fmt::Pointer for $FnTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::pointer_fmt_inner(*self as usize, f) + } + } + } + + maybe_fnptr_doc! { + $($Arg)* @ + #[stable(feature = "fnptr_impls", since = "1.4.0")] + impl fmt::Debug for $FnTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::pointer_fmt_inner(*self as usize, f) + } + } + } + } +} + +macro_rules! fnptr_impls_args { + ($($Arg: ident),+) => { + fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } + }; + () => { + // No variadic functions with 0 parameters + fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, } + fnptr_impls_safety_abi! { extern "C" fn() -> Ret, } + fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, } + fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, } + }; +} + +fnptr_impls_args! {} +fnptr_impls_args! { T } +fnptr_impls_args! { A, B } +fnptr_impls_args! { A, B, C } +fnptr_impls_args! { A, B, C, D } +fnptr_impls_args! { A, B, C, D, E } +fnptr_impls_args! { A, B, C, D, E, F } +fnptr_impls_args! { A, B, C, D, E, F, G } +fnptr_impls_args! { A, B, C, D, E, F, G, H } +fnptr_impls_args! { A, B, C, D, E, F, G, H, I } +fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J } +fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K } +fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L } + +/// Create a `const` raw pointer to a place, without creating an intermediate reference. +/// +/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned +/// and points to initialized data. For cases where those requirements do not hold, +/// raw pointers should be used instead. However, `&expr as *const _` creates a reference +/// before casting it to a raw pointer, and that reference is subject to the same rules +/// as all other references. This macro can create a raw pointer *without* creating +/// a reference first. 
+/// +/// Note, however, that the `expr` in `addr_of!(expr)` is still subject to all +/// the usual rules. In particular, `addr_of!(*ptr::null())` is Undefined +/// Behavior because it dereferences a null pointer. +/// +/// # Example +/// +/// ``` +/// use std::ptr; +/// +/// #[repr(packed)] +/// struct Packed { +/// f1: u8, +/// f2: u16, +/// } +/// +/// let packed = Packed { f1: 1, f2: 2 }; +/// // `&packed.f2` would create an unaligned reference, and thus be Undefined Behavior! +/// let raw_f2 = ptr::addr_of!(packed.f2); +/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2); +/// ``` +/// +/// See [`addr_of_mut`] for how to create a pointer to unininitialized data. +/// Doing that with `addr_of` would not make much sense since one could only +/// read the data, and that would be Undefined Behavior. +#[stable(feature = "raw_ref_macros", since = "1.51.0")] +#[rustc_macro_transparency = "semitransparent"] +#[allow_internal_unstable(raw_ref_op)] +pub macro addr_of($place:expr) { + &raw const $place +} + +/// Create a `mut` raw pointer to a place, without creating an intermediate reference. +/// +/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned +/// and points to initialized data. For cases where those requirements do not hold, +/// raw pointers should be used instead. However, `&mut expr as *mut _` creates a reference +/// before casting it to a raw pointer, and that reference is subject to the same rules +/// as all other references. This macro can create a raw pointer *without* creating +/// a reference first. +/// +/// Note, however, that the `expr` in `addr_of_mut!(expr)` is still subject to all +/// the usual rules. In particular, `addr_of_mut!(*ptr::null_mut())` is Undefined +/// Behavior because it dereferences a null pointer. +/// +/// # Examples +/// +/// **Creating a pointer to unaligned data:** +/// +/// ``` +/// use std::ptr; +/// +/// #[repr(packed)] +/// struct Packed { +/// f1: u8, +/// f2: u16, +/// } +/// +/// let mut packed = Packed { f1: 1, f2: 2 }; +/// // `&mut packed.f2` would create an unaligned reference, and thus be Undefined Behavior! +/// let raw_f2 = ptr::addr_of_mut!(packed.f2); +/// unsafe { raw_f2.write_unaligned(42); } +/// assert_eq!({packed.f2}, 42); // `{...}` forces copying the field instead of creating a reference. +/// ``` +/// +/// **Creating a pointer to uninitialized data:** +/// +/// ```rust +/// use std::{ptr, mem::MaybeUninit}; +/// +/// struct Demo { +/// field: bool, +/// } +/// +/// let mut uninit = MaybeUninit::::uninit(); +/// // `&uninit.as_mut().field` would create a reference to an uninitialized `bool`, +/// // and thus be Undefined Behavior! +/// let f1_ptr = unsafe { ptr::addr_of_mut!((*uninit.as_mut_ptr()).field) }; +/// unsafe { f1_ptr.write(true); } +/// let init = unsafe { uninit.assume_init() }; +/// ``` +#[stable(feature = "raw_ref_macros", since = "1.51.0")] +#[rustc_macro_transparency = "semitransparent"] +#[allow_internal_unstable(raw_ref_op)] +pub macro addr_of_mut($place:expr) { + &raw mut $place +} diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs new file mode 100644 index 000000000..fc3dd2a9b --- /dev/null +++ b/library/core/src/ptr/mut_ptr.rs @@ -0,0 +1,1973 @@ +use super::*; +use crate::cmp::Ordering::{self, Equal, Greater, Less}; +use crate::intrinsics; +use crate::slice::{self, SliceIndex}; + +impl *mut T { + /// Returns `true` if the pointer is null. 
+ /// + /// Note that unsized types have many possible null pointers, as only the + /// raw data pointer is considered, not their length, vtable, etc. + /// Therefore, two pointers that are null may still not compare equal to + /// each other. + /// + /// ## Behavior during const evaluation + /// + /// When this function is used during const evaluation, it may return `false` for pointers + /// that turn out to be null at runtime. Specifically, when a pointer to some memory + /// is offset beyond its bounds in such a way that the resulting pointer is null, + /// the function will still return `false`. There is no way for CTFE to know + /// the absolute position of that memory, so we cannot tell if the pointer is + /// null or not. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = [1, 2, 3]; + /// let ptr: *mut u32 = s.as_mut_ptr(); + /// assert!(!ptr.is_null()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")] + #[inline] + pub const fn is_null(self) -> bool { + // Compare via a cast to a thin pointer, so fat pointers are only + // considering their "data" part for null-ness. + (self as *mut u8).guaranteed_eq(null_mut()) + } + + /// Casts to a pointer of another type. + #[stable(feature = "ptr_cast", since = "1.38.0")] + #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")] + #[inline(always)] + pub const fn cast(self) -> *mut U { + self as _ + } + + /// Use the pointer value in a new pointer of another type. + /// + /// In case `val` is a (fat) pointer to an unsized type, this operation + /// will ignore the pointer part, whereas for (thin) pointers to sized + /// types, this has the same effect as a simple cast. + /// + /// The resulting pointer will have provenance of `self`, i.e., for a fat + /// pointer, this operation is semantically the same as creating a new + /// fat pointer with the data pointer value of `self` but the metadata of + /// `val`. + /// + /// # Examples + /// + /// This function is primarily useful for allowing byte-wise pointer + /// arithmetic on potentially fat pointers: + /// + /// ``` + /// #![feature(set_ptr_value)] + /// # use core::fmt::Debug; + /// let mut arr: [i32; 3] = [1, 2, 3]; + /// let mut ptr = arr.as_mut_ptr() as *mut dyn Debug; + /// let thin = ptr as *mut u8; + /// unsafe { + /// ptr = thin.add(8).with_metadata_of(ptr); + /// # assert_eq!(*(ptr as *mut i32), 3); + /// println!("{:?}", &*ptr); // will print "3" + /// } + /// ``` + #[unstable(feature = "set_ptr_value", issue = "75091")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[inline] + pub fn with_metadata_of(self, mut val: *mut U) -> *mut U + where + U: ?Sized, + { + let target = &mut val as *mut *mut U as *mut *mut u8; + // SAFETY: In case of a thin pointer, this operations is identical + // to a simple assignment. In case of a fat pointer, with the current + // fat pointer layout implementation, the first field of such a + // pointer is always the data pointer, which is likewise assigned. + unsafe { *target = self as *mut u8 }; + val + } + + /// Changes constness without changing the type. + /// + /// This is a bit safer than `as` because it wouldn't silently change the type if the code is + /// refactored. + /// + /// While not strictly required (`*mut T` coerces to `*const T`), this is provided for symmetry + /// with [`cast_mut`] on `*const T` and may have documentation value if used instead of implicit + /// coercion. 
+ /// + /// [`cast_mut`]: #method.cast_mut + #[unstable(feature = "ptr_const_cast", issue = "92675")] + #[rustc_const_unstable(feature = "ptr_const_cast", issue = "92675")] + pub const fn cast_const(self) -> *const T { + self as _ + } + + /// Casts a pointer to its raw bits. + /// + /// This is equivalent to `as usize`, but is more specific to enhance readability. + /// The inverse method is [`from_bits`](#method.from_bits-1). + /// + /// In particular, `*p as usize` and `p as usize` will both compile for + /// pointers to numeric types but do very different things, so using this + /// helps emphasize that reading the bits was intentional. + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_to_from_bits)] + /// let mut array = [13, 42]; + /// let mut it = array.iter_mut(); + /// let p0: *mut i32 = it.next().unwrap(); + /// assert_eq!(<*mut _>::from_bits(p0.to_bits()), p0); + /// let p1: *mut i32 = it.next().unwrap(); + /// assert_eq!(p1.to_bits() - p0.to_bits(), 4); + /// ``` + #[unstable(feature = "ptr_to_from_bits", issue = "91126")] + pub fn to_bits(self) -> usize + where + T: Sized, + { + self as usize + } + + /// Creates a pointer from its raw bits. + /// + /// This is equivalent to `as *mut T`, but is more specific to enhance readability. + /// The inverse method is [`to_bits`](#method.to_bits-1). + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_to_from_bits)] + /// use std::ptr::NonNull; + /// let dangling: *mut u8 = NonNull::dangling().as_ptr(); + /// assert_eq!(<*mut u8>::from_bits(1), dangling); + /// ``` + #[unstable(feature = "ptr_to_from_bits", issue = "91126")] + pub fn from_bits(bits: usize) -> Self + where + T: Sized, + { + bits as Self + } + + /// Gets the "address" portion of the pointer. + /// + /// This is similar to `self as usize`, which semantically discards *provenance* and + /// *address-space* information. However, unlike `self as usize`, casting the returned address + /// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To + /// properly restore the lost information and obtain a dereferencable pointer, use + /// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr]. + /// + /// If using those APIs is not possible because there is no way to preserve a pointer with the + /// required provenance, use [`expose_addr`][pointer::expose_addr] and + /// [`from_exposed_addr_mut`][from_exposed_addr_mut] instead. However, note that this makes + /// your code less portable and less amenable to tools that check for compliance with the Rust + /// memory model. + /// + /// On most platforms this will produce a value with the same bytes as the original + /// pointer, because all the bytes are dedicated to describing the address. + /// Platforms which need to store additional information in the pointer may + /// perform a change of representation to produce a value containing only the address + /// portion of the pointer. What that means is up to the platform to define. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such + /// might change in the future (including possibly weakening this so it becomes wholly + /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn addr(self) -> usize + where + T: Sized, + { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. 
+ // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the + // provenance). + unsafe { mem::transmute(self) } + } + + /// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future + /// use in [`from_exposed_addr`][]. + /// + /// This is equivalent to `self as usize`, which semantically discards *provenance* and + /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit + /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can + /// later call [`from_exposed_addr_mut`][] to reconstitute the original pointer including its + /// provenance. (Reconstructing address space information, if required, is your responsibility.) + /// + /// Using this method means that code is *not* following Strict Provenance rules. Supporting + /// [`from_exposed_addr_mut`][] complicates specification and reasoning and may not be supported + /// by tools that help you to stay conformant with the Rust memory model, so it is recommended + /// to use [`addr`][pointer::addr] wherever possible. + /// + /// On most platforms this will produce a value with the same bytes as the original pointer, + /// because all the bytes are dedicated to describing the address. Platforms which need to store + /// additional information in the pointer may not support this operation, since the 'expose' + /// side-effect which is required for [`from_exposed_addr_mut`][] to work is typically not + /// available. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, see the + /// [module documentation][crate::ptr] for details. + /// + /// [`from_exposed_addr_mut`]: from_exposed_addr_mut + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn expose_addr(self) -> usize + where + T: Sized, + { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + self as usize + } + + /// Creates a new pointer with the given address. + /// + /// This performs the same operation as an `addr as ptr` cast, but copies + /// the *address-space* and *provenance* of `self` to the new pointer. + /// This allows us to dynamically preserve and propagate this important + /// information in a way that is otherwise impossible with a unary cast. + /// + /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset + /// `self` to the given address, and therefore has all the same capabilities and restrictions. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, + /// see the [module documentation][crate::ptr] for details. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn with_addr(self, addr: usize) -> Self + where + T: Sized, + { + // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic. + // + // In the mean-time, this operation is defined to be "as if" it was + // a wrapping_offset, so we can emulate it as such. This should properly + // restore pointer provenance even under today's compiler. + let self_addr = self.addr() as isize; + let dest_addr = addr as isize; + let offset = dest_addr.wrapping_sub(self_addr); + + // This is the canonical desugarring of this operation + self.cast::().wrapping_offset(offset).cast::() + } + + /// Creates a new pointer by mapping `self`'s address to a new one. + /// + /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details. 
+ /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, + /// see the [module documentation][crate::ptr] for details. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self + where + T: Sized, + { + self.with_addr(f(self.addr())) + } + + /// Decompose a (possibly wide) pointer into its address and metadata components. + /// + /// The pointer can be later reconstructed with [`from_raw_parts_mut`]. + #[unstable(feature = "ptr_metadata", issue = "81513")] + #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] + #[inline] + pub const fn to_raw_parts(self) -> (*mut (), ::Metadata) { + (self.cast(), super::metadata(self)) + } + + /// Returns `None` if the pointer is null, or else returns a shared reference to + /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`] + /// must be used instead. + /// + /// For the mutable counterpart see [`as_mut`]. + /// + /// [`as_uninit_ref`]: #method.as_uninit_ref-1 + /// [`as_mut`]: #method.as_mut + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * The pointer must point to an initialized instance of `T`. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! + /// (The part about being initialized is not yet fully decided, but until + /// it is, the only safe approach is to ensure that they are indeed initialized.) + /// + /// [the module documentation]: crate::ptr#safety + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let ptr: *mut u8 = &mut 10u8 as *mut u8; + /// + /// unsafe { + /// if let Some(val_back) = ptr.as_ref() { + /// println!("We got back the value: {val_back}!"); + /// } + /// } + /// ``` + /// + /// # Null-unchecked version + /// + /// If you are sure the pointer can never be null and are looking for some kind of + /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can + /// dereference the pointer directly. + /// + /// ``` + /// let ptr: *mut u8 = &mut 10u8 as *mut u8; + /// + /// unsafe { + /// let val_back = &*ptr; + /// println!("We got back the value: {val_back}!"); + /// } + /// ``` + #[stable(feature = "ptr_as_ref", since = "1.9.0")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + #[inline] + pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> { + // SAFETY: the caller must guarantee that `self` is valid for a + // reference if it isn't null. + if self.is_null() { None } else { unsafe { Some(&*self) } } + } + + /// Returns `None` if the pointer is null, or else returns a shared reference to + /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require + /// that the value has to be initialized. + /// + /// For the mutable counterpart see [`as_uninit_mut`]. 
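`to_raw_parts`, defined above, splits a possibly-wide pointer into its thin data pointer and metadata; for a slice pointer the metadata is the length. A sketch gated on the unstable `ptr_metadata` feature, with `from_raw_parts_mut` as the matching constructor:

```
#![feature(ptr_metadata)]
use std::ptr;

let mut v = [1i32, 2, 3];
let wide: *mut [i32] = &mut v[..];

let (data, len) = wide.to_raw_parts();
assert_eq!(len, 3);

// The pieces reassemble into an equivalent wide pointer.
let rebuilt: *mut [i32] = ptr::from_raw_parts_mut(data, len);
assert_eq!(unsafe { &*rebuilt }, &[1, 2, 3]);
```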
+ /// + /// [`as_ref`]: #method.as_ref-1 + /// [`as_uninit_mut`]: #method.as_uninit_mut + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! + /// + /// [the module documentation]: crate::ptr#safety + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(ptr_as_uninit)] + /// + /// let ptr: *mut u8 = &mut 10u8 as *mut u8; + /// + /// unsafe { + /// if let Some(val_back) = ptr.as_uninit_ref() { + /// println!("We got back the value: {}!", val_back.assume_init()); + /// } + /// } + /// ``` + #[inline] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit> + where + T: Sized, + { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit) }) } + } + + /// Calculates the offset from a pointer. + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of the same [allocated object]. + /// + /// * The computed offset, **in bytes**, cannot overflow an `isize`. + /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_offset`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. 
+ /// + /// [`wrapping_offset`]: #method.wrapping_offset + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = [1, 2, 3]; + /// let ptr: *mut u32 = s.as_mut_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.offset(1)); + /// println!("{}", *ptr.offset(2)); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn offset(self, count: isize) -> *mut T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `offset`. + // The obtained pointer is valid for writes since the caller must + // guarantee that it points to the same allocated object as `self`. + unsafe { intrinsics::offset(self, count) as *mut T } + } + + /// Calculates the offset from a pointer in bytes. + /// + /// `count` is in units of **bytes**. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [offset][pointer::offset] on it. See that method for documentation + /// and safety requirements. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_offset(self, count: isize) -> Self { + // SAFETY: the caller must uphold the safety contract for `offset`. + let this = unsafe { self.cast::().offset(count).cast::<()>() }; + from_raw_parts_mut::(this, metadata(self)) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// This operation itself is always safe, but using the resulting pointer is not. + /// + /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not + /// be used to read or write other allocated objects. + /// + /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z` + /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still + /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless + /// `x` and `y` point into the same allocated object. + /// + /// Compared to [`offset`], this method basically delays the requirement of staying within the + /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object + /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a + /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`] + /// can be optimized better and is thus preferable in performance-sensitive code. + /// + /// The delayed check only considers the value of the pointer that was dereferenced, not the + /// intermediate values used during the computation of the final result. For example, + /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. 
In other + /// words, leaving the allocated object and then re-entering it later is permitted. + /// + /// [`offset`]: #method.offset + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // Iterate using a raw pointer in increments of two elements + /// let mut data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *mut u8 = data.as_mut_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_offset(6); + /// + /// while ptr != end_rounded_up { + /// unsafe { + /// *ptr = 0; + /// } + /// ptr = ptr.wrapping_offset(step); + /// } + /// assert_eq!(&data, &[0, 2, 0, 4, 0]); + /// ``` + #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + pub const fn wrapping_offset(self, count: isize) -> *mut T + where + T: Sized, + { + // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called. + unsafe { intrinsics::arith_offset(self, count) as *mut T } + } + + /// Calculates the offset from a pointer in bytes using wrapping arithmetic. + /// + /// `count` is in units of **bytes**. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method + /// for documentation. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + pub const fn wrapping_byte_offset(self, count: isize) -> Self { + from_raw_parts_mut::( + self.cast::().wrapping_offset(count).cast::<()>(), + metadata(self), + ) + } + + /// Returns `None` if the pointer is null, or else returns a unique reference to + /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_mut`] + /// must be used instead. + /// + /// For the shared counterpart see [`as_ref`]. + /// + /// [`as_uninit_mut`]: #method.as_uninit_mut + /// [`as_ref`]: #method.as_ref-1 + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * The pointer must point to an initialized instance of `T`. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get accessed (read or written) through any other pointer. + /// + /// This applies even if the result of this method is unused! + /// (The part about being initialized is not yet fully decided, but until + /// it is, the only safe approach is to ensure that they are indeed initialized.) + /// + /// [the module documentation]: crate::ptr#safety + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = [1, 2, 3]; + /// let ptr: *mut u32 = s.as_mut_ptr(); + /// let first_value = unsafe { ptr.as_mut().unwrap() }; + /// *first_value = 4; + /// # assert_eq!(s, [4, 2, 3]); + /// println!("{s:?}"); // It'll print: "[4, 2, 3]". 
+ /// ``` + /// + /// # Null-unchecked version + /// + /// If you are sure the pointer can never be null and are looking for some kind of + /// `as_mut_unchecked` that returns the `&mut T` instead of `Option<&mut T>`, know that + /// you can dereference the pointer directly. + /// + /// ``` + /// let mut s = [1, 2, 3]; + /// let ptr: *mut u32 = s.as_mut_ptr(); + /// let first_value = unsafe { &mut *ptr }; + /// *first_value = 4; + /// # assert_eq!(s, [4, 2, 3]); + /// println!("{s:?}"); // It'll print: "[4, 2, 3]". + /// ``` + #[stable(feature = "ptr_as_ref", since = "1.9.0")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + #[inline] + pub const unsafe fn as_mut<'a>(self) -> Option<&'a mut T> { + // SAFETY: the caller must guarantee that `self` is be valid for + // a mutable reference if it isn't null. + if self.is_null() { None } else { unsafe { Some(&mut *self) } } + } + + /// Returns `None` if the pointer is null, or else returns a unique reference to + /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require + /// that the value has to be initialized. + /// + /// For the shared counterpart see [`as_uninit_ref`]. + /// + /// [`as_mut`]: #method.as_mut + /// [`as_uninit_ref`]: #method.as_uninit_ref-1 + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get accessed (read or written) through any other pointer. + /// + /// This applies even if the result of this method is unused! + /// + /// [the module documentation]: crate::ptr#safety + #[inline] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_mut<'a>(self) -> Option<&'a mut MaybeUninit> + where + T: Sized, + { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + if self.is_null() { None } else { Some(unsafe { &mut *(self as *mut MaybeUninit) }) } + } + + /// Returns whether two pointers are guaranteed to be equal. + /// + /// At runtime this function behaves like `self == other`. + /// However, in some contexts (e.g., compile-time evaluation), + /// it is not always possible to determine equality of two pointers, so this function may + /// spuriously return `false` for pointers that later actually turn out to be equal. + /// But when it returns `true`, the pointers are guaranteed to be equal. + /// + /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer + /// comparisons for which both functions return `false`. + /// + /// [`guaranteed_ne`]: #method.guaranteed_ne + /// + /// The return value may change depending on the compiler version and unsafe code might not + /// rely on the result of this function for soundness. It is suggested to only use this function + /// for performance optimizations where spurious `false` return values by this function do not + /// affect the outcome, but just the performance. 
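As a hedged sketch of the intended usage pattern (nightly-only, since `guaranteed_eq` is still unstable here; `copy_unless_same` is a hypothetical helper, not from this patch): the answer may only ever gate a fast path, never correctness, because a spurious `false` must still lead to a correct result.

```rust
#![feature(const_raw_ptr_comparison)]

/// Skips the copy when the two pointers are provably identical; a spurious
/// `false` only costs a redundant (but still correct) overlapping copy.
const unsafe fn copy_unless_same(src: *mut u8, dst: *mut u8, len: usize) {
    if !src.guaranteed_eq(dst) {
        // SAFETY (caller): both regions are valid for `len` bytes; `copy` tolerates overlap.
        core::ptr::copy(src, dst, len);
    }
}

fn main() {
    let mut buf = *b"abcdef";
    let p = buf.as_mut_ptr();
    unsafe {
        copy_unless_same(p, p, buf.len()); // provably equal: copy may be skipped
        copy_unless_same(p, p.add(1), 3);  // overlapping copy, still well-defined
    }
    assert_eq!(&buf, b"aabcef");
}
```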
+ /// The consequences of using this method to make runtime and compile-time code behave + /// differently have not been explored. This method should not be used to introduce such + /// differences, and it should also not be stabilized before we have a better understanding + /// of this issue. + #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[inline] + pub const fn guaranteed_eq(self, other: *mut T) -> bool + where + T: Sized, + { + intrinsics::ptr_guaranteed_eq(self as *const _, other as *const _) + } + + /// Returns whether two pointers are guaranteed to be unequal. + /// + /// At runtime this function behaves like `self != other`. + /// However, in some contexts (e.g., compile-time evaluation), + /// it is not always possible to determine the inequality of two pointers, so this function may + /// spuriously return `false` for pointers that later actually turn out to be unequal. + /// But when it returns `true`, the pointers are guaranteed to be unequal. + /// + /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer + /// comparisons for which both functions return `false`. + /// + /// [`guaranteed_eq`]: #method.guaranteed_eq + /// + /// The return value may change depending on the compiler version and unsafe code might not + /// rely on the result of this function for soundness. It is suggested to only use this function + /// for performance optimizations where spurious `false` return values by this function do not + /// affect the outcome, but just the performance. + /// The consequences of using this method to make runtime and compile-time code behave + /// differently have not been explored. This method should not be used to introduce such + /// differences, and it should also not be stabilized before we have a better understanding + /// of this issue. + #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] + #[inline] + pub const unsafe fn guaranteed_ne(self, other: *mut T) -> bool + where + T: Sized, + { + intrinsics::ptr_guaranteed_ne(self as *const _, other as *const _) + } + + /// Calculates the distance between two pointers. The returned value is in + /// units of T: the distance in bytes divided by `mem::size_of::()`. + /// + /// This function is the inverse of [`offset`]. + /// + /// [`offset`]: #method.offset-1 + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and other pointer must be either in bounds or one + /// byte past the end of the same [allocated object]. + /// + /// * Both pointers must be *derived from* a pointer to the same object. + /// (See below for an example.) + /// + /// * The distance between the pointers, in bytes, must be an exact multiple + /// of the size of `T`. + /// + /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. + /// + /// * The distance being in bounds cannot rely on "wrapping around" the address space. + /// + /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the + /// address space, so two pointers within some value of any Rust type `T` will always satisfy + /// the last two conditions. The standard library also generally ensures that allocations + /// never reach a size where an offset is a concern. 
For instance, `Vec` and `Box` ensure they + /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())` + /// always satisfies the last two conditions. + /// + /// Most platforms fundamentally can't even construct such a large allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on + /// such large allocations either.) + /// + /// [`add`]: #method.add + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut a = [0; 5]; + /// let ptr1: *mut i32 = &mut a[1]; + /// let ptr2: *mut i32 = &mut a[3]; + /// unsafe { + /// assert_eq!(ptr2.offset_from(ptr1), 2); + /// assert_eq!(ptr1.offset_from(ptr2), -2); + /// assert_eq!(ptr1.offset(2), ptr2); + /// assert_eq!(ptr2.offset(-2), ptr1); + /// } + /// ``` + /// + /// *Incorrect* usage: + /// + /// ```rust,no_run + /// let ptr1 = Box::into_raw(Box::new(0u8)); + /// let ptr2 = Box::into_raw(Box::new(1u8)); + /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize); + /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1. + /// let ptr2_other = (ptr1 as *mut u8).wrapping_offset(diff); + /// assert_eq!(ptr2 as usize, ptr2_other as usize); + /// // Since ptr2_other and ptr2 are derived from pointers to different objects, + /// // computing their offset is undefined behavior, even though + /// // they point to the same address! + /// unsafe { + /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior + /// } + /// ``` + #[stable(feature = "ptr_offset_from", since = "1.47.0")] + #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "92980")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn offset_from(self, origin: *const T) -> isize + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `offset_from`. + unsafe { (self as *const T).offset_from(origin) } + } + + /// Calculates the distance between two pointers. The returned value is in + /// units of **bytes**. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [offset_from][pointer::offset_from] on it. See that method for + /// documentation and safety requirements. + /// + /// For non-`Sized` pointees this operation considers only the data pointers, + /// ignoring the metadata. + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize { + // SAFETY: the caller must uphold the safety contract for `offset_from`. 
+ unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
+ }
+
+ /// Calculates the distance between two pointers, *where it's known that
+ /// `self` is equal to or greater than `origin`*. The returned value is in
+ /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+ ///
+ /// This computes the same value that [`offset_from`](#method.offset_from)
+ /// would compute, but with the added precondition that the offset is
+ /// guaranteed to be non-negative. This method is equivalent to
+ /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
+ /// but it provides slightly more information to the optimizer, which can
+ /// sometimes allow it to optimize slightly better with some backends.
+ ///
+ /// This method can be thought of as recovering the `count` that was passed
+ /// to [`add`](#method.add) (or, with the parameters in the other order,
+ /// to [`sub`](#method.sub)). The following are all equivalent, assuming
+ /// that their safety preconditions are met:
+ /// ```rust
+ /// # #![feature(ptr_sub_ptr)]
+ /// # unsafe fn blah(ptr: *mut i32, origin: *mut i32, count: usize) -> bool {
+ /// ptr.sub_ptr(origin) == count
+ /// # &&
+ /// origin.add(count) == ptr
+ /// # &&
+ /// ptr.sub(count) == origin
+ /// # }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// - The distance between the pointers must be non-negative (`self >= origin`)
+ ///
+ /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
+ /// apply to this method as well; see it for the full details.
+ ///
+ /// Importantly, despite the return type of this method being able to represent
+ /// a larger offset, it's still *not permitted* to pass pointers which differ
+ /// by more than `isize::MAX` *bytes*. As such, the result of this method will
+ /// always be less than or equal to `isize::MAX as usize`.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ptr_sub_ptr)]
+ ///
+ /// let mut a = [0; 5];
+ /// let p: *mut i32 = a.as_mut_ptr();
+ /// unsafe {
+ /// let ptr1: *mut i32 = p.add(1);
+ /// let ptr2: *mut i32 = p.add(3);
+ ///
+ /// assert_eq!(ptr2.sub_ptr(ptr1), 2);
+ /// assert_eq!(ptr1.add(2), ptr2);
+ /// assert_eq!(ptr2.sub(2), ptr1);
+ /// assert_eq!(ptr2.sub_ptr(ptr2), 0);
+ /// }
+ ///
+ /// // This would be incorrect, as the pointers are not correctly ordered:
+ /// // ptr1.sub_ptr(ptr2)
+ /// ```
+ #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
+ #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
+ #[inline]
+ #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+ pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `sub_ptr`.
+ unsafe { (self as *const T).sub_ptr(origin) }
+ }
+
+ /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same [allocated object].
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space.
That is, the infinite-precision sum must fit in a `usize`. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_add`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// [`wrapping_add`]: #method.wrapping_add + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s: &str = "123"; + /// let ptr: *const u8 = s.as_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.add(1) as char); + /// println!("{}", *ptr.add(2) as char); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn add(self, count: usize) -> Self + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `offset`. + unsafe { self.offset(count as isize) } + } + + /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`). + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [add][pointer::add] on it. See that method for documentation + /// and safety requirements. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_add(self, count: usize) -> Self { + // SAFETY: the caller must uphold the safety contract for `add`. + let this = unsafe { self.cast::().add(count).cast::<()>() }; + from_raw_parts_mut::(this, metadata(self)) + } + + /// Calculates the offset from a pointer (convenience for + /// `.offset((count as isize).wrapping_neg())`). + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and resulting pointer must be either in bounds or one + /// byte past the end of the same [allocated object]. + /// + /// * The computed offset cannot exceed `isize::MAX` **bytes**. 
+ /// + /// * The offset being in bounds cannot rely on "wrapping around" the address + /// space. That is, the infinite-precision sum must fit in a usize. + /// + /// The compiler and standard library generally tries to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_sub`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// [`wrapping_sub`]: #method.wrapping_sub + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s: &str = "123"; + /// + /// unsafe { + /// let end: *const u8 = s.as_ptr().add(3); + /// println!("{}", *end.sub(1) as char); + /// println!("{}", *end.sub(2) as char); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn sub(self, count: usize) -> Self + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `offset`. + unsafe { self.offset((count as isize).wrapping_neg()) } + } + + /// Calculates the offset from a pointer in bytes (convenience for + /// `.byte_offset((count as isize).wrapping_neg())`). + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [sub][pointer::sub] on it. See that method for documentation + /// and safety requirements. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn byte_sub(self, count: usize) -> Self { + // SAFETY: the caller must uphold the safety contract for `sub`. + let this = unsafe { self.cast::().sub(count).cast::<()>() }; + from_raw_parts_mut::(this, metadata(self)) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset(count as isize)`) + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// This operation itself is always safe, but using the resulting pointer is not. 
+ /// + /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not + /// be used to read or write other allocated objects. + /// + /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z` + /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still + /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless + /// `x` and `y` point into the same allocated object. + /// + /// Compared to [`add`], this method basically delays the requirement of staying within the + /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object + /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a + /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`] + /// can be optimized better and is thus preferable in performance-sensitive code. + /// + /// The delayed check only considers the value of the pointer that was dereferenced, not the + /// intermediate values used during the computation of the final result. For example, + /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the + /// allocated object and then re-entering it later is permitted. + /// + /// [`add`]: #method.add + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // Iterate using a raw pointer in increments of two elements + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_add(6); + /// + /// // This loop prints "1, 3, 5, " + /// while ptr != end_rounded_up { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_add(step); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline(always)] + pub const fn wrapping_add(self, count: usize) -> Self + where + T: Sized, + { + self.wrapping_offset(count as isize) + } + + /// Calculates the offset from a pointer in bytes using wrapping arithmetic. + /// (convenience for `.wrapping_byte_offset(count as isize)`) + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + pub const fn wrapping_byte_add(self, count: usize) -> Self { + from_raw_parts_mut::(self.cast::().wrapping_add(count).cast::<()>(), metadata(self)) + } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`) + /// + /// `count` is in units of T; e.g., a `count` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// This operation itself is always safe, but using the resulting pointer is not. 
+ /// + /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not + /// be used to read or write other allocated objects. + /// + /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z` + /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still + /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless + /// `x` and `y` point into the same allocated object. + /// + /// Compared to [`sub`], this method basically delays the requirement of staying within the + /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object + /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a + /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`] + /// can be optimized better and is thus preferable in performance-sensitive code. + /// + /// The delayed check only considers the value of the pointer that was dereferenced, not the + /// intermediate values used during the computation of the final result. For example, + /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the + /// allocated object and then re-entering it later is permitted. + /// + /// [`sub`]: #method.sub + /// [allocated object]: crate::ptr#allocated-object + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // Iterate using a raw pointer in increments of two elements (backwards) + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let start_rounded_down = ptr.wrapping_sub(2); + /// ptr = ptr.wrapping_add(4); + /// let step = 2; + /// // This loop prints "5, 3, 1, " + /// while ptr != start_rounded_down { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_sub(step); + /// } + /// ``` + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[must_use = "returns a new pointer rather than modifying its argument"] + #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")] + #[inline] + pub const fn wrapping_sub(self, count: usize) -> Self + where + T: Sized, + { + self.wrapping_offset((count as isize).wrapping_neg()) + } + + /// Calculates the offset from a pointer in bytes using wrapping arithmetic. + /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`) + /// + /// `count` is in units of bytes. + /// + /// This is purely a convenience for casting to a `u8` pointer and + /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation. + /// + /// For non-`Sized` pointees this operation changes only the data pointer, + /// leaving the metadata untouched. + #[must_use] + #[inline(always)] + #[unstable(feature = "pointer_byte_offsets", issue = "96283")] + #[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")] + pub const fn wrapping_byte_sub(self, count: usize) -> Self { + from_raw_parts_mut::(self.cast::().wrapping_sub(count).cast::<()>(), metadata(self)) + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// See [`ptr::read`] for safety concerns and examples. 
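A short, hedged illustration of the `read`/`write` pairing that these convenience methods forward to (stable APIs, editorial example only): `read` duplicates the value without dropping it, and `write` overwrites without dropping the old contents, so together they can move a non-`Copy` value through a raw pointer.

```rust
fn main() {
    let mut slot = String::from("hello");
    let p: *mut String = &mut slot;

    unsafe {
        // Move the String out; `slot` must now be treated as logically uninitialized.
        let taken = p.read();
        assert_eq!(taken, "hello");

        // Restore a value without dropping the (already moved-out) old one,
        // so that `slot` can be used and dropped normally again.
        p.write(String::from("world"));
    }

    assert_eq!(slot, "world");
}
```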
+ /// + /// [`ptr::read`]: crate::ptr::read() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn read(self) -> T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for ``. + unsafe { read(self) } + } + + /// Performs a volatile read of the value from `self` without moving it. This + /// leaves the memory in `self` unchanged. + /// + /// Volatile operations are intended to act on I/O memory, and are guaranteed + /// to not be elided or reordered by the compiler across other volatile + /// operations. + /// + /// See [`ptr::read_volatile`] for safety concerns and examples. + /// + /// [`ptr::read_volatile`]: crate::ptr::read_volatile() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub unsafe fn read_volatile(self) -> T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `read_volatile`. + unsafe { read_volatile(self) } + } + + /// Reads the value from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// Unlike `read`, the pointer may be unaligned. + /// + /// See [`ptr::read_unaligned`] for safety concerns and examples. + /// + /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn read_unaligned(self) -> T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `read_unaligned`. + unsafe { read_unaligned(self) } + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may overlap. + /// + /// NOTE: this has the *same* argument order as [`ptr::copy`]. + /// + /// See [`ptr::copy`] for safety concerns and examples. + /// + /// [`ptr::copy`]: crate::ptr::copy() + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn copy_to(self, dest: *mut T, count: usize) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `copy`. + unsafe { copy(self, dest, count) } + } + + /// Copies `count * size_of` bytes from `self` to `dest`. The source + /// and destination may *not* overlap. + /// + /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. + /// + /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. + /// + /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping() + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`. 
+ unsafe { copy_nonoverlapping(self, dest, count) } + } + + /// Copies `count * size_of` bytes from `src` to `self`. The source + /// and destination may overlap. + /// + /// NOTE: this has the *opposite* argument order of [`ptr::copy`]. + /// + /// See [`ptr::copy`] for safety concerns and examples. + /// + /// [`ptr::copy`]: crate::ptr::copy() + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn copy_from(self, src: *const T, count: usize) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `copy`. + unsafe { copy(src, self, count) } + } + + /// Copies `count * size_of` bytes from `src` to `self`. The source + /// and destination may *not* overlap. + /// + /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`]. + /// + /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. + /// + /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping() + #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")] + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`. + unsafe { copy_nonoverlapping(src, self, count) } + } + + /// Executes the destructor (if any) of the pointed-to value. + /// + /// See [`ptr::drop_in_place`] for safety concerns and examples. + /// + /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + pub unsafe fn drop_in_place(self) { + // SAFETY: the caller must uphold the safety contract for `drop_in_place`. + unsafe { drop_in_place(self) } + } + + /// Overwrites a memory location with the given value without reading or + /// dropping the old value. + /// + /// See [`ptr::write`] for safety concerns and examples. + /// + /// [`ptr::write`]: crate::ptr::write() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn write(self, val: T) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `write`. + unsafe { write(self, val) } + } + + /// Invokes memset on the specified pointer, setting `count * size_of::()` + /// bytes of memory starting at `self` to `val`. + /// + /// See [`ptr::write_bytes`] for safety concerns and examples. + /// + /// [`ptr::write_bytes`]: crate::ptr::write_bytes() + #[doc(alias = "memset")] + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn write_bytes(self, val: u8, count: usize) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `write_bytes`. 
+ unsafe { write_bytes(self, val, count) } + } + + /// Performs a volatile write of a memory location with the given value without + /// reading or dropping the old value. + /// + /// Volatile operations are intended to act on I/O memory, and are guaranteed + /// to not be elided or reordered by the compiler across other volatile + /// operations. + /// + /// See [`ptr::write_volatile`] for safety concerns and examples. + /// + /// [`ptr::write_volatile`]: crate::ptr::write_volatile() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub unsafe fn write_volatile(self, val: T) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `write_volatile`. + unsafe { write_volatile(self, val) } + } + + /// Overwrites a memory location with the given value without reading or + /// dropping the old value. + /// + /// Unlike `write`, the pointer may be unaligned. + /// + /// See [`ptr::write_unaligned`] for safety concerns and examples. + /// + /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")] + #[inline(always)] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub const unsafe fn write_unaligned(self, val: T) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `write_unaligned`. + unsafe { write_unaligned(self, val) } + } + + /// Replaces the value at `self` with `src`, returning the old + /// value, without dropping either. + /// + /// See [`ptr::replace`] for safety concerns and examples. + /// + /// [`ptr::replace`]: crate::ptr::replace() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[inline(always)] + pub unsafe fn replace(self, src: T) -> T + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `replace`. + unsafe { replace(self, src) } + } + + /// Swaps the values at two mutable locations of the same type, without + /// deinitializing either. They may overlap, unlike `mem::swap` which is + /// otherwise equivalent. + /// + /// See [`ptr::swap`] for safety concerns and examples. + /// + /// [`ptr::swap`]: crate::ptr::swap() + #[stable(feature = "pointer_methods", since = "1.26.0")] + #[rustc_const_unstable(feature = "const_swap", issue = "83163")] + #[inline(always)] + pub const unsafe fn swap(self, with: *mut T) + where + T: Sized, + { + // SAFETY: the caller must uphold the safety contract for `swap`. + unsafe { swap(self, with) } + } + + /// Computes the offset that needs to be applied to the pointer in order to make it aligned to + /// `align`. + /// + /// If it is not possible to align the pointer, the implementation returns + /// `usize::MAX`. It is permissible for the implementation to *always* + /// return `usize::MAX`. Only your algorithm's performance can depend + /// on getting a usable offset here, not its correctness. + /// + /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be + /// used with the `wrapping_add` method. + /// + /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go + /// beyond the allocation that the pointer points into. It is up to the caller to ensure that + /// the returned offset is correct in all terms other than alignment. 
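Because `align_offset` is allowed to answer `usize::MAX` ("cannot align"), correct code always keeps a fallback path; only performance may depend on getting a usable offset. A minimal editorial sketch using stable APIs:

```rust
fn main() {
    let bytes = [0u8; 16];
    let p: *const u8 = bytes.as_ptr();

    let offset = p.align_offset(std::mem::align_of::<u32>());
    if offset != usize::MAX && offset + std::mem::size_of::<u32>() <= bytes.len() {
        // In bounds and now properly aligned: a u32 read is permitted here.
        let aligned = p.wrapping_add(offset) as *const u32;
        let word = unsafe { aligned.read() };
        assert_eq!(word, 0);
    } else {
        // Fall back to a byte-by-byte path; correctness must never depend on
        // `align_offset` returning a usable value.
    }
}
```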
+ /// + /// # Panics + /// + /// The function panics if `align` is not a power-of-two. + /// + /// # Examples + /// + /// Accessing adjacent `u8` as `u16` + /// + /// ``` + /// # fn foo(n: usize) { + /// # use std::mem::align_of; + /// # unsafe { + /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; + /// let ptr = x.as_ptr().add(n) as *const u8; + /// let offset = ptr.align_offset(align_of::()); + /// if offset < x.len() - n - 1 { + /// let u16_ptr = ptr.add(offset) as *const u16; + /// assert_ne!(*u16_ptr, 500); + /// } else { + /// // while the pointer can be aligned via `offset`, it would point + /// // outside the allocation + /// } + /// # } } + /// ``` + #[stable(feature = "align_offset", since = "1.36.0")] + #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")] + pub const fn align_offset(self, align: usize) -> usize + where + T: Sized, + { + if !align.is_power_of_two() { + panic!("align_offset: align is not a power-of-two"); + } + + fn rt_impl(p: *mut T, align: usize) -> usize { + // SAFETY: `align` has been checked to be a power of 2 above + unsafe { align_offset(p, align) } + } + + const fn ctfe_impl(_: *mut T, _: usize) -> usize { + usize::MAX + } + + // SAFETY: + // It is permissible for `align_offset` to always return `usize::MAX`, + // algorithm correctness can not depend on `align_offset` returning non-max values. + // + // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can. + unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) } + } + + /// Returns whether the pointer is properly aligned for `T`. + #[must_use] + #[inline] + #[unstable(feature = "pointer_is_aligned", issue = "96284")] + pub fn is_aligned(self) -> bool + where + T: Sized, + { + self.is_aligned_to(core::mem::align_of::()) + } + + /// Returns whether the pointer is aligned to `align`. + /// + /// For non-`Sized` pointees this operation considers only the data pointer, + /// ignoring the metadata. + /// + /// # Panics + /// + /// The function panics if `align` is not a power-of-two (this includes 0). + #[must_use] + #[inline] + #[unstable(feature = "pointer_is_aligned", issue = "96284")] + pub fn is_aligned_to(self, align: usize) -> bool { + if !align.is_power_of_two() { + panic!("is_aligned_to: align is not a power-of-two"); + } + + // SAFETY: `is_power_of_two()` will return `false` for zero. + unsafe { core::intrinsics::assume(align != 0) }; + + // Cast is needed for `T: !Sized` + self.cast::().addr() % align == 0 + } +} + +impl *mut [T] { + /// Returns the length of a raw slice. + /// + /// The returned value is the number of **elements**, not the number of bytes. + /// + /// This function is safe, even when the raw slice cannot be cast to a slice + /// reference because the pointer is null or unaligned. + /// + /// # Examples + /// + /// ```rust + /// #![feature(slice_ptr_len)] + /// use std::ptr; + /// + /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3); + /// assert_eq!(slice.len(), 3); + /// ``` + #[inline(always)] + #[unstable(feature = "slice_ptr_len", issue = "71146")] + #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")] + pub const fn len(self) -> usize { + metadata(self) + } + + /// Returns `true` if the raw slice has a length of 0. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(slice_ptr_len)] + /// + /// let mut a = [1, 2, 3]; + /// let ptr = &mut a as *mut [_]; + /// assert!(!ptr.is_empty()); + /// ``` + #[inline(always)] + #[unstable(feature = "slice_ptr_len", issue = "71146")] + #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")] + pub const fn is_empty(self) -> bool { + self.len() == 0 + } + + /// Divides one mutable raw slice into two at an index. + /// + /// The first will contain all indices from `[0, mid)` (excluding + /// the index `mid` itself) and the second will contain all + /// indices from `[mid, len)` (excluding the index `len` itself). + /// + /// # Panics + /// + /// Panics if `mid > len`. + /// + /// # Safety + /// + /// `mid` must be [in-bounds] of the underlying [allocated object]. + /// Which means `self` must be dereferenceable and span a single allocation + /// that is at least `mid * size_of::()` bytes long. Not upholding these + /// requirements is *[undefined behavior]* even if the resulting pointers are not used. + /// + /// Since `len` being in-bounds it is not a safety invariant of `*mut [T]` the + /// safety requirements of this method are the same as for [`split_at_mut_unchecked`]. + /// The explicit bounds check is only as useful as `len` is correct. + /// + /// [`split_at_mut_unchecked`]: #method.split_at_mut_unchecked + /// [in-bounds]: #method.add + /// [allocated object]: crate::ptr#allocated-object + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// #![feature(raw_slice_split)] + /// #![feature(slice_ptr_get)] + /// + /// let mut v = [1, 0, 3, 0, 5, 6]; + /// let ptr = &mut v as *mut [_]; + /// unsafe { + /// let (left, right) = ptr.split_at_mut(2); + /// assert_eq!(&*left, [1, 0]); + /// assert_eq!(&*right, [3, 0, 5, 6]); + /// } + /// ``` + #[inline(always)] + #[track_caller] + #[unstable(feature = "raw_slice_split", issue = "95595")] + pub unsafe fn split_at_mut(self, mid: usize) -> (*mut [T], *mut [T]) { + assert!(mid <= self.len()); + // SAFETY: The assert above is only a safety-net as long as `self.len()` is correct + // The actual safety requirements of this function are the same as for `split_at_mut_unchecked` + unsafe { self.split_at_mut_unchecked(mid) } + } + + /// Divides one mutable raw slice into two at an index, without doing bounds checking. + /// + /// The first will contain all indices from `[0, mid)` (excluding + /// the index `mid` itself) and the second will contain all + /// indices from `[mid, len)` (excluding the index `len` itself). + /// + /// # Safety + /// + /// `mid` must be [in-bounds] of the underlying [allocated object]. + /// Which means `self` must be dereferenceable and span a single allocation + /// that is at least `mid * size_of::()` bytes long. Not upholding these + /// requirements is *[undefined behavior]* even if the resulting pointers are not used. 
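To complement the example above: because the two returned raw slices cover disjoint index ranges, both halves can be turned into `&mut` at the same time, which is the main reason to split at the raw-pointer level. A hedged, nightly-only sketch (the `raw_slice_split` feature is still unstable here):

```rust
#![feature(raw_slice_split)]

fn main() {
    let mut v = [0u8; 6];
    let ptr = &mut v as *mut [u8];

    unsafe {
        let (left, right) = ptr.split_at_mut(3);
        // The halves are disjoint, so both may be mutably reborrowed at once.
        (&mut *left).fill(1);
        (&mut *right).fill(2);
    }

    assert_eq!(v, [1, 1, 1, 2, 2, 2]);
}
```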
+ /// + /// [in-bounds]: #method.add + /// [out-of-bounds index]: #method.add + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// #![feature(raw_slice_split)] + /// + /// let mut v = [1, 0, 3, 0, 5, 6]; + /// // scoped to restrict the lifetime of the borrows + /// unsafe { + /// let ptr = &mut v as *mut [_]; + /// let (left, right) = ptr.split_at_mut_unchecked(2); + /// assert_eq!(&*left, [1, 0]); + /// assert_eq!(&*right, [3, 0, 5, 6]); + /// (&mut *left)[1] = 2; + /// (&mut *right)[1] = 4; + /// } + /// assert_eq!(v, [1, 2, 3, 4, 5, 6]); + /// ``` + #[inline(always)] + #[unstable(feature = "raw_slice_split", issue = "95595")] + pub unsafe fn split_at_mut_unchecked(self, mid: usize) -> (*mut [T], *mut [T]) { + let len = self.len(); + let ptr = self.as_mut_ptr(); + + // SAFETY: Caller must pass a valid pointer and an index that is in-bounds. + let tail = unsafe { ptr.add(mid) }; + ( + crate::ptr::slice_from_raw_parts_mut(ptr, mid), + crate::ptr::slice_from_raw_parts_mut(tail, len - mid), + ) + } + + /// Returns a raw pointer to the slice's buffer. + /// + /// This is equivalent to casting `self` to `*mut T`, but more type-safe. + /// + /// # Examples + /// + /// ```rust + /// #![feature(slice_ptr_get)] + /// use std::ptr; + /// + /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3); + /// assert_eq!(slice.as_mut_ptr(), ptr::null_mut()); + /// ``` + #[inline(always)] + #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")] + pub const fn as_mut_ptr(self) -> *mut T { + self as *mut T + } + + /// Returns a raw pointer to an element or subslice, without doing bounds + /// checking. + /// + /// Calling this method with an [out-of-bounds index] or when `self` is not dereferenceable + /// is *[undefined behavior]* even if the resulting pointer is not used. + /// + /// [out-of-bounds index]: #method.add + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// #![feature(slice_ptr_get)] + /// + /// let x = &mut [1, 2, 4] as *mut [i32]; + /// + /// unsafe { + /// assert_eq!(x.get_unchecked_mut(1), x.as_mut_ptr().add(1)); + /// } + /// ``` + #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "const_slice_index", issue = "none")] + #[inline(always)] + pub const unsafe fn get_unchecked_mut(self, index: I) -> *mut I::Output + where + I: ~const SliceIndex<[T]>, + { + // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds. + unsafe { index.get_unchecked_mut(self) } + } + + /// Returns `None` if the pointer is null, or else returns a shared slice to + /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require + /// that the value has to be initialized. + /// + /// For the mutable counterpart see [`as_uninit_slice_mut`]. + /// + /// [`as_ref`]: #method.as_ref-1 + /// [`as_uninit_slice_mut`]: #method.as_uninit_slice_mut + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::()` many bytes, + /// and it must be properly aligned. This means in particular: + /// + /// * The entire memory range of this slice must be contained within a single [allocated object]! 
+ /// Slices can never span across multiple allocated objects. + /// + /// * The pointer must be aligned even for zero-length slices. One + /// reason for this is that enum layout optimizations may rely on references + /// (including slices of any length) being aligned and non-null to distinguish + /// them from other data. You can obtain a pointer that is usable as `data` + /// for zero-length slices using [`NonNull::dangling()`]. + /// + /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// See the safety documentation of [`pointer::offset`]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! + /// + /// See also [`slice::from_raw_parts`][]. + /// + /// [valid]: crate::ptr#safety + /// [allocated object]: crate::ptr#allocated-object + #[inline] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit]> { + if self.is_null() { + None + } else { + // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`. + Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit, self.len()) }) + } + } + + /// Returns `None` if the pointer is null, or else returns a unique slice to + /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require + /// that the value has to be initialized. + /// + /// For the shared counterpart see [`as_uninit_slice`]. + /// + /// [`as_mut`]: #method.as_mut + /// [`as_uninit_slice`]: #method.as_uninit_slice-1 + /// + /// # Safety + /// + /// When calling this method, you have to ensure that *either* the pointer is null *or* + /// all of the following is true: + /// + /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::()` + /// many bytes, and it must be properly aligned. This means in particular: + /// + /// * The entire memory range of this slice must be contained within a single [allocated object]! + /// Slices can never span across multiple allocated objects. + /// + /// * The pointer must be aligned even for zero-length slices. One + /// reason for this is that enum layout optimizations may rely on references + /// (including slices of any length) being aligned and non-null to distinguish + /// them from other data. You can obtain a pointer that is usable as `data` + /// for zero-length slices using [`NonNull::dangling()`]. + /// + /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// See the safety documentation of [`pointer::offset`]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get accessed (read or written) through any other pointer. + /// + /// This applies even if the result of this method is unused! + /// + /// See also [`slice::from_raw_parts_mut`][]. 
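A hedged sketch of the initialization pattern this method enables (nightly-only, `ptr_as_uninit`; editorial example): view the raw slice as `&mut [MaybeUninit<T>]`, fill every element, and only then read it as initialized.

```rust
#![feature(ptr_as_uninit)]

use std::mem::MaybeUninit;
use std::ptr;

fn main() {
    let mut storage = [MaybeUninit::<u32>::uninit(); 4];
    let raw: *mut [u32] =
        ptr::slice_from_raw_parts_mut(storage.as_mut_ptr().cast::<u32>(), storage.len());

    unsafe {
        // Not null, properly aligned, and exclusively borrowed here.
        let uninit = raw.as_uninit_slice_mut().unwrap();
        for (i, slot) in uninit.iter_mut().enumerate() {
            slot.write(i as u32);
        }
        // Every element has been written, so reading the slice as `[u32]` is now sound.
        assert_eq!(&*raw, &[0, 1, 2, 3]);
    }
}
```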
+ /// + /// [valid]: crate::ptr#safety + /// [allocated object]: crate::ptr#allocated-object + #[inline] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_slice_mut<'a>(self) -> Option<&'a mut [MaybeUninit]> { + if self.is_null() { + None + } else { + // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`. + Some(unsafe { slice::from_raw_parts_mut(self as *mut MaybeUninit, self.len()) }) + } + } +} + +// Equality for pointers +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialEq for *mut T { + #[inline(always)] + fn eq(&self, other: &*mut T) -> bool { + *self == *other + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Eq for *mut T {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Ord for *mut T { + #[inline] + fn cmp(&self, other: &*mut T) -> Ordering { + if self < other { + Less + } else if self == other { + Equal + } else { + Greater + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialOrd for *mut T { + #[inline(always)] + fn partial_cmp(&self, other: &*mut T) -> Option { + Some(self.cmp(other)) + } + + #[inline(always)] + fn lt(&self, other: &*mut T) -> bool { + *self < *other + } + + #[inline(always)] + fn le(&self, other: &*mut T) -> bool { + *self <= *other + } + + #[inline(always)] + fn gt(&self, other: &*mut T) -> bool { + *self > *other + } + + #[inline(always)] + fn ge(&self, other: &*mut T) -> bool { + *self >= *other + } +} diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs new file mode 100644 index 000000000..f3ef094cb --- /dev/null +++ b/library/core/src/ptr/non_null.rs @@ -0,0 +1,802 @@ +use crate::cmp::Ordering; +use crate::convert::From; +use crate::fmt; +use crate::hash; +use crate::marker::Unsize; +use crate::mem::{self, MaybeUninit}; +use crate::num::NonZeroUsize; +use crate::ops::{CoerceUnsized, DispatchFromDyn}; +use crate::ptr::Unique; +use crate::slice::{self, SliceIndex}; + +/// `*mut T` but non-zero and [covariant]. +/// +/// This is often the correct thing to use when building data structures using +/// raw pointers, but is ultimately more dangerous to use because of its additional +/// properties. If you're not sure if you should use `NonNull`, just use `*mut T`! +/// +/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer +/// is never dereferenced. This is so that enums may use this forbidden value +/// as a discriminant -- `Option>` has the same size as `*mut T`. +/// However the pointer may still dangle if it isn't dereferenced. +/// +/// Unlike `*mut T`, `NonNull` was chosen to be covariant over `T`. This makes it +/// possible to use `NonNull` when building covariant types, but introduces the +/// risk of unsoundness if used in a type that shouldn't actually be covariant. +/// (The opposite choice was made for `*mut T` even though technically the unsoundness +/// could only be caused by calling unsafe functions.) +/// +/// Covariance is correct for most safe abstractions, such as `Box`, `Rc`, `Arc`, `Vec`, +/// and `LinkedList`. This is the case because they provide a public API that follows the +/// normal shared XOR mutable rules of Rust. +/// +/// If your type cannot safely be covariant, you must ensure it contains some +/// additional field to provide invariance. Often this field will be a [`PhantomData`] +/// type like `PhantomData>` or `PhantomData<&'a mut T>`. 
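A hedged sketch of the pattern described above (editorial example; `Owned` is a made-up wrapper, not part of this patch): the wrapper keeps the covariant `NonNull<T>` for the non-null niche and a `PhantomData<T>` so the compiler still sees ownership of a `T`.

```rust
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::NonNull;

struct Owned<T> {
    ptr: NonNull<T>,
    _owns: PhantomData<T>,
}

impl<T> Owned<T> {
    fn new(value: T) -> Self {
        // Box never returns a null pointer, so the unwrap cannot fail.
        let ptr = NonNull::new(Box::into_raw(Box::new(value))).unwrap();
        Owned { ptr, _owns: PhantomData }
    }
}

impl<T> Drop for Owned<T> {
    fn drop(&mut self) {
        // SAFETY: `ptr` came from Box::into_raw in `new` and is dropped only here.
        unsafe { drop(Box::from_raw(self.ptr.as_ptr())) }
    }
}

fn main() {
    let value = Owned::new(String::from("covariant"));
    // The documented guarantee: Option<NonNull<T>> is pointer-sized.
    assert_eq!(size_of::<Option<NonNull<String>>>(), size_of::<*mut String>());
    drop(value);
}
```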
+/// +/// Notice that `NonNull` has a `From` instance for `&T`. However, this does +/// not change the fact that mutating through a (pointer derived from a) shared +/// reference is undefined behavior unless the mutation happens inside an +/// [`UnsafeCell`]. The same goes for creating a mutable reference from a shared +/// reference. When using this `From` instance without an `UnsafeCell`, +/// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr` +/// is never used for mutation. +/// +/// [covariant]: https://doc.rust-lang.org/reference/subtyping.html +/// [`PhantomData`]: crate::marker::PhantomData +/// [`UnsafeCell`]: crate::cell::UnsafeCell +#[stable(feature = "nonnull", since = "1.25.0")] +#[repr(transparent)] +#[rustc_layout_scalar_valid_range_start(1)] +#[rustc_nonnull_optimization_guaranteed] +pub struct NonNull { + pointer: *const T, +} + +/// `NonNull` pointers are not `Send` because the data they reference may be aliased. +// N.B., this impl is unnecessary, but should provide better error messages. +#[stable(feature = "nonnull", since = "1.25.0")] +impl !Send for NonNull {} + +/// `NonNull` pointers are not `Sync` because the data they reference may be aliased. +// N.B., this impl is unnecessary, but should provide better error messages. +#[stable(feature = "nonnull", since = "1.25.0")] +impl !Sync for NonNull {} + +impl NonNull { + /// Creates a new `NonNull` that is dangling, but well-aligned. + /// + /// This is useful for initializing types which lazily allocate, like + /// `Vec::new` does. + /// + /// Note that the pointer value may potentially represent a valid pointer to + /// a `T`, which means this must not be used as a "not yet initialized" + /// sentinel value. Types that lazily allocate must track initialization by + /// some other means. + /// + /// # Examples + /// + /// ``` + /// use std::ptr::NonNull; + /// + /// let ptr = NonNull::::dangling(); + /// // Important: don't try to access the value of `ptr` without + /// // initializing it first! The pointer is not null but isn't valid either! + /// ``` + #[stable(feature = "nonnull", since = "1.25.0")] + #[rustc_const_stable(feature = "const_nonnull_dangling", since = "1.36.0")] + #[must_use] + #[inline] + pub const fn dangling() -> Self { + // SAFETY: mem::align_of() returns a non-zero usize which is then casted + // to a *mut T. Therefore, `ptr` is not null and the conditions for + // calling new_unchecked() are respected. + unsafe { + let ptr = crate::ptr::invalid_mut::(mem::align_of::()); + NonNull::new_unchecked(ptr) + } + } + + /// Returns a shared references to the value. In contrast to [`as_ref`], this does not require + /// that the value has to be initialized. + /// + /// For the mutable counterpart see [`as_uninit_mut`]. + /// + /// [`as_ref`]: NonNull::as_ref + /// [`as_uninit_mut`]: NonNull::as_uninit_mut + /// + /// # Safety + /// + /// When calling this method, you have to ensure that all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! 
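As a rough illustration of the lazy-allocation pattern mentioned for `dangling()`, here is a sketch of a container that starts with a dangling but well-aligned pointer and would only allocate later; the type and its fields are made up for the example.

```rust
use std::ptr::NonNull;

/// A vec-like type that allocates nothing until the first push.
struct Lazy<T> {
    ptr: NonNull<T>,
    cap: usize,
    len: usize,
}

impl<T> Lazy<T> {
    const fn new() -> Self {
        // No allocation yet; `dangling()` is never dereferenced while `cap == 0`.
        Lazy { ptr: NonNull::dangling(), cap: 0, len: 0 }
    }
}

fn main() {
    let v: Lazy<u64> = Lazy::new();
    assert_eq!(v.cap, 0);
    assert_eq!(v.len, 0);
    assert!(!v.ptr.as_ptr().is_null());
}
```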
+ /// + /// [the module documentation]: crate::ptr#safety + #[inline] + #[must_use] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_ref<'a>(self) -> &'a MaybeUninit { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + unsafe { &*self.cast().as_ptr() } + } + + /// Returns a unique references to the value. In contrast to [`as_mut`], this does not require + /// that the value has to be initialized. + /// + /// For the shared counterpart see [`as_uninit_ref`]. + /// + /// [`as_mut`]: NonNull::as_mut + /// [`as_uninit_ref`]: NonNull::as_uninit_ref + /// + /// # Safety + /// + /// When calling this method, you have to ensure that all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get accessed (read or written) through any other pointer. + /// + /// This applies even if the result of this method is unused! + /// + /// [the module documentation]: crate::ptr#safety + #[inline] + #[must_use] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_mut<'a>(self) -> &'a mut MaybeUninit { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + unsafe { &mut *self.cast().as_ptr() } + } +} + +impl NonNull { + /// Creates a new `NonNull`. + /// + /// # Safety + /// + /// `ptr` must be non-null. + /// + /// # Examples + /// + /// ``` + /// use std::ptr::NonNull; + /// + /// let mut x = 0u32; + /// let ptr = unsafe { NonNull::new_unchecked(&mut x as *mut _) }; + /// ``` + /// + /// *Incorrect* usage of this function: + /// + /// ```rust,no_run + /// use std::ptr::NonNull; + /// + /// // NEVER DO THAT!!! This is undefined behavior. ⚠️ + /// let ptr = unsafe { NonNull::::new_unchecked(std::ptr::null_mut()) }; + /// ``` + #[stable(feature = "nonnull", since = "1.25.0")] + #[rustc_const_stable(feature = "const_nonnull_new_unchecked", since = "1.25.0")] + #[inline] + pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { + // SAFETY: the caller must guarantee that `ptr` is non-null. + unsafe { NonNull { pointer: ptr as _ } } + } + + /// Creates a new `NonNull` if `ptr` is non-null. + /// + /// # Examples + /// + /// ``` + /// use std::ptr::NonNull; + /// + /// let mut x = 0u32; + /// let ptr = NonNull::::new(&mut x as *mut _).expect("ptr is null!"); + /// + /// if let Some(ptr) = NonNull::::new(std::ptr::null_mut()) { + /// unreachable!(); + /// } + /// ``` + #[stable(feature = "nonnull", since = "1.25.0")] + #[rustc_const_unstable(feature = "const_nonnull_new", issue = "93235")] + #[inline] + pub const fn new(ptr: *mut T) -> Option { + if !ptr.is_null() { + // SAFETY: The pointer is already checked and is not null + Some(unsafe { Self::new_unchecked(ptr) }) + } else { + None + } + } + + /// Performs the same functionality as [`std::ptr::from_raw_parts`], except that a + /// `NonNull` pointer is returned, as opposed to a raw `*const` pointer. 
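A small sketch contrasting the checked and unchecked constructors described above; `first_element` is an illustrative helper, not an API from this module.

```rust
use std::ptr::NonNull;

// `NonNull::new` returns `Option` and is the right choice when the pointer
// might be null; `new_unchecked` shifts that proof obligation to the caller.
fn first_element(ptr: *mut u32, len: usize) -> Option<NonNull<u32>> {
    if len == 0 {
        return None;
    }
    NonNull::new(ptr)
}

fn main() {
    let mut data = [7u32, 8, 9];
    let p = first_element(data.as_mut_ptr(), data.len()).expect("non-null");
    assert_eq!(unsafe { *p.as_ptr() }, 7);

    assert!(first_element(std::ptr::null_mut(), 3).is_none());
}
```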
+ /// + /// See the documentation of [`std::ptr::from_raw_parts`] for more details. + /// + /// [`std::ptr::from_raw_parts`]: crate::ptr::from_raw_parts + #[unstable(feature = "ptr_metadata", issue = "81513")] + #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] + #[inline] + pub const fn from_raw_parts( + data_address: NonNull<()>, + metadata: ::Metadata, + ) -> NonNull { + // SAFETY: The result of `ptr::from::raw_parts_mut` is non-null because `data_address` is. + unsafe { + NonNull::new_unchecked(super::from_raw_parts_mut(data_address.as_ptr(), metadata)) + } + } + + /// Decompose a (possibly wide) pointer into its address and metadata components. + /// + /// The pointer can be later reconstructed with [`NonNull::from_raw_parts`]. + #[unstable(feature = "ptr_metadata", issue = "81513")] + #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_raw_parts(self) -> (NonNull<()>, ::Metadata) { + (self.cast(), super::metadata(self.as_ptr())) + } + + /// Gets the "address" portion of the pointer. + /// + /// For more details see the equivalent method on a raw pointer, [`pointer::addr`]. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, + /// see the [`ptr` module documentation][crate::ptr]. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn addr(self) -> NonZeroUsize + where + T: Sized, + { + // SAFETY: The pointer is guaranteed by the type to be non-null, + // meaning that the address will be non-zero. + unsafe { NonZeroUsize::new_unchecked(self.pointer.addr()) } + } + + /// Creates a new pointer with the given address. + /// + /// For more details see the equivalent method on a raw pointer, [`pointer::with_addr`]. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, + /// see the [`ptr` module documentation][crate::ptr]. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn with_addr(self, addr: NonZeroUsize) -> Self + where + T: Sized, + { + // SAFETY: The result of `ptr::from::with_addr` is non-null because `addr` is guaranteed to be non-zero. + unsafe { NonNull::new_unchecked(self.pointer.with_addr(addr.get()) as *mut _) } + } + + /// Creates a new pointer by mapping `self`'s address to a new one. + /// + /// For more details see the equivalent method on a raw pointer, [`pointer::map_addr`]. + /// + /// This API and its claimed semantics are part of the Strict Provenance experiment, + /// see the [`ptr` module documentation][crate::ptr]. + #[must_use] + #[inline] + #[unstable(feature = "strict_provenance", issue = "95228")] + pub fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self + where + T: Sized, + { + self.with_addr(f(self.addr())) + } + + /// Acquires the underlying `*mut` pointer. 
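The strict-provenance methods above are nightly-only at this point (`feature(strict_provenance)`); assuming that feature, a sketch of adjusting a pointer's address with `map_addr` while keeping its provenance (the `align_up` helper is illustrative):

```rust
#![feature(strict_provenance)] // nightly-only at the time of this version

use std::num::NonZeroUsize;
use std::ptr::NonNull;

// Round a pointer's address up to `align` while keeping its provenance.
fn align_up<T>(p: NonNull<T>, align: usize) -> NonNull<T> {
    assert!(align.is_power_of_two());
    p.map_addr(|addr| {
        let rounded = (addr.get() + align - 1) & !(align - 1);
        // Still non-zero: rounding up never decreases a non-zero address.
        NonZeroUsize::new(rounded).unwrap()
    })
}

fn main() {
    let mut buf = [0u8; 64];
    let p = NonNull::new(buf.as_mut_ptr()).unwrap();
    let aligned = align_up(p, 16);
    assert_eq!(aligned.addr().get() % 16, 0);
}
```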
+ /// + /// # Examples + /// + /// ``` + /// use std::ptr::NonNull; + /// + /// let mut x = 0u32; + /// let ptr = NonNull::new(&mut x).expect("ptr is null!"); + /// + /// let x_value = unsafe { *ptr.as_ptr() }; + /// assert_eq!(x_value, 0); + /// + /// unsafe { *ptr.as_ptr() += 2; } + /// let x_value = unsafe { *ptr.as_ptr() }; + /// assert_eq!(x_value, 2); + /// ``` + #[stable(feature = "nonnull", since = "1.25.0")] + #[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")] + #[must_use] + #[inline] + pub const fn as_ptr(self) -> *mut T { + self.pointer as *mut T + } + + /// Returns a shared reference to the value. If the value may be uninitialized, [`as_uninit_ref`] + /// must be used instead. + /// + /// For the mutable counterpart see [`as_mut`]. + /// + /// [`as_uninit_ref`]: NonNull::as_uninit_ref + /// [`as_mut`]: NonNull::as_mut + /// + /// # Safety + /// + /// When calling this method, you have to ensure that all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * The pointer must point to an initialized instance of `T`. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! + /// (The part about being initialized is not yet fully decided, but until + /// it is, the only safe approach is to ensure that they are indeed initialized.) + /// + /// # Examples + /// + /// ``` + /// use std::ptr::NonNull; + /// + /// let mut x = 0u32; + /// let ptr = NonNull::new(&mut x as *mut _).expect("ptr is null!"); + /// + /// let ref_x = unsafe { ptr.as_ref() }; + /// println!("{ref_x}"); + /// ``` + /// + /// [the module documentation]: crate::ptr#safety + #[stable(feature = "nonnull", since = "1.25.0")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + #[must_use] + #[inline] + pub const unsafe fn as_ref<'a>(&self) -> &'a T { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + unsafe { &*self.as_ptr() } + } + + /// Returns a unique reference to the value. If the value may be uninitialized, [`as_uninit_mut`] + /// must be used instead. + /// + /// For the shared counterpart see [`as_ref`]. + /// + /// [`as_uninit_mut`]: NonNull::as_uninit_mut + /// [`as_ref`]: NonNull::as_ref + /// + /// # Safety + /// + /// When calling this method, you have to ensure that all of the following is true: + /// + /// * The pointer must be properly aligned. + /// + /// * It must be "dereferenceable" in the sense defined in [the module documentation]. + /// + /// * The pointer must point to an initialized instance of `T`. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get accessed (read or written) through any other pointer. + /// + /// This applies even if the result of this method is unused! 
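A sketch of the aliasing rule spelled out above: the `&mut` produced by `as_mut` must be the only path to the value while it is in use. The variable names are illustrative.

```rust
use std::ptr::NonNull;

fn main() {
    let mut value = 41u32;
    let mut ptr = NonNull::from(&mut value);

    // SAFETY: `value` is live, aligned, initialized, and nothing else
    // accesses it while `exclusive` exists.
    let exclusive: &mut u32 = unsafe { ptr.as_mut() };
    *exclusive += 1;

    // Only after `exclusive` is no longer used may `value` be read directly.
    assert_eq!(value, 42);
}
```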
+ /// (The part about being initialized is not yet fully decided, but until + /// it is, the only safe approach is to ensure that they are indeed initialized.) + /// # Examples + /// + /// ``` + /// use std::ptr::NonNull; + /// + /// let mut x = 0u32; + /// let mut ptr = NonNull::new(&mut x).expect("null pointer"); + /// + /// let x_ref = unsafe { ptr.as_mut() }; + /// assert_eq!(*x_ref, 0); + /// *x_ref += 2; + /// assert_eq!(*x_ref, 2); + /// ``` + /// + /// [the module documentation]: crate::ptr#safety + #[stable(feature = "nonnull", since = "1.25.0")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + #[must_use] + #[inline] + pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a mutable reference. + unsafe { &mut *self.as_ptr() } + } + + /// Casts to a pointer of another type. + /// + /// # Examples + /// + /// ``` + /// use std::ptr::NonNull; + /// + /// let mut x = 0u32; + /// let ptr = NonNull::new(&mut x as *mut _).expect("null pointer"); + /// + /// let casted_ptr = ptr.cast::(); + /// let raw_ptr: *mut i8 = casted_ptr.as_ptr(); + /// ``` + #[stable(feature = "nonnull_cast", since = "1.27.0")] + #[rustc_const_stable(feature = "const_nonnull_cast", since = "1.36.0")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn cast(self) -> NonNull { + // SAFETY: `self` is a `NonNull` pointer which is necessarily non-null + unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) } + } +} + +impl NonNull<[T]> { + /// Creates a non-null raw slice from a thin pointer and a length. + /// + /// The `len` argument is the number of **elements**, not the number of bytes. + /// + /// This function is safe, but dereferencing the return value is unsafe. + /// See the documentation of [`slice::from_raw_parts`] for slice safety requirements. + /// + /// # Examples + /// + /// ```rust + /// #![feature(nonnull_slice_from_raw_parts)] + /// + /// use std::ptr::NonNull; + /// + /// // create a slice pointer when starting out with a pointer to the first element + /// let mut x = [5, 6, 7]; + /// let nonnull_pointer = NonNull::new(x.as_mut_ptr()).unwrap(); + /// let slice = NonNull::slice_from_raw_parts(nonnull_pointer, 3); + /// assert_eq!(unsafe { slice.as_ref()[2] }, 7); + /// ``` + /// + /// (Note that this example artificially demonstrates a use of this method, + /// but `let slice = NonNull::from(&x[..]);` would be a better way to write code like this.) + #[unstable(feature = "nonnull_slice_from_raw_parts", issue = "71941")] + #[rustc_const_unstable(feature = "const_nonnull_slice_from_raw_parts", issue = "71941")] + #[must_use] + #[inline] + pub const fn slice_from_raw_parts(data: NonNull, len: usize) -> Self { + // SAFETY: `data` is a `NonNull` pointer which is necessarily non-null + unsafe { Self::new_unchecked(super::slice_from_raw_parts_mut(data.as_ptr(), len)) } + } + + /// Returns the length of a non-null raw slice. + /// + /// The returned value is the number of **elements**, not the number of bytes. + /// + /// This function is safe, even when the non-null raw slice cannot be dereferenced to a slice + /// because the pointer does not have a valid address. 
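For `cast`, a small stable sketch that reinterprets the pointee type without changing the address; the endianness check only exists to keep the assertion portable.

```rust
use std::ptr::NonNull;

fn main() {
    let mut x: u32 = 0x2a;
    let ptr: NonNull<u32> = NonNull::from(&mut x);
    // Same address, different pointee type.
    let byte_ptr: NonNull<u8> = ptr.cast();

    // SAFETY: `x` is live, every address is aligned for `u8`, and nothing
    // else accesses `x` while we read through the pointer.
    let first_byte = unsafe { *byte_ptr.as_ptr() };
    assert_eq!(first_byte, if cfg!(target_endian = "little") { 0x2a } else { 0 });
}
```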
+ /// + /// # Examples + /// + /// ```rust + /// #![feature(nonnull_slice_from_raw_parts)] + /// use std::ptr::NonNull; + /// + /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3); + /// assert_eq!(slice.len(), 3); + /// ``` + #[stable(feature = "slice_ptr_len_nonnull", since = "1.63.0")] + #[rustc_const_stable(feature = "const_slice_ptr_len_nonnull", since = "1.63.0")] + #[rustc_allow_const_fn_unstable(const_slice_ptr_len)] + #[must_use] + #[inline] + pub const fn len(self) -> usize { + self.as_ptr().len() + } + + /// Returns a non-null pointer to the slice's buffer. + /// + /// # Examples + /// + /// ```rust + /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)] + /// use std::ptr::NonNull; + /// + /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3); + /// assert_eq!(slice.as_non_null_ptr(), NonNull::::dangling()); + /// ``` + #[inline] + #[must_use] + #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")] + pub const fn as_non_null_ptr(self) -> NonNull { + // SAFETY: We know `self` is non-null. + unsafe { NonNull::new_unchecked(self.as_ptr().as_mut_ptr()) } + } + + /// Returns a raw pointer to the slice's buffer. + /// + /// # Examples + /// + /// ```rust + /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)] + /// use std::ptr::NonNull; + /// + /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3); + /// assert_eq!(slice.as_mut_ptr(), NonNull::::dangling().as_ptr()); + /// ``` + #[inline] + #[must_use] + #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")] + pub const fn as_mut_ptr(self) -> *mut T { + self.as_non_null_ptr().as_ptr() + } + + /// Returns a shared reference to a slice of possibly uninitialized values. In contrast to + /// [`as_ref`], this does not require that the value has to be initialized. + /// + /// For the mutable counterpart see [`as_uninit_slice_mut`]. + /// + /// [`as_ref`]: NonNull::as_ref + /// [`as_uninit_slice_mut`]: NonNull::as_uninit_slice_mut + /// + /// # Safety + /// + /// When calling this method, you have to ensure that all of the following is true: + /// + /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::()` many bytes, + /// and it must be properly aligned. This means in particular: + /// + /// * The entire memory range of this slice must be contained within a single allocated object! + /// Slices can never span across multiple allocated objects. + /// + /// * The pointer must be aligned even for zero-length slices. One + /// reason for this is that enum layout optimizations may rely on references + /// (including slices of any length) being aligned and non-null to distinguish + /// them from other data. You can obtain a pointer that is usable as `data` + /// for zero-length slices using [`NonNull::dangling()`]. + /// + /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// See the safety documentation of [`pointer::offset`]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get mutated (except inside `UnsafeCell`). + /// + /// This applies even if the result of this method is unused! 
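A sketch of walking a `NonNull<[T]>` under the validity rules above, using only stable methods (`len` plus the thin data pointer); `sum` is an illustrative helper.

```rust
use std::ptr::NonNull;

// Walk the buffer of a raw slice element by element via its data pointer.
fn sum(raw: NonNull<[u32]>) -> u32 {
    let data: *mut u32 = raw.as_ptr().cast();
    let mut total = 0;
    for i in 0..raw.len() {
        // SAFETY: the caller guarantees `raw` points to `raw.len()` initialized
        // elements inside a single allocated object that nothing else mutates.
        total += unsafe { *data.add(i) };
    }
    total
}

fn main() {
    let mut arr = [1u32, 2, 3];
    assert_eq!(sum(NonNull::from(&mut arr[..])), 6);
}
```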
+ /// + /// See also [`slice::from_raw_parts`]. + /// + /// [valid]: crate::ptr#safety + #[inline] + #[must_use] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_slice<'a>(self) -> &'a [MaybeUninit] { + // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`. + unsafe { slice::from_raw_parts(self.cast().as_ptr(), self.len()) } + } + + /// Returns a unique reference to a slice of possibly uninitialized values. In contrast to + /// [`as_mut`], this does not require that the value has to be initialized. + /// + /// For the shared counterpart see [`as_uninit_slice`]. + /// + /// [`as_mut`]: NonNull::as_mut + /// [`as_uninit_slice`]: NonNull::as_uninit_slice + /// + /// # Safety + /// + /// When calling this method, you have to ensure that all of the following is true: + /// + /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::()` + /// many bytes, and it must be properly aligned. This means in particular: + /// + /// * The entire memory range of this slice must be contained within a single allocated object! + /// Slices can never span across multiple allocated objects. + /// + /// * The pointer must be aligned even for zero-length slices. One + /// reason for this is that enum layout optimizations may rely on references + /// (including slices of any length) being aligned and non-null to distinguish + /// them from other data. You can obtain a pointer that is usable as `data` + /// for zero-length slices using [`NonNull::dangling()`]. + /// + /// * The total size `ptr.len() * mem::size_of::()` of the slice must be no larger than `isize::MAX`. + /// See the safety documentation of [`pointer::offset`]. + /// + /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is + /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. + /// In particular, while this reference exists, the memory the pointer points to must + /// not get accessed (read or written) through any other pointer. + /// + /// This applies even if the result of this method is unused! + /// + /// See also [`slice::from_raw_parts_mut`]. + /// + /// [valid]: crate::ptr#safety + /// + /// # Examples + /// + /// ```rust + /// #![feature(allocator_api, ptr_as_uninit)] + /// + /// use std::alloc::{Allocator, Layout, Global}; + /// use std::mem::MaybeUninit; + /// use std::ptr::NonNull; + /// + /// let memory: NonNull<[u8]> = Global.allocate(Layout::new::<[u8; 32]>())?; + /// // This is safe as `memory` is valid for reads and writes for `memory.len()` many bytes. + /// // Note that calling `memory.as_mut()` is not allowed here as the content may be uninitialized. + /// # #[allow(unused_variables)] + /// let slice: &mut [MaybeUninit] = unsafe { memory.as_uninit_slice_mut() }; + /// # Ok::<_, std::alloc::AllocError>(()) + /// ``` + #[inline] + #[must_use] + #[unstable(feature = "ptr_as_uninit", issue = "75402")] + #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")] + pub const unsafe fn as_uninit_slice_mut<'a>(self) -> &'a mut [MaybeUninit] { + // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`. + unsafe { slice::from_raw_parts_mut(self.cast().as_ptr(), self.len()) } + } + + /// Returns a raw pointer to an element or subslice, without doing bounds + /// checking. 
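A stable counterpart to the allocator-based example above, assuming `Vec::spare_capacity_mut` (stable since 1.60): the spare capacity is exactly a `&mut [MaybeUninit<T>]` that gets initialized before the length is raised.

```rust
use std::mem::MaybeUninit;

fn main() {
    let mut v: Vec<u32> = Vec::with_capacity(4);
    let spare: &mut [MaybeUninit<u32>] = v.spare_capacity_mut();
    for (i, slot) in spare.iter_mut().take(4).enumerate() {
        slot.write(i as u32 * 10);
    }
    // SAFETY: the first 4 elements were just initialized above.
    unsafe { v.set_len(4) };
    assert_eq!(v, [0, 10, 20, 30]);
}
```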
+ /// + /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable + /// is *[undefined behavior]* even if the resulting pointer is not used. + /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)] + /// use std::ptr::NonNull; + /// + /// let x = &mut [1, 2, 4]; + /// let x = NonNull::slice_from_raw_parts(NonNull::new(x.as_mut_ptr()).unwrap(), x.len()); + /// + /// unsafe { + /// assert_eq!(x.get_unchecked_mut(1).as_ptr(), x.as_non_null_ptr().as_ptr().add(1)); + /// } + /// ``` + #[unstable(feature = "slice_ptr_get", issue = "74265")] + #[rustc_const_unstable(feature = "const_slice_index", issue = "none")] + #[inline] + pub const unsafe fn get_unchecked_mut(self, index: I) -> NonNull + where + I: ~const SliceIndex<[T]>, + { + // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds. + // As a consequence, the resulting pointer cannot be null. + unsafe { NonNull::new_unchecked(self.as_ptr().get_unchecked_mut(index)) } + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +#[rustc_const_unstable(feature = "const_clone", issue = "91805")] +impl const Clone for NonNull { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl Copy for NonNull {} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl CoerceUnsized> for NonNull where T: Unsize {} + +#[unstable(feature = "dispatch_from_dyn", issue = "none")] +impl DispatchFromDyn> for NonNull where T: Unsize {} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl fmt::Debug for NonNull { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl fmt::Pointer for NonNull { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl Eq for NonNull {} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl PartialEq for NonNull { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.as_ptr() == other.as_ptr() + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl Ord for NonNull { + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + self.as_ptr().cmp(&other.as_ptr()) + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl PartialOrd for NonNull { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + self.as_ptr().partial_cmp(&other.as_ptr()) + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl hash::Hash for NonNull { + #[inline] + fn hash(&self, state: &mut H) { + self.as_ptr().hash(state) + } +} + +#[unstable(feature = "ptr_internals", issue = "none")] +#[rustc_const_unstable(feature = "const_convert", issue = "88674")] +impl const From> for NonNull { + #[inline] + fn from(unique: Unique) -> Self { + // SAFETY: A Unique pointer cannot be null, so the conditions for + // new_unchecked() are respected. + unsafe { NonNull::new_unchecked(unique.as_ptr()) } + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +#[rustc_const_unstable(feature = "const_convert", issue = "88674")] +impl const From<&mut T> for NonNull { + /// Converts a `&mut T` to a `NonNull`. + /// + /// This conversion is safe and infallible since references cannot be null. 
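The comparison and hashing impls above all operate on the address, not the pointee, which is what makes `NonNull` usable directly as a key; a short sketch:

```rust
use std::collections::HashSet;
use std::ptr::NonNull;

fn main() {
    let mut a = 1u8;
    let mut b = 1u8;
    let pa = NonNull::from(&mut a);
    let pb = NonNull::from(&mut b);

    // Equal values, but different addresses.
    assert_ne!(pa, pb);

    let mut seen: HashSet<NonNull<u8>> = HashSet::new();
    assert!(seen.insert(pa));
    assert!(!seen.insert(pa)); // same address inserted twice
    assert!(seen.insert(pb));
}
```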
+ #[inline] + fn from(reference: &mut T) -> Self { + // SAFETY: A mutable reference cannot be null. + unsafe { NonNull { pointer: reference as *mut T } } + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +#[rustc_const_unstable(feature = "const_convert", issue = "88674")] +impl const From<&T> for NonNull { + /// Converts a `&T` to a `NonNull`. + /// + /// This conversion is safe and infallible since references cannot be null. + #[inline] + fn from(reference: &T) -> Self { + // SAFETY: A reference cannot be null, so the conditions for + // new_unchecked() are respected. + unsafe { NonNull { pointer: reference as *const T } } + } +} diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs new file mode 100644 index 000000000..64616142b --- /dev/null +++ b/library/core/src/ptr/unique.rs @@ -0,0 +1,193 @@ +use crate::convert::From; +use crate::fmt; +use crate::marker::{PhantomData, Unsize}; +use crate::ops::{CoerceUnsized, DispatchFromDyn}; +use crate::ptr::NonNull; + +/// A wrapper around a raw non-null `*mut T` that indicates that the possessor +/// of this wrapper owns the referent. Useful for building abstractions like +/// `Box`, `Vec`, `String`, and `HashMap`. +/// +/// Unlike `*mut T`, `Unique` behaves "as if" it were an instance of `T`. +/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies +/// the kind of strong aliasing guarantees an instance of `T` can expect: +/// the referent of the pointer should not be modified without a unique path to +/// its owning Unique. +/// +/// If you're uncertain of whether it's correct to use `Unique` for your purposes, +/// consider using `NonNull`, which has weaker semantics. +/// +/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer +/// is never dereferenced. This is so that enums may use this forbidden value +/// as a discriminant -- `Option>` has the same size as `Unique`. +/// However the pointer may still dangle if it isn't dereferenced. +/// +/// Unlike `*mut T`, `Unique` is covariant over `T`. This should always be correct +/// for any type which upholds Unique's aliasing requirements. +#[unstable( + feature = "ptr_internals", + issue = "none", + reason = "use `NonNull` instead and consider `PhantomData` \ + (if you also use `#[may_dangle]`), `Send`, and/or `Sync`" +)] +#[doc(hidden)] +#[repr(transparent)] +pub struct Unique { + pointer: NonNull, + // NOTE: this marker has no consequences for variance, but is necessary + // for dropck to understand that we logically own a `T`. + // + // For details, see: + // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data + _marker: PhantomData, +} + +/// `Unique` pointers are `Send` if `T` is `Send` because the data they +/// reference is unaliased. Note that this aliasing invariant is +/// unenforced by the type system; the abstraction using the +/// `Unique` must enforce it. +#[unstable(feature = "ptr_internals", issue = "none")] +unsafe impl Send for Unique {} + +/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they +/// reference is unaliased. Note that this aliasing invariant is +/// unenforced by the type system; the abstraction using the +/// `Unique` must enforce it. +#[unstable(feature = "ptr_internals", issue = "none")] +unsafe impl Sync for Unique {} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl Unique { + /// Creates a new `Unique` that is dangling, but well-aligned. 
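`Unique` itself is an internal, perma-unstable type, but the pattern its fields document is reusable in user code; a sketch of an owning wrapper built from `NonNull<T>` plus `PhantomData<T>` (all names are illustrative):

```rust
use std::marker::PhantomData;
use std::ptr::NonNull;

// An owning pointer: `PhantomData<T>` tells drop check that a `T` is owned,
// and the manual `Send`/`Sync` impls mirror what `Unique` provides.
struct Owned<T> {
    ptr: NonNull<T>,
    _owns: PhantomData<T>,
}

unsafe impl<T: Send> Send for Owned<T> {}
unsafe impl<T: Sync> Sync for Owned<T> {}

impl<T> Owned<T> {
    fn new(value: T) -> Self {
        let ptr = NonNull::from(Box::leak(Box::new(value)));
        Owned { ptr, _owns: PhantomData }
    }
}

impl<T> Drop for Owned<T> {
    fn drop(&mut self) {
        // SAFETY: `ptr` came from `Box::leak` in `new` and is dropped exactly once.
        unsafe { drop(Box::from_raw(self.ptr.as_ptr())) };
    }
}

fn main() {
    let o = Owned::new(String::from("hello"));
    assert_eq!(unsafe { o.ptr.as_ref() }.len(), 5);
}
```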
+ /// + /// This is useful for initializing types which lazily allocate, like + /// `Vec::new` does. + /// + /// Note that the pointer value may potentially represent a valid pointer to + /// a `T`, which means this must not be used as a "not yet initialized" + /// sentinel value. Types that lazily allocate must track initialization by + /// some other means. + #[must_use] + #[inline] + pub const fn dangling() -> Self { + Self::from(NonNull::dangling()) + } +} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl Unique { + /// Creates a new `Unique`. + /// + /// # Safety + /// + /// `ptr` must be non-null. + #[inline] + pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { + // SAFETY: the caller must guarantee that `ptr` is non-null. + unsafe { Unique { pointer: NonNull::new_unchecked(ptr), _marker: PhantomData } } + } + + /// Creates a new `Unique` if `ptr` is non-null. + #[inline] + pub const fn new(ptr: *mut T) -> Option { + if let Some(pointer) = NonNull::new(ptr) { + Some(Unique { pointer, _marker: PhantomData }) + } else { + None + } + } + + /// Acquires the underlying `*mut` pointer. + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub const fn as_ptr(self) -> *mut T { + self.pointer.as_ptr() + } + + /// Dereferences the content. + /// + /// The resulting lifetime is bound to self so this behaves "as if" + /// it were actually an instance of T that is getting borrowed. If a longer + /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. + #[must_use] + #[inline] + pub const unsafe fn as_ref(&self) -> &T { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + unsafe { self.pointer.as_ref() } + } + + /// Mutably dereferences the content. + /// + /// The resulting lifetime is bound to self so this behaves "as if" + /// it were actually an instance of T that is getting borrowed. If a longer + /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. + #[must_use] + #[inline] + pub const unsafe fn as_mut(&mut self) -> &mut T { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a mutable reference. + unsafe { self.pointer.as_mut() } + } + + /// Casts to a pointer of another type. + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub const fn cast(self) -> Unique { + Unique::from(self.pointer.cast()) + } +} + +#[unstable(feature = "ptr_internals", issue = "none")] +#[rustc_const_unstable(feature = "const_clone", issue = "91805")] +impl const Clone for Unique { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl Copy for Unique {} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl CoerceUnsized> for Unique where T: Unsize {} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl DispatchFromDyn> for Unique where T: Unsize {} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl fmt::Debug for Unique { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl fmt::Pointer for Unique { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + +#[unstable(feature = "ptr_internals", issue = "none")] +impl const From<&mut T> for Unique { + /// Converts a `&mut T` to a `Unique`. + /// + /// This conversion is infallible since references cannot be null. 
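The "same size as" claim made for both `Unique` and `NonNull` is the null-pointer niche, which can be checked directly on stable:

```rust
use std::mem::size_of;
use std::ptr::NonNull;

fn main() {
    // Because these pointers can never be null, `None` reuses the all-zero
    // bit pattern and the `Option` wrapper costs nothing.
    assert_eq!(size_of::<Option<NonNull<u64>>>(), size_of::<*mut u64>());
    assert_eq!(size_of::<Option<Box<u64>>>(), size_of::<*mut u64>());
    assert_eq!(size_of::<Option<&u64>>(), size_of::<*mut u64>());
}
```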
+    #[inline]
+    fn from(reference: &mut T) -> Self {
+        Self::from(NonNull::from(reference))
+    }
+}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> const From<NonNull<T>> for Unique<T> {
+    /// Converts a `NonNull<T>` to a `Unique<T>`.
+    ///
+    /// This conversion is infallible since `NonNull` cannot be null.
+    #[inline]
+    fn from(pointer: NonNull<T>) -> Self {
+        Unique { pointer, _marker: PhantomData }
+    }
+}
-- 
cgit v1.2.3
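Finally, the reference conversions defined at the end of these files are the usual safe way to obtain a `NonNull` in the first place; a short closing sketch:

```rust
use std::ptr::NonNull;

fn main() {
    let mut x = 10u32;

    // `From<&T>`: safe because references are guaranteed non-null.
    let shared: NonNull<u32> = NonNull::from(&x);
    assert_eq!(unsafe { *shared.as_ptr() }, 10);

    // `From<&mut T>`: the pointer may also be written through.
    let unique: NonNull<u32> = NonNull::from(&mut x);
    // SAFETY: derived from `&mut x`, and nothing else accesses `x` here.
    unsafe { *unique.as_ptr() = 11 };
    assert_eq!(x, 11);
}
```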