author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
commit     dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree       ab8ce99c4b255ce46f99ef402c27916055b899ee /library/core
parent     Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
download   rustc-dc0db358abe19481e475e10c32149b53370f1a1c.tar.xz
           rustc-dc0db358abe19481e475e10c32149b53370f1a1c.zip

Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core')
-rw-r--r--  library/core/benches/iter.rs | 52
-rw-r--r--  library/core/benches/slice.rs | 9
-rw-r--r--  library/core/src/alloc/mod.rs | 5
-rw-r--r--  library/core/src/any.rs | 31
-rw-r--r--  library/core/src/array/mod.rs | 23
-rw-r--r--  library/core/src/cell.rs | 6
-rw-r--r--  library/core/src/convert/mod.rs | 5
-rw-r--r--  library/core/src/default.rs | 2
-rw-r--r--  library/core/src/ffi/c_str.rs | 19
-rw-r--r--  library/core/src/ffi/mod.rs | 32
-rw-r--r--  library/core/src/future/poll_fn.rs | 87
-rw-r--r--  library/core/src/intrinsics.rs | 54
-rw-r--r--  library/core/src/iter/adapters/flatten.rs | 12
-rw-r--r--  library/core/src/iter/adapters/step_by.rs | 411
-rw-r--r--  library/core/src/iter/range.rs | 25
-rw-r--r--  library/core/src/iter/sources/successors.rs | 2
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 8
-rw-r--r--  library/core/src/iter/traits/marker.rs | 2
-rw-r--r--  library/core/src/lib.rs | 1
-rw-r--r--  library/core/src/macros/mod.rs | 2
-rw-r--r--  library/core/src/marker.rs | 75
-rw-r--r--  library/core/src/mem/mod.rs | 8
-rw-r--r--  library/core/src/mem/transmutability.rs | 7
-rw-r--r--  library/core/src/net/ip_addr.rs | 10
-rw-r--r--  library/core/src/num/f32.rs | 1
-rw-r--r--  library/core/src/num/f64.rs | 1
-rw-r--r--  library/core/src/num/mod.rs | 17
-rw-r--r--  library/core/src/num/nonzero.rs | 21
-rw-r--r--  library/core/src/ops/index.rs | 2
-rw-r--r--  library/core/src/ops/try_trait.rs | 45
-rw-r--r--  library/core/src/option.rs | 22
-rw-r--r--  library/core/src/panicking.rs | 5
-rw-r--r--  library/core/src/primitive_docs.rs | 2
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 11
-rw-r--r--  library/core/src/ptr/metadata.rs | 3
-rw-r--r--  library/core/src/ptr/mod.rs | 1
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 22
-rw-r--r--  library/core/src/ptr/unique.rs | 2
-rw-r--r--  library/core/src/slice/index.rs | 5
-rw-r--r--  library/core/src/slice/iter/macros.rs | 33
-rw-r--r--  library/core/src/slice/mod.rs | 17
-rw-r--r--  library/core/src/slice/raw.rs | 24
-rw-r--r--  library/core/src/str/converts.rs | 4
-rw-r--r--  library/core/src/str/mod.rs | 40
-rw-r--r--  library/core/src/task/wake.rs | 40
-rw-r--r--  library/core/src/tuple.rs | 25
-rwxr-xr-x  library/core/src/unicode/printable.py | 2
-rw-r--r--  library/core/tests/array.rs | 15
-rw-r--r--  library/core/tests/clone.rs | 2
-rw-r--r--  library/core/tests/future.rs | 1
-rw-r--r--  library/core/tests/iter/adapters/step_by.rs | 55
-rw-r--r--  library/core/tests/lib.rs | 2
-rw-r--r--  library/core/tests/manually_drop.rs | 2
-rw-r--r--  library/core/tests/mem.rs | 11
-rw-r--r--  library/core/tests/net/ip_addr.rs | 6
-rw-r--r--  library/core/tests/net/socket_addr.rs | 2
-rw-r--r--  library/core/tests/ptr.rs | 2
57 files changed, 992 insertions, 339 deletions
diff --git a/library/core/benches/iter.rs b/library/core/benches/iter.rs
index 60ef83223..5ec22e514 100644
--- a/library/core/benches/iter.rs
+++ b/library/core/benches/iter.rs
@@ -2,6 +2,7 @@ use core::borrow::Borrow;
use core::iter::*;
use core::mem;
use core::num::Wrapping;
+use core::ops::Range;
use test::{black_box, Bencher};
#[bench]
@@ -69,6 +70,57 @@ fn bench_max(b: &mut Bencher) {
})
}
+#[bench]
+fn bench_range_step_by_sum_reducible(b: &mut Bencher) {
+ let r = 0u32..1024;
+ b.iter(|| {
+ let r = black_box(r.clone()).step_by(8);
+
+ let mut sum: u32 = 0;
+ for i in r {
+ sum += i;
+ }
+
+ sum
+ })
+}
+
+#[bench]
+fn bench_range_step_by_loop_u32(b: &mut Bencher) {
+ let r = 0..(u16::MAX as u32);
+ b.iter(|| {
+ let r = black_box(r.clone()).step_by(64);
+
+ let mut sum: u32 = 0;
+ for i in r {
+ let i = i ^ i.wrapping_sub(1);
+ sum = sum.wrapping_add(i);
+ }
+
+ sum
+ })
+}
+
+#[bench]
+fn bench_range_step_by_fold_usize(b: &mut Bencher) {
+ let r: Range<usize> = 0..(u16::MAX as usize);
+ b.iter(|| {
+ let r = black_box(r.clone());
+ r.step_by(64)
+ .map(|x: usize| x ^ (x.wrapping_sub(1)))
+ .fold(0usize, |acc, i| acc.wrapping_add(i))
+ })
+}
+
+#[bench]
+fn bench_range_step_by_fold_u16(b: &mut Bencher) {
+ let r: Range<u16> = 0..u16::MAX;
+ b.iter(|| {
+ let r = black_box(r.clone());
+ r.step_by(64).map(|x: u16| x ^ (x.wrapping_sub(1))).fold(0u16, |acc, i| acc.wrapping_add(i))
+ })
+}
+
pub fn copy_zip(xs: &[u8], ys: &mut [u8]) {
for (a, b) in ys.iter_mut().zip(xs) {
*a = *b;
diff --git a/library/core/benches/slice.rs b/library/core/benches/slice.rs
index 9b86a0ca9..3bfb35e68 100644
--- a/library/core/benches/slice.rs
+++ b/library/core/benches/slice.rs
@@ -1,3 +1,4 @@
+use core::ptr::NonNull;
use test::black_box;
use test::Bencher;
@@ -162,3 +163,11 @@ fn fill_byte_sized(b: &mut Bencher) {
black_box(slice.fill(black_box(NewType(42))));
});
}
+
+// Tests the ability of the compiler to recognize that only the last slice item is needed
+// based on issue #106288
+#[bench]
+fn fold_to_last(b: &mut Bencher) {
+ let slice: &[i32] = &[0; 1024];
+ b.iter(|| black_box(slice).iter().fold(None, |_, r| Some(NonNull::from(r))));
+}
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
index d6ae2b821..78091c017 100644
--- a/library/core/src/alloc/mod.rs
+++ b/library/core/src/alloc/mod.rs
@@ -94,8 +94,9 @@ impl fmt::Display for AllocError {
///
/// # Safety
///
-/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
-/// until the instance and all of its copies and clones are dropped,
+/// * Memory blocks returned from an allocator that are [*currently allocated*] must point to
+/// valid memory and retain their validity while they are [*currently allocated*] and at
+/// least one of the instance and all of its clones has not been dropped.
///
/// * copying, cloning, or moving the allocator must not invalidate memory blocks returned from this
/// allocator. A copied or cloned allocator must behave like the same allocator, and
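To make the reworded guarantee concrete: a currently allocated block must stay valid while the allocator or any of its clones is still alive, and a clone may free what the original allocated. A minimal sketch of what that permits, assuming a nightly toolchain with the unstable `allocator_api` feature; the use of `Global` is purely illustrative:

```rust
#![feature(allocator_api)] // nightly-only, unstable feature

use std::alloc::{Allocator, Global, Layout};

fn main() {
    let alloc = Global;
    let layout = Layout::new::<u64>();
    // A currently-allocated block...
    let block = alloc.allocate(layout).expect("allocation failed");
    // ...must remain valid across copies/clones of the allocator, and a
    // clone may deallocate memory obtained from the original instance.
    let clone = alloc;
    unsafe { clone.deallocate(block.cast(), layout) };
}
```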
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 7969f4055..09f52d692 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -153,6 +153,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::fmt;
+use crate::hash;
use crate::intrinsics;
///////////////////////////////////////////////////////////////////////////////
@@ -662,10 +663,10 @@ impl dyn Any + Send + Sync {
/// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth
/// noting that the hashes and ordering will vary between Rust releases. Beware
/// of relying on them inside of your code!
-#[derive(Clone, Copy, Debug, Hash, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, Debug, Eq, PartialOrd, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TypeId {
- t: u64,
+ t: u128,
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -696,7 +697,31 @@ impl TypeId {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
pub const fn of<T: ?Sized + 'static>() -> TypeId {
- TypeId { t: intrinsics::type_id::<T>() }
+ #[cfg(bootstrap)]
+ let t = intrinsics::type_id::<T>() as u128;
+ #[cfg(not(bootstrap))]
+ let t: u128 = intrinsics::type_id::<T>();
+ TypeId { t }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for TypeId {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ // We only hash the lower 64 bits of our (128 bit) internal numeric ID,
+ // because:
+ // - The hashing algorithm which backs `TypeId` is expected to be
+ // unbiased and high quality, meaning further mixing would be somewhat
+ // redundant compared to choosing (the lower) 64 bits arbitrarily.
+ // - `Hasher::finish` returns a u64 anyway, so the extra entropy we'd
+ // get from hashing the full value would probably not be useful
+ // (especially given the previous point about the lower 64 bits being
+ // high quality on their own).
+ // - It is correct to do so -- only hashing a subset of `self` is still
+ //   compatible with an `Eq` implementation that considers the entire
+ //   value, as ours does.
+ (self.t as u64).hash(state);
}
}
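For illustration, a small stable-Rust check of the behavior this hunk commits to: equality compares the full (now 128-bit) internal id, while `Hash` deliberately feeds the hasher only 64 of those bits. The choice of `DefaultHasher` here is incidental:

```rust
use std::any::TypeId;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn main() {
    // Equality considers the entire id.
    assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
    assert_ne!(TypeId::of::<u32>(), TypeId::of::<i32>());

    // Hashing forwards only 64 bits, which is fine because
    // `Hasher::finish` returns a u64 anyway.
    let mut hasher = DefaultHasher::new();
    TypeId::of::<String>().hash(&mut hasher);
    println!("TypeId hash: {:#x}", hasher.finish());
}
```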
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index fec92320a..76b3589b9 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -538,29 +538,6 @@ impl<T, const N: usize> [T; N] {
drain_array_with(self, |iter| try_from_trusted_iterator(iter.map(f)))
}
- /// 'Zips up' two arrays into a single array of pairs.
- ///
- /// `zip()` returns a new array where every element is a tuple where the
- /// first element comes from the first array, and the second element comes
- /// from the second array. In other words, it zips two arrays together,
- /// into a single one.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(array_zip)]
- /// let x = [1, 2, 3];
- /// let y = [4, 5, 6];
- /// let z = x.zip(y);
- /// assert_eq!(z, [(1, 4), (2, 5), (3, 6)]);
- /// ```
- #[unstable(feature = "array_zip", issue = "80094")]
- pub fn zip<U>(self, rhs: [U; N]) -> [(T, U); N] {
- drain_array_with(self, |lhs| {
- drain_array_with(rhs, |rhs| from_trusted_iterator(crate::iter::zip(lhs, rhs)))
- })
- }
-
/// Returns a slice containing the entire array. Equivalent to `&s[..]`.
#[stable(feature = "array_as_slice", since = "1.57.0")]
#[rustc_const_stable(feature = "array_as_slice", since = "1.57.0")]
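Since the unstable `array::zip` (tracking issue #80094) is deleted by this hunk, callers need a replacement. One stable option, sketched here with `std::array::from_fn` (stable since 1.63):

```rust
fn main() {
    let x = [1, 2, 3];
    let y = [4, 5, 6];
    // Equivalent of the removed `x.zip(y)`, built from stable parts.
    let z: [(i32, i32); 3] = std::array::from_fn(|i| (x[i], y[i]));
    assert_eq!(z, [(1, 4), (2, 5), (3, 6)]);
}
```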
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 744767aae..909b32547 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -59,7 +59,7 @@
//! [`borrow`](`RefCell::borrow`), and a mutable borrow (`&mut T`) can be obtained with
//! [`borrow_mut`](`RefCell::borrow_mut`). When these functions are called, they first verify that
//! Rust's borrow rules will be satisfied: any number of immutable borrows are allowed or a
-//! single immutable borrow is allowed, but never both. If a borrow is attempted that would violate
+//! single mutable borrow is allowed, but never both. If a borrow is attempted that would violate
//! these rules, the thread will panic.
//!
//! The corresponding [`Sync`] version of `RefCell<T>` is [`RwLock<T>`].
@@ -1374,7 +1374,7 @@ impl Clone for BorrowRef<'_> {
debug_assert!(is_reading(borrow));
// Prevent the borrow counter from overflowing into
// a writing borrow.
- assert!(borrow != isize::MAX);
+ assert!(borrow != BorrowFlag::MAX);
self.borrow.set(borrow + 1);
BorrowRef { borrow: self.borrow }
}
@@ -1756,7 +1756,7 @@ impl<'b> BorrowRefMut<'b> {
let borrow = self.borrow.get();
debug_assert!(is_writing(borrow));
// Prevent the borrow counter from underflowing.
- assert!(borrow != isize::MIN);
+ assert!(borrow != BorrowFlag::MIN);
self.borrow.set(borrow - 1);
BorrowRefMut { borrow: self.borrow }
}
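The doc fix above (any number of immutable borrows, or a single *mutable* borrow, never both) describes runtime-checked rules; the `BorrowFlag` asserts guard the counter that enforces them. A small example of the documented behavior:

```rust
use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(5);
    {
        // Any number of shared borrows may coexist...
        let a = cell.borrow();
        let b = cell.borrow();
        assert_eq!(*a + *b, 10);
    } // ...but they must end before a mutable borrow starts.
    *cell.borrow_mut() += 1;

    // Mixing the two is caught at runtime.
    let _shared = cell.borrow();
    assert!(cell.try_borrow_mut().is_err());
}
```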
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index 38a6d1ccd..ff5a4c913 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -495,8 +495,7 @@ pub trait Into<T>: Sized {
/// By converting underlying error types to our own custom error type that encapsulates the
/// underlying error type, we can return a single error type without losing information on the
/// underlying cause. The '?' operator automatically converts the underlying error type to our
-/// custom error type by calling `Into<CliError>::into` which is automatically provided when
-/// implementing `From`. The compiler then infers which implementation of `Into` should be used.
+/// custom error type with `From::from`.
///
/// ```
/// use std::fs;
@@ -533,7 +532,7 @@ pub trait Into<T>: Sized {
#[rustc_diagnostic_item = "From"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented(on(
- all(_Self = "&str", T = "std::string::String"),
+ all(_Self = "&str", any(T = "alloc::string::String", T = "std::string::String")),
note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix",
))]
pub trait From<T>: Sized {
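The simplified doc sentence says `?` converts the underlying error via `From::from`. A minimal sketch of that pattern; the `CliError` and `double` names are made up for illustration:

```rust
use std::num::ParseIntError;

#[derive(Debug)]
struct CliError(ParseIntError);

impl From<ParseIntError> for CliError {
    fn from(err: ParseIntError) -> Self {
        CliError(err)
    }
}

fn double(s: &str) -> Result<i32, CliError> {
    // `?` calls `From::from` to turn a ParseIntError into a CliError.
    let n: i32 = s.parse()?;
    Ok(n * 2)
}

fn main() {
    assert_eq!(double("21").unwrap(), 42);
    assert!(double("not a number").is_err());
}
```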
diff --git a/library/core/src/default.rs b/library/core/src/default.rs
index 09dbc9581..1f7be85d3 100644
--- a/library/core/src/default.rs
+++ b/library/core/src/default.rs
@@ -190,7 +190,7 @@ macro_rules! default_impl {
($t:ty, $v:expr, $doc:tt) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for $t {
- #[inline]
+ #[inline(always)]
#[doc = $doc]
fn default() -> $t {
$v
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index e1e1a9b40..39f795c1f 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -81,7 +81,7 @@ use crate::str;
#[derive(Hash)]
#[stable(feature = "core_c_str", since = "1.64.0")]
#[rustc_has_incoherent_inherent_impls]
-#[cfg_attr(not(bootstrap), lang = "CStr")]
+#[lang = "CStr"]
// FIXME:
// `fn from` in `impl From<&CStr> for Box<CStr>` current implementation relies
// on `CStr` being layout-compatible with `[u8]`.
@@ -241,7 +241,7 @@ impl CStr {
/// ```
///
/// ```
- /// #![feature(const_cstr_methods)]
+ /// #![feature(const_cstr_from_ptr)]
///
/// use std::ffi::{c_char, CStr};
///
@@ -256,7 +256,7 @@ impl CStr {
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ #[rustc_const_unstable(feature = "const_cstr_from_ptr", issue = "101719")]
pub const unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
// SAFETY: The caller has provided a pointer that points to a valid C
// string with a NUL terminator of size less than `isize::MAX`, whose
@@ -377,7 +377,7 @@ impl CStr {
/// assert!(cstr.is_err());
/// ```
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
- #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ #[rustc_const_stable(feature = "const_cstr_methods", since = "1.72.0")]
pub const fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, FromBytesWithNulError> {
let nul_pos = memchr::memchr(0, bytes);
match nul_pos {
@@ -561,10 +561,12 @@ impl CStr {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn to_bytes(&self) -> &[u8] {
+ #[rustc_const_stable(feature = "const_cstr_methods", since = "1.72.0")]
+ pub const fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
+ // FIXME(const-hack) replace with range index
// SAFETY: to_bytes_with_nul returns slice with length at least 1
- unsafe { bytes.get_unchecked(..bytes.len() - 1) }
+ unsafe { slice::from_raw_parts(bytes.as_ptr(), bytes.len() - 1) }
}
/// Converts this C string to a byte slice containing the trailing 0 byte.
@@ -588,7 +590,7 @@ impl CStr {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")]
+ #[rustc_const_stable(feature = "const_cstr_methods", since = "1.72.0")]
pub const fn to_bytes_with_nul(&self) -> &[u8] {
// SAFETY: Transmuting a slice of `c_char`s to a slice of `u8`s
// is safe on all supported targets.
@@ -612,7 +614,8 @@ impl CStr {
/// assert_eq!(cstr.to_str(), Ok("foo"));
/// ```
#[stable(feature = "cstr_to_str", since = "1.4.0")]
- pub fn to_str(&self) -> Result<&str, str::Utf8Error> {
+ #[rustc_const_stable(feature = "const_cstr_methods", since = "1.72.0")]
+ pub const fn to_str(&self) -> Result<&str, str::Utf8Error> {
// N.B., when `CStr` is changed to perform the length check in `.to_bytes()`
// instead of in `from_ptr()`, it may be worth considering if this should
// be rewritten to do the UTF-8 check inline with the length calculation
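With `from_bytes_with_nul`, `to_bytes`, `to_bytes_with_nul`, and `to_str` now const-stable, a `&CStr` can be built and inspected entirely at compile time. A sketch assuming Rust 1.72 or later:

```rust
use std::ffi::CStr;

// Validated at compile time; a bad literal fails the build.
const HELLO: &CStr = match CStr::from_bytes_with_nul(b"hello\0") {
    Ok(s) => s,
    Err(_) => panic!("invalid C string literal"),
};

fn main() {
    const BYTES: &[u8] = HELLO.to_bytes(); // const-stable as of 1.72
    assert_eq!(BYTES, b"hello");
    assert_eq!(HELLO.to_str(), Ok("hello"));
}
```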
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index b73abbbac..0488c8076 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -132,7 +132,12 @@ mod c_char_definition {
),
all(
target_os = "netbsd",
- any(target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc")
+ any(
+ target_arch = "aarch64",
+ target_arch = "arm",
+ target_arch = "powerpc",
+ target_arch = "riscv64"
+ )
),
all(
target_os = "vxworks",
@@ -202,7 +207,7 @@ mod c_long_definition {
// would be uninhabited and at least dereferencing such pointers would
// be UB.
#[doc = include_str!("c_void.md")]
-#[cfg_attr(not(bootstrap), lang = "c_void")]
+#[lang = "c_void"]
#[cfg_attr(not(doc), repr(u8))] // work around https://github.com/rust-lang/rust/issues/90435
#[stable(feature = "core_c_void", since = "1.30.0")]
pub enum c_void {
@@ -238,7 +243,7 @@ impl fmt::Debug for c_void {
not(target_arch = "s390x"),
not(target_arch = "x86_64")
),
- all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
target_arch = "asmjs",
target_os = "uefi",
@@ -267,7 +272,7 @@ pub struct VaListImpl<'f> {
not(target_arch = "s390x"),
not(target_arch = "x86_64")
),
- all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
target_arch = "asmjs",
target_os = "uefi",
@@ -292,7 +297,7 @@ impl<'f> fmt::Debug for VaListImpl<'f> {
/// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf
#[cfg(all(
target_arch = "aarch64",
- not(any(target_os = "macos", target_os = "ios")),
+ not(any(target_os = "macos", target_os = "ios", target_os = "tvos")),
not(target_os = "uefi"),
not(windows),
))]
@@ -389,7 +394,10 @@ pub struct VaList<'a, 'f: 'a> {
not(target_arch = "s390x"),
not(target_arch = "x86_64")
),
- all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ all(
+ target_arch = "aarch64",
+ any(target_os = "macos", target_os = "ios", target_os = "tvos")
+ ),
target_family = "wasm",
target_arch = "asmjs",
target_os = "uefi",
@@ -404,7 +412,10 @@ pub struct VaList<'a, 'f: 'a> {
target_arch = "s390x",
target_arch = "x86_64"
),
- any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
+ any(
+ not(target_arch = "aarch64"),
+ not(any(target_os = "macos", target_os = "ios", target_os = "tvos"))
+ ),
not(target_family = "wasm"),
not(target_arch = "asmjs"),
not(target_os = "uefi"),
@@ -422,7 +433,7 @@ pub struct VaList<'a, 'f: 'a> {
not(target_arch = "s390x"),
not(target_arch = "x86_64")
),
- all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios", target_os = "tvos")),
target_family = "wasm",
target_arch = "asmjs",
target_os = "uefi",
@@ -449,7 +460,10 @@ impl<'f> VaListImpl<'f> {
target_arch = "s390x",
target_arch = "x86_64"
),
- any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
+ any(
+ not(target_arch = "aarch64"),
+ not(any(target_os = "macos", target_os = "ios", target_os = "tvos"))
+ ),
not(target_family = "wasm"),
not(target_arch = "asmjs"),
not(target_os = "uefi"),
diff --git a/library/core/src/future/poll_fn.rs b/library/core/src/future/poll_fn.rs
index 90cb79739..d27a9dfc1 100644
--- a/library/core/src/future/poll_fn.rs
+++ b/library/core/src/future/poll_fn.rs
@@ -24,6 +24,93 @@ use crate::task::{Context, Poll};
/// assert_eq!(read_future.await, "Hello, World!".to_owned());
/// # }
/// ```
+///
+/// ## Capturing a pinned state
+///
+/// Example of a closure wrapping inner futures:
+///
+/// ```
+/// # async fn run() {
+/// use core::future::{self, Future};
+/// use core::task::Poll;
+///
+/// /// Resolves to the first future that completes. In the event of a tie, `a` wins.
+/// fn naive_select<T>(
+/// a: impl Future<Output = T>,
+/// b: impl Future<Output = T>,
+/// ) -> impl Future<Output = T>
+/// {
+/// let (mut a, mut b) = (Box::pin(a), Box::pin(b));
+/// future::poll_fn(move |cx| {
+/// if let Poll::Ready(r) = a.as_mut().poll(cx) {
+/// Poll::Ready(r)
+/// } else if let Poll::Ready(r) = b.as_mut().poll(cx) {
+/// Poll::Ready(r)
+/// } else {
+/// Poll::Pending
+/// }
+/// })
+/// }
+///
+/// let a = async { 42 };
+/// let b = future::pending();
+/// let v = naive_select(a, b).await;
+/// assert_eq!(v, 42);
+///
+/// let a = future::pending();
+/// let b = async { 27 };
+/// let v = naive_select(a, b).await;
+/// assert_eq!(v, 27);
+///
+/// let a = async { 42 };
+/// let b = async { 27 };
+/// let v = naive_select(a, b).await;
+/// assert_eq!(v, 42); // biased towards `a` in case of tie!
+/// # }
+/// ```
+///
+/// This time without [`Box::pin`]ning:
+///
+/// [`Box::pin`]: ../../std/boxed/struct.Box.html#method.pin
+///
+/// ```
+/// # async fn run() {
+/// use core::future::{self, Future};
+/// use core::pin::pin;
+/// use core::task::Poll;
+///
+/// /// Resolves to the first future that completes. In the event of a tie, `a` wins.
+/// fn naive_select<T>(
+/// a: impl Future<Output = T>,
+/// b: impl Future<Output = T>,
+/// ) -> impl Future<Output = T>
+/// {
+/// async {
+/// let (mut a, mut b) = (pin!(a), pin!(b));
+/// future::poll_fn(move |cx| {
+/// if let Poll::Ready(r) = a.as_mut().poll(cx) {
+/// Poll::Ready(r)
+/// } else if let Poll::Ready(r) = b.as_mut().poll(cx) {
+/// Poll::Ready(r)
+/// } else {
+/// Poll::Pending
+/// }
+/// }).await
+/// }
+/// }
+///
+/// let a = async { 42 };
+/// let b = future::pending();
+/// let v = naive_select(a, b).await;
+/// assert_eq!(v, 42);
+/// # }
+/// ```
+///
+/// - Notice how, by virtue of being in an `async` context, we have been able to make the [`pin!`]
+/// macro work, thereby avoiding any need for the `unsafe`
+/// <code>[Pin::new_unchecked](&mut fut)</code> constructor.
+///
+/// [`pin!`]: crate::pin::pin!
#[stable(feature = "future_poll_fn", since = "1.64.0")]
pub fn poll_fn<T, F>(f: F) -> PollFn<F>
where
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index f5c5dd29f..5a9a7013a 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -1057,8 +1057,25 @@ extern "rust-intrinsic" {
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
#[rustc_safe_intrinsic]
#[rustc_nounwind]
+ #[cfg(bootstrap)]
pub fn type_id<T: ?Sized + 'static>() -> u64;
+ /// Gets an identifier which is globally unique to the specified type. This
+ /// function will return the same value for a type regardless of whichever
+ /// crate it is invoked in.
+ ///
+ /// Note that, unlike most intrinsics, this is safe to call;
+ /// it does not require an `unsafe` block.
+ /// Therefore, implementations must not require the user to uphold
+ /// any safety invariants.
+ ///
+ /// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
+ #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+ #[rustc_safe_intrinsic]
+ #[rustc_nounwind]
+ #[cfg(not(bootstrap))]
+ pub fn type_id<T: ?Sized + 'static>() -> u128;
+
/// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
/// This will statically either panic, or do nothing.
///
@@ -1385,7 +1402,6 @@ extern "rust-intrinsic" {
///
/// This is not expected to ever be exposed directly to users, rather it
/// may eventually be exposed through some more-constrained API.
- #[cfg(not(bootstrap))]
#[rustc_const_stable(feature = "const_transmute", since = "1.56.0")]
#[rustc_nounwind]
pub fn transmute_unchecked<Src, Dst>(src: Src) -> Dst;
@@ -1425,19 +1441,11 @@ extern "rust-intrinsic" {
/// returned value will result in undefined behavior.
///
/// The stabilized version of this intrinsic is [`pointer::offset`].
- #[cfg(not(bootstrap))]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[rustc_nounwind]
pub fn offset<Ptr, Delta>(dst: Ptr, offset: Delta) -> Ptr;
- /// The bootstrap version of this is more restricted.
- #[cfg(bootstrap)]
- #[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[rustc_nounwind]
- pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
-
/// Calculates the offset from a pointer, potentially wrapping.
///
/// This is implemented as an intrinsic to avoid converting to and from an
@@ -2270,7 +2278,6 @@ extern "rust-intrinsic" {
/// This intrinsic can *only* be called where the pointer is a local without
/// projections (`write_via_move(ptr, x)`, not `write_via_move(*ptr, x)`) so
/// that it trivially obeys runtime-MIR rules about derefs in operands.
- #[cfg(not(bootstrap))]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[rustc_nounwind]
pub fn write_via_move<T>(ptr: *mut T, value: T);
@@ -2650,7 +2657,7 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
-#[inline]
+#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
extern "rust-intrinsic" {
@@ -2741,7 +2748,7 @@ pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: us
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
-#[inline]
+#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
extern "rust-intrinsic" {
@@ -2814,7 +2821,7 @@ pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_allowed_through_unstable_modules]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
-#[inline]
+#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
extern "rust-intrinsic" {
@@ -2832,24 +2839,3 @@ pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
write_bytes(dst, val, count)
}
}
-
-/// Polyfill for bootstrap
-#[cfg(bootstrap)]
-pub const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
- use crate::mem::*;
- // SAFETY: It's a transmute -- the caller promised it's fine.
- unsafe { transmute_copy(&ManuallyDrop::new(src)) }
-}
-
-/// Polyfill for bootstrap
-#[cfg(bootstrap)]
-pub const unsafe fn write_via_move<T>(ptr: *mut T, value: T) {
- use crate::mem::*;
- // SAFETY: the caller must guarantee that `dst` is valid for writes.
- // `dst` cannot overlap `src` because the caller has mutable access
- // to `dst` while `src` is owned by this function.
- unsafe {
- copy_nonoverlapping::<T>(&value, ptr, 1);
- forget(value);
- }
-}
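The wrappers bumped to `#[inline(always)]` here back the stable `core::ptr` copy functions; observable behavior is unchanged. A small stable usage sketch:

```rust
use std::ptr;

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    // SAFETY: both pointers are valid for 4 bytes and do not overlap.
    unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len()) };
    assert_eq!(dst, src);

    let mut buf = [0xffu8; 4];
    // SAFETY: `buf` is valid for writes of 4 bytes.
    unsafe { ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len()) };
    assert_eq!(buf, [0; 4]);
}
```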
diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs
index 2568aaf34..d3e454563 100644
--- a/library/core/src/iter/adapters/flatten.rs
+++ b/library/core/src/iter/adapters/flatten.rs
@@ -310,7 +310,7 @@ where
/// Real logic of both `Flatten` and `FlatMap` which simply delegate to
/// this type.
#[derive(Clone, Debug)]
-#[unstable(feature = "trusted_len", issue = "37572")]
+#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
struct FlattenCompat<I, U> {
iter: Fuse<I>,
frontiter: Option<U>,
@@ -464,7 +464,7 @@ where
}
}
-#[unstable(feature = "trusted_len", issue = "37572")]
+#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
impl<I, U> Iterator for FlattenCompat<I, U>
where
I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
@@ -579,7 +579,7 @@ where
}
}
-#[unstable(feature = "trusted_len", issue = "37572")]
+#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
impl<I, U> DoubleEndedIterator for FlattenCompat<I, U>
where
I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
@@ -649,7 +649,7 @@ where
}
}
-#[unstable(feature = "trusted_len", issue = "37572")]
+#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
unsafe impl<const N: usize, I, T> TrustedLen
for FlattenCompat<I, <[T; N] as IntoIterator>::IntoIter>
where
@@ -657,7 +657,7 @@ where
{
}
-#[unstable(feature = "trusted_len", issue = "37572")]
+#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
unsafe impl<'a, const N: usize, I, T> TrustedLen
for FlattenCompat<I, <&'a [T; N] as IntoIterator>::IntoIter>
where
@@ -665,7 +665,7 @@ where
{
}
-#[unstable(feature = "trusted_len", issue = "37572")]
+#[cfg_attr(bootstrap, unstable(feature = "trusted_len", issue = "37572"))]
unsafe impl<'a, const N: usize, I, T> TrustedLen
for FlattenCompat<I, <&'a mut [T; N] as IntoIterator>::IntoIter>
where
diff --git a/library/core/src/iter/adapters/step_by.rs b/library/core/src/iter/adapters/step_by.rs
index 4252c34a0..7f58f7d17 100644
--- a/library/core/src/iter/adapters/step_by.rs
+++ b/library/core/src/iter/adapters/step_by.rs
@@ -1,4 +1,9 @@
-use crate::{intrinsics, iter::from_fn, ops::Try};
+use crate::convert::TryFrom;
+use crate::{
+ intrinsics,
+ iter::{from_fn, TrustedLen},
+ ops::{Range, Try},
+};
/// An iterator for stepping iterators by a custom amount.
///
@@ -11,14 +16,22 @@ use crate::{intrinsics, iter::from_fn, ops::Try};
#[stable(feature = "iterator_step_by", since = "1.28.0")]
#[derive(Clone, Debug)]
pub struct StepBy<I> {
+ /// This field is guaranteed to be preprocessed by the specialized `SpecRangeSetup::setup`
+ /// in the constructor.
+ /// For most iterators that processing is a no-op, but for Range<{integer}> types it is lossy
+ /// which means the inner iterator cannot be returned to user code.
+ /// Additionally this type-dependent preprocessing means specialized implementations
+ /// cannot be used interchangeably.
iter: I,
step: usize,
first_take: bool,
}
impl<I> StepBy<I> {
+ #[inline]
pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy<I> {
assert!(step != 0);
+ let iter = <I as SpecRangeSetup<I>>::setup(iter, step);
StepBy { iter, step: step - 1, first_take: true }
}
}
@@ -32,16 +45,174 @@ where
#[inline]
fn next(&mut self) -> Option<Self::Item> {
+ self.spec_next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.spec_size_hint()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.spec_nth(n)
+ }
+
+ fn try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
+ where
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.spec_try_fold(acc, f)
+ }
+
+ #[inline]
+ fn fold<Acc, F>(self, acc: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.spec_fold(acc, f)
+ }
+}
+
+impl<I> StepBy<I>
+where
+ I: ExactSizeIterator,
+{
+ // The zero-based index starting from the end of the iterator of the
+ // last element. Used in the `DoubleEndedIterator` implementation.
+ fn next_back_index(&self) -> usize {
+ let rem = self.iter.len() % (self.step + 1);
if self.first_take {
- self.first_take = false;
- self.iter.next()
+ if rem == 0 { self.step } else { rem - 1 }
} else {
- self.iter.nth(self.step)
+ rem
}
}
+}
+#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")]
+impl<I> DoubleEndedIterator for StepBy<I>
+where
+ I: DoubleEndedIterator + ExactSizeIterator,
+{
#[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.spec_next_back()
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.spec_nth_back(n)
+ }
+
+ fn try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
+ where
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ self.spec_try_rfold(init, f)
+ }
+
+ #[inline]
+ fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ Self: Sized,
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.spec_rfold(init, f)
+ }
+}
+
+// StepBy can only make the iterator shorter, so the len will still fit.
+#[stable(feature = "iterator_step_by", since = "1.28.0")]
+impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}
+
+trait SpecRangeSetup<T> {
+ fn setup(inner: T, step: usize) -> T;
+}
+
+impl<T> SpecRangeSetup<T> for T {
+ #[inline]
+ default fn setup(inner: T, _step: usize) -> T {
+ inner
+ }
+}
+
+/// Specialization trait to optimize `StepBy<Range<{integer}>>` iteration.
+///
+/// # Safety
+///
+/// Technically this is safe to implement (look ma, no unsafe!), but in reality
+/// a lot of unsafe code relies on ranges over integers being correct.
+///
+/// For correctness *all* public StepBy methods must be specialized
+/// because `setup` drastically alters the meaning of the struct fields so that mixing
+/// different implementations would lead to incorrect results.
+unsafe trait StepByImpl<I> {
+ type Item;
+
+ fn spec_next(&mut self) -> Option<Self::Item>;
+
+ fn spec_size_hint(&self) -> (usize, Option<usize>);
+
+ fn spec_nth(&mut self, n: usize) -> Option<Self::Item>;
+
+ fn spec_try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
+ where
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>;
+
+ fn spec_fold<Acc, F>(self, acc: Acc, f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc;
+}
+
+/// Specialization trait for double-ended iteration.
+///
+/// See also: `StepByImpl`
+///
+/// # Safety
+///
+/// The specializations must be implemented together with `StepByImpl`
+/// where applicable. I.e. if `StepBy` does support backwards iteration
+/// for a given iterator and that is specialized for forward iteration then
+/// it must also be specialized for backwards iteration.
+unsafe trait StepByBackImpl<I> {
+ type Item;
+
+ fn spec_next_back(&mut self) -> Option<Self::Item>
+ where
+ I: DoubleEndedIterator + ExactSizeIterator;
+
+ fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>
+ where
+ I: DoubleEndedIterator + ExactSizeIterator;
+
+ fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, f: F) -> R
+ where
+ I: DoubleEndedIterator + ExactSizeIterator,
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>;
+
+ fn spec_rfold<Acc, F>(self, init: Acc, f: F) -> Acc
+ where
+ I: DoubleEndedIterator + ExactSizeIterator,
+ F: FnMut(Acc, Self::Item) -> Acc;
+}
+
+unsafe impl<I: Iterator> StepByImpl<I> for StepBy<I> {
+ type Item = I::Item;
+
+ #[inline]
+ default fn spec_next(&mut self) -> Option<I::Item> {
+ let step_size = if self.first_take { 0 } else { self.step };
+ self.first_take = false;
+ self.iter.nth(step_size)
+ }
+
+ #[inline]
+ default fn spec_size_hint(&self) -> (usize, Option<usize>) {
#[inline]
fn first_size(step: usize) -> impl Fn(usize) -> usize {
move |n| if n == 0 { 0 } else { 1 + (n - 1) / (step + 1) }
@@ -64,7 +235,7 @@ where
}
#[inline]
- fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
+ default fn spec_nth(&mut self, mut n: usize) -> Option<I::Item> {
if self.first_take {
self.first_take = false;
let first = self.iter.next();
@@ -108,7 +279,7 @@ where
}
}
- fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
+ default fn spec_try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
@@ -128,7 +299,7 @@ where
from_fn(nth(&mut self.iter, self.step)).try_fold(acc, f)
}
- fn fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
+ default fn spec_fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
@@ -148,34 +319,16 @@ where
}
}
-impl<I> StepBy<I>
-where
- I: ExactSizeIterator,
-{
- // The zero-based index starting from the end of the iterator of the
- // last element. Used in the `DoubleEndedIterator` implementation.
- fn next_back_index(&self) -> usize {
- let rem = self.iter.len() % (self.step + 1);
- if self.first_take {
- if rem == 0 { self.step } else { rem - 1 }
- } else {
- rem
- }
- }
-}
+unsafe impl<I: DoubleEndedIterator + ExactSizeIterator> StepByBackImpl<I> for StepBy<I> {
+ type Item = I::Item;
-#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")]
-impl<I> DoubleEndedIterator for StepBy<I>
-where
- I: DoubleEndedIterator + ExactSizeIterator,
-{
#[inline]
- fn next_back(&mut self) -> Option<Self::Item> {
+ default fn spec_next_back(&mut self) -> Option<Self::Item> {
self.iter.nth_back(self.next_back_index())
}
#[inline]
- fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ default fn spec_nth_back(&mut self, n: usize) -> Option<I::Item> {
// `self.iter.nth_back(usize::MAX)` does the right thing here when `n`
// is out of bounds because the length of `self.iter` does not exceed
// `usize::MAX` (because `I: ExactSizeIterator`) and `nth_back` is
@@ -184,7 +337,7 @@ where
self.iter.nth_back(n)
}
- fn try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
+ default fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
@@ -207,10 +360,10 @@ where
}
#[inline]
- fn rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
+ default fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
where
Self: Sized,
- F: FnMut(Acc, Self::Item) -> Acc,
+ F: FnMut(Acc, I::Item) -> Acc,
{
#[inline]
fn nth_back<I: DoubleEndedIterator>(
@@ -230,6 +383,192 @@ where
}
}
-// StepBy can only make the iterator shorter, so the len will still fit.
-#[stable(feature = "iterator_step_by", since = "1.28.0")]
-impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}
+/// For these implementations, `SpecRangeSetup` calculates the number
+/// of iterations that will be needed and stores that in `iter.end`.
+///
+/// The various iterator implementations then rely on that to not need
+/// overflow checking, letting loops just be counted instead.
+///
+/// These only work for unsigned types, and will need to be reworked
+/// if you want to use it to specialize on signed types.
+///
+/// Currently these are only implemented for integers up to usize due to
+/// correctness issues around ExactSizeIterator impls on 16bit platforms.
+/// And since ExactSizeIterator is a prerequisite for backwards iteration
+/// and we must consistently specialize backwards and forwards iteration
+/// that makes the situation complicated enough that it's not covered
+/// for now.
+macro_rules! spec_int_ranges {
+ ($($t:ty)*) => ($(
+
+ const _: () = assert!(usize::BITS >= <$t>::BITS);
+
+ impl SpecRangeSetup<Range<$t>> for Range<$t> {
+ #[inline]
+ fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
+ let inner_len = r.size_hint().0;
+ // If step exceeds $t::MAX, then the count will be at most 1 and
+ // thus always fit into $t.
+ let yield_count = inner_len.div_ceil(step);
+ // Turn the range end into an iteration counter
+ r.end = yield_count as $t;
+ r
+ }
+ }
+
+ unsafe impl StepByImpl<Range<$t>> for StepBy<Range<$t>> {
+ #[inline]
+ fn spec_next(&mut self) -> Option<$t> {
+ // if a step size larger than the type has been specified fall back to
+ // t::MAX, in which case remaining will be at most 1.
+ // The `+ 1` can't overflow since the constructor subtracted 1 from the original value.
+ let step = <$t>::try_from(self.step + 1).unwrap_or(<$t>::MAX);
+ let remaining = self.iter.end;
+ if remaining > 0 {
+ let val = self.iter.start;
+ // this can only overflow during the last step, after which the value
+ // will not be used
+ self.iter.start = val.wrapping_add(step);
+ self.iter.end = remaining - 1;
+ Some(val)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn spec_size_hint(&self) -> (usize, Option<usize>) {
+ let remaining = self.iter.end as usize;
+ (remaining, Some(remaining))
+ }
+
+ // The methods below are all copied from the Iterator trait default impls.
+ // We have to repeat them here so that the specialization overrides the StepByImpl defaults
+
+ #[inline]
+ fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.advance_by(n).ok()?;
+ self.next()
+ }
+
+ #[inline]
+ fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
+ where
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>
+ {
+ let mut accum = init;
+ while let Some(x) = self.next() {
+ accum = f(accum, x)?;
+ }
+ try { accum }
+ }
+
+ #[inline]
+ fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc
+ {
+ // if a step size larger than the type has been specified fall back to
+ // t::MAX, in which case remaining will be at most 1.
+ let step = <$t>::try_from(self.step + 1).unwrap_or(<$t>::MAX);
+ let remaining = self.iter.end;
+ let mut acc = init;
+ let mut val = self.iter.start;
+ for _ in 0..remaining {
+ acc = f(acc, val);
+ // this can only overflow during the last step, after which the value
+ // will no longer be used
+ val = val.wrapping_add(step);
+ }
+ acc
+ }
+ }
+
+ /// Safety: This macro is only applied to ranges over types <= usize
+ /// which means the inner length is guaranteed to fit into a usize and so
+ /// the outer length calculation won't encounter clamped values
+ #[unstable(feature = "trusted_len", issue = "37572")]
+ unsafe impl TrustedLen for StepBy<Range<$t>> {}
+ )*)
+}
+
+macro_rules! spec_int_ranges_r {
+ ($($t:ty)*) => ($(
+ const _: () = assert!(usize::BITS >= <$t>::BITS);
+
+ unsafe impl StepByBackImpl<Range<$t>> for StepBy<Range<$t>> {
+
+ #[inline]
+ fn spec_next_back(&mut self) -> Option<Self::Item>
+ where Range<$t>: DoubleEndedIterator + ExactSizeIterator,
+ {
+ let step = (self.step + 1) as $t;
+ let remaining = self.iter.end;
+ if remaining > 0 {
+ let start = self.iter.start;
+ self.iter.end = remaining - 1;
+ Some(start + step * (remaining - 1))
+ } else {
+ None
+ }
+ }
+
+ // The methods below are all copied from the Iterator trait default impls.
+ // We have to repeat them here so that the specialization overrides the StepByBackImpl defaults
+
+ #[inline]
+ fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>
+ where Self: DoubleEndedIterator,
+ {
+ if self.advance_back_by(n).is_err() {
+ return None;
+ }
+ self.next_back()
+ }
+
+ #[inline]
+ fn spec_try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
+ where
+ Self: DoubleEndedIterator,
+ F: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>
+ {
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x)?;
+ }
+ try { accum }
+ }
+
+ #[inline]
+ fn spec_rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
+ where
+ Self: DoubleEndedIterator,
+ F: FnMut(Acc, Self::Item) -> Acc
+ {
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+ }
+ )*)
+}
+
+#[cfg(target_pointer_width = "64")]
+spec_int_ranges!(u8 u16 u32 u64 usize);
+// DoubleEndedIterator requires ExactSizeIterator, which isn't implemented for Range<u64>
+#[cfg(target_pointer_width = "64")]
+spec_int_ranges_r!(u8 u16 u32 usize);
+
+#[cfg(target_pointer_width = "32")]
+spec_int_ranges!(u8 u16 u32 usize);
+#[cfg(target_pointer_width = "32")]
+spec_int_ranges_r!(u8 u16 u32 usize);
+
+#[cfg(target_pointer_width = "16")]
+spec_int_ranges!(u8 u16 usize);
+#[cfg(target_pointer_width = "16")]
+spec_int_ranges_r!(u8 u16 usize);
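The specialization is meant to be observationally equivalent: `setup` turns `range.end` into a yield counter equal to `ceil(len / step)`, and backward iteration must agree with forward iteration. A small consistency check on stable Rust:

```rust
fn main() {
    let len: usize = 1024;
    let step: usize = 8;
    // What `SpecRangeSetup::setup` stores in `iter.end`: ceil(len / step).
    let yield_count = (len + step - 1) / step;
    assert_eq!(yield_count, 128);
    assert_eq!((0..len).step_by(step).count(), yield_count);

    // Forward and backward iteration stay consistent.
    assert_eq!((0u32..1024).step_by(8).last(), Some(1016));
    assert_eq!((0u32..1024).step_by(8).next_back(), Some(1016));
}
```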
diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs
index 0171d8981..462f7170a 100644
--- a/library/core/src/iter/range.rs
+++ b/library/core/src/iter/range.rs
@@ -619,9 +619,10 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
#[inline]
fn spec_next(&mut self) -> Option<T> {
if self.start < self.end {
+ let old = self.start;
// SAFETY: just checked precondition
- let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
- Some(mem::replace(&mut self.start, n))
+ self.start = unsafe { Step::forward_unchecked(old, 1) };
+ Some(old)
} else {
None
}
@@ -629,15 +630,15 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
#[inline]
fn spec_nth(&mut self, n: usize) -> Option<T> {
- if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
+ if let Some(plus_n) = Step::forward_checked(self.start, n) {
if plus_n < self.end {
// SAFETY: just checked precondition
- self.start = unsafe { Step::forward_unchecked(plus_n.clone(), 1) };
+ self.start = unsafe { Step::forward_unchecked(plus_n, 1) };
return Some(plus_n);
}
}
- self.start = self.end.clone();
+ self.start = self.end;
None
}
@@ -655,7 +656,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
// then steps_between either returns a bound to which we clamp or returns None which
// together with the initial inequality implies more than usize::MAX steps.
// Otherwise 0 is returned which always safe to use.
- self.start = unsafe { Step::forward_unchecked(self.start.clone(), taken) };
+ self.start = unsafe { Step::forward_unchecked(self.start, taken) };
NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
}
@@ -664,8 +665,8 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
fn spec_next_back(&mut self) -> Option<T> {
if self.start < self.end {
// SAFETY: just checked precondition
- self.end = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
- Some(self.end.clone())
+ self.end = unsafe { Step::backward_unchecked(self.end, 1) };
+ Some(self.end)
} else {
None
}
@@ -673,15 +674,15 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
#[inline]
fn spec_nth_back(&mut self, n: usize) -> Option<T> {
- if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
+ if let Some(minus_n) = Step::backward_checked(self.end, n) {
if minus_n > self.start {
// SAFETY: just checked precondition
self.end = unsafe { Step::backward_unchecked(minus_n, 1) };
- return Some(self.end.clone());
+ return Some(self.end);
}
}
- self.end = self.start.clone();
+ self.end = self.start;
None
}
@@ -696,7 +697,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
let taken = available.min(n);
// SAFETY: same as the spec_advance_by() implementation
- self.end = unsafe { Step::backward_unchecked(self.end.clone(), taken) };
+ self.end = unsafe { Step::backward_unchecked(self.end, taken) };
NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
}
diff --git a/library/core/src/iter/sources/successors.rs b/library/core/src/iter/sources/successors.rs
index 99f058a90..6a6cbe905 100644
--- a/library/core/src/iter/sources/successors.rs
+++ b/library/core/src/iter/sources/successors.rs
@@ -22,7 +22,7 @@ where
Successors { next: first, succ }
}
-/// An new iterator where each successive item is computed based on the preceding one.
+/// A new iterator where each successive item is computed based on the preceding one.
///
/// This `struct` is created by the [`iter::successors()`] function.
/// See its documentation for more.
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index dabfce144..988352283 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -26,13 +26,13 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented(
on(
- _Self = "std::ops::RangeTo<Idx>",
+ any(_Self = "core::ops::RangeTo<Idx>", _Self = "std::ops::RangeTo<Idx>"),
label = "if you meant to iterate until a value, add a starting value",
note = "`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \
bounded `Range`: `0..end`"
),
on(
- _Self = "std::ops::RangeToInclusive<Idx>",
+ any(_Self = "core::ops::RangeToInclusive<Idx>", _Self = "std::ops::RangeToInclusive<Idx>"),
label = "if you meant to iterate until a value (including it), add a starting value",
note = "`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \
to have a bounded `RangeInclusive`: `0..=end`"
@@ -43,7 +43,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
),
on(_Self = "&[]", label = "`{Self}` is not an iterator; try calling `.iter()`"),
on(
- _Self = "std::vec::Vec<T, A>",
+ any(_Self = "alloc::vec::Vec<T, A>", _Self = "std::vec::Vec<T, A>"),
label = "`{Self}` is not an iterator; try calling `.into_iter()` or `.iter()`"
),
on(
@@ -51,7 +51,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
),
on(
- _Self = "std::string::String",
+ any(_Self = "alloc::string::String", _Self = "std::string::String"),
label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
),
on(
diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs
index af0284823..c21a2aac1 100644
--- a/library/core/src/iter/traits/marker.rs
+++ b/library/core/src/iter/traits/marker.rs
@@ -86,4 +86,4 @@ pub unsafe trait InPlaceIterable: Iterator {}
/// for details. Consumers are free to rely on the invariants in unsafe code.
#[unstable(feature = "trusted_step", issue = "85731")]
#[rustc_specialization_trait]
-pub unsafe trait TrustedStep: Step {}
+pub unsafe trait TrustedStep: Step + Copy {}
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 6c419eb16..05876f5fc 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -113,7 +113,6 @@
#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
#![feature(const_char_from_u32_unchecked)]
-#![feature(const_cstr_methods)]
#![feature(const_discriminant)]
#![feature(const_eval_select)]
#![feature(const_exact_div)]
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index c4134dbcd..45e5b7627 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -960,6 +960,8 @@ pub(crate) mod builtin {
///
/// A compile time error is never emitted when using this macro regardless
/// of whether the environment variable is present or not.
+ /// To emit a compile error if the environment variable is not present,
+ /// use the [`env!`] macro instead.
///
/// # Examples
///
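The added cross-reference distinguishes the two macros: `option_env!` yields `None` for a missing variable, while `env!` fails the build. A sketch, assuming the made-up `PROBABLY_UNSET_VAR` is not set at compile time:

```rust
fn main() {
    // Absent variable: compiles fine, evaluates to None.
    let maybe: Option<&'static str> = option_env!("PROBABLY_UNSET_VAR");
    assert_eq!(maybe, None);

    // `env!` on the same name would abort compilation instead:
    // let val: &'static str = env!("PROBABLY_UNSET_VAR"); // compile error
}
```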
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 8dab8d1a6..e251015dd 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -140,7 +140,8 @@ unsafe impl<T: Sync + ?Sized> Send for &T {}
)]
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
-#[rustc_deny_explicit_impl]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
#[rustc_coinductive]
pub trait Sized {
// Empty.
@@ -173,7 +174,8 @@ pub trait Sized {
/// [nomicon-coerce]: ../../nomicon/coercions.html
#[unstable(feature = "unsize", issue = "18598")]
#[lang = "unsize"]
-#[rustc_deny_explicit_impl]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
pub trait Unsize<T: ?Sized> {
// Empty.
}
@@ -205,6 +207,20 @@ pub trait StructuralPartialEq {
// Empty.
}
+marker_impls! {
+ #[unstable(feature = "structural_match", issue = "31434")]
+ StructuralPartialEq for
+ usize, u8, u16, u32, u64, u128,
+ isize, i8, i16, i32, i64, i128,
+ bool,
+ char,
+ str /* Technically requires `[u8]: StructuralEq` */,
+ (),
+ {T, const N: usize} [T; N],
+ {T} [T],
+ {T: ?Sized} &T,
+}
+
/// Required trait for constants used in pattern matches.
///
/// Any type that derives `Eq` automatically implements this trait, *regardless*
@@ -267,6 +283,7 @@ marker_impls! {
bool,
char,
str /* Technically requires `[u8]: StructuralEq` */,
+ (),
{T, const N: usize} [T; N],
{T} [T],
{T: ?Sized} &T,
@@ -558,59 +575,59 @@ impl<T: ?Sized> Copy for &T {}
#[lang = "sync"]
#[rustc_on_unimplemented(
on(
- _Self = "std::cell::OnceCell<T>",
+ any(_Self = "core::cell:OnceCell<T>", _Self = "std::cell::OnceCell<T>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::OnceLock` instead"
),
on(
- _Self = "std::cell::Cell<u8>",
+ any(_Self = "core::cell::Cell<u8>", _Self = "std::cell::Cell<u8>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU8` instead",
),
on(
- _Self = "std::cell::Cell<u16>",
+ any(_Self = "core::cell::Cell<u16>", _Self = "std::cell::Cell<u16>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU16` instead",
),
on(
- _Self = "std::cell::Cell<u32>",
+ any(_Self = "core::cell::Cell<u32>", _Self = "std::cell::Cell<u32>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU32` instead",
),
on(
- _Self = "std::cell::Cell<u64>",
+ any(_Self = "core::cell::Cell<u64>", _Self = "std::cell::Cell<u64>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU64` instead",
),
on(
- _Self = "std::cell::Cell<usize>",
+ any(_Self = "core::cell::Cell<usize>", _Self = "std::cell::Cell<usize>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicUsize` instead",
),
on(
- _Self = "std::cell::Cell<i8>",
+ any(_Self = "core::cell::Cell<i8>", _Self = "std::cell::Cell<i8>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI8` instead",
),
on(
- _Self = "std::cell::Cell<i16>",
+ any(_Self = "core::cell::Cell<i16>", _Self = "std::cell::Cell<i16>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI16` instead",
),
on(
- _Self = "std::cell::Cell<i32>",
+ any(_Self = "core::cell::Cell<i32>", _Self = "std::cell::Cell<i32>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI32` instead",
),
on(
- _Self = "std::cell::Cell<i64>",
+ any(_Self = "core::cell::Cell<i64>", _Self = "std::cell::Cell<i64>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI64` instead",
),
on(
- _Self = "std::cell::Cell<isize>",
+ any(_Self = "core::cell::Cell<isize>", _Self = "std::cell::Cell<isize>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicIsize` instead",
),
on(
- _Self = "std::cell::Cell<bool>",
+ any(_Self = "core::cell::Cell<bool>", _Self = "std::cell::Cell<bool>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicBool` instead",
),
on(
- _Self = "std::cell::Cell<T>",
+ any(_Self = "core::cell::Cell<T>", _Self = "std::cell::Cell<T>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock`",
),
on(
- _Self = "std::cell::RefCell<T>",
+ any(_Self = "core::cell::RefCell<T>", _Self = "std::cell::RefCell<T>"),
note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` instead",
),
message = "`{Self}` cannot be shared between threads safely",
@@ -839,7 +856,8 @@ impl<T: ?Sized> StructuralEq for PhantomData<T> {}
reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
)]
#[lang = "discriminant_kind"]
-#[rustc_deny_explicit_impl]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
pub trait DiscriminantKind {
/// The type of the discriminant, which must satisfy the trait
/// bounds required by `mem::Discriminant`.
@@ -944,7 +962,8 @@ marker_impls! {
#[unstable(feature = "const_trait_impl", issue = "67792")]
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
-#[rustc_deny_explicit_impl]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
#[const_trait]
pub trait Destruct {}
@@ -955,7 +974,8 @@ pub trait Destruct {}
#[unstable(feature = "tuple_trait", issue = "none")]
#[lang = "tuple_trait"]
#[rustc_on_unimplemented(message = "`{Self}` is not a tuple")]
-#[rustc_deny_explicit_impl]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
pub trait Tuple {}
/// A marker for pointer-like types.
@@ -971,21 +991,20 @@ pub trait Tuple {}
pub trait PointerLike {}
/// A marker for types which can be used as types of `const` generic parameters.
-#[cfg_attr(not(bootstrap), lang = "const_param_ty")]
+#[lang = "const_param_ty"]
#[unstable(feature = "adt_const_params", issue = "95174")]
#[rustc_on_unimplemented(message = "`{Self}` can't be used as a const parameter type")]
-pub trait ConstParamTy: StructuralEq {}
+#[allow(multiple_supertrait_upcastable)]
+pub trait ConstParamTy: StructuralEq + StructuralPartialEq {}
/// Derive macro generating an impl of the trait `ConstParamTy`.
#[rustc_builtin_macro]
#[unstable(feature = "adt_const_params", issue = "95174")]
-#[cfg(not(bootstrap))]
pub macro ConstParamTy($item:item) {
/* compiler built-in */
}
-// FIXME(generic_const_parameter_types): handle `ty::FnDef`/`ty::Closure`
-// FIXME(generic_const_parameter_types): handle `ty::Tuple`
+// FIXME(adt_const_params): handle `ty::FnDef`/`ty::Closure`
marker_impls! {
#[unstable(feature = "adt_const_params", issue = "95174")]
ConstParamTy for
@@ -999,6 +1018,11 @@ marker_impls! {
{T: ?Sized + ConstParamTy} &T,
}
+// FIXME(adt_const_params): Add to marker_impls call above once not in bootstrap
+#[unstable(feature = "adt_const_params", issue = "95174")]
+#[cfg(not(bootstrap))]
+impl ConstParamTy for () {}
+
/// A common trait implemented by all function pointers.
#[unstable(
feature = "fn_ptr_trait",
@@ -1006,7 +1030,8 @@ marker_impls! {
reason = "internal trait for implementing various traits for all function pointers"
)]
#[lang = "fn_ptr_trait"]
-#[rustc_deny_explicit_impl]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
pub trait FnPtr: Copy + Clone {
/// Returns the address of the function pointer.
#[lang = "fn_ptr_addr"]
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index afbfd6d36..2fff3f0ef 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -968,7 +968,7 @@ pub const fn replace<T>(dest: &mut T, src: T) -> T {
/// Integers and other types implementing [`Copy`] are unaffected by `drop`.
///
/// ```
-/// # #![cfg_attr(not(bootstrap), allow(dropping_copy_types))]
+/// # #![allow(dropping_copy_types)]
/// #[derive(Copy, Clone)]
/// struct Foo(u8);
///
@@ -1316,9 +1316,9 @@ impl<T> SizedTypeProperties for T {}
///
/// assert_eq!(mem::offset_of!(NestedA, b.0), 0);
/// ```
-#[cfg(not(bootstrap))]
#[unstable(feature = "offset_of", issue = "106655")]
-#[allow_internal_unstable(builtin_syntax)]
+#[allow_internal_unstable(builtin_syntax, hint_must_use)]
pub macro offset_of($Container:ty, $($fields:tt).+ $(,)?) {
- builtin # offset_of($Container, $($fields).+)
+ // The `{}` is for better error messages
+ crate::hint::must_use({builtin # offset_of($Container, $($fields).+)})
}
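
Routing the builtin through `crate::hint::must_use` means a discarded `offset_of!` result now lints. A hedged sketch of the user-facing effect on a toolchain of this vintage (the struct is illustrative):

    #![feature(offset_of)]
    use std::mem;

    #[repr(C)]
    struct Pair {
        a: u8,
        b: u32,
    }

    fn main() {
        // With repr(C), `b` follows `a` plus three bytes of padding.
        assert_eq!(mem::offset_of!(Pair, b), 4);
        // A bare `mem::offset_of!(Pair, b);` statement now warns that the
        // value must be used.
    }
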
diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs
index 87ae30619..3805d149b 100644
--- a/library/core/src/mem/transmutability.rs
+++ b/library/core/src/mem/transmutability.rs
@@ -1,3 +1,5 @@
+use crate::marker::ConstParamTy;
+
/// Are values of a type transmutable into values of another type?
///
/// This trait is implemented on-the-fly by the compiler for types `Src` and `Self` when the bits of
@@ -5,6 +7,8 @@
/// notwithstanding whatever safety checks you have asked the compiler to [`Assume`] are satisfied.
#[unstable(feature = "transmutability", issue = "99571")]
#[lang = "transmute_trait"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
pub unsafe trait BikeshedIntrinsicFrom<Src, Context, const ASSUME: Assume = { Assume::NOTHING }>
where
Src: ?Sized,
@@ -33,6 +37,9 @@ pub struct Assume {
pub validity: bool,
}
+#[unstable(feature = "transmutability", issue = "99571")]
+impl ConstParamTy for Assume {}
+
impl Assume {
/// Do not assume that *you* have ensured any safety properties are met.
#[unstable(feature = "transmutability", issue = "99571")]
diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs
index 954d88d54..c51913fa8 100644
--- a/library/core/src/net/ip_addr.rs
+++ b/library/core/src/net/ip_addr.rs
@@ -1770,14 +1770,8 @@ impl fmt::Display for Ipv6Addr {
f.write_str("::")
} else if self.is_loopback() {
f.write_str("::1")
- } else if let Some(ipv4) = self.to_ipv4() {
- match segments[5] {
- // IPv4 Compatible address
- 0 => write!(f, "::{}", ipv4),
- // IPv4 Mapped address
- 0xffff => write!(f, "::ffff:{}", ipv4),
- _ => unreachable!(),
- }
+ } else if let Some(ipv4) = self.to_ipv4_mapped() {
+ write!(f, "::ffff:{}", ipv4)
} else {
#[derive(Copy, Clone, Default)]
struct Span {
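
Only IPv4-mapped addresses now keep the embedded dotted-quad form; IPv4-compatible addresses fall through to ordinary hex groups (the test updates further down reflect this). For example:

    use std::net::Ipv6Addr;

    fn main() {
        // IPv4-mapped: still rendered with a trailing dotted quad.
        let mapped = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
        assert_eq!(mapped.to_string(), "::ffff:192.0.2.128");

        // IPv4-compatible: no longer special-cased.
        let compat = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280);
        assert_eq!(compat.to_string(), "::c000:280");
    }
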
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index 4a035ad61..d050d21c8 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -403,6 +403,7 @@ impl f32 {
/// and the stability of its representation over Rust versions
/// and target platforms isn't guaranteed.
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ #[rustc_diagnostic_item = "f32_nan"]
pub const NAN: f32 = 0.0_f32 / 0.0_f32;
/// Infinity (∞).
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index 3aafc435f..d9a738191 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -401,6 +401,7 @@ impl f64 {
/// This constant isn't guaranteed to equal to any specific NaN bitpattern,
/// and the stability of its representation over Rust versions
/// and target platforms isn't guaranteed.
+ #[rustc_diagnostic_item = "f64_nan"]
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const NAN: f64 = 0.0_f64 / 0.0_f64;
/// Infinity (∞).
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index c9baa09f4..95dcaf5dd 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -3,7 +3,6 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::ascii;
-use crate::convert::TryInto;
use crate::intrinsics;
use crate::mem;
use crate::ops::{Add, Mul, Sub};
@@ -278,18 +277,12 @@ macro_rules! widening_impl {
macro_rules! conv_rhs_for_unchecked_shift {
($SelfT:ty, $x:expr) => {{
- #[inline]
- fn conv(x: u32) -> $SelfT {
- // FIXME(const-hack) replace with `.try_into().ok().unwrap_unchecked()`.
- // SAFETY: Any legal shift amount must be losslessly representable in the self type.
- unsafe { x.try_into().ok().unwrap_unchecked() }
- }
- #[inline]
- const fn const_conv(x: u32) -> $SelfT {
- x as _
+ // If the `as` cast will truncate, ensure we still tell the backend
+ // that the pre-truncation value was also small.
+ if <$SelfT>::BITS < 32 {
+ intrinsics::assume($x <= (<$SelfT>::MAX as u32));
}
-
- intrinsics::const_eval_select(($x,), const_conv, conv)
+ $x as $SelfT
}};
}
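
The rewritten macro keeps the "shift amount is small" fact alive across the truncating `as` cast instead of round-tripping through `try_into`. A standalone sketch of the same idea, substituting the public `unreachable_unchecked` for the internal `intrinsics::assume`:

    /// Convert a `u32` shift amount to `u8`, telling the optimizer the
    /// value already fits (callers guarantee a legal shift amount).
    unsafe fn conv_rhs_u8(x: u32) -> u8 {
        if x > u8::MAX as u32 {
            // SAFETY: unreachable per the caller's contract; this encodes
            // the same hint `intrinsics::assume` provides above.
            core::hint::unreachable_unchecked();
        }
        x as u8
    }
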
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 7f06e170a..5939dedbd 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -348,7 +348,7 @@ macro_rules! nonzero_unsigned_operations {
}
/// Adds an unsigned integer to a non-zero value.
- #[doc = concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")]
+ #[doc = concat!("Return [`", stringify!($Ty), "::MAX`] on overflow.")]
///
/// # Examples
///
@@ -579,7 +579,7 @@ macro_rules! nonzero_signed_operations {
/// Checked absolute value.
/// Checks for overflow and returns [`None`] if
- #[doc = concat!("`self == ", stringify!($Int), "::MIN`.")]
+ #[doc = concat!("`self == ", stringify!($Ty), "::MIN`.")]
/// The result cannot be zero.
///
/// # Example
@@ -800,7 +800,8 @@ macro_rules! nonzero_signed_operations {
self.get().is_negative()
}
- /// Checked negation. Computes `-self`, returning `None` if `self == i32::MIN`.
+ /// Checked negation. Computes `-self`,
+ #[doc = concat!("returning `None` if `self == ", stringify!($Ty), "::MIN`.")]
///
/// # Example
///
@@ -859,8 +860,10 @@ macro_rules! nonzero_signed_operations {
((unsafe { $Ty::new_unchecked(result) }), overflow)
}
- /// Saturating negation. Computes `-self`, returning `MAX` if
- /// `self == i32::MIN` instead of overflowing.
+ /// Saturating negation. Computes `-self`,
+ #[doc = concat!("returning [`", stringify!($Ty), "::MAX`]")]
+ #[doc = concat!("if `self == ", stringify!($Ty), "::MIN`")]
+ /// instead of overflowing.
///
/// # Example
///
@@ -993,7 +996,7 @@ macro_rules! nonzero_unsigned_signed_operations {
}
/// Multiplies two non-zero integers together.
- #[doc = concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")]
+ #[doc = concat!("Return [`", stringify!($Ty), "::MAX`] on overflow.")]
///
/// # Examples
///
@@ -1102,11 +1105,11 @@ macro_rules! nonzero_unsigned_signed_operations {
#[doc = sign_dependent_expr!{
$signedness ?
if signed {
- concat!("Return [`", stringify!($Int), "::MIN`] ",
- "or [`", stringify!($Int), "::MAX`] on overflow.")
+ concat!("Return [`", stringify!($Ty), "::MIN`] ",
+ "or [`", stringify!($Ty), "::MAX`] on overflow.")
}
if unsigned {
- concat!("Return [`", stringify!($Int), "::MAX`] on overflow.")
+ concat!("Return [`", stringify!($Ty), "::MAX`] on overflow.")
}
}]
///
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index 1f1784ec9..f4649be54 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -153,7 +153,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
),
on(
- _Self = "std::string::String",
+ any(_Self = "alloc::string::String", _Self = "std::string::String"),
note = "you can use `.chars().nth()` or `.bytes().nth()`
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
),
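
Matching `alloc::string::String` alongside `std::string::String` keeps this hint working when the type is reached through `alloc`. The suggested workaround in practice (illustrative; the indexing line is commented out because it fails to compile):

    fn main() {
        let s = String::from("hello");
        // let c = s[0]; // error[E0277]: `String` cannot be indexed by `{integer}`
        assert_eq!(s.chars().nth(0), Some('h'));
        assert_eq!(s.bytes().nth(0), Some(b'h'));
    }
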
diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs
index b4f69d0b2..17625dacc 100644
--- a/library/core/src/ops/try_trait.rs
+++ b/library/core/src/ops/try_trait.rs
@@ -226,8 +226,14 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- _Self = "std::result::Result<T, E>",
- R = "std::option::Option<std::convert::Infallible>"
+ any(
+ _Self = "core::result::Result<T, E>",
+ _Self = "std::result::Result<T, E>",
+ ),
+ any(
+ R = "core::option::Option<core::convert::Infallible>",
+ R = "std::option::Option<std::convert::Infallible>",
+ )
),
message = "the `?` operator can only be used on `Result`s, not `Option`s, \
in {ItemContext} that returns `Result`",
@@ -237,7 +243,10 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- _Self = "std::result::Result<T, E>",
+ any(
+ _Self = "core::result::Result<T, E>",
+ _Self = "std::result::Result<T, E>",
+ )
),
// There's a special error message in the trait selection code for
// `From` in `?`, so this is not shown for result-in-result errors,
@@ -250,8 +259,14 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- _Self = "std::option::Option<T>",
- R = "std::result::Result<T, E>",
+ any(
+ _Self = "core::option::Option<T>",
+ _Self = "std::option::Option<T>",
+ ),
+ any(
+ R = "core::result::Result<T, E>",
+ R = "std::result::Result<T, E>",
+ )
),
message = "the `?` operator can only be used on `Option`s, not `Result`s, \
in {ItemContext} that returns `Option`",
@@ -261,7 +276,10 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- _Self = "std::option::Option<T>",
+ any(
+ _Self = "core::option::Option<T>",
+ _Self = "std::option::Option<T>",
+ )
),
// `Option`-in-`Option` always works, as there's only one possible
// residual, so this can also be phrased strongly.
@@ -273,8 +291,14 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- _Self = "std::ops::ControlFlow<B, C>",
- R = "std::ops::ControlFlow<B, C>",
+ any(
+ _Self = "core::ops::ControlFlow<B, C>",
+ _Self = "std::ops::ControlFlow<B, C>",
+ ),
+ any(
+ R = "core::ops::ControlFlow<B, C>",
+ R = "std::ops::ControlFlow<B, C>",
+ )
),
message = "the `?` operator in {ItemContext} that returns `ControlFlow<B, _>` \
can only be used on other `ControlFlow<B, _>`s (with the same Break type)",
@@ -285,7 +309,10 @@ pub trait Try: FromResidual {
on(
all(
from_desugaring = "QuestionMark",
- _Self = "std::ops::ControlFlow<B, C>",
+ any(
+ _Self = "core::ops::ControlFlow<B, C>",
+ _Self = "std::ops::ControlFlow<B, C>",
+ )
// `R` is not a `ControlFlow`, as that case was matched previously
),
message = "the `?` operator can only be used on `ControlFlow`s \
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index ec1ef3cf4..9b6ff76b2 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -1138,7 +1138,7 @@ impl<T> Option<T> {
/// Computes a default function result (if none), or
/// applies a different function to the contained value (if any).
///
- /// # Examples
+ /// # Basic examples
///
/// ```
/// let k = 21;
@@ -1149,6 +1149,25 @@ impl<T> Option<T> {
/// let x: Option<&str> = None;
/// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 42);
/// ```
+ ///
+ /// # Handling a Result-based fallback
+ ///
+ /// A fairly common pattern when dealing with optional values in combination
+ /// with [`Result<T, E>`] is invoking a fallible fallback when the option is
+ /// not present. This example
+ /// parses a command line argument (if present), or the contents of a file to
+ /// an integer. However, unlike accessing the command line argument, reading
+ /// the file is fallible, so the argument value must be wrapped with `Ok`.
+ ///
+ /// ```no_run
+ /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
+ /// let v: u64 = std::env::args()
+ /// .nth(1)
+ /// .map_or_else(|| std::fs::read_to_string("/etc/someconfig.conf"), Ok)?
+ /// .parse()?;
+ /// # Ok(())
+ /// # }
+ /// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn map_or_else<U, D, F>(self, default: D, f: F) -> U
@@ -1383,6 +1402,7 @@ impl<T> Option<T> {
/// let item_2_0 = arr_2d.get(2).and_then(|row| row.get(0));
/// assert_eq!(item_2_0, None);
/// ```
+ #[doc(alias = "flatmap")]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn and_then<U, F>(self, f: F) -> Option<U>
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index 81be3fb22..f0fcdab00 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -166,14 +166,15 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
#[track_caller]
#[lang = "panic_misaligned_pointer_dereference"] // needed by codegen for panic on misaligned pointer deref
+#[rustc_nounwind] // `CheckAlignment` MIR pass requires this function to never unwind
fn panic_misaligned_pointer_dereference(required: usize, found: usize) -> ! {
if cfg!(feature = "panic_immediate_abort") {
super::intrinsics::abort()
}
- panic!(
+ panic_nounwind_fmt(format_args!(
"misaligned pointer dereference: address must be a multiple of {required:#x} but is {found:#x}"
- )
+ ))
}
/// Panic because we cannot unwind out of a function.
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index 8266e8990..80289ca08 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -308,7 +308,7 @@ mod prim_never {}
///
/// ```no_run
/// // Undefined behaviour
-/// unsafe { char::from_u32_unchecked(0x110000) };
+/// let _ = unsafe { char::from_u32_unchecked(0x110000) };
/// ```
///
/// USVs are also the exact set of values that may be encoded in UTF-8. Because
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 6e1e862d3..926189a17 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -104,6 +104,7 @@ impl<T: ?Sized> *const T {
/// refactored.
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[rustc_diagnostic_item = "ptr_cast_mut"]
#[inline(always)]
pub const fn cast_mut(self) -> *mut T {
self as _
@@ -916,16 +917,8 @@ impl<T: ?Sized> *const T {
where
T: Sized,
{
- #[cfg(bootstrap)]
// SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe {
- self.offset(count as isize)
- }
- #[cfg(not(bootstrap))]
- // SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe {
- intrinsics::offset(self, count)
- }
+ unsafe { intrinsics::offset(self, count) }
}
/// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index 2ea032d4a..daaa44b1d 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -50,7 +50,8 @@ use crate::hash::{Hash, Hasher};
///
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
-#[rustc_deny_explicit_impl]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl(implement_via_object = false))]
+#[cfg_attr(bootstrap, rustc_deny_explicit_impl)]
pub trait Pointee {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index d0cb2f715..acc9ca29d 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -698,6 +698,7 @@ where
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
+#[rustc_diagnostic_item = "ptr_from_ref"]
pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
r
}
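
The diagnostic item only gives lints a stable name for this function; call sites are unchanged (`ptr_from_ref` is still feature-gated at this point):

    #![feature(ptr_from_ref)]

    fn main() {
        let x = 42u8;
        let p: *const u8 = std::ptr::from_ref(&x);
        // SAFETY: `p` points at the live local `x`.
        assert_eq!(unsafe { *p }, 42);
    }
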
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 2fe5164c3..c6f438578 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -473,20 +473,10 @@ impl<T: ?Sized> *mut T {
where
T: Sized,
{
- #[cfg(bootstrap)]
// SAFETY: the caller must uphold the safety contract for `offset`.
// The obtained pointer is valid for writes since the caller must
// guarantee that it points to the same allocated object as `self`.
- unsafe {
- intrinsics::offset(self, count) as *mut T
- }
- #[cfg(not(bootstrap))]
- // SAFETY: the caller must uphold the safety contract for `offset`.
- // The obtained pointer is valid for writes since the caller must
- // guarantee that it points to the same allocated object as `self`.
- unsafe {
- intrinsics::offset(self, count)
- }
+ unsafe { intrinsics::offset(self, count) }
}
/// Calculates the offset from a pointer in bytes.
@@ -1026,16 +1016,8 @@ impl<T: ?Sized> *mut T {
where
T: Sized,
{
- #[cfg(bootstrap)]
- // SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe {
- self.offset(count as isize)
- }
- #[cfg(not(bootstrap))]
// SAFETY: the caller must uphold the safety contract for `offset`.
- unsafe {
- intrinsics::offset(self, count)
- }
+ unsafe { intrinsics::offset(self, count) }
}
/// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index a853f15ed..ff7e91d3e 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -32,6 +32,8 @@ use crate::ptr::NonNull;
)]
#[doc(hidden)]
#[repr(transparent)]
+// Lang item used experimentally by Miri to define the semantics of `Unique`.
+#[cfg_attr(not(bootstrap), lang = "ptr_unique")]
pub struct Unique<T: ?Sized> {
pointer: NonNull<T>,
// NOTE: this marker has no consequences for variance, but is necessary
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index 6ef9f9c95..e1e3bcc05 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -152,7 +152,10 @@ mod private_slice_index {
#[rustc_on_unimplemented(
on(T = "str", label = "string indices are ranges of `usize`",),
on(
- all(any(T = "str", T = "&str", T = "std::string::String"), _Self = "{integer}"),
+ all(
+ any(T = "str", T = "&str", T = "alloc::string::String", T = "std::string::String"),
+ _Self = "{integer}"
+ ),
note = "you can use `.chars().nth()` or `.bytes().nth()`\n\
for more information, see chapter 8 in The Book: \
<https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index 3462c0e02..96a145e22 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -191,6 +191,39 @@ macro_rules! iterator {
self.next_back()
}
+ #[inline]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ // this implementation consists of the following optimizations compared to the
+ // default implementation:
+ // - do-while loop, as that is LLVM's preferred loop shape,
+ // see https://releases.llvm.org/16.0.0/docs/LoopTerminology.html#more-canonical-loops
+ // - bumps an index instead of a pointer since the latter case inhibits
+ // some optimizations, see #111603
+ // - avoids Option wrapping/matching
+ if is_empty!(self) {
+ return init;
+ }
+ let mut acc = init;
+ let mut i = 0;
+ let len = len!(self);
+ loop {
+ // SAFETY: the loop iterates `i in 0..len`, which is always in bounds of
+ // the slice allocation
+ acc = f(acc, unsafe { & $( $mut_ )? *self.ptr.add(i).as_ptr() });
+ // SAFETY: `i` can't overflow since it'll only reach usize::MAX if the
+ // slice had that length, in which case we'll break out of the loop
+ // after the increment
+ i = unsafe { i.unchecked_add(1) };
+ if i == len {
+ break;
+ }
+ }
+ acc
+ }
+
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
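
For reference, the specialized body above computes the same result as this straightforward safe loop (a sketch of the shape, not the actual implementation):

    fn fold_by_index<T, B>(slice: &[T], init: B, mut f: impl FnMut(B, &T) -> B) -> B {
        let mut acc = init;
        let mut i = 0;
        // Same index-bumping loop shape, minus the unchecked accesses.
        while i < slice.len() {
            acc = f(acc, &slice[i]);
            i += 1;
        }
        acc
    }
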
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index ea0181e35..e2a2428fb 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -851,6 +851,8 @@ impl<T> [T] {
/// Swaps two elements in the slice.
///
+ /// If `a` equals `b`, it's guaranteed that elements won't change value.
+ ///
/// # Arguments
///
/// * a - The index of the first element
@@ -2995,7 +2997,7 @@ impl<T> [T] {
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
- /// (i.e. does not allocate), and *O*(*n*) on average. The worst-case performance is *O*(*n* log *n*).
+ /// (i.e. does not allocate), and runs in *O*(*n*) time.
/// This function is also known as "kth element" in other libraries.
///
/// It returns a triplet of the following from the reordered slice:
@@ -3045,9 +3047,8 @@ impl<T> [T] {
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
- /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average.
- /// The worst-case performance is *O*(*n* log *n*). This function is also known as
- /// "kth element" in other libraries.
+ /// position `index`), in-place (i.e. does not allocate), and runs in *O*(*n*) time.
+ /// This function is also known as "kth element" in other libraries.
///
/// It returns a triplet of the following from
/// the slice reordered according to the provided comparator function: the subslice prior to
@@ -3101,8 +3102,7 @@ impl<T> [T] {
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
- /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average.
- /// The worst-case performance is *O*(*n* log *n*).
+ /// position `index`), in-place (i.e. does not allocate), and runs in *O*(*n*) time.
/// This function is also known as "kth element" in other libraries.
///
/// It returns a triplet of the following from
@@ -3113,8 +3113,9 @@ impl<T> [T] {
///
/// # Current implementation
///
- /// The current algorithm is based on the quickselect portion of the same quicksort algorithm
- /// used for [`sort_unstable`].
+ /// The current algorithm is an introselect implementation based on Pattern Defeating Quicksort, which is also
+ /// the basis for [`sort_unstable`]. The fallback algorithm is Median of Medians using Tukey's Ninther for
+ /// pivot selection, which guarantees linear runtime for all inputs.
///
/// [`sort_unstable`]: slice::sort_unstable
///
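
The linear-time guarantee is observable through the stable API; a quick check of the partition property (values arbitrary):

    fn main() {
        let mut v = [9, 4, 7, 1, 3];
        let (lesser, median, greater) = v.select_nth_unstable(2);
        // Everything left of index 2 is <= the median, everything right is >=.
        assert_eq!(*median, 4);
        assert!(lesser.iter().all(|&x| x <= 4));
        assert!(greater.iter().all(|&x| x >= 4));
    }
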
diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs
index 052fd34d0..48a6eb03b 100644
--- a/library/core/src/slice/raw.rs
+++ b/library/core/src/slice/raw.rs
@@ -32,7 +32,8 @@ use crate::ptr;
/// * The memory referenced by the returned slice must not be mutated for the duration
/// of lifetime `'a`, except inside an `UnsafeCell`.
///
-/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
+/// and adding that size to `data` must not "wrap around" the address space.
/// See the safety documentation of [`pointer::offset`].
///
/// # Caveat
@@ -125,7 +126,8 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
/// (not derived from the return value) for the duration of lifetime `'a`.
/// Both read and write accesses are forbidden.
///
-/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`,
+/// and adding that size to `data` must not "wrap around" the address space.
/// See the safety documentation of [`pointer::offset`].
///
/// [valid]: ptr#safety
@@ -179,15 +181,16 @@ pub const fn from_mut<T>(s: &mut T) -> &mut [T] {
/// the last element, such that the offset from the end to the start pointer is
/// the length of the slice.
///
-/// * The range must contain `N` consecutive properly initialized values of type `T`:
+/// * The entire memory range of this slice must be contained within a single allocated object!
+/// Slices can never span across multiple allocated objects.
///
-/// * The entire memory range of this slice must be contained within a single allocated object!
-/// Slices can never span across multiple allocated objects.
+/// * The range must contain `N` consecutive properly initialized values of type `T`.
///
/// * The memory referenced by the returned slice must not be mutated for the duration
/// of lifetime `'a`, except inside an `UnsafeCell`.
///
-/// * The total length of the range must be no larger than `isize::MAX`.
+/// * The total length of the range must be no larger than `isize::MAX`,
+/// and adding that size to `data` must not "wrap around" the address space.
/// See the safety documentation of [`pointer::offset`].
///
/// Note that a range created from [`slice::as_ptr_range`] fulfills these requirements.
@@ -247,16 +250,17 @@ pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] {
/// the last element, such that the offset from the end to the start pointer is
/// the length of the slice.
///
-/// * The range must contain `N` consecutive properly initialized values of type `T`:
+/// * The entire memory range of this slice must be contained within a single allocated object!
+/// Slices can never span across multiple allocated objects.
///
-/// * The entire memory range of this slice must be contained within a single allocated object!
-/// Slices can never span across multiple allocated objects.
+/// * The range must contain `N` consecutive properly initialized values of type `T`.
///
/// * The memory referenced by the returned slice must not be accessed through any other pointer
/// (not derived from the return value) for the duration of lifetime `'a`.
/// Both read and write accesses are forbidden.
///
-/// * The total length of the range must be no larger than `isize::MAX`.
+/// * The total length of the range must be no larger than `isize::MAX`,
+/// and adding that size to `data` must not "wrap around" the address space.
/// See the safety documentation of [`pointer::offset`].
///
/// Note that a range created from [`slice::as_mut_ptr_range`] fulfills these requirements.
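
A call that satisfies every bullet, including the new no-wrap-around clause, by construction (illustrative; `slice_from_ptr_range` is still unstable here):

    #![feature(slice_from_ptr_range)]

    fn main() {
        let a = [1u32, 2, 3];
        // SAFETY: the range comes from `as_ptr_range` on a live slice,
        // which the docs above note fulfills all of these requirements.
        let s = unsafe { core::slice::from_ptr_range(a.as_ptr_range()) };
        assert_eq!(s, &[1, 2, 3]);
    }
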
diff --git a/library/core/src/str/converts.rs b/library/core/src/str/converts.rs
index 5f8748206..0f23cf7ae 100644
--- a/library/core/src/str/converts.rs
+++ b/library/core/src/str/converts.rs
@@ -84,6 +84,7 @@ use super::Utf8Error;
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_str_from_utf8_shared", since = "1.63.0")]
#[rustc_allow_const_fn_unstable(str_internals)]
+#[rustc_diagnostic_item = "str_from_utf8"]
pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
// FIXME: This should use `?` again, once it's `const`
match run_utf8_validation(v) {
@@ -127,6 +128,7 @@ pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
/// errors that can be returned.
#[stable(feature = "str_mut_extras", since = "1.20.0")]
#[rustc_const_unstable(feature = "const_str_from_utf8", issue = "91006")]
+#[rustc_diagnostic_item = "str_from_utf8_mut"]
pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
// This should use `?` again, once it's `const`
match run_utf8_validation(v) {
@@ -167,6 +169,7 @@ pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_str_from_utf8_unchecked", since = "1.55.0")]
+#[rustc_diagnostic_item = "str_from_utf8_unchecked"]
pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
// SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8.
// Also relies on `&str` and `&[u8]` having the same layout.
@@ -194,6 +197,7 @@ pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
#[must_use]
#[stable(feature = "str_mut_extras", since = "1.20.0")]
#[rustc_const_unstable(feature = "const_str_from_utf8_unchecked_mut", issue = "91005")]
+#[rustc_diagnostic_item = "str_from_utf8_unchecked_mut"]
pub const unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
// SAFETY: the caller must guarantee that the bytes `v`
// are valid UTF-8, thus the cast to `*mut str` is safe.
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index ef05b25fd..9a93bb729 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -144,8 +144,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let len = "foo".len();
/// assert_eq!(3, len);
@@ -165,8 +163,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = "";
/// assert!(s.is_empty());
@@ -311,8 +307,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let bytes = "bors".as_bytes();
/// assert_eq!(b"bors", bytes);
@@ -387,8 +381,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = "Hello";
/// let ptr = s.as_ptr();
@@ -570,8 +562,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = "Löwe 老虎 Léopard";
///
@@ -649,8 +639,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = "Per Martin-Löf";
///
@@ -691,8 +679,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut s = "Per Martin-Löf".to_string();
/// {
@@ -840,8 +826,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let mut bytes = "bors".bytes();
///
@@ -1020,8 +1004,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let text = "Zażółć gęślą jaźń";
///
@@ -1050,8 +1032,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let bananas = "bananas";
///
@@ -1077,8 +1057,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let bananas = "bananas";
///
@@ -1103,8 +1081,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let bananas = "bananas";
///
@@ -1463,8 +1439,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let v: Vec<&str> = "A.B.".split_terminator('.').collect();
/// assert_eq!(v, ["A", "B"]);
@@ -1696,8 +1670,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let v: Vec<&str> = "abcXXXabcYYYabc".matches("abc").collect();
/// assert_eq!(v, ["abc", "abc", "abc"]);
@@ -1732,8 +1704,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let v: Vec<&str> = "abcXXXabcYYYabc".rmatches("abc").collect();
/// assert_eq!(v, ["abc", "abc", "abc"]);
@@ -1775,8 +1745,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let v: Vec<_> = "abcXXXabcYYYabc".match_indices("abc").collect();
/// assert_eq!(v, [(0, "abc"), (6, "abc"), (12, "abc")]);
@@ -1817,8 +1785,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let v: Vec<_> = "abcXXXabcYYYabc".rmatch_indices("abc").collect();
/// assert_eq!(v, [(12, "abc"), (6, "abc"), (0, "abc")]);
@@ -1845,8 +1811,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// let s = "\n Hello\tworld\t\n";
///
@@ -2085,8 +2049,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// assert_eq!("11foo1bar11".trim_start_matches('1'), "foo1bar11");
/// assert_eq!("123foo1bar123".trim_start_matches(char::is_numeric), "foo1bar123");
@@ -2232,8 +2194,6 @@ impl str {
///
/// # Examples
///
- /// Basic usage:
- ///
/// ```
/// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11");
/// assert_eq!("123foo1bar123".trim_left_matches(char::is_numeric), "foo1bar123");
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
index 7043ab5ff..b63fd5c90 100644
--- a/library/core/src/task/wake.rs
+++ b/library/core/src/task/wake.rs
@@ -2,6 +2,7 @@
use crate::fmt;
use crate::marker::{PhantomData, Unpin};
+use crate::ptr;
/// A `RawWaker` allows the implementor of a task executor to create a [`Waker`]
/// which provides customized wakeup behavior.
@@ -322,6 +323,45 @@ impl Waker {
Waker { waker }
}
+ /// Creates a new `Waker` that does nothing when `wake` is called.
+ ///
+ /// This is mostly useful for writing tests that need a [`Context`] to poll
+ /// some futures, but are not expecting those futures to wake the waker or
+ /// do not need to do anything specific if it happens.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(noop_waker)]
+ ///
+ /// use std::future::Future;
+ /// use std::task;
+ ///
+ /// let waker = task::Waker::noop();
+ /// let mut cx = task::Context::from_waker(&waker);
+ ///
+ /// let mut future = Box::pin(async { 10 });
+ /// assert_eq!(future.as_mut().poll(&mut cx), task::Poll::Ready(10));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "noop_waker", issue = "98286")]
+ pub const fn noop() -> Waker {
+ const VTABLE: RawWakerVTable = RawWakerVTable::new(
+ // Cloning just returns a new no-op raw waker
+ |_| RAW,
+ // `wake` does nothing
+ |_| {},
+ // `wake_by_ref` does nothing
+ |_| {},
+ // Dropping does nothing as we don't allocate anything
+ |_| {},
+ );
+ const RAW: RawWaker = RawWaker::new(ptr::null(), &VTABLE);
+
+ Waker { waker: RAW }
+ }
+
/// Get a reference to the underlying [`RawWaker`].
#[inline]
#[must_use]
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index a1388dfee..ac8d04a82 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -1,6 +1,9 @@
// See src/libstd/primitive_docs.rs for documentation.
use crate::cmp::Ordering::{self, *};
+#[cfg(not(bootstrap))]
+use crate::marker::ConstParamTy;
+use crate::marker::{StructuralEq, StructuralPartialEq};
// Recursive macro for implementing n-ary tuple functions and operations
//
@@ -47,6 +50,28 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ #[cfg(not(bootstrap))]
+ impl<$($T: ConstParamTy),+> ConstParamTy for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T),+> StructuralPartialEq for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
+ #[unstable(feature = "structural_match", issue = "31434")]
+ impl<$($T),+> StructuralEq for ($($T,)+)
+ {}
+ }
+
+ maybe_tuple_doc! {
+ $($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
impl<$($T: PartialOrd),+> PartialOrd for ($($T,)+)
where
diff --git a/library/core/src/unicode/printable.py b/library/core/src/unicode/printable.py
index 7c37f5f09..4d39ace06 100755
--- a/library/core/src/unicode/printable.py
+++ b/library/core/src/unicode/printable.py
@@ -119,7 +119,7 @@ def print_singletons(uppers, lowers, uppersname, lowersname):
print("#[rustfmt::skip]")
print("const {}: &[u8] = &[".format(lowersname))
for i in range(0, len(lowers), 8):
- print(" {}".format(" ".join("{:#04x},".format(l) for l in lowers[i:i+8])))
+ print(" {}".format(" ".join("{:#04x},".format(x) for x in lowers[i:i+8])))
print("];")
def print_normal(normal, normalname):
diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
index 0869644c0..982d7853f 100644
--- a/library/core/tests/array.rs
+++ b/library/core/tests/array.rs
@@ -257,14 +257,8 @@ fn iterator_drops() {
assert_eq!(i.get(), 5);
}
-// This test does not work on targets without panic=unwind support.
-// To work around this problem, test is marked is should_panic, so it will
-// be automagically skipped on unsuitable targets, such as
-// wasm32-unknown-unknown.
-//
-// It means that we use panic for indicating success.
-#[test]
-#[should_panic(expected = "test succeeded")]
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn array_default_impl_avoids_leaks_on_panic() {
use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
static COUNTER: AtomicUsize = AtomicUsize::new(0);
@@ -296,7 +290,6 @@ fn array_default_impl_avoids_leaks_on_panic() {
assert_eq!(*panic_msg, "bomb limit exceeded");
// check that all bombs are successfully dropped
assert_eq!(COUNTER.load(Relaxed), 0);
- panic!("test succeeded")
}
#[test]
@@ -317,9 +310,8 @@ fn array_map() {
assert_eq!(b, [1, 2, 3]);
}
-// See note on above test for why `should_panic` is used.
#[test]
-#[should_panic(expected = "test succeeded")]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn array_map_drop_safety() {
static DROPPED: AtomicUsize = AtomicUsize::new(0);
struct DropCounter;
@@ -341,7 +333,6 @@ fn array_map_drop_safety() {
});
assert!(success.is_err());
assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
- panic!("test succeeded")
}
#[test]
diff --git a/library/core/tests/clone.rs b/library/core/tests/clone.rs
index aafe5ced2..64193e115 100644
--- a/library/core/tests/clone.rs
+++ b/library/core/tests/clone.rs
@@ -1,5 +1,5 @@
#[test]
-#[cfg_attr(not(bootstrap), allow(suspicious_double_ref_op))]
+#[allow(suspicious_double_ref_op)]
fn test_borrowed_clone() {
let x = 5;
let y: &i32 = &x;
diff --git a/library/core/tests/future.rs b/library/core/tests/future.rs
index 74b6f74e4..db417256d 100644
--- a/library/core/tests/future.rs
+++ b/library/core/tests/future.rs
@@ -30,7 +30,6 @@ fn poll_n(val: usize, num: usize) -> PollN {
}
#[test]
-#[cfg_attr(miri, ignore)] // self-referential generators do not work with Miri's aliasing checks
fn test_join() {
block_on(async move {
let x = join!(async { 0 }).await;
diff --git a/library/core/tests/iter/adapters/step_by.rs b/library/core/tests/iter/adapters/step_by.rs
index 94f2fa8c2..4c5b1dd9a 100644
--- a/library/core/tests/iter/adapters/step_by.rs
+++ b/library/core/tests/iter/adapters/step_by.rs
@@ -244,3 +244,58 @@ fn test_step_by_skip() {
assert_eq!((0..=50).step_by(10).nth(3), Some(30));
assert_eq!((200..=255u8).step_by(10).nth(3), Some(230));
}
+
+
+struct DeOpt<I: Iterator>(I);
+
+impl<I: Iterator> Iterator for DeOpt<I> {
+ type Item = I::Item;
+
+ fn next(&mut self) -> core::option::Option<Self::Item> {
+ self.0.next()
+ }
+}
+
+impl<I: DoubleEndedIterator> DoubleEndedIterator for DeOpt<I> {
+ fn next_back(&mut self) -> core::option::Option<Self::Item> {
+ self.0.next_back()
+ }
+}
+
+#[test]
+fn test_step_by_fold_range_specialization() {
+ macro_rules! t {
+ ($range:expr, $var: ident, $body:tt) => {
+ {
+ // run the same tests for the non-optimized version
+ let mut $var = DeOpt($range);
+ $body
+ }
+ {
+ let mut $var = $range;
+ $body
+ }
+ }
+ }
+
+ t!((1usize..5).step_by(1), r, {
+ assert_eq!(r.next_back(), Some(4));
+ assert_eq!(r.sum::<usize>(), 6);
+ });
+
+ t!((0usize..4).step_by(2), r, {
+ assert_eq!(r.next(), Some(0));
+ assert_eq!(r.sum::<usize>(), 2);
+ });
+
+
+ t!((0usize..5).step_by(2), r, {
+ assert_eq!(r.next(), Some(0));
+ assert_eq!(r.sum::<usize>(), 6);
+ });
+
+ t!((usize::MAX - 6 .. usize::MAX).step_by(5), r, {
+ assert_eq!(r.next(), Some(usize::MAX - 6));
+ assert_eq!(r.sum::<usize>(), usize::MAX - 1);
+ });
+}
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 3933e3289..3e6d31fcd 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -109,7 +109,7 @@
#![feature(utf8_chunks)]
#![feature(is_ascii_octdigit)]
#![feature(get_many_mut)]
-#![cfg_attr(not(bootstrap), feature(offset_of))]
+#![feature(offset_of)]
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
diff --git a/library/core/tests/manually_drop.rs b/library/core/tests/manually_drop.rs
index 9eac27973..22d72d219 100644
--- a/library/core/tests/manually_drop.rs
+++ b/library/core/tests/manually_drop.rs
@@ -1,3 +1,5 @@
+#![cfg_attr(not(bootstrap), allow(undropped_manually_drops))]
+
use core::mem::ManuallyDrop;
#[test]
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index aee9c89b5..5c2e18745 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -366,7 +366,6 @@ fn const_maybe_uninit() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of() {
#[repr(C)]
struct Foo {
@@ -391,7 +390,7 @@ fn offset_of() {
struct Generic<T> {
x: u8,
y: u32,
- z: T
+ z: T,
}
trait Trait {}
@@ -409,7 +408,6 @@ fn offset_of() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of_union() {
#[repr(C)]
union Foo {
@@ -429,7 +427,6 @@ fn offset_of_union() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of_dst() {
#[repr(C)]
struct Alpha {
@@ -469,7 +466,6 @@ fn offset_of_dst() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of_packed() {
#[repr(C, packed)]
struct Foo {
@@ -482,7 +478,6 @@ fn offset_of_packed() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of_projection() {
#[repr(C)]
struct Foo {
@@ -503,7 +498,6 @@ fn offset_of_projection() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of_alias() {
#[repr(C)]
struct Foo {
@@ -518,7 +512,6 @@ fn offset_of_alias() {
}
#[test]
-#[cfg(not(bootstrap))]
fn const_offset_of() {
#[repr(C)]
struct Foo {
@@ -534,7 +527,6 @@ fn const_offset_of() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of_without_const_promotion() {
#[repr(C)]
struct Foo<SuppressConstPromotion> {
@@ -555,7 +547,6 @@ fn offset_of_without_const_promotion() {
}
#[test]
-#[cfg(not(bootstrap))]
fn offset_of_addr() {
#[repr(C)]
struct Foo {
diff --git a/library/core/tests/net/ip_addr.rs b/library/core/tests/net/ip_addr.rs
index 5a6ac08c0..7530fc084 100644
--- a/library/core/tests/net/ip_addr.rs
+++ b/library/core/tests/net/ip_addr.rs
@@ -139,7 +139,7 @@ fn ipv6_addr_to_string() {
// ipv4-compatible address
let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280);
- assert_eq!(a1.to_string(), "::192.0.2.128");
+ assert_eq!(a1.to_string(), "::c000:280");
// v6 address with no zero segments
assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f");
@@ -316,7 +316,7 @@ fn ip_properties() {
check!("::", unspec);
check!("::1", loopback);
- check!("::0.0.0.2", global);
+ check!("::2", global);
check!("1::", global);
check!("fc00::");
check!("fdff:ffff::");
@@ -607,7 +607,7 @@ fn ipv6_properties() {
check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback);
- check!("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global);
+ check!("::2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global);
check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global);
diff --git a/library/core/tests/net/socket_addr.rs b/library/core/tests/net/socket_addr.rs
index 68c7cd94d..35a69cead 100644
--- a/library/core/tests/net/socket_addr.rs
+++ b/library/core/tests/net/socket_addr.rs
@@ -34,7 +34,7 @@ fn ipv6_socket_addr_to_string() {
// IPv4-compatible address.
assert_eq!(
SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280), 8080, 0, 0).to_string(),
- "[::192.0.2.128]:8080"
+ "[::c000:280]:8080"
);
// IPv6 address with no zero segments.
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index c02cd99cc..ee885adfe 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -1001,7 +1001,7 @@ fn nonnull_tagged_pointer_with_provenance() {
assert_eq!(p.tag(), 3);
assert_eq!(unsafe { *p.pointer().as_ptr() }, 10);
- unsafe { Box::from_raw(p.pointer().as_ptr()) };
+ unsafe { drop(Box::from_raw(p.pointer().as_ptr())) };
/// A non-null pointer type which carries several bits of metadata and maintains provenance.
#[repr(transparent)]