author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
commit     218caa410aa38c29984be31a5229b9fa717560ee (patch)
tree       c54bd55eeb6e4c508940a30e94c0032fbd45d677 /library/core
parent     Releasing progress-linux version 1.67.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.68.2+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core')
-rw-r--r--  library/core/Cargo.toml | 4
-rw-r--r--  library/core/benches/num/int_log/mod.rs | 4
-rw-r--r--  library/core/src/any.rs | 5
-rw-r--r--  library/core/src/array/iter.rs | 6
-rw-r--r--  library/core/src/array/mod.rs | 4
-rw-r--r--  library/core/src/borrow.rs | 2
-rw-r--r--  library/core/src/cell.rs | 35
-rw-r--r--  library/core/src/cell/lazy.rs | 8
-rw-r--r--  library/core/src/cell/once.rs | 13
-rw-r--r--  library/core/src/char/decode.rs | 2
-rw-r--r--  library/core/src/cmp.rs | 82
-rw-r--r--  library/core/src/const_closure.rs | 29
-rw-r--r--  library/core/src/convert/num.rs | 20
-rw-r--r--  library/core/src/ffi/mod.rs | 55
-rw-r--r--  library/core/src/fmt/mod.rs | 13
-rw-r--r--  library/core/src/future/future.rs | 1
-rw-r--r--  library/core/src/future/mod.rs | 9
-rw-r--r--  library/core/src/hash/mod.rs | 4
-rw-r--r--  library/core/src/hint.rs | 69
-rw-r--r--  library/core/src/intrinsics.rs | 69
-rw-r--r--  library/core/src/intrinsics/mir.rs | 302
-rw-r--r--  library/core/src/iter/range.rs | 2
-rw-r--r--  library/core/src/iter/sources/empty.rs | 7
-rw-r--r--  library/core/src/iter/sources/from_generator.rs | 23
-rw-r--r--  library/core/src/iter/sources/once_with.rs | 14
-rw-r--r--  library/core/src/iter/sources/repeat_n.rs | 2
-rw-r--r--  library/core/src/iter/sources/repeat_with.rs | 10
-rw-r--r--  library/core/src/iter/traits/accum.rs | 8
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 43
-rw-r--r--  library/core/src/lib.rs | 30
-rw-r--r--  library/core/src/macros/mod.rs | 46
-rw-r--r--  library/core/src/marker.rs | 20
-rw-r--r--  library/core/src/mem/mod.rs | 3
-rw-r--r--  library/core/src/num/dec2flt/fpu.rs | 2
-rw-r--r--  library/core/src/num/f32.rs | 2
-rw-r--r--  library/core/src/num/f64.rs | 2
-rw-r--r--  library/core/src/num/int_macros.rs | 111
-rw-r--r--  library/core/src/ops/function.rs | 319
-rw-r--r--  library/core/src/ops/index.rs | 2
-rw-r--r--  library/core/src/ops/mod.rs | 10
-rw-r--r--  library/core/src/ops/unsize.rs | 20
-rw-r--r--  library/core/src/option.rs | 57
-rw-r--r--  library/core/src/panic.rs | 8
-rw-r--r--  library/core/src/panic/panic_info.rs | 2
-rw-r--r--  library/core/src/panicking.rs | 39
-rw-r--r--  library/core/src/pin.rs | 50
-rw-r--r--  library/core/src/prelude/mod.rs | 8
-rw-r--r--  library/core/src/prelude/v1.rs | 9
-rw-r--r--  library/core/src/ptr/alignment.rs | 12
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 110
-rw-r--r--  library/core/src/ptr/metadata.rs | 2
-rw-r--r--  library/core/src/ptr/mod.rs | 54
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 110
-rw-r--r--  library/core/src/ptr/non_null.rs | 17
-rw-r--r--  library/core/src/result.rs | 7
-rw-r--r--  library/core/src/slice/iter.rs | 12
-rw-r--r--  library/core/src/slice/iter/macros.rs | 22
-rw-r--r--  library/core/src/slice/mod.rs | 61
-rw-r--r--  library/core/src/slice/sort.rs | 544
-rw-r--r--  library/core/src/str/iter.rs | 185
-rw-r--r--  library/core/src/str/mod.rs | 8
-rw-r--r--  library/core/src/sync/atomic.rs | 40
-rw-r--r--  library/core/src/sync/exclusive.rs | 4
-rw-r--r--  library/core/src/task/poll.rs | 2
-rw-r--r--  library/core/src/task/wake.rs | 10
-rw-r--r--  library/core/src/unicode/mod.rs | 2
-rw-r--r--  library/core/tests/any.rs | 1
-rw-r--r--  library/core/tests/char.rs | 4
-rw-r--r--  library/core/tests/lazy.rs | 6
-rw-r--r--  library/core/tests/lib.rs | 14
-rw-r--r--  library/core/tests/mem.rs | 1
-rw-r--r--  library/core/tests/num/flt2dec/random.rs | 6
-rw-r--r--  library/core/tests/ptr.rs | 20
-rw-r--r--  library/core/tests/slice.rs | 11
-rw-r--r--  library/core/tests/str.rs | 2
-rw-r--r--  library/core/tests/time.rs | 26
77 files changed, 1846 insertions, 1040 deletions
diff --git a/library/core/Cargo.toml b/library/core/Cargo.toml
index 2a7df9556..3dc8c84e0 100644
--- a/library/core/Cargo.toml
+++ b/library/core/Cargo.toml
@@ -24,8 +24,8 @@ path = "benches/lib.rs"
test = true
[dev-dependencies]
-rand = "0.7"
-rand_xorshift = "0.2"
+rand = { version = "0.8.5", default-features = false }
+rand_xorshift = { version = "0.3.0", default-features = false }
[features]
# Make panics and failed asserts immediately abort without formatting any message
diff --git a/library/core/benches/num/int_log/mod.rs b/library/core/benches/num/int_log/mod.rs
index 3c01e2998..bb61224b5 100644
--- a/library/core/benches/num/int_log/mod.rs
+++ b/library/core/benches/num/int_log/mod.rs
@@ -21,7 +21,7 @@ macro_rules! int_log_bench {
/* Exponentially distributed random numbers from the whole range of the type. */
let numbers: Vec<$t> = (0..256)
.map(|_| {
- let x = rng.gen::<$t>() >> rng.gen_range(0, <$t>::BITS);
+ let x = rng.gen::<$t>() >> rng.gen_range(0..<$t>::BITS);
if x != 0 { x } else { 1 }
})
.collect();
@@ -38,7 +38,7 @@ macro_rules! int_log_bench {
/* Exponentially distributed random numbers from the range 0..256. */
let numbers: Vec<$t> = (0..256)
.map(|_| {
- let x = (rng.gen::<u8>() >> rng.gen_range(0, u8::BITS)) as $t;
+ let x = (rng.gen::<u8>() >> rng.gen_range(0..u8::BITS)) as $t;
if x != 0 { x } else { 1 }
})
.collect();
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 1a379ecc1..c0fb0d993 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -148,7 +148,7 @@
//! ```
//!
//! In this example, if the concrete type of `obj` in `use_my_trait` is `SomeConcreteType`, then
-//! the `get_context_ref` call will return a reference to `obj.some_string` with type `&String`.
+//! the `get_context_by_ref` call will return a reference to `obj.some_string` with type `&String`.
#![stable(feature = "rust1", since = "1.0.0")]
@@ -662,7 +662,8 @@ impl dyn Any + Send + Sync {
/// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth
/// noting that the hashes and ordering will vary between Rust releases. Beware
/// of relying on them inside of your code!
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+#[derive(Clone, Copy, Debug, Hash, Eq)]
+#[derive_const(PartialEq, PartialOrd, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TypeId {
t: u64,
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index b91c63018..8259c087d 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -109,8 +109,8 @@ impl<T, const N: usize> IntoIter<T, N> {
/// use std::array::IntoIter;
/// use std::mem::MaybeUninit;
///
- /// # // Hi! Thanks for reading the code. This is restricted to `Copy` because
- /// # // otherwise it could leak. A fully-general version this would need a drop
+ /// # // Hi! Thanks for reading the code. This is restricted to `Copy` because
+ /// # // otherwise it could leak. A fully-general version this would need a drop
/// # // guard to handle panics from the iterator, but this works for an example.
/// fn next_chunk<T: Copy, const N: usize>(
/// it: &mut impl Iterator<Item = T>,
@@ -211,7 +211,7 @@ impl<T, const N: usize> IntoIter<T, N> {
let initialized = 0..0;
// SAFETY: We're telling it that none of the elements are initialized,
- // which is trivially true. And ∀N: usize, 0 <= N.
+ // which is trivially true. And ∀N: usize, 0 <= N.
unsafe { Self::new_unchecked(buffer, initialized) }
}
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index 94a1a1d32..2825e0bbb 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -69,7 +69,7 @@ where
/// if any element creation was unsuccessful.
///
/// The return type of this function depends on the return type of the closure.
-/// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N]; E>`.
+/// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N], E>`.
/// If you return `Option<T>` from the closure, you'll get an `Option<[T; N]>`.
///
/// # Arguments
@@ -522,7 +522,7 @@ impl<T, const N: usize> [T; N] {
/// return an array the same size as `self` or the first error encountered.
///
/// The return type of this function depends on the return type of the closure.
- /// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N]; E>`.
+ /// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N], E>`.
/// If you return `Option<T>` from the closure, you'll get an `Option<[T; N]>`.
///
/// # Examples
diff --git a/library/core/src/borrow.rs b/library/core/src/borrow.rs
index fdd56cb4e..4a8302ee4 100644
--- a/library/core/src/borrow.rs
+++ b/library/core/src/borrow.rs
@@ -26,7 +26,7 @@
/// to be modified, it can additionally implement [`BorrowMut<T>`].
///
/// Further, when providing implementations for additional traits, it needs
-/// to be considered whether they should behave identical to those of the
+/// to be considered whether they should behave identically to those of the
/// underlying type as a consequence of acting as a representation of that
/// underlying type. Generic code typically uses `Borrow<T>` when it relies
/// on the identical behavior of these additional trait implementations.
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 47cce2aa3..129213fde 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -568,7 +568,7 @@ impl<T: Default> Cell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<Cell<U>> for Cell<T> {}
impl<T> Cell<[T]> {
@@ -807,7 +807,8 @@ impl<T> RefCell<T> {
///
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently borrowed, or
+ /// if `self` and `other` point to the same `RefCell`.
///
/// # Examples
///
@@ -1193,7 +1194,7 @@ impl<T: Default> Default for RefCell<T> {
impl<T: ?Sized + PartialEq> PartialEq for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn eq(&self, other: &RefCell<T>) -> bool {
*self.borrow() == *other.borrow()
@@ -1207,7 +1208,7 @@ impl<T: ?Sized + Eq> Eq for RefCell<T> {}
impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn partial_cmp(&self, other: &RefCell<T>) -> Option<Ordering> {
self.borrow().partial_cmp(&*other.borrow())
@@ -1215,7 +1216,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn lt(&self, other: &RefCell<T>) -> bool {
*self.borrow() < *other.borrow()
@@ -1223,7 +1224,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn le(&self, other: &RefCell<T>) -> bool {
*self.borrow() <= *other.borrow()
@@ -1231,7 +1232,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn gt(&self, other: &RefCell<T>) -> bool {
*self.borrow() > *other.borrow()
@@ -1239,7 +1240,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn ge(&self, other: &RefCell<T>) -> bool {
*self.borrow() >= *other.borrow()
@@ -1250,7 +1251,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
impl<T: ?Sized + Ord> Ord for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn cmp(&self, other: &RefCell<T>) -> Ordering {
self.borrow().cmp(&*other.borrow())
@@ -1266,7 +1267,7 @@ impl<T> const From<T> for RefCell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<RefCell<U>> for RefCell<T> {}
struct BorrowRef<'b> {
@@ -1492,7 +1493,7 @@ impl<'b, T: ?Sized> Ref<'b, T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Ref<'b, U>> for Ref<'b, T> {}
#[stable(feature = "std_guard_impls", since = "1.20.0")]
@@ -1738,7 +1739,7 @@ impl<T: ?Sized> DerefMut for RefMut<'_, T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<RefMut<'b, U>> for RefMut<'b, T> {}
#[stable(feature = "std_guard_impls", since = "1.20.0")]
@@ -1783,7 +1784,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
/// until the reference expires. As a special exception, given an `&T`, any part of it that is
/// inside an `UnsafeCell<_>` may be deallocated during the lifetime of the reference, after the
/// last time the reference is used (dereferenced or reborrowed). Since you cannot deallocate a part
-/// of what a reference points to, this means the memory an `&T` points to can be deallocted only if
+/// of what a reference points to, this means the memory an `&T` points to can be deallocated only if
/// *every part of it* (including padding) is inside an `UnsafeCell`.
///
/// However, whenever a `&UnsafeCell<T>` is constructed or dereferenced, it must still point to
@@ -1993,7 +1994,7 @@ impl<T: ?Sized> UnsafeCell<T> {
#[rustc_const_stable(feature = "const_unsafecell_get", since = "1.32.0")]
pub const fn get(&self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
- // #[repr(transparent)]. This exploits libstd's special status, there is
+ // #[repr(transparent)]. This exploits std's special status, there is
// no guarantee for user code that this will work in future versions of the compiler!
self as *const UnsafeCell<T> as *const T as *mut T
}
@@ -2051,7 +2052,7 @@ impl<T: ?Sized> UnsafeCell<T> {
#[rustc_const_stable(feature = "unsafe_cell_raw_get", since = "1.56.0")]
pub const fn raw_get(this: *const Self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
- // #[repr(transparent)]. This exploits libstd's special status, there is
+ // #[repr(transparent)]. This exploits std's special status, there is
// no guarantee for user code that this will work in future versions of the compiler!
this as *const T as *mut T
}
@@ -2074,7 +2075,7 @@ impl<T> const From<T> for UnsafeCell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<UnsafeCell<U>> for UnsafeCell<T> {}
/// [`UnsafeCell`], but [`Sync`].
@@ -2164,7 +2165,7 @@ impl<T> const From<T> for SyncUnsafeCell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
//#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<SyncUnsafeCell<U>> for SyncUnsafeCell<T> {}
diff --git a/library/core/src/cell/lazy.rs b/library/core/src/cell/lazy.rs
index b355d94ce..65d12c25c 100644
--- a/library/core/src/cell/lazy.rs
+++ b/library/core/src/cell/lazy.rs
@@ -35,7 +35,7 @@ pub struct LazyCell<T, F = fn() -> T> {
init: Cell<Option<F>>,
}
-impl<T, F> LazyCell<T, F> {
+impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// Creates a new lazy value with the given initializing function.
///
/// # Examples
@@ -51,13 +51,12 @@ impl<T, F> LazyCell<T, F> {
///
/// assert_eq!(&*lazy, "HELLO, WORLD!");
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub const fn new(init: F) -> LazyCell<T, F> {
LazyCell { cell: OnceCell::new(), init: Cell::new(Some(init)) }
}
-}
-impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// Forces the evaluation of this lazy value and returns a reference to
/// the result.
///
@@ -75,6 +74,7 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// assert_eq!(LazyCell::force(&lazy), &92);
/// assert_eq!(&*lazy, &92);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn force(this: &LazyCell<T, F>) -> &T {
this.cell.get_or_init(|| match this.init.take() {
@@ -87,6 +87,7 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T, F: FnOnce() -> T> Deref for LazyCell<T, F> {
type Target = T;
+ #[inline]
fn deref(&self) -> &T {
LazyCell::force(self)
}
@@ -95,6 +96,7 @@ impl<T, F: FnOnce() -> T> Deref for LazyCell<T, F> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: Default> Default for LazyCell<T> {
/// Creates a new lazy value using `Default` as the initializing function.
+ #[inline]
fn default() -> LazyCell<T> {
LazyCell::new(T::default)
}
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
index 8c01643c7..7757068a4 100644
--- a/library/core/src/cell/once.rs
+++ b/library/core/src/cell/once.rs
@@ -37,8 +37,9 @@ pub struct OnceCell<T> {
impl<T> OnceCell<T> {
/// Creates a new empty cell.
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[inline]
#[must_use]
+ #[unstable(feature = "once_cell", issue = "74465")]
pub const fn new() -> OnceCell<T> {
OnceCell { inner: UnsafeCell::new(None) }
}
@@ -46,6 +47,7 @@ impl<T> OnceCell<T> {
/// Gets the reference to the underlying value.
///
/// Returns `None` if the cell is empty.
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get(&self) -> Option<&T> {
// SAFETY: Safe due to `inner`'s invariant
@@ -55,6 +57,7 @@ impl<T> OnceCell<T> {
/// Gets the mutable reference to the underlying value.
///
/// Returns `None` if the cell is empty.
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get_mut(&mut self) -> Option<&mut T> {
self.inner.get_mut().as_mut()
@@ -82,6 +85,7 @@ impl<T> OnceCell<T> {
///
/// assert!(cell.get().is_some());
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn set(&self, value: T) -> Result<(), T> {
// SAFETY: Safe because we cannot have overlapping mutable borrows
@@ -123,6 +127,7 @@ impl<T> OnceCell<T> {
/// let value = cell.get_or_init(|| unreachable!());
/// assert_eq!(value, &92);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get_or_init<F>(&self, f: F) -> &T
where
@@ -205,6 +210,7 @@ impl<T> OnceCell<T> {
/// cell.set("hello".to_string()).unwrap();
/// assert_eq!(cell.into_inner(), Some("hello".to_string()));
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn into_inner(self) -> Option<T> {
// Because `into_inner` takes `self` by value, the compiler statically verifies
@@ -233,6 +239,7 @@ impl<T> OnceCell<T> {
/// assert_eq!(cell.take(), Some("hello".to_string()));
/// assert_eq!(cell.get(), None);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn take(&mut self) -> Option<T> {
mem::take(self).into_inner()
@@ -241,6 +248,7 @@ impl<T> OnceCell<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T> Default for OnceCell<T> {
+ #[inline]
fn default() -> Self {
Self::new()
}
@@ -258,6 +266,7 @@ impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: Clone> Clone for OnceCell<T> {
+ #[inline]
fn clone(&self) -> OnceCell<T> {
let res = OnceCell::new();
if let Some(value) = self.get() {
@@ -272,6 +281,7 @@ impl<T: Clone> Clone for OnceCell<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: PartialEq> PartialEq for OnceCell<T> {
+ #[inline]
fn eq(&self, other: &Self) -> bool {
self.get() == other.get()
}
@@ -283,6 +293,7 @@ impl<T: Eq> Eq for OnceCell<T> {}
#[unstable(feature = "once_cell", issue = "74465")]
impl<T> const From<T> for OnceCell<T> {
/// Creates a new `OnceCell<T>` which already contains the given `value`.
+ #[inline]
fn from(value: T) -> Self {
OnceCell { inner: UnsafeCell::new(Some(value)) }
}
diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs
index 11f1c30f6..eeb088030 100644
--- a/library/core/src/char/decode.rs
+++ b/library/core/src/char/decode.rs
@@ -67,7 +67,7 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> {
}
// all ok, so lets decode it.
- let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000;
+ let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000;
// SAFETY: we checked that it's a legal unicode value
Some(Ok(unsafe { from_u32_unchecked(c) }))
}
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index 949896e57..a7d6fec7d 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -24,8 +24,6 @@
use crate::const_closure::ConstFnMutClosure;
use crate::marker::Destruct;
-#[cfg(bootstrap)]
-use crate::marker::StructuralPartialEq;
use self::Ordering::*;
@@ -333,7 +331,7 @@ pub struct AssertParamIsEq<T: Eq + ?Sized> {
/// assert_eq!(Ordering::Greater, result);
/// ```
#[derive(Clone, Copy, Eq, Debug, Hash)]
-#[cfg_attr(not(bootstrap), derive_const(PartialOrd, Ord, PartialEq))]
+#[derive_const(PartialOrd, Ord, PartialEq)]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(i8)]
pub enum Ordering {
@@ -800,9 +798,12 @@ pub trait Ord: Eq + PartialOrd<Self> {
Self: Sized,
Self: ~const Destruct,
{
- // HACK(fee1-dead): go back to using `self.max_by(other, Ord::cmp)`
- // when trait methods are allowed to be used when a const closure is
- // expected.
+ #[cfg(not(bootstrap))]
+ {
+ max_by(self, other, Ord::cmp)
+ }
+
+ #[cfg(bootstrap)]
match self.cmp(&other) {
Ordering::Less | Ordering::Equal => other,
Ordering::Greater => self,
@@ -827,9 +828,12 @@ pub trait Ord: Eq + PartialOrd<Self> {
Self: Sized,
Self: ~const Destruct,
{
- // HACK(fee1-dead): go back to using `self.min_by(other, Ord::cmp)`
- // when trait methods are allowed to be used when a const closure is
- // expected.
+ #[cfg(not(bootstrap))]
+ {
+ min_by(self, other, Ord::cmp)
+ }
+
+ #[cfg(bootstrap)]
match self.cmp(&other) {
Ordering::Less | Ordering::Equal => self,
Ordering::Greater => other,
@@ -879,40 +883,6 @@ pub macro Ord($item:item) {
/* compiler built-in */
}
-#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg(bootstrap)]
-impl StructuralPartialEq for Ordering {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
-#[cfg(bootstrap)]
-impl const PartialEq for Ordering {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- (*self as i32).eq(&(*other as i32))
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
-#[cfg(bootstrap)]
-impl const Ord for Ordering {
- #[inline]
- fn cmp(&self, other: &Ordering) -> Ordering {
- (*self as i32).cmp(&(*other as i32))
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
-#[cfg(bootstrap)]
-impl const PartialOrd for Ordering {
- #[inline]
- fn partial_cmp(&self, other: &Ordering) -> Option<Ordering> {
- (*self as i32).partial_cmp(&(*other as i32))
- }
-}
-
/// Trait for types that form a [partial order](https://en.wikipedia.org/wiki/Partial_order).
///
/// The `lt`, `le`, `gt`, and `ge` methods of this trait can be called using
@@ -1264,17 +1234,23 @@ where
F: ~const Destruct,
K: ~const Destruct,
{
- const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(
- f: &mut F,
- (v1, v2): (&T, &T),
- ) -> Ordering
- where
- T: ~const Destruct,
- K: ~const Destruct,
- {
- f(v1).cmp(&f(v2))
+ cfg_if! {
+ if #[cfg(bootstrap)] {
+ const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(
+ f: &mut F,
+ (v1, v2): (&T, &T),
+ ) -> Ordering
+ where
+ T: ~const Destruct,
+ K: ~const Destruct,
+ {
+ f(v1).cmp(&f(v2))
+ }
+ min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp))
+ } else {
+ min_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2)))
+ }
}
- min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp))
}
/// Compares and returns the maximum of two values.
diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs
index 151c8e6d8..97900a486 100644
--- a/library/core/src/const_closure.rs
+++ b/library/core/src/const_closure.rs
@@ -1,5 +1,4 @@
use crate::marker::Destruct;
-#[cfg(not(bootstrap))]
use crate::marker::Tuple;
/// Struct representing a closure with mutably borrowed data.
@@ -46,33 +45,6 @@ impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData,
macro_rules! impl_fn_mut_tuple {
($($var:ident)*) => {
- #[cfg(bootstrap)]
- #[allow(unused_parens)]
- impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
- FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
- where
- Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct,
- {
- type Output = ClosureReturnValue;
-
- extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output {
- self.call_mut(args)
- }
- }
- #[cfg(bootstrap)]
- #[allow(unused_parens)]
- impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
- FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
- where
- Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue,
- {
- extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output {
- #[allow(non_snake_case)]
- let ($($var),*) = &mut self.data;
- (self.func)(($($var),*), args)
- }
- }
- #[cfg(not(bootstrap))]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
@@ -85,7 +57,6 @@ macro_rules! impl_fn_mut_tuple {
self.call_mut(args)
}
}
- #[cfg(not(bootstrap))]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index 9c0d7e9a1..4da7c3234 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -168,6 +168,26 @@ impl_from! { u32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0"
// Float -> Float
impl_from! { f32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+// bool -> Float
+#[stable(feature = "float_from_bool", since = "1.68.0")]
+#[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+impl const From<bool> for f32 {
+ /// Converts `bool` to `f32` losslessly.
+ #[inline]
+ fn from(small: bool) -> Self {
+ small as u8 as Self
+ }
+}
+#[stable(feature = "float_from_bool", since = "1.68.0")]
+#[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+impl const From<bool> for f64 {
+ /// Converts `bool` to `f64` losslessly.
+ #[inline]
+ fn from(small: bool) -> Self {
+ small as u8 as Self
+ }
+}
+
// no possible bounds violation
macro_rules! try_from_unbounded {
($source:ty, $($target:ty),*) => {$(
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index ec1eaa99f..76daceecd 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -227,7 +227,12 @@ impl fmt::Debug for c_void {
/// Basic implementation of a `va_list`.
// The name is WIP, using `VaListImpl` for now.
#[cfg(any(
- all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
+ not(target_arch = "x86_64")
+ ),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
target_family = "wasm",
target_arch = "asmjs",
@@ -251,7 +256,12 @@ pub struct VaListImpl<'f> {
}
#[cfg(any(
- all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
+ not(target_arch = "x86_64")
+ ),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
target_family = "wasm",
target_arch = "asmjs",
@@ -319,6 +329,25 @@ pub struct VaListImpl<'f> {
_marker: PhantomData<&'f mut &'f c_void>,
}
+/// s390x ABI implementation of a `va_list`.
+#[cfg(target_arch = "s390x")]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ gpr: i64,
+ fpr: i64,
+ overflow_arg_area: *mut c_void,
+ reg_save_area: *mut c_void,
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
/// x86_64 ABI implementation of a `va_list`.
#[cfg(all(target_arch = "x86_64", not(target_os = "uefi"), not(windows)))]
#[repr(C)]
@@ -352,6 +381,7 @@ pub struct VaList<'a, 'f: 'a> {
all(
not(target_arch = "aarch64"),
not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
not(target_arch = "x86_64")
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
@@ -363,7 +393,12 @@ pub struct VaList<'a, 'f: 'a> {
inner: VaListImpl<'f>,
#[cfg(all(
- any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(
+ target_arch = "aarch64",
+ target_arch = "powerpc",
+ target_arch = "s390x",
+ target_arch = "x86_64"
+ ),
any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
not(target_family = "wasm"),
not(target_arch = "asmjs"),
@@ -376,7 +411,12 @@ pub struct VaList<'a, 'f: 'a> {
}
#[cfg(any(
- all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
+ not(target_arch = "x86_64")
+ ),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
target_family = "wasm",
target_arch = "asmjs",
@@ -398,7 +438,12 @@ impl<'f> VaListImpl<'f> {
}
#[cfg(all(
- any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(
+ target_arch = "aarch64",
+ target_arch = "powerpc",
+ target_arch = "s390x",
+ target_arch = "x86_64"
+ ),
any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
not(target_family = "wasm"),
not(target_arch = "asmjs"),
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index 48b617743..fa5073e33 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -174,6 +174,11 @@ pub trait Write {
/// This method should generally not be invoked manually, but rather through
/// the [`write!`] macro itself.
///
+ /// # Errors
+ ///
+ /// This function will return an instance of [`Error`] on error. Please see
+ /// [write_str](Write::write_str) for details.
+ ///
/// # Examples
///
/// ```
@@ -405,7 +410,7 @@ impl<'a> Arguments<'a> {
/// 1. The `pieces` slice must be at least as long as `fmt`.
/// 2. Every [`rt::v1::Argument::position`] value within `fmt` must be a
/// valid index of `args`.
- /// 3. Every [`Count::Param`] within `fmt` must contain a valid index of
+ /// 3. Every [`rt::v1::Count::Param`] within `fmt` must contain a valid index of
/// `args`.
#[doc(hidden)]
#[inline]
@@ -558,7 +563,7 @@ impl Display for Arguments<'_> {
///
/// Derived `Debug` formats are not stable, and so may change with future Rust
/// versions. Additionally, `Debug` implementations of types provided by the
-/// standard library (`libstd`, `libcore`, `liballoc`, etc.) are not stable, and
+/// standard library (`std`, `core`, `alloc`, etc.) are not stable, and
/// may also change with future Rust versions.
///
/// # Examples
@@ -2471,8 +2476,8 @@ impl Display for char {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Pointer for *const T {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
- // Cast is needed here because `.addr()` requires `T: Sized`.
- pointer_fmt_inner((*self as *const ()).addr(), f)
+ // Cast is needed here because `.expose_addr()` requires `T: Sized`.
+ pointer_fmt_inner((*self as *const ()).expose_addr(), f)
}
}
diff --git a/library/core/src/future/future.rs b/library/core/src/future/future.rs
index f29d3e1e9..8c7111cb3 100644
--- a/library/core/src/future/future.rs
+++ b/library/core/src/future/future.rs
@@ -37,6 +37,7 @@ use crate::task::{Context, Poll};
pub trait Future {
/// The type of value produced on completion.
#[stable(feature = "futures_api", since = "1.36.0")]
+ #[rustc_diagnostic_item = "FutureOutput"]
type Output;
/// Attempt to resolve the future to a final value, registering
diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs
index f2b961d62..c4fb36209 100644
--- a/library/core/src/future/mod.rs
+++ b/library/core/src/future/mod.rs
@@ -44,7 +44,7 @@ pub use poll_fn::{poll_fn, PollFn};
/// non-Send/Sync as well, and we don't want that.
///
/// It also simplifies the HIR lowering of `.await`.
-#[cfg_attr(not(bootstrap), lang = "ResumeTy")]
+#[lang = "ResumeTy"]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[derive(Debug, Copy, Clone)]
@@ -61,7 +61,6 @@ unsafe impl Sync for ResumeTy {}
/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give
/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`).
// This is `const` to avoid extra errors after we recover from `const async fn`
-#[cfg_attr(bootstrap, lang = "from_generator")]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[rustc_const_unstable(feature = "gen_future", issue = "50547")]
@@ -113,10 +112,14 @@ pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
unsafe { &mut *cx.0.as_ptr().cast() }
}
-#[cfg_attr(not(bootstrap), lang = "identity_future")]
+// FIXME(swatinem): This fn is currently needed to work around shortcomings
+// in type and lifetime inference.
+// See the comment at the bottom of `LoweringContext::make_async_expr` and
+// <https://github.com/rust-lang/rust/issues/104826>.
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[inline]
+#[lang = "identity_future"]
pub const fn identity_future<O, Fut: Future<Output = O>>(f: Fut) -> Fut {
f
}
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index c755afa39..71a0d1825 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -199,7 +199,7 @@ pub trait Hash {
/// println!("Hash is {:x}!", hasher.finish());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- fn hash<H: Hasher>(&self, state: &mut H);
+ fn hash<H: ~const Hasher>(&self, state: &mut H);
/// Feeds a slice of this type into the given [`Hasher`].
///
@@ -980,7 +980,7 @@ mod impls {
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
impl<T: ?Sized + ~const Hash> const Hash for &mut T {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index e8d724ab1..5a76e8669 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -219,6 +219,75 @@ pub fn spin_loop() {
/// backend used. Programs cannot rely on `black_box` for *correctness* in any way.
///
/// [`std::convert::identity`]: crate::convert::identity
+///
+/// # When is this useful?
+///
+/// First and foremost: `black_box` does _not_ guarantee any exact behavior and, in some cases, may
+/// do nothing at all. As such, it **must not be relied upon to control critical program behavior.**
+/// This _immediately_ precludes any direct use of this function for cryptographic or security
+/// purposes.
+///
+/// While not suitable in those mission-critical cases, `black_box`'s functionality can generally be
+/// relied upon for benchmarking, and should be used there. It will try to ensure that the
+/// compiler doesn't optimize away part of the intended test code based on context. For
+/// example:
+///
+/// ```
+/// fn contains(haystack: &[&str], needle: &str) -> bool {
+/// haystack.iter().any(|x| x == &needle)
+/// }
+///
+/// pub fn benchmark() {
+/// let haystack = vec!["abc", "def", "ghi", "jkl", "mno"];
+/// let needle = "ghi";
+/// for _ in 0..10 {
+/// contains(&haystack, needle);
+/// }
+/// }
+/// ```
+///
+/// The compiler could theoretically make optimizations like the following:
+///
+/// - `needle` and `haystack` are always the same, move the call to `contains` outside the loop and
+/// delete the loop
+/// - Inline `contains`
+/// - `needle` and `haystack` have values known at compile time, `contains` is always true. Remove
+/// the call and replace with `true`
+/// - Nothing is done with the result of `contains`: delete this function call entirely
+/// - `benchmark` now has no purpose: delete this function
+///
+/// It is not likely that all of the above happens, but the compiler is definitely able to make some
+/// optimizations that could result in a very inaccurate benchmark. This is where `black_box` comes
+/// in:
+///
+/// ```
+/// use std::hint::black_box;
+///
+/// // Same `contains` function
+/// fn contains(haystack: &[&str], needle: &str) -> bool {
+/// haystack.iter().any(|x| x == &needle)
+/// }
+///
+/// pub fn benchmark() {
+/// let haystack = vec!["abc", "def", "ghi", "jkl", "mno"];
+/// let needle = "ghi";
+/// for _ in 0..10 {
+/// // Adjust our benchmark loop contents
+/// black_box(contains(black_box(&haystack), black_box(needle)));
+/// }
+/// }
+/// ```
+///
+/// This essentially tells the compiler to block optimizations across any calls to `black_box`. So,
+/// it now:
+///
+/// - Treats both arguments to `contains` as unpredictable: the body of `contains` can no longer be
+/// optimized based on argument values
+/// - Treats the call to `contains` and its result as volatile: the body of `benchmark` cannot
+/// optimize this away
+///
+/// This makes our benchmark much more realistic to how the function would be used in situ, where
+/// arguments are usually not known at compile time and the result is used in some way.
#[inline]
#[stable(feature = "bench_black_box", since = "1.66.0")]
#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 7ed7d767f..a315a28fb 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -55,7 +55,6 @@
#![allow(missing_docs)]
use crate::marker::DiscriminantKind;
-#[cfg(not(bootstrap))]
use crate::marker::Tuple;
use crate::mem;
@@ -959,13 +958,13 @@ extern "rust-intrinsic" {
#[rustc_safe_intrinsic]
pub fn assert_zero_valid<T>();
- /// A guard for unsafe functions that cannot ever be executed if `T` has invalid
- /// bit patterns: This will statically either panic, or do nothing.
+ /// A guard for `std::mem::uninitialized`. This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
#[rustc_safe_intrinsic]
- pub fn assert_uninit_valid<T>();
+ #[cfg(not(bootstrap))]
+ pub fn assert_mem_uninitialized_valid<T>();
/// Gets a reference to a static `Location` indicating where it was called.
///
@@ -2175,66 +2174,6 @@ extern "rust-intrinsic" {
/// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
/// which violates the principle that a `const fn` must behave the same at
/// compile-time and at run-time. The unsafe code in crate B is fine.
- #[cfg(bootstrap)]
- #[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
- pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET
- where
- G: FnOnce<ARG, Output = RET>,
- F: FnOnce<ARG, Output = RET>;
-
- /// Selects which function to call depending on the context.
- ///
- /// If this function is evaluated at compile-time, then a call to this
- /// intrinsic will be replaced with a call to `called_in_const`. It gets
- /// replaced with a call to `called_at_rt` otherwise.
- ///
- /// # Type Requirements
- ///
- /// The two functions must be both function items. They cannot be function
- /// pointers or closures. The first function must be a `const fn`.
- ///
- /// `arg` will be the tupled arguments that will be passed to either one of
- /// the two functions, therefore, both functions must accept the same type of
- /// arguments. Both functions must return RET.
- ///
- /// # Safety
- ///
- /// The two functions must behave observably equivalent. Safe code in other
- /// crates may assume that calling a `const fn` at compile-time and at run-time
- /// produces the same result. A function that produces a different result when
- /// evaluated at run-time, or has any other observable side-effects, is
- /// *unsound*.
- ///
- /// Here is an example of how this could cause a problem:
- /// ```no_run
- /// #![feature(const_eval_select)]
- /// #![feature(core_intrinsics)]
- /// use std::hint::unreachable_unchecked;
- /// use std::intrinsics::const_eval_select;
- ///
- /// // Crate A
- /// pub const fn inconsistent() -> i32 {
- /// fn runtime() -> i32 { 1 }
- /// const fn compiletime() -> i32 { 2 }
- ///
- /// unsafe {
- // // ⚠ This code violates the required equivalence of `compiletime`
- /// // and `runtime`.
- /// const_eval_select((), compiletime, runtime)
- /// }
- /// }
- ///
- /// // Crate B
- /// const X: i32 = inconsistent();
- /// let x = inconsistent();
- /// if x != X { unsafe { unreachable_unchecked(); }}
- /// ```
- ///
- /// This code causes Undefined Behavior when being run, since the
- /// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
- /// which violates the principle that a `const fn` must behave the same at
- /// compile-time and at run-time. The unsafe code in crate B is fine.
- #[cfg(not(bootstrap))]
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
pub fn const_eval_select<ARG: Tuple, F, G, RET>(
arg: ARG,
@@ -2281,7 +2220,7 @@ macro_rules! assert_unsafe_precondition {
fn runtime$(<$($tt)*>)?($($i:$ty),*) {
if !$e {
// don't unwind to reduce impact on code size
- ::core::panicking::panic_str_nounwind(
+ ::core::panicking::panic_nounwind(
concat!("unsafe precondition(s) violated: ", $name)
);
}
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index 8ba1c1228..e3157b669 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -21,15 +21,14 @@
//! #[custom_mir(dialect = "built")]
//! pub fn simple(x: i32) -> i32 {
//! mir!(
-//! let temp1: i32;
-//! let temp2: _;
+//! let temp2: i32;
//!
//! {
-//! temp1 = x;
-//! Goto(exit)
+//! let temp1 = x;
+//! Goto(my_second_block)
//! }
//!
-//! exit = {
+//! my_second_block = {
//! temp2 = Move(temp1);
//! RET = temp2;
//! Return()
@@ -38,22 +37,200 @@
//! }
//! ```
//!
-//! Hopefully most of this is fairly self-explanatory. Expanding on some notable details:
+//! The `custom_mir` attribute tells the compiler to treat the function as being custom MIR. This
+//! attribute only works on functions - there is no way to insert custom MIR into the middle of
+//! another function. The `dialect` and `phase` parameters indicate which [version of MIR][dialect
+//! docs] you are inserting here. Generally you'll want to use `#![custom_mir(dialect = "built")]`
+//! if you want your MIR to be modified by the full MIR pipeline, or `#![custom_mir(dialect =
+//! "runtime", phase = "optimized")] if you don't.
//!
-//! - The `custom_mir` attribute tells the compiler to treat the function as being custom MIR. This
-//! attribute only works on functions - there is no way to insert custom MIR into the middle of
-//! another function.
-//! - The `dialect` and `phase` parameters indicate which version of MIR you are inserting here.
-//! This will normally be the phase that corresponds to the thing you are trying to test. The
-//! phase can be omitted for dialects that have just one.
-//! - You should define your function signature like you normally would. Externally, this function
-//! can be called like any other function.
-//! - Type inference works - you don't have to spell out the type of all of your locals.
+//! [dialect docs]:
+//! https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/mir/enum.MirPhase.html
//!
-//! For now, all statements and terminators are parsed from nested invocations of the special
-//! functions provided in this module. We additionally want to (but do not yet) support more
-//! "normal" Rust syntax in places where it makes sense. Also, most kinds of instructions are not
-//! supported yet.
+//! The input to the [`mir!`] macro is:
+//!
+//! - A possibly empty list of local declarations. Locals can also be declared inline on
+//! assignments via `let`. Type inference generally works. Shadowing does not.
+//! - A list of basic blocks. The first of these is the start block and is where execution begins.
+//! All blocks other than the start block need to be given a name, so that they can be referred
+//! to later.
+//! - Each block is a list of semicolon terminated statements, followed by a terminator. The
+//! syntax for the various statements and terminators is designed to be as similar as possible
+//! to the syntax for analogous concepts in native Rust. See below for a list.
+//!
+//! # Examples
+//!
+#![cfg_attr(bootstrap, doc = "```rust,compile_fail")]
+#![cfg_attr(not(bootstrap), doc = "```rust")]
+//! #![feature(core_intrinsics, custom_mir)]
+//!
+//! extern crate core;
+//! use core::intrinsics::mir::*;
+//!
+//! #[custom_mir(dialect = "built")]
+//! pub fn choose_load(a: &i32, b: &i32, c: bool) -> i32 {
+//! mir!(
+//! {
+//! match c {
+//! true => t,
+//! _ => f,
+//! }
+//! }
+//!
+//! t = {
+//! let temp = a;
+//! Goto(load_and_exit)
+//! }
+//!
+//! f = {
+//! temp = b;
+//! Goto(load_and_exit)
+//! }
+//!
+//! load_and_exit = {
+//! RET = *temp;
+//! Return()
+//! }
+//! )
+//! }
+//!
+//! #[custom_mir(dialect = "built")]
+//! fn unwrap_unchecked<T>(opt: Option<T>) -> T {
+//! mir!({
+//! RET = Move(Field(Variant(opt, 1), 0));
+//! Return()
+//! })
+//! }
+//!
+//! #[custom_mir(dialect = "runtime", phase = "optimized")]
+//! fn push_and_pop<T>(v: &mut Vec<T>, value: T) {
+//! mir!(
+//! let unused;
+//! let popped;
+//!
+//! {
+//! Call(unused, pop, Vec::push(v, value))
+//! }
+//!
+//! pop = {
+//! Call(popped, drop, Vec::pop(v))
+//! }
+//!
+//! drop = {
+//! Drop(popped, ret)
+//! }
+//!
+//! ret = {
+//! Return()
+//! }
+//! )
+//! }
+//! ```
+//!
+//! We can also set off compilation failures that happen in sufficiently late stages of the
+//! compiler:
+//!
+//! ```rust,compile_fail
+//! #![feature(core_intrinsics, custom_mir)]
+//!
+//! extern crate core;
+//! use core::intrinsics::mir::*;
+//!
+//! #[custom_mir(dialect = "built")]
+//! fn borrow_error(should_init: bool) -> i32 {
+//! mir!(
+//! let temp: i32;
+//!
+//! {
+//! match should_init {
+//! true => init,
+//! _ => use_temp,
+//! }
+//! }
+//!
+//! init = {
+//! temp = 0;
+//! Goto(use_temp)
+//! }
+//!
+//! use_temp = {
+//! RET = temp;
+//! Return()
+//! }
+//! )
+//! }
+//! ```
+//!
+//! ```text
+//! error[E0381]: used binding is possibly-uninitialized
+//! --> test.rs:24:13
+//! |
+//! 8 | / mir!(
+//! 9 | | let temp: i32;
+//! 10 | |
+//! 11 | | {
+//! ... |
+//! 19 | | temp = 0;
+//! | | -------- binding initialized here in some conditions
+//! ... |
+//! 24 | | RET = temp;
+//! | | ^^^^^^^^^^ value used here but it is possibly-uninitialized
+//! 25 | | Return()
+//! 26 | | }
+//! 27 | | )
+//! | |_____- binding declared here but left uninitialized
+//!
+//! error: aborting due to previous error
+//!
+//! For more information about this error, try `rustc --explain E0381`.
+//! ```
+//!
+//! # Syntax
+//!
+//! The lists below are an exhaustive description of how various MIR constructs can be created.
+//! Anything missing from the list should be assumed to not be supported, PRs welcome.
+//!
+//! #### Locals
+//!
+//! - The `_0` return local can always be accessed via `RET`.
+//! - Arguments can be accessed via their regular name.
+//! - All other locals need to be declared with `let` somewhere and then can be accessed by name.
+//!
+//! #### Places
+//! - Locals implicitly convert to places.
+//! - Field accesses, derefs, and indexing work normally.
+//! - Fields in variants can be accessed via the [`Variant`] and [`Field`] associated functions,
+//! see their documentation for details.
+//!
+//! #### Operands
+//! - Places implicitly convert to `Copy` operands.
+//! - `Move` operands can be created via [`Move`].
+//! - Const blocks, literals, named constants, and const params all just work.
+//! - [`Static`] and [`StaticMut`] can be used to create `&T` and `*mut T`s to statics. These are
+//! constants in MIR and the only way to access statics.
+//!
+//! #### Statements
+//! - Assign statements work via normal Rust assignment.
+//! - [`Retag`] statements have an associated function.
+//!
+//! #### Rvalues
+//!
+//! - Operands implicitly convert to `Use` rvalues.
+//! - `&`, `&mut`, `addr_of!`, and `addr_of_mut!` all work to create their associated rvalue.
+//! - [`Discriminant`] has an associated function.
+//!
+//! #### Terminators
+//!
+//! Custom MIR does not currently support cleanup blocks or non-trivial unwind paths. As such, there
+//! are no resume and abort terminators, and terminators that might unwind do not have any way to
+//! indicate the unwind block.
+//!
+//! - [`Goto`], [`Return`], [`Unreachable`], [`Drop`](Drop()), and [`DropAndReplace`] have associated functions.
+//! - `match some_int_operand` becomes a `SwitchInt`. Each arm should be `literal => basic_block`
+//! - The exception is the last arm, which must be `_ => basic_block` and corresponds to the
+//! otherwise branch.
+//! - [`Call`] has an associated function as well. The third argument of this function is a normal
+//! function call expression, for example `my_other_function(a, 5)`.
//!
#![unstable(
@@ -69,21 +246,93 @@
pub struct BasicBlock;
macro_rules! define {
- ($name:literal, $($sig:tt)*) => {
+ ($name:literal, $( #[ $meta:meta ] )* fn $($sig:tt)*) => {
#[rustc_diagnostic_item = $name]
- pub $($sig)* { panic!() }
+ $( #[ $meta ] )*
+ pub fn $($sig)* { panic!() }
}
}
define!("mir_return", fn Return() -> BasicBlock);
define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
+define!("mir_unreachable", fn Unreachable() -> BasicBlock);
+define!("mir_drop", fn Drop<T>(place: T, goto: BasicBlock));
+define!("mir_drop_and_replace", fn DropAndReplace<T>(place: T, value: T, goto: BasicBlock));
+define!("mir_call", fn Call<T>(place: T, goto: BasicBlock, call: T));
+define!("mir_storage_live", fn StorageLive<T>(local: T));
+define!("mir_storage_dead", fn StorageDead<T>(local: T));
define!("mir_retag", fn Retag<T>(place: T));
-define!("mir_retag_raw", fn RetagRaw<T>(place: T));
define!("mir_move", fn Move<T>(place: T) -> T);
define!("mir_static", fn Static<T>(s: T) -> &'static T);
define!("mir_static_mut", fn StaticMut<T>(s: T) -> *mut T);
+define!(
+ "mir_discriminant",
+ /// Gets the discriminant of a place.
+ fn Discriminant<T>(place: T) -> <T as ::core::marker::DiscriminantKind>::Discriminant
+);
+define!("mir_set_discriminant", fn SetDiscriminant<T>(place: T, index: u32));
+define!(
+ "mir_field",
+ /// Access the field with the given index of some place.
+ ///
+ /// This only makes sense to use in conjunction with [`Variant`]. If the type you are looking to
+ /// access the field of does not have variants, you can use normal field projection syntax.
+ ///
+ /// There is no proper way to do a place projection to a variant in Rust, and so these two
+ /// functions are a workaround. You can access a field of a variant via `Field(Variant(place,
+ /// var_idx), field_idx)`, where `var_idx` and `field_idx` are appropriate literals. Some
+ /// caveats:
+ ///
+ /// - The return type of `Variant` is always `()`. Don't worry about that, the correct MIR will
+ /// still be generated.
+ /// - In some situations, the return type of `Field` cannot be inferred. You may need to
+ /// annotate it on the function in these cases.
+ /// - Since `Field` is a function call which is not a place expression, using this on the left
+ /// hand side of an expression is rejected by the compiler. [`place!`] is a macro provided to
+ /// work around that issue. Wrap the left hand side of an assignment in the macro to convince
+ /// the compiler that it's ok.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(bootstrap, doc = "```rust,compile_fail")]
+ #[cfg_attr(not(bootstrap), doc = "```rust")]
+ /// #![feature(custom_mir, core_intrinsics)]
+ ///
+ /// extern crate core;
+ /// use core::intrinsics::mir::*;
+ ///
+ /// #[custom_mir(dialect = "built")]
+ /// fn unwrap_deref(opt: Option<&i32>) -> i32 {
+ /// mir!({
+ /// RET = *Field::<&i32>(Variant(opt, 1), 0);
+ /// Return()
+ /// })
+ /// }
+ ///
+ /// #[custom_mir(dialect = "built")]
+ /// fn set(opt: &mut Option<i32>) {
+ /// mir!({
+ /// place!(Field(Variant(*opt, 1), 0)) = 5;
+ /// Return()
+ /// })
+ /// }
+ /// ```
+ fn Field<F>(place: (), field: u32) -> F
+);
+define!(
+ "mir_variant",
+ /// Adds a variant projection with the given index to the place.
+ ///
+ /// See [`Field`] for documentation.
+ fn Variant<T>(place: T, index: u32) -> ()
+);
+define!(
+ "mir_make_place",
+ #[doc(hidden)]
+ fn __internal_make_place<T>(place: T) -> *mut T
+);
-/// Convenience macro for generating custom MIR.
+/// Macro for generating custom MIR.
///
/// See the module documentation for syntax details. This macro is not magic - it only transforms
/// your MIR into something that is easier to parse in the compiler.
@@ -139,6 +388,13 @@ pub macro mir {
}}
}
+/// Helper macro that allows you to treat a value expression like a place expression.
+///
+/// See the documentation on [`Variant`] for why this is necessary and how to use it.
+pub macro place($e:expr) {
+ (*::core::intrinsics::mir::__internal_make_place($e))
+}
+
/// Helper macro that extracts the `let` declarations out of a bunch of statements.
///
/// This macro is written using the "statement muncher" strategy. Each invocation parses the first
diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs
index ac7b389b1..b5739f2f3 100644
--- a/library/core/src/iter/range.rs
+++ b/library/core/src/iter/range.rs
@@ -756,7 +756,7 @@ impl<A: Step> Iterator for ops::Range<A> {
where
Self: TrustedRandomAccessNoCoerce,
{
- // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
+ // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
// Additionally Self: TrustedRandomAccess is only implemented for Copy types
// which means even repeated reads of the same index would be safe.
diff --git a/library/core/src/iter/sources/empty.rs b/library/core/src/iter/sources/empty.rs
index 98734c527..617dfd123 100644
--- a/library/core/src/iter/sources/empty.rs
+++ b/library/core/src/iter/sources/empty.rs
@@ -22,17 +22,12 @@ pub const fn empty<T>() -> Empty<T> {
Empty(marker::PhantomData)
}
-// Newtype for use in `PhantomData` to avoid
-// > error: const-stable function cannot use `#[feature(const_fn_fn_ptr_basics)]`
-// in `const fn empty<T>()` above.
-struct FnReturning<T>(fn() -> T);
-
/// An iterator that yields nothing.
///
/// This `struct` is created by the [`empty()`] function. See its documentation for more.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iter_empty", since = "1.2.0")]
-pub struct Empty<T>(marker::PhantomData<FnReturning<T>>);
+pub struct Empty<T>(marker::PhantomData<fn() -> T>);
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T> fmt::Debug for Empty<T> {
diff --git a/library/core/src/iter/sources/from_generator.rs b/library/core/src/iter/sources/from_generator.rs
index 8e7cbd34a..4cbe731b2 100644
--- a/library/core/src/iter/sources/from_generator.rs
+++ b/library/core/src/iter/sources/from_generator.rs
@@ -1,3 +1,4 @@
+use crate::fmt;
use crate::ops::{Generator, GeneratorState};
use crate::pin::Pin;
@@ -23,14 +24,21 @@ use crate::pin::Pin;
/// ```
#[inline]
#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
-pub fn from_generator<G: Generator<Return = ()> + Unpin>(
- generator: G,
-) -> impl Iterator<Item = G::Yield> {
+pub fn from_generator<G: Generator<Return = ()> + Unpin>(generator: G) -> FromGenerator<G> {
FromGenerator(generator)
}
-struct FromGenerator<G>(G);
+/// An iterator over the values yielded by an underlying generator.
+///
+/// This `struct` is created by the [`iter::from_generator()`] function. See its documentation for
+/// more.
+///
+/// [`iter::from_generator()`]: from_generator
+#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
+#[derive(Clone)]
+pub struct FromGenerator<G>(G);
+#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
type Item = G::Yield;
@@ -41,3 +49,10 @@ impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
}
}
}
+
+#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
+impl<G> fmt::Debug for FromGenerator<G> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FromGenerator").finish()
+ }
+}
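Because the adapter is now a public, nameable type with its own `Debug` impl, it can be inspected and passed around like the other iterator sources. A brief sketch, nightly-only since both `generators` and `iter_from_generator` are unstable:

```rust
#![feature(generators, iter_from_generator)]

use std::iter::from_generator;

fn main() {
    let mut it = from_generator(|| {
        yield 1;
        yield 2;
        yield 3;
    });

    println!("{it:?}"); // prints "FromGenerator" via the new Debug impl
    assert_eq!(it.next(), Some(1));

    let rest: Vec<i32> = it.collect();
    assert_eq!(rest, vec![2, 3]);
}
```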
diff --git a/library/core/src/iter/sources/once_with.rs b/library/core/src/iter/sources/once_with.rs
index d79f85c25..9309a06c8 100644
--- a/library/core/src/iter/sources/once_with.rs
+++ b/library/core/src/iter/sources/once_with.rs
@@ -1,3 +1,4 @@
+use crate::fmt;
use crate::iter::{FusedIterator, TrustedLen};
/// Creates an iterator that lazily generates a value exactly once by invoking
@@ -66,12 +67,23 @@ pub fn once_with<A, F: FnOnce() -> A>(gen: F) -> OnceWith<F> {
///
/// This `struct` is created by the [`once_with()`] function.
/// See its documentation for more.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
#[stable(feature = "iter_once_with", since = "1.43.0")]
pub struct OnceWith<F> {
gen: Option<F>,
}
+#[stable(feature = "iter_once_with_debug", since = "1.68.0")]
+impl<F> fmt::Debug for OnceWith<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.gen.is_some() {
+ f.write_str("OnceWith(Some(_))")
+ } else {
+ f.write_str("OnceWith(None)")
+ }
+ }
+}
+
#[stable(feature = "iter_once_with", since = "1.43.0")]
impl<A, F: FnOnce() -> A> Iterator for OnceWith<F> {
type Item = A;
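The derived `Debug` needed `F: Debug`, which closures never provide; the hand-written impl instead reports only whether the closure is still pending. The two strings below are exactly the ones written by the impl above:

```rust
use std::iter;

fn main() {
    let mut it = iter::once_with(|| String::from("hello"));
    assert_eq!(format!("{it:?}"), "OnceWith(Some(_))");

    assert_eq!(it.next().as_deref(), Some("hello"));
    assert_eq!(format!("{it:?}"), "OnceWith(None)");
    assert_eq!(it.next(), None);
}
```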
diff --git a/library/core/src/iter/sources/repeat_n.rs b/library/core/src/iter/sources/repeat_n.rs
index fd8d25ce1..dc61d6065 100644
--- a/library/core/src/iter/sources/repeat_n.rs
+++ b/library/core/src/iter/sources/repeat_n.rs
@@ -126,7 +126,7 @@ impl<A: Clone> Iterator for RepeatN<A> {
// zero so it won't be dropped later, and thus it's okay to take it here.
unsafe { ManuallyDrop::take(&mut self.element) }
} else {
- A::clone(&mut self.element)
+ A::clone(&self.element)
})
}
diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs
index ab2d0472b..3f34105a3 100644
--- a/library/core/src/iter/sources/repeat_with.rs
+++ b/library/core/src/iter/sources/repeat_with.rs
@@ -1,3 +1,4 @@
+use crate::fmt;
use crate::iter::{FusedIterator, TrustedLen};
use crate::ops::Try;
@@ -71,12 +72,19 @@ pub fn repeat_with<A, F: FnMut() -> A>(repeater: F) -> RepeatWith<F> {
///
/// This `struct` is created by the [`repeat_with()`] function.
/// See its documentation for more.
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone)]
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
pub struct RepeatWith<F> {
repeater: F,
}
+#[stable(feature = "iterator_repeat_with_debug", since = "1.68.0")]
+impl<F> fmt::Debug for RepeatWith<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RepeatWith").finish_non_exhaustive()
+ }
+}
+
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
impl<A, F: FnMut() -> A> Iterator for RepeatWith<F> {
type Item = A;
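Same motivation as the `OnceWith` change: the repeater closure cannot implement `Debug`, so the manual impl prints a non-exhaustive struct and never touches the captures. A short illustration (the expected string is what `finish_non_exhaustive` produces when no fields are printed):

```rust
use std::iter;

fn main() {
    let mut counter = 0;
    let it = iter::repeat_with(move || {
        counter += 1;
        counter
    });

    assert_eq!(format!("{it:?}"), "RepeatWith { .. }");

    let first_three: Vec<i32> = it.take(3).collect();
    assert_eq!(first_three, vec![1, 2, 3]);
}
```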
diff --git a/library/core/src/iter/traits/accum.rs b/library/core/src/iter/traits/accum.rs
index 84d83ee39..e31669b39 100644
--- a/library/core/src/iter/traits/accum.rs
+++ b/library/core/src/iter/traits/accum.rs
@@ -10,6 +10,10 @@ use crate::num::Wrapping;
/// [`sum()`]: Iterator::sum
/// [`FromIterator`]: iter::FromIterator
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+#[rustc_on_unimplemented(
+ message = "a value of type `{Self}` cannot be made by summing an iterator over elements of type `{A}`",
+ label = "value of type `{Self}` cannot be made by summing a `std::iter::Iterator<Item={A}>`"
+)]
pub trait Sum<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// "summing up" the items.
@@ -27,6 +31,10 @@ pub trait Sum<A = Self>: Sized {
/// [`product()`]: Iterator::product
/// [`FromIterator`]: iter::FromIterator
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+#[rustc_on_unimplemented(
+ message = "a value of type `{Self}` cannot be made by multiplying all elements of type `{A}` from an iterator",
+ label = "value of type `{Self}` cannot be made by multiplying all elements from a `std::iter::Iterator<Item={A}>`"
+)]
pub trait Product<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// multiplying the items.
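These attributes only change the diagnostic emitted when no `Sum`/`Product` impl exists for the requested target type; code that compiles today is unaffected. For reference, a working use plus a hypothetical trigger for the new message:

```rust
fn main() {
    let nums = [1, 2, 3, 4];

    let total: i32 = nums.iter().sum();
    let product: i32 = nums.iter().product();
    assert_eq!((total, product), (10, 24));

    // Something like `nums.iter().sum::<String>()` would now be rejected with
    // "a value of type `String` cannot be made by summing an iterator over
    //  elements of type `&i32`" instead of the generic trait-bound error.
}
```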
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 83c7e8977..a4a665d48 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -58,6 +58,11 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
note = "if you want to iterate between `start` until a value `end`, use the exclusive range \
syntax `start..end` or the inclusive range syntax `start..=end`"
),
+ on(
+ _Self = "{float}",
+ note = "if you want to iterate between `start` until a value `end`, use the exclusive range \
+ syntax `start..end` or the inclusive range syntax `start..=end`"
+ ),
label = "`{Self}` is not an iterator",
message = "`{Self}` is not an iterator"
)]
@@ -66,6 +71,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub trait Iterator {
/// The type of the elements being iterated over.
+ #[rustc_diagnostic_item = "IteratorItem"]
#[stable(feature = "rust1", since = "1.0.0")]
type Item;
@@ -803,7 +809,7 @@ pub trait Iterator {
/// (0..5).map(|x| x * 2 + 1)
/// .for_each(move |x| tx.send(x).unwrap());
///
- /// let v: Vec<_> = rx.iter().collect();
+ /// let v: Vec<_> = rx.iter().collect();
/// assert_eq!(v, vec![1, 3, 5, 7, 9]);
/// ```
///
@@ -1380,8 +1386,8 @@ pub trait Iterator {
Take::new(self, n)
}
- /// An iterator adapter similar to [`fold`] that holds internal state and
- /// produces a new iterator.
+ /// An iterator adapter which, like [`fold`], holds internal state, but
+ /// unlike [`fold`], produces a new iterator.
///
/// [`fold`]: Iterator::fold
///
@@ -1393,20 +1399,25 @@ pub trait Iterator {
///
/// On iteration, the closure will be applied to each element of the
/// iterator and the return value from the closure, an [`Option`], is
- /// yielded by the iterator.
+ /// returned by the `next` method. Thus the closure can return
+ /// `Some(value)` to yield `value`, or `None` to end the iteration.
///
/// # Examples
///
/// Basic usage:
///
/// ```
- /// let a = [1, 2, 3];
+ /// let a = [1, 2, 3, 4];
///
/// let mut iter = a.iter().scan(1, |state, &x| {
- /// // each iteration, we'll multiply the state by the element
+ /// // each iteration, we'll multiply the state by the element ...
/// *state = *state * x;
///
- /// // then, we'll yield the negation of the state
+ /// // ... and terminate if the state exceeds 6
+ /// if *state > 6 {
+ /// return None;
+ /// }
+ /// // ... else yield the negation of the state
/// Some(-*state)
/// });
///
@@ -1508,6 +1519,18 @@ pub trait Iterator {
/// assert_eq!(merged, "alphabetagamma");
/// ```
///
+ /// Flattening works on any `IntoIterator` type, including `Option` and `Result`:
+ ///
+ /// ```
+ /// let options = vec![Some(123), Some(321), None, Some(231)];
+ /// let flattened_options: Vec<_> = options.into_iter().flatten().collect();
+ /// assert_eq!(flattened_options, vec![123, 321, 231]);
+ ///
+ /// let results = vec![Ok(123), Ok(321), Err(456), Ok(231)];
+ /// let flattened_results: Vec<_> = results.into_iter().flatten().collect();
+ /// assert_eq!(flattened_results, vec![123, 321, 231]);
+ /// ```
+ ///
/// Flattening only removes one level of nesting at a time:
///
/// ```
@@ -1829,6 +1852,7 @@ pub trait Iterator {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "iterator_collect_fn")]
fn collect<B: FromIterator<Self::Item>>(self) -> B
where
Self: Sized,
@@ -2652,7 +2676,10 @@ pub trait Iterator {
/// argument is a double reference. You can see this effect in the
/// examples below, with `&&x`.
///
+ /// If you need the index of the element, see [`position()`].
+ ///
/// [`Some(element)`]: Some
+ /// [`position()`]: Iterator::position
///
/// # Examples
///
@@ -2733,7 +2760,7 @@ pub trait Iterator {
/// the first true result or the first error.
///
/// The return type of this method depends on the return type of the closure.
- /// If you return `Result<bool, E>` from the closure, you'll get a `Result<Option<Self::Item>; E>`.
+ /// If you return `Result<bool, E>` from the closure, you'll get a `Result<Option<Self::Item>, E>`.
/// If you return `Option<bool>` from the closure, you'll get an `Option<Option<Self::Item>>`.
///
/// # Examples
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 1823fd300..8790649ab 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -38,18 +38,18 @@
//! which do not trigger a panic can be assured that this function is never
//! called. The `lang` attribute is called `eh_personality`.
-// Since libcore defines many fundamental lang items, all tests live in a
-// separate crate, libcoretest, to avoid bizarre issues.
+// Since core defines many fundamental lang items, all tests live in a
+// separate crate, libcoretest (library/core/tests), to avoid bizarre issues.
//
// Here we explicitly #[cfg]-out this whole crate when testing. If we don't do
// this, both the generated test artifact and the linked libtest (which
-// transitively includes libcore) will both define the same set of lang items,
+// transitively includes core) will both define the same set of lang items,
// and this will cause the E0152 "found duplicate lang item" error. See
// discussion in #50466 for details.
//
// This cfg won't affect doc tests.
#![cfg(not(test))]
-// To run libcore tests without x.py without ending up with two copies of libcore, Miri needs to be
+// To run core tests without x.py without ending up with two copies of core, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.

#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
@@ -158,6 +158,7 @@
#![feature(const_unsafecell_get_mut)]
#![feature(const_waker)]
#![feature(core_panic)]
+#![feature(char_indices_offset)]
#![feature(duration_consts_float)]
#![feature(maybe_uninit_uninit_array)]
#![feature(ptr_alignment_type)]
@@ -166,6 +167,8 @@
#![feature(slice_ptr_get)]
#![feature(slice_split_at_unchecked)]
#![feature(str_internals)]
+#![feature(str_split_remainder)]
+#![feature(str_split_inclusive_remainder)]
#![feature(strict_provenance)]
#![feature(utf16_extra)]
#![feature(utf16_extra_const)]
@@ -188,13 +191,14 @@
#![feature(cfg_sanitize)]
#![feature(cfg_target_has_atomic)]
#![feature(cfg_target_has_atomic_equal_alignment)]
+#![cfg_attr(not(bootstrap), feature(const_closures))]
#![feature(const_fn_floating_point_arithmetic)]
#![feature(const_mut_refs)]
#![feature(const_precise_live_drops)]
#![feature(const_refs_to_cell)]
#![feature(decl_macro)]
#![feature(deprecated_suggestion)]
-#![cfg_attr(not(bootstrap), feature(derive_const))]
+#![feature(derive_const)]
#![feature(doc_cfg)]
#![feature(doc_notable_trait)]
#![feature(rustdoc_internals)]
@@ -236,7 +240,6 @@
#![feature(arm_target_feature)]
#![feature(avx512_target_feature)]
#![feature(cmpxchg16b_target_feature)]
-#![feature(f16c_target_feature)]
#![feature(hexagon_target_feature)]
#![feature(mips_target_feature)]
#![feature(powerpc_target_feature)]
@@ -245,6 +248,7 @@
#![feature(sse4a_target_feature)]
#![feature(tbm_target_feature)]
#![feature(wasm_target_feature)]
+#![cfg_attr(bootstrap, feature(f16c_target_feature))]
// allow using `core::` in intra-doc links
#[allow(unused_extern_crates)]
@@ -309,7 +313,7 @@ pub mod f64;
#[macro_use]
pub mod num;
-/* The libcore prelude, not as all-encompassing as the libstd prelude */
+/* The core prelude, not as all-encompassing as the std prelude */
pub mod prelude;
@@ -376,12 +380,12 @@ mod const_closure;
#[stable(feature = "core_primitive", since = "1.43.0")]
pub mod primitive;
-// Pull in the `core_arch` crate directly into libcore. The contents of
+// Pull in the `core_arch` crate directly into core. The contents of
// `core_arch` are in a different repository: rust-lang/stdarch.
//
-// `core_arch` depends on libcore, but the contents of this module are
+// `core_arch` depends on core, but the contents of this module are
// set up in such a way that directly pulling it here works such that the
-// crate uses the this crate as its libcore.
+// crate uses this crate as its core.
#[path = "../../stdarch/crates/core_arch/src/mod.rs"]
#[allow(
missing_docs,
@@ -400,12 +404,12 @@ mod core_arch;
#[stable(feature = "simd_arch", since = "1.27.0")]
pub mod arch;
-// Pull in the `core_simd` crate directly into libcore. The contents of
+// Pull in the `core_simd` crate directly into core. The contents of
// `core_simd` are in a different repository: rust-lang/portable-simd.
//
-// `core_simd` depends on libcore, but the contents of this module are
+// `core_simd` depends on core, but the contents of this module are
// set up in such a way that directly pulling it here works such that the
-// crate uses this crate as its libcore.
+// crate uses this crate as its core.
#[path = "../../portable-simd/crates/core_simd/src/mod.rs"]
#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
#[allow(rustdoc::bare_urls)]
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index f29cd357d..3b026bc0e 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -1315,22 +1315,41 @@ pub(crate) mod builtin {
/// Parses a file as an expression or an item according to the context.
///
- /// The file is located relative to the current file (similarly to how
- /// modules are found). The provided path is interpreted in a platform-specific
- /// way at compile time. So, for instance, an invocation with a Windows path
- /// containing backslashes `\` would not compile correctly on Unix.
+ /// **Warning**: For multi-file Rust projects, the `include!` macro is probably not what you
+ /// are looking for. Usually, multi-file Rust projects use
+ /// [modules](https://doc.rust-lang.org/reference/items/modules.html). Multi-file projects and
+ /// modules are explained in the Rust-by-Example book
+ /// [here](https://doc.rust-lang.org/rust-by-example/mod/split.html) and the module system is
+ /// explained in the Rust Book
+ /// [here](https://doc.rust-lang.org/book/ch07-02-defining-modules-to-control-scope-and-privacy.html).
+ ///
+ /// The included file is placed in the surrounding code
+ /// [unhygienically](https://doc.rust-lang.org/reference/macros-by-example.html#hygiene). If
+ /// the included file is parsed as an expression and variables or functions share names across
+ /// both files, it could result in variables or functions being different from what the
+ /// included file expected.
+ ///
+ /// The included file is located relative to the current file (similarly to how modules are
+ /// found). The provided path is interpreted in a platform-specific way at compile time. So,
+ /// for instance, an invocation with a Windows path containing backslashes `\` would not
+ /// compile correctly on Unix.
///
- /// Using this macro is often a bad idea, because if the file is
- /// parsed as an expression, it is going to be placed in the
- /// surrounding code unhygienically. This could result in variables
- /// or functions being different from what the file expected if
- /// there are variables or functions that have the same name in
- /// the current file.
+ /// # Uses
+ ///
+ /// The `include!` macro is primarily used for two purposes. It is used to include
+ /// documentation that is written in a separate file and it is used to include [build artifacts
+ /// usually as a result from the `build.rs`
+ /// script](https://doc.rust-lang.org/cargo/reference/build-scripts.html#outputs-of-the-build-script).
+ ///
+ /// When using the `include` macro to include stretches of documentation, remember that the
+ /// included file still needs to be valid Rust syntax. It is also possible to
+ /// use the [`include_str`] macro as `#![doc = include_str!("...")]` (at the module level) or
+ /// `#[doc = include_str!("...")]` (at the item level) to include documentation from a plain
+ /// text or markdown file.
///
/// # Examples
///
- /// Assume there are two files in the same directory with the following
- /// contents:
+ /// Assume there are two files in the same directory with the following contents:
///
/// File 'monkeys.in':
///
@@ -1461,7 +1480,6 @@ pub(crate) mod builtin {
/// [the reference]: ../../../reference/attributes/derive.html
#[unstable(feature = "derive_const", issue = "none")]
#[rustc_builtin_macro]
- #[cfg(not(bootstrap))]
pub macro derive_const($item:item) {
/* compiler built-in */
}
@@ -1516,7 +1534,6 @@ pub(crate) mod builtin {
/// Attribute macro applied to a function to register it as a handler for allocation failure.
///
/// See also [`std::alloc::handle_alloc_error`](../../../std/alloc/fn.handle_alloc_error.html).
- #[cfg(not(bootstrap))]
#[unstable(feature = "alloc_error_handler", issue = "51540")]
#[allow_internal_unstable(rustc_attrs)]
#[rustc_builtin_macro]
@@ -1553,7 +1570,6 @@ pub(crate) mod builtin {
issue = "23416",
reason = "placeholder syntax for type ascription"
)]
- #[cfg(not(bootstrap))]
pub macro type_ascribe($expr:expr, $ty:ty) {
/* compiler built-in */
}
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 42c342801..1326fc9ab 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -96,7 +96,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {}
)]
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Sized {
// Empty.
}
@@ -126,9 +126,9 @@ pub trait Sized {
/// [`Rc`]: ../../std/rc/struct.Rc.html
/// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
/// [nomicon-coerce]: ../../nomicon/coercions.html
-#[unstable(feature = "unsize", issue = "27732")]
+#[unstable(feature = "unsize", issue = "18598")]
#[lang = "unsize"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Unsize<T: ?Sized> {
// Empty.
}
@@ -623,6 +623,12 @@ impl<T: ?Sized> !Sync for *mut T {}
/// (ideally) or `PhantomData<*const T>` (if no lifetime applies), so
/// as not to indicate ownership.
///
+/// ## Layout
+///
+/// For all `T`, the following are guaranteed:
+/// * `size_of::<PhantomData<T>>() == 0`
+/// * `align_of::<PhantomData<T>>() == 1`
+///
/// [drop check]: ../../nomicon/dropck.html
#[lang = "phantom_data"]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -695,7 +701,7 @@ impl<T: ?Sized> StructuralEq for PhantomData<T> {}
reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
)]
#[lang = "discriminant_kind"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait DiscriminantKind {
/// The type of the discriminant, which must satisfy the trait
/// bounds required by `mem::Discriminant`.
@@ -796,7 +802,7 @@ impl<T: ?Sized> Unpin for *mut T {}
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
#[const_trait]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Destruct {}
/// A marker for tuple types.
@@ -806,12 +812,12 @@ pub trait Destruct {}
#[unstable(feature = "tuple_trait", issue = "none")]
#[lang = "tuple_trait"]
#[rustc_on_unimplemented(message = "`{Self}` is not a tuple")]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Tuple {}
/// A marker for things
#[unstable(feature = "pointer_sized_trait", issue = "none")]
-#[cfg_attr(not(bootstrap), lang = "pointer_sized")]
+#[lang = "pointer_sized"]
#[rustc_on_unimplemented(
message = "`{Self}` needs to be a pointer-sized type",
label = "`{Self}` needs to be a pointer-sized type"
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 383bdc7b6..5e01ccc07 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -682,7 +682,8 @@ pub unsafe fn zeroed<T>() -> T {
pub unsafe fn uninitialized<T>() -> T {
// SAFETY: the caller must guarantee that an uninitialized value is valid for `T`.
unsafe {
- intrinsics::assert_uninit_valid::<T>();
+ #[cfg(not(bootstrap))] // If the compiler hits this itself then it deserves the UB.
+ intrinsics::assert_mem_uninitialized_valid::<T>();
let mut val = MaybeUninit::<T>::uninit();
// Fill memory with 0x01, as an imperfect mitigation for old code that uses this function on
diff --git a/library/core/src/num/dec2flt/fpu.rs b/library/core/src/num/dec2flt/fpu.rs
index ec5fa45fd..3806977f7 100644
--- a/library/core/src/num/dec2flt/fpu.rs
+++ b/library/core/src/num/dec2flt/fpu.rs
@@ -26,7 +26,7 @@ mod fpu_precision {
/// Developer's Manual (Volume 1).
///
/// The only field which is relevant for the following code is PC, Precision Control. This
- /// field determines the precision of the operations performed by the FPU. It can be set to:
+ /// field determines the precision of the operations performed by the FPU. It can be set to:
/// - 0b00, single precision i.e., 32-bits
/// - 0b10, double precision i.e., 64-bits
/// - 0b11, double extended precision i.e., 80-bits (default state)
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index 2c6a0ba64..1308b0770 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -428,7 +428,7 @@ impl f32 {
self != self
}
- // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // FIXME(#50145): `abs` is publicly unavailable in core due to
// concerns about portability, so this implementation is for
// private use internally.
#[inline]
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index fd3c18ce2..2a22c4302 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -427,7 +427,7 @@ impl f64 {
self != self
}
- // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // FIXME(#50145): `abs` is publicly unavailable in core due to
// concerns about portability, so this implementation is for
// private use internally.
#[inline]
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 57096f439..2cae98b8e 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -1514,37 +1514,50 @@ macro_rules! int_impl {
(a as Self, b)
}
- /// Calculates `self + rhs + carry` without the ability to overflow.
+ /// Calculates `self` + `rhs` + `carry` and checks for overflow.
///
- /// Performs "signed ternary addition" which takes in an extra bit to add, and may return an
- /// additional bit of overflow. This signed function is used only on the highest-ordered data,
- /// for which the signed overflow result indicates whether the big integer overflowed or not.
+ /// Performs "ternary addition" of two integer operands and a carry-in
+ /// bit, and returns a tuple of the sum along with a boolean indicating
+ /// whether an arithmetic overflow would occur. On overflow, the wrapped
+ /// value is returned.
///
- /// # Examples
+ /// This allows chaining together multiple additions to create a wider
+ /// addition, and can be useful for bignum addition. This method should
+ /// only be used for the most significant word; for the less significant
+ /// words the unsigned method
+ #[doc = concat!("[`", stringify!($UnsignedT), "::carrying_add`]")]
+ /// should be used.
///
- /// Basic usage:
+ /// The output boolean returned by this method is *not* a carry flag,
+ /// and should *not* be added to a more significant word.
///
- /// ```
- /// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, false), (7, false));")]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, true), (8, false));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), (", stringify!($SelfT), "::MIN, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(0, true), (", stringify!($SelfT), "::MIN, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, true), (", stringify!($SelfT), "::MIN + 1, true));")]
- #[doc = concat!("assert_eq!(",
- stringify!($SelfT), "::MAX.carrying_add(", stringify!($SelfT), "::MAX, true), ",
- "(-1, true));"
- )]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.carrying_add(-1, true), (", stringify!($SelfT), "::MIN, false));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".carrying_add(", stringify!($SelfT), "::MAX, true), (", stringify!($SelfT), "::MIN, true));")]
- /// ```
+ /// If the input carry is false, this method is equivalent to
+ /// [`overflowing_add`](Self::overflowing_add).
///
- /// If `carry` is false, this method is equivalent to [`overflowing_add`](Self::overflowing_add):
+ /// # Examples
///
/// ```
/// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".carrying_add(2, false), 5_", stringify!($SelfT), ".overflowing_add(2));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), ", stringify!($SelfT), "::MAX.overflowing_add(1));")]
+ /// // Only the most significant word is signed.
+ /// //
+ #[doc = concat!("// 10 MAX (a = 10 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")]
+ #[doc = concat!("// + -5 9 (b = -5 × 2^", stringify!($BITS), " + 9)")]
+ /// // ---------
+ #[doc = concat!("// 6 8 (sum = 6 × 2^", stringify!($BITS), " + 8)")]
+ ///
+ #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (10, ", stringify!($UnsignedT), "::MAX);")]
+ #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (-5, 9);")]
+ /// let carry0 = false;
+ ///
+ #[doc = concat!("// ", stringify!($UnsignedT), "::carrying_add for the less significant words")]
+ /// let (sum0, carry1) = a0.carrying_add(b0, carry0);
+ /// assert_eq!(carry1, true);
+ ///
+ #[doc = concat!("// ", stringify!($SelfT), "::carrying_add for the most significant word")]
+ /// let (sum1, overflow) = a1.carrying_add(b1, carry1);
+ /// assert_eq!(overflow, false);
+ ///
+ /// assert_eq!((sum1, sum0), (6, 8));
/// ```
#[unstable(feature = "bigint_helper_methods", issue = "85532")]
#[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
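A concrete instance of the new doc example, using `i8` as the signed most-significant word and `u8` for the low word (nightly-only, since `bigint_helper_methods` is unstable):

```rust
#![feature(bigint_helper_methods)]

fn main() {
    // a = 10 * 256 + 255 = 2815        b = -5 * 256 + 9 = -1271
    let (a1, a0): (i8, u8) = (10, u8::MAX);
    let (b1, b0): (i8, u8) = (-5, 9);

    // Unsigned carrying_add for the low word, signed carrying_add for the high word.
    let (sum0, carry) = a0.carrying_add(b0, false);
    let (sum1, overflow) = a1.carrying_add(b1, carry);

    assert!(carry);     // 255 + 9 wrapped the low word
    assert!(!overflow); // the signed high word did not overflow
    assert_eq!((sum1, sum0), (6, 8)); // 6 * 256 + 8 = 1544 = 2815 - 1271
}
```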
@@ -1608,25 +1621,51 @@ macro_rules! int_impl {
(a as Self, b)
}
- /// Calculates `self - rhs - borrow` without the ability to overflow.
+ /// Calculates `self` &minus; `rhs` &minus; `borrow` and checks for
+ /// overflow.
///
- /// Performs "signed ternary subtraction" which takes in an extra bit to subtract, and may return an
- /// additional bit of overflow. This signed function is used only on the highest-ordered data,
- /// for which the signed overflow result indicates whether the big integer overflowed or not.
+ /// Performs "ternary subtraction" by subtracting both an integer
+ /// operand and a borrow-in bit from `self`, and returns a tuple of the
+ /// difference along with a boolean indicating whether an arithmetic
+ /// overflow would occur. On overflow, the wrapped value is returned.
///
- /// # Examples
+ /// This allows chaining together multiple subtractions to create a
+ /// wider subtraction, and can be useful for bignum subtraction. This
+ /// method should only be used for the most significant word; for the
+ /// less significant words the unsigned method
+ #[doc = concat!("[`", stringify!($UnsignedT), "::borrowing_sub`]")]
+ /// should be used.
///
- /// Basic usage:
+ /// The output boolean returned by this method is *not* a borrow flag,
+ /// and should *not* be subtracted from a more significant word.
+ ///
+ /// If the input borrow is false, this method is equivalent to
+ /// [`overflowing_sub`](Self::overflowing_sub).
+ ///
+ /// # Examples
///
/// ```
/// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, false), (3, false));")]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, true), (2, false));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, false), (-1, false));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, true), (-2, false));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.borrowing_sub(1, true), (", stringify!($SelfT), "::MAX - 1, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.borrowing_sub(-1, false), (", stringify!($SelfT), "::MIN, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.borrowing_sub(-1, true), (", stringify!($SelfT), "::MAX, false));")]
+ /// // Only the most significant word is signed.
+ /// //
+ #[doc = concat!("// 6 8 (a = 6 × 2^", stringify!($BITS), " + 8)")]
+ #[doc = concat!("// - -5 9 (b = -5 × 2^", stringify!($BITS), " + 9)")]
+ /// // ---------
+ #[doc = concat!("// 10 MAX (diff = 10 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")]
+ ///
+ #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (6, 8);")]
+ #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (-5, 9);")]
+ /// let borrow0 = false;
+ ///
+ #[doc = concat!("// ", stringify!($UnsignedT), "::borrowing_sub for the less significant words")]
+ /// let (diff0, borrow1) = a0.borrowing_sub(b0, borrow0);
+ /// assert_eq!(borrow1, true);
+ ///
+ #[doc = concat!("// ", stringify!($SelfT), "::borrowing_sub for the most significant word")]
+ /// let (diff1, overflow) = a1.borrowing_sub(b1, borrow1);
+ /// assert_eq!(overflow, false);
+ ///
+ #[doc = concat!("assert_eq!((diff1, diff0), (10, ", stringify!($UnsignedT), "::MAX));")]
/// ```
#[unstable(feature = "bigint_helper_methods", issue = "85532")]
#[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
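And the matching subtraction, again instantiated with `i8`/`u8` (nightly, `bigint_helper_methods`):

```rust
#![feature(bigint_helper_methods)]

fn main() {
    // a = 6 * 256 + 8 = 1544           b = -5 * 256 + 9 = -1271
    let (a1, a0): (i8, u8) = (6, 8);
    let (b1, b0): (i8, u8) = (-5, 9);

    // Unsigned borrowing_sub for the low word, signed borrowing_sub for the high word.
    let (diff0, borrow) = a0.borrowing_sub(b0, false);
    let (diff1, overflow) = a1.borrowing_sub(b1, borrow);

    assert!(borrow);    // 8 - 9 needed a borrow from the high word
    assert!(!overflow);
    // diff = 10 * 256 + 255 = 2815 = 1544 - (-1271)
    assert_eq!((diff1, diff0), (10, u8::MAX));
}
```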
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 127b047db..b7e1aee9d 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -1,4 +1,3 @@
-#[cfg(not(bootstrap))]
use crate::marker::Tuple;
/// The version of the call operator that takes an immutable receiver.
@@ -54,87 +53,6 @@ use crate::marker::Tuple;
/// let double = |x| x * 2;
/// assert_eq!(call_with_one(double), 2);
/// ```
-#[cfg(bootstrap)]
-#[lang = "fn"]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
- on(
- Args = "()",
- note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
- ),
- on(
- _Self = "unsafe fn",
- note = "unsafe function cannot be called generically without an unsafe block",
- // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
- label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
- ),
- message = "expected a `{Fn}<{Args}>` closure, found `{Self}`",
- label = "expected an `Fn<{Args}>` closure, found `{Self}`"
-)]
-#[fundamental] // so that regex can rely that `&str: !FnMut`
-#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
-pub trait Fn<Args>: FnMut<Args> {
- /// Performs the call operation.
- #[unstable(feature = "fn_traits", issue = "29625")]
- extern "rust-call" fn call(&self, args: Args) -> Self::Output;
-}
-
-/// The version of the call operator that takes an immutable receiver.
-///
-/// Instances of `Fn` can be called repeatedly without mutating state.
-///
-/// *This trait (`Fn`) is not to be confused with [function pointers]
-/// (`fn`).*
-///
-/// `Fn` is implemented automatically by closures which only take immutable
-/// references to captured variables or don't capture anything at all, as well
-/// as (safe) [function pointers] (with some caveats, see their documentation
-/// for more details). Additionally, for any type `F` that implements `Fn`, `&F`
-/// implements `Fn`, too.
-///
-/// Since both [`FnMut`] and [`FnOnce`] are supertraits of `Fn`, any
-/// instance of `Fn` can be used as a parameter where a [`FnMut`] or [`FnOnce`]
-/// is expected.
-///
-/// Use `Fn` as a bound when you want to accept a parameter of function-like
-/// type and need to call it repeatedly and without mutating state (e.g., when
-/// calling it concurrently). If you do not need such strict requirements, use
-/// [`FnMut`] or [`FnOnce`] as bounds.
-///
-/// See the [chapter on closures in *The Rust Programming Language*][book] for
-/// some more information on this topic.
-///
-/// Also of note is the special syntax for `Fn` traits (e.g.
-/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
-/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
-///
-/// [book]: ../../book/ch13-01-closures.html
-/// [function pointers]: fn
-/// [nomicon]: ../../nomicon/hrtb.html
-///
-/// # Examples
-///
-/// ## Calling a closure
-///
-/// ```
-/// let square = |x| x * x;
-/// assert_eq!(square(5), 25);
-/// ```
-///
-/// ## Using a `Fn` parameter
-///
-/// ```
-/// fn call_with_one<F>(func: F) -> usize
-/// where F: Fn(usize) -> usize {
-/// func(1)
-/// }
-///
-/// let double = |x| x * 2;
-/// assert_eq!(call_with_one(double), 2);
-/// ```
-#[cfg(not(bootstrap))]
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
@@ -222,95 +140,6 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
///
/// assert_eq!(x, 5);
/// ```
-#[cfg(bootstrap)]
-#[lang = "fn_mut"]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
- on(
- Args = "()",
- note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
- ),
- on(
- _Self = "unsafe fn",
- note = "unsafe function cannot be called generically without an unsafe block",
- // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
- label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
- ),
- message = "expected a `{FnMut}<{Args}>` closure, found `{Self}`",
- label = "expected an `FnMut<{Args}>` closure, found `{Self}`"
-)]
-#[fundamental] // so that regex can rely that `&str: !FnMut`
-#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
-pub trait FnMut<Args>: FnOnce<Args> {
- /// Performs the call operation.
- #[unstable(feature = "fn_traits", issue = "29625")]
- extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
-}
-
-/// The version of the call operator that takes a mutable receiver.
-///
-/// Instances of `FnMut` can be called repeatedly and may mutate state.
-///
-/// `FnMut` is implemented automatically by closures which take mutable
-/// references to captured variables, as well as all types that implement
-/// [`Fn`], e.g., (safe) [function pointers] (since `FnMut` is a supertrait of
-/// [`Fn`]). Additionally, for any type `F` that implements `FnMut`, `&mut F`
-/// implements `FnMut`, too.
-///
-/// Since [`FnOnce`] is a supertrait of `FnMut`, any instance of `FnMut` can be
-/// used where a [`FnOnce`] is expected, and since [`Fn`] is a subtrait of
-/// `FnMut`, any instance of [`Fn`] can be used where `FnMut` is expected.
-///
-/// Use `FnMut` as a bound when you want to accept a parameter of function-like
-/// type and need to call it repeatedly, while allowing it to mutate state.
-/// If you don't want the parameter to mutate state, use [`Fn`] as a
-/// bound; if you don't need to call it repeatedly, use [`FnOnce`].
-///
-/// See the [chapter on closures in *The Rust Programming Language*][book] for
-/// some more information on this topic.
-///
-/// Also of note is the special syntax for `Fn` traits (e.g.
-/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
-/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
-///
-/// [book]: ../../book/ch13-01-closures.html
-/// [function pointers]: fn
-/// [nomicon]: ../../nomicon/hrtb.html
-///
-/// # Examples
-///
-/// ## Calling a mutably capturing closure
-///
-/// ```
-/// let mut x = 5;
-/// {
-/// let mut square_x = || x *= x;
-/// square_x();
-/// }
-/// assert_eq!(x, 25);
-/// ```
-///
-/// ## Using a `FnMut` parameter
-///
-/// ```
-/// fn do_twice<F>(mut func: F)
-/// where F: FnMut()
-/// {
-/// func();
-/// func();
-/// }
-///
-/// let mut x: usize = 1;
-/// {
-/// let add_two_to_x = || x += 2;
-/// do_twice(add_two_to_x);
-/// }
-///
-/// assert_eq!(x, 5);
-/// ```
-#[cfg(not(bootstrap))]
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
@@ -390,92 +219,6 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
///
/// // `consume_and_return_x` can no longer be invoked at this point
/// ```
-#[cfg(bootstrap)]
-#[lang = "fn_once"]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
- on(
- Args = "()",
- note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
- ),
- on(
- _Self = "unsafe fn",
- note = "unsafe function cannot be called generically without an unsafe block",
- // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
- label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
- ),
- message = "expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
- label = "expected an `FnOnce<{Args}>` closure, found `{Self}`"
-)]
-#[fundamental] // so that regex can rely that `&str: !FnMut`
-#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
-pub trait FnOnce<Args> {
- /// The returned type after the call operator is used.
- #[lang = "fn_once_output"]
- #[stable(feature = "fn_once_output", since = "1.12.0")]
- type Output;
-
- /// Performs the call operation.
- #[unstable(feature = "fn_traits", issue = "29625")]
- extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
-}
-
-/// The version of the call operator that takes a by-value receiver.
-///
-/// Instances of `FnOnce` can be called, but might not be callable multiple
-/// times. Because of this, if the only thing known about a type is that it
-/// implements `FnOnce`, it can only be called once.
-///
-/// `FnOnce` is implemented automatically by closures that might consume captured
-/// variables, as well as all types that implement [`FnMut`], e.g., (safe)
-/// [function pointers] (since `FnOnce` is a supertrait of [`FnMut`]).
-///
-/// Since both [`Fn`] and [`FnMut`] are subtraits of `FnOnce`, any instance of
-/// [`Fn`] or [`FnMut`] can be used where a `FnOnce` is expected.
-///
-/// Use `FnOnce` as a bound when you want to accept a parameter of function-like
-/// type and only need to call it once. If you need to call the parameter
-/// repeatedly, use [`FnMut`] as a bound; if you also need it to not mutate
-/// state, use [`Fn`].
-///
-/// See the [chapter on closures in *The Rust Programming Language*][book] for
-/// some more information on this topic.
-///
-/// Also of note is the special syntax for `Fn` traits (e.g.
-/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
-/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
-///
-/// [book]: ../../book/ch13-01-closures.html
-/// [function pointers]: fn
-/// [nomicon]: ../../nomicon/hrtb.html
-///
-/// # Examples
-///
-/// ## Using a `FnOnce` parameter
-///
-/// ```
-/// fn consume_with_relish<F>(func: F)
-/// where F: FnOnce() -> String
-/// {
-/// // `func` consumes its captured variables, so it cannot be run more
-/// // than once.
-/// println!("Consumed: {}", func());
-///
-/// println!("Delicious!");
-///
-/// // Attempting to invoke `func()` again will throw a `use of moved
-/// // value` error for `func`.
-/// }
-///
-/// let x = String::from("x");
-/// let consume_and_return_x = move || x;
-/// consume_with_relish(consume_and_return_x);
-///
-/// // `consume_and_return_x` can no longer be invoked at this point
-/// ```
-#[cfg(not(bootstrap))]
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
@@ -507,68 +250,6 @@ pub trait FnOnce<Args: Tuple> {
extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
}
-#[cfg(bootstrap)]
-mod impls {
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const Fn<A> for &F
- where
- F: ~const Fn<A>,
- {
- extern "rust-call" fn call(&self, args: A) -> F::Output {
- (**self).call(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnMut<A> for &F
- where
- F: ~const Fn<A>,
- {
- extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
- (**self).call(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnOnce<A> for &F
- where
- F: ~const Fn<A>,
- {
- type Output = F::Output;
-
- extern "rust-call" fn call_once(self, args: A) -> F::Output {
- (*self).call(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnMut<A> for &mut F
- where
- F: ~const FnMut<A>,
- {
- extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
- (*self).call_mut(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnOnce<A> for &mut F
- where
- F: ~const FnMut<A>,
- {
- type Output = F::Output;
- extern "rust-call" fn call_once(self, args: A) -> F::Output {
- (*self).call_mut(args)
- }
- }
-}
-
-#[cfg(not(bootstrap))]
mod impls {
use crate::marker::Tuple;
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index 5e3dc48b6..228efb0bc 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -165,7 +165,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
#[doc(alias = "]")]
#[doc(alias = "[]")]
#[const_trait]
-pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
+pub trait IndexMut<Idx: ?Sized>: ~const Index<Idx> {
/// Performs the mutable indexing (`container[index]`) operation.
///
/// # Panics
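The added `~const` supertrait bound only matters for (still unstable) `impl const` implementations; ordinary `Index`/`IndexMut` impls and uses are unchanged. A plain example of the supertrait relationship, with an illustrative `Grid` type that is not taken from the patch:

```rust
use std::ops::{Index, IndexMut};

struct Grid {
    cells: Vec<i32>,
    width: usize,
}

impl Index<(usize, usize)> for Grid {
    type Output = i32;
    fn index(&self, (x, y): (usize, usize)) -> &i32 {
        &self.cells[y * self.width + x]
    }
}

// `IndexMut` requires an `Index` impl with the same `Idx` and `Output`.
impl IndexMut<(usize, usize)> for Grid {
    fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut i32 {
        &mut self.cells[y * self.width + x]
    }
}

fn main() {
    let mut g = Grid { cells: vec![0; 6], width: 3 };
    g[(2, 1)] = 7;
    assert_eq!(g[(2, 1)], 7);
}
```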
diff --git a/library/core/src/ops/mod.rs b/library/core/src/ops/mod.rs
index a5e5b13b3..97d9b750d 100644
--- a/library/core/src/ops/mod.rs
+++ b/library/core/src/ops/mod.rs
@@ -17,10 +17,10 @@
//! should have some resemblance to multiplication (and share expected
//! properties like associativity).
//!
-//! Note that the `&&` and `||` operators short-circuit, i.e., they only
-//! evaluate their second operand if it contributes to the result. Since this
-//! behavior is not enforceable by traits, `&&` and `||` are not supported as
-//! overloadable operators.
+//! Note that the `&&` and `||` operators are currently not supported for
+//! overloading. Due to their short circuiting nature, they require a different
+//! design from traits for other operators like [`BitAnd`]. Designs for them are
+//! under discussion.
//!
//! Many of the operators take their operands by value. In non-generic
//! contexts involving built-in types, this is usually not a problem.
@@ -201,7 +201,7 @@ pub(crate) use self::try_trait::{ChangeOutputType, NeverShortCircuit};
#[unstable(feature = "generator_trait", issue = "43122")]
pub use self::generator::{Generator, GeneratorState};
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
pub use self::unsize::CoerceUnsized;
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
diff --git a/library/core/src/ops/unsize.rs b/library/core/src/ops/unsize.rs
index a920b9165..b51f12580 100644
--- a/library/core/src/ops/unsize.rs
+++ b/library/core/src/ops/unsize.rs
@@ -31,41 +31,41 @@ use crate::marker::Unsize;
/// [dst-coerce]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
/// [unsize]: crate::marker::Unsize
/// [nomicon-coerce]: ../../nomicon/coercions.html
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
#[lang = "coerce_unsized"]
pub trait CoerceUnsized<T: ?Sized> {
// Empty.
}
// &mut T -> &mut U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
// &mut T -> &U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b mut T {}
// &mut T -> *mut U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for &'a mut T {}
// &mut T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a mut T {}
// &T -> &U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
// &T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a T {}
// *mut T -> *mut U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
// *mut T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {}
// *const T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
/// `DispatchFromDyn` is used in the implementation of object safety checks (specifically allowing
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index 505d964e5..7cc00e3f8 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -72,6 +72,50 @@
//! }
//! ```
//!
+//! # The question mark operator, `?`
+//!
+//! Similar to the [`Result`] type, when writing code that calls many functions that return the
+//! [`Option`] type, handling `Some`/`None` can be tedious. The question mark
+//! operator, [`?`], hides some of the boilerplate of propagating values
+//! up the call stack.
+//!
+//! It replaces this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! fn add_last_numbers(stack: &mut Vec<i32>) -> Option<i32> {
+//! let a = stack.pop();
+//! let b = stack.pop();
+//!
+//! match (a, b) {
+//! (Some(x), Some(y)) => Some(x + y),
+//! _ => None,
+//! }
+//! }
+//!
+//! ```
+//!
+//! With this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! fn add_last_numbers(stack: &mut Vec<i32>) -> Option<i32> {
+//! Some(stack.pop()? + stack.pop()?)
+//! }
+//! ```
+//!
+//! *It's much nicer!*
+//!
+//! Ending the expression with [`?`] will result in the [`Some`]'s unwrapped value, unless the
+//! result is [`None`], in which case [`None`] is returned early from the enclosing function.
+//!
+//! [`?`] can be used in functions that return [`Option`] because of the
+//! early return of [`None`] that it provides.
+//!
+//! [`?`]: crate::ops::Try
+//! [`Some`]: Some
+//! [`None`]: None
+//!
//! # Representation
//!
//! Rust guarantees to optimize the following types `T` such that
@@ -608,13 +652,14 @@ impl<T> Option<T> {
///
/// # Examples
///
- /// Converts an <code>Option<[String]></code> into an <code>Option<[usize]></code>, preserving
- /// the original. The [`map`] method takes the `self` argument by value, consuming the original,
- /// so this technique uses `as_ref` to first take an `Option` to a reference
- /// to the value inside the original.
+ /// Calculates the length of an <code>Option<[String]></code> as an <code>Option<[usize]></code>
+ /// without moving the [`String`]. The [`map`] method takes the `self` argument by value,
+ /// consuming the original, so this technique uses `as_ref` to first take an `Option` to a
+ /// reference to the value inside the original.
///
/// [`map`]: Option::map
/// [String]: ../../std/string/struct.String.html "String"
+ /// [`String`]: ../../std/string/struct.String.html "String"
///
/// ```
/// let text: Option<String> = Some("Hello, world!".to_string());
@@ -902,8 +947,8 @@ impl<T> Option<T> {
///
/// # Examples
///
- /// Converts an <code>Option<[String]></code> into an <code>Option<[usize]></code>, consuming
- /// the original:
+ /// Calculates the length of an <code>Option<[String]></code> as an
+ /// <code>Option<[usize]></code>, consuming the original:
///
/// [String]: ../../std/string/struct.String.html "String"
/// ```
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index 461b70c32..8338a5d7e 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -90,14 +90,14 @@ pub macro unreachable_2021 {
),
}
-/// An internal trait used by libstd to pass data from libstd to `panic_unwind`
-/// and other panic runtimes. Not intended to be stabilized any time soon, do
-/// not use.
+/// An internal trait used by std to pass data from std to `panic_unwind` and
+/// other panic runtimes. Not intended to be stabilized any time soon, do not
+/// use.
#[unstable(feature = "std_internals", issue = "none")]
#[doc(hidden)]
pub unsafe trait BoxMeUp {
/// Take full ownership of the contents.
- /// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in libcore.
+ /// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in core.
///
/// After this method got called, only some dummy default value is left in `self`.
/// Calling this method twice, or calling `get` after calling this method, is an error.
diff --git a/library/core/src/panic/panic_info.rs b/library/core/src/panic/panic_info.rs
index 1923155eb..0d385c9d1 100644
--- a/library/core/src/panic/panic_info.rs
+++ b/library/core/src/panic/panic_info.rs
@@ -157,7 +157,7 @@ impl fmt::Display for PanicInfo<'_> {
write!(formatter, "'{}', ", payload)?
}
// NOTE: we cannot use downcast_ref::<String>() here
- // since String is not available in libcore!
+ // since String is not available in core!
// The payload is a String when `std::panic!` is called with multiple arguments,
// but in that case the message is also available.
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index a704a00fa..48e90e6d7 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -1,8 +1,8 @@
-//! Panic support for libcore
+//! Panic support for core
//!
//! The core library cannot define panicking, but it does *declare* panicking. This
-//! means that the functions inside of libcore are allowed to panic, but to be
-//! useful an upstream crate must define panicking for libcore to use. The current
+//! means that the functions inside of core are allowed to panic, but to be
+//! useful an upstream crate must define panicking for core to use. The current
//! interface for panicking is:
//!
//! ```
@@ -13,7 +13,7 @@
//! This definition allows for panicking with any general message, but it does not
//! allow for failing with a `Box<Any>` value. (`PanicInfo` just contains a `&(dyn Any + Send)`,
//! for which we fill in a dummy value in `PanicInfo::internal_constructor`.)
-//! The reason for this is that libcore is not allowed to allocate.
+//! The reason for this is that core is not allowed to allocate.
//!
//! This module contains a few other panicking functions, but these are just the
//! necessary lang items for the compiler. All panics are funneled through this
@@ -64,12 +64,17 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
unsafe { panic_impl(&pi) }
}
-/// Like panic_fmt, but without unwinding and track_caller to reduce the impact on codesize.
-/// Also just works on `str`, as a `fmt::Arguments` needs more space to be passed.
+/// Like `panic_fmt`, but for non-unwinding panics.
+///
+/// Has to be a separate function so that it can carry the `rustc_nounwind` attribute.
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[track_caller]
+// This attribute has the key side-effect that if the panic handler ignores `can_unwind`
+// and unwinds anyway, we will hit the "unwinding out of nounwind function" guard,
+// which causes a "panic in a function that cannot unwind".
#[rustc_nounwind]
-pub fn panic_str_nounwind(msg: &'static str) -> ! {
+pub fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>) -> ! {
if cfg!(feature = "panic_immediate_abort") {
super::intrinsics::abort()
}
@@ -82,8 +87,6 @@ pub fn panic_str_nounwind(msg: &'static str) -> ! {
}
// PanicInfo with the `can_unwind` flag set to false forces an abort.
- let pieces = [msg];
- let fmt = fmt::Arguments::new_v1(&pieces, &[]);
let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false);
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
@@ -93,7 +96,7 @@ pub fn panic_str_nounwind(msg: &'static str) -> ! {
// Next we define a bunch of higher-level wrappers that all bottom out in the two core functions
// above.
-/// The underlying implementation of libcore's `panic!` macro when no formatting is used.
+/// The underlying implementation of core's `panic!` macro when no formatting is used.
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
@@ -111,6 +114,15 @@ pub const fn panic(expr: &'static str) -> ! {
panic_fmt(fmt::Arguments::new_v1(&[expr], &[]));
}
+/// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cfg_attr(not(bootstrap), lang = "panic_nounwind")] // needed by codegen for non-unwinding panics
+#[rustc_nounwind]
+pub fn panic_nounwind(expr: &'static str) -> ! {
+ panic_nounwind_fmt(fmt::Arguments::new_v1(&[expr], &[]));
+}
+
#[inline]
#[track_caller]
#[rustc_diagnostic_item = "panic_str"]
@@ -153,10 +165,11 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
/// any extra arguments (including those synthesized by track_caller).
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[lang = "panic_no_unwind"] // needed by codegen for panic in nounwind function
+#[cfg_attr(bootstrap, lang = "panic_no_unwind")] // needed by codegen for panic in nounwind function
+#[cfg_attr(not(bootstrap), lang = "panic_cannot_unwind")] // needed by codegen for panic in nounwind function
#[rustc_nounwind]
-fn panic_no_unwind() -> ! {
- panic_str_nounwind("panic in a function that cannot unwind")
+fn panic_cannot_unwind() -> ! {
+ panic_nounwind("panic in a function that cannot unwind")
}
/// This function is used instead of panic_fmt in const eval.
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index 4524fa4c4..febe57dc9 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -485,6 +485,16 @@ impl<P: Deref<Target: Unpin>> Pin<P> {
///
/// Unlike `Pin::new_unchecked`, this method is safe because the pointer
/// `P` dereferences to an [`Unpin`] type, which cancels the pinning guarantees.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// // We can pin the value, since it doesn't care about being moved
+ /// let mut pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// ```
#[inline(always)]
#[rustc_const_unstable(feature = "const_pin", issue = "76654")]
#[stable(feature = "pin", since = "1.33.0")]
@@ -496,8 +506,20 @@ impl<P: Deref<Target: Unpin>> Pin<P> {
/// Unwraps this `Pin<P>` returning the underlying pointer.
///
- /// This requires that the data inside this `Pin` is [`Unpin`] so that we
+ /// This requires that the data inside this `Pin` implements [`Unpin`] so that we
/// can ignore the pinning invariants when unwrapping it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// // Unwrap the pin to get a reference to the value
+ /// let r = Pin::into_inner(pinned);
+ /// assert_eq!(*r, 5);
+ /// ```
#[inline(always)]
#[rustc_const_unstable(feature = "const_pin", issue = "76654")]
#[stable(feature = "pin_into_inner", since = "1.39.0")]
@@ -600,9 +622,8 @@ impl<P: Deref> Pin<P> {
/// that the closure is pinned.
///
/// The better alternative is to avoid all that trouble and do the pinning in the outer function
- /// instead (here using the unstable `pin` macro):
+ /// instead (here using the [`pin!`][crate::pin::pin] macro):
/// ```
- /// #![feature(pin_macro)]
/// use std::pin::pin;
/// use std::task::Context;
/// use std::future::Future;
@@ -707,6 +728,18 @@ impl<P: DerefMut> Pin<P> {
///
/// This overwrites pinned data, but that is okay: its destructor gets
/// run before being overwritten, so no pinning guarantee is violated.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let mut pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// println!("{}", pinned); // 5
+ /// pinned.as_mut().set(10);
+ /// println!("{}", pinned); // 10
+ /// ```
#[stable(feature = "pin", since = "1.33.0")]
#[inline(always)]
pub fn set(&mut self, value: P::Target)
@@ -720,7 +753,7 @@ impl<P: DerefMut> Pin<P> {
impl<'a, T: ?Sized> Pin<&'a T> {
/// Constructs a new pin by mapping the interior value.
///
- /// For example, if you wanted to get a `Pin` of a field of something,
+ /// For example, if you wanted to get a `Pin` of a field of something,
/// you could use this to get access to that field in one line of code.
/// However, there are several gotchas with these "pinning projections";
/// see the [`pin` module] documentation for further details on that topic.
@@ -823,7 +856,7 @@ impl<'a, T: ?Sized> Pin<&'a mut T> {
/// Construct a new pin by mapping the interior value.
///
- /// For example, if you wanted to get a `Pin` of a field of something,
+ /// For example, if you wanted to get a `Pin` of a field of something,
/// you could use this to get access to that field in one line of code.
/// However, there are several gotchas with these "pinning projections";
/// see the [`pin` module] documentation for further details on that topic.
@@ -992,7 +1025,6 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### Basic usage
///
/// ```rust
-/// #![feature(pin_macro)]
/// # use core::marker::PhantomPinned as Foo;
/// use core::pin::{pin, Pin};
///
@@ -1010,7 +1042,6 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### Manually polling a `Future` (without `Unpin` bounds)
///
/// ```rust
-/// #![feature(pin_macro)]
/// use std::{
/// future::Future,
/// pin::pin,
@@ -1049,7 +1080,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### With `Generator`s
///
/// ```rust
-/// #![feature(generators, generator_trait, pin_macro)]
+/// #![feature(generators, generator_trait)]
/// use core::{
/// ops::{Generator, GeneratorState},
/// pin::pin,
@@ -1092,7 +1123,6 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// The following, for instance, fails to compile:
///
/// ```rust,compile_fail
-/// #![feature(pin_macro)]
/// use core::pin::{pin, Pin};
/// # use core::{marker::PhantomPinned as Foo, mem::drop as stuff};
///
@@ -1134,7 +1164,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// constructor.
///
/// [`Box::pin`]: ../../std/boxed/struct.Box.html#method.pin
-#[unstable(feature = "pin_macro", issue = "93178")]
+#[stable(feature = "pin_macro", since = "1.68.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(unsafe_pin_internals)]
pub macro pin($value:expr $(,)?) {
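The hunk above stabilizes `pin!` in 1.68.0; a brief usage sketch (not taken from the patch): the macro pins a value on the stack without `Box::pin` or any `unsafe`.

use std::pin::{pin, Pin};

fn main() {
    // `pin!` takes the value by move and yields a `Pin<&mut T>` backed by a
    // hidden stack temporary that lives as long as the binding.
    let pinned: Pin<&mut u8> = pin!(5_u8);
    assert_eq!(*pinned, 5);
}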
diff --git a/library/core/src/prelude/mod.rs b/library/core/src/prelude/mod.rs
index 3cd3a3b78..12f762ef1 100644
--- a/library/core/src/prelude/mod.rs
+++ b/library/core/src/prelude/mod.rs
@@ -1,8 +1,8 @@
-//! The libcore prelude
+//! The core prelude
//!
-//! This module is intended for users of libcore which do not link to libstd as
-//! well. This module is imported by default when `#![no_std]` is used in the
-//! same manner as the standard library's prelude.
+//! This module is intended for users of core which do not link to std as well.
+//! This module is imported by default when `#![no_std]` is used in the same
+//! manner as the standard library's prelude.
#![stable(feature = "core_prelude", since = "1.4.0")]
diff --git a/library/core/src/prelude/v1.rs b/library/core/src/prelude/v1.rs
index 2d67d742c..10525a16f 100644
--- a/library/core/src/prelude/v1.rs
+++ b/library/core/src/prelude/v1.rs
@@ -75,14 +75,12 @@ pub use crate::macros::builtin::{RustcDecodable, RustcEncodable};
// Do not `doc(no_inline)` so that they become doc items on their own
// (no public module for them to be re-exported from).
-#[cfg(not(bootstrap))]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use crate::macros::builtin::alloc_error_handler;
-#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use crate::macros::builtin::{bench, derive, global_allocator, test, test_case};
+pub use crate::macros::builtin::{
+ alloc_error_handler, bench, derive, global_allocator, test, test_case,
+};
#[unstable(feature = "derive_const", issue = "none")]
-#[cfg(not(bootstrap))]
pub use crate::macros::builtin::derive_const;
#[unstable(
@@ -104,5 +102,4 @@ pub use crate::macros::builtin::cfg_eval;
issue = "23416",
reason = "placeholder syntax for type ascription"
)]
-#[cfg(not(bootstrap))]
pub use crate::macros::builtin::type_ascribe;
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index 64a5290c3..2123147c7 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -10,8 +10,7 @@ use crate::{cmp, fmt, hash, mem, num};
/// are likely not to be supported by actual allocators and linkers.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(transparent)]
pub struct Alignment(AlignmentEnum);
@@ -203,8 +202,7 @@ type AlignmentEnum = AlignmentEnum32;
type AlignmentEnum = AlignmentEnum64;
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(u16)]
enum AlignmentEnum16 {
_Align1Shl0 = 1 << 0,
@@ -226,8 +224,7 @@ enum AlignmentEnum16 {
}
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(u32)]
enum AlignmentEnum32 {
_Align1Shl0 = 1 << 0,
@@ -265,8 +262,7 @@ enum AlignmentEnum32 {
}
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(u64)]
enum AlignmentEnum64 {
_Align1Shl0 = 1 << 0,
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index d34813599..7b1cb5488 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,6 +1,6 @@
use super::*;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics;
+use crate::intrinsics::{self, const_eval_select};
use crate::mem;
use crate::slice::{self, SliceIndex};
@@ -34,12 +34,23 @@ impl<T: ?Sized> *const T {
#[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
#[inline]
pub const fn is_null(self) -> bool {
- // Compare via a cast to a thin pointer, so fat pointers are only
- // considering their "data" part for null-ness.
- match (self as *const u8).guaranteed_eq(null()) {
- None => false,
- Some(res) => res,
+ #[inline]
+ fn runtime_impl(ptr: *const u8) -> bool {
+ ptr.addr() == 0
}
+
+ #[inline]
+ const fn const_impl(ptr: *const u8) -> bool {
+ // Compare via a cast to a thin pointer, so fat pointers are only
+ // considering their "data" part for null-ness.
+ match (ptr).guaranteed_eq(null_mut()) {
+ None => false,
+ Some(res) => res,
+ }
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self as *const u8,), const_impl, runtime_impl) }
}
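A minimal sketch of what the split above provides (assuming a nightly toolchain with the `const_ptr_is_null` gate named in the attribute; not part of the patch): the `const_impl` path keeps `is_null` usable in constant evaluation, while the `runtime_impl` path reduces to a plain address check.

#![feature(const_ptr_is_null)]

// Const-evaluated through `const_impl`.
const NULL_IS_NULL: bool = core::ptr::null::<u8>().is_null();

fn main() {
    assert!(NULL_IS_NULL);
    // Evaluated at runtime through `runtime_impl`.
    assert!(!(&0u8 as *const u8).is_null());
}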
/// Casts to a pointer of another type.
@@ -191,14 +202,11 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
- unsafe { mem::transmute(self) }
+ unsafe { mem::transmute(self.cast::<()>()) }
}
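With the `T: Sized` bound removed above, `addr` also works on pointers to unsized types; a short sketch (assuming nightly `strict_provenance`, not taken from the patch):

#![feature(strict_provenance)]

fn main() {
    let data = [1u8, 2, 3];
    let fat: *const [u8] = &data[..];
    // Only the data address is extracted; the slice-length metadata is ignored.
    assert_eq!(fat.addr(), fat.cast::<u8>() as usize);
}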
/// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
@@ -228,12 +236,9 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn expose_addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
- self as usize
+ self.cast::<()>() as usize
}
/// Creates a new pointer with the given address.
@@ -251,10 +256,7 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn with_addr(self, addr: usize) -> Self
- where
- T: Sized,
- {
+ pub fn with_addr(self, addr: usize) -> Self {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the mean-time, this operation is defined to be "as if" it was
@@ -277,10 +279,7 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
- where
- T: Sized,
- {
+ pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
self.with_addr(f(self.addr()))
}
@@ -1008,7 +1007,7 @@ impl<T: ?Sized> *const T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn sub(self, count: usize) -> Self
where
@@ -1173,7 +1172,7 @@ impl<T: ?Sized> *const T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
pub const fn wrapping_sub(self, count: usize) -> Self
where
T: Sized,
@@ -1350,26 +1349,6 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- #[cfg(bootstrap)]
- {
- fn rt_impl<T>(p: *const T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
- }
-
- const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
- usize::MAX
- }
-
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
- }
-
- #[cfg(not(bootstrap))]
{
// SAFETY: `align` has been checked to be a power of 2 above
unsafe { align_offset(self, align) }
@@ -1406,8 +1385,7 @@ impl<T: ?Sized> *const T {
/// is never aligned if cast to a type with a stricter alignment than the reference's
/// underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1433,8 +1411,7 @@ impl<T: ?Sized> *const T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1460,8 +1437,7 @@ impl<T: ?Sized> *const T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1537,8 +1513,7 @@ impl<T: ?Sized> *const T {
/// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
/// cannot be stricter aligned than the reference's underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1563,8 +1538,7 @@ impl<T: ?Sized> *const T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1588,8 +1562,7 @@ impl<T: ?Sized> *const T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1613,11 +1586,22 @@ impl<T: ?Sized> *const T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
- // The cast to `()` is used to
- // 1. deal with fat pointers; and
- // 2. ensure that `align_offset` doesn't actually try to compute an offset.
- self.cast::<()>().align_offset(align) == 0
+ #[inline]
+ fn runtime_impl(ptr: *const (), align: usize) -> bool {
+ ptr.addr() & (align - 1) == 0
+ }
+
+ #[inline]
+ const fn const_impl(ptr: *const (), align: usize) -> bool {
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ ptr.align_offset(align) == 0
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl) }
}
}
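For reference, a sketch of the check the new `runtime_impl` performs: for a power-of-two `align`, the mask test `addr & (align - 1) == 0` is the usual "address is a multiple of align" test. Assuming nightly `pointer_is_aligned` (not part of the patch):

#![feature(pointer_is_aligned)]

fn main() {
    let x = 0u32;
    let p: *const u8 = (&x as *const u32).cast();
    assert!(p.is_aligned_to(1));
    assert!(p.is_aligned_to(std::mem::align_of::<u32>()));
}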
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index a8604843e..2ea032d4a 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -50,7 +50,7 @@ use crate::hash::{Hash, Hasher};
///
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Pointee {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 48b2e88da..1ad9af154 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -516,6 +516,27 @@ pub const fn null<T: ?Sized + Thin>() -> *const T {
from_raw_parts(invalid(0), ())
}
+/// Creates a null mutable raw pointer.
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let p: *mut i32 = ptr::null_mut();
+/// assert!(p.is_null());
+/// ```
+#[inline(always)]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
+#[rustc_allow_const_fn_unstable(ptr_metadata)]
+#[rustc_diagnostic_item = "ptr_null_mut"]
+pub const fn null_mut<T: ?Sized + Thin>() -> *mut T {
+ from_raw_parts_mut(invalid_mut(0), ())
+}
+
/// Creates an invalid pointer with the given address.
///
/// This is different from `addr as *const T`, which creates a pointer that picks up a previously
@@ -663,25 +684,26 @@ where
addr as *mut T
}
-/// Creates a null mutable raw pointer.
+/// Convert a reference to a raw pointer.
///
-/// # Examples
-///
-/// ```
-/// use std::ptr;
+/// This is equivalent to `r as *const T`, but is a bit safer since it will never silently change
+/// type or mutability, in particular if the code is refactored.
+#[inline(always)]
+#[must_use]
+#[unstable(feature = "ptr_from_ref", issue = "106116")]
+pub fn from_ref<T: ?Sized>(r: &T) -> *const T {
+ r
+}
+
+/// Convert a mutable reference to a raw pointer.
///
-/// let p: *mut i32 = ptr::null_mut();
-/// assert!(p.is_null());
-/// ```
+/// This is equivalent to `r as *mut T`, but is a bit safer since it will never silently change
+/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_promotable]
-#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
-#[rustc_allow_const_fn_unstable(ptr_metadata)]
-#[rustc_diagnostic_item = "ptr_null_mut"]
-pub const fn null_mut<T: ?Sized + Thin>() -> *mut T {
- from_raw_parts_mut(invalid_mut(0), ())
+#[unstable(feature = "ptr_from_ref", issue = "106116")]
+pub fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
+ r
}
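A usage sketch for the two new helpers (assuming nightly `ptr_from_ref`, per the attribute above; not taken from the patch):

#![feature(ptr_from_ref)]
use std::ptr;

fn main() {
    let mut x = 5_i32;
    let p: *const i32 = ptr::from_ref(&x);
    let q: *mut i32 = ptr::from_mut(&mut x);
    // Unlike `as` casts, these cannot silently change the pointee type or
    // mutability if the surrounding code is refactored later.
    assert_eq!(p as usize, q as usize);
}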
/// Forms a raw slice from a pointer and a length.
@@ -1679,7 +1701,7 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
// offset will be able to produce a `p` aligned to the specified `a`.
//
- // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
+ // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
// like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
// redistributes operations around the load-bearing, but pessimizing `and` instruction
// sufficiently for LLVM to be able to utilize the various optimizations it knows about.
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index c924a90b1..ed1e3bd48 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1,6 +1,6 @@
use super::*;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics;
+use crate::intrinsics::{self, const_eval_select};
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *mut T {
@@ -33,12 +33,23 @@ impl<T: ?Sized> *mut T {
#[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
#[inline]
pub const fn is_null(self) -> bool {
- // Compare via a cast to a thin pointer, so fat pointers are only
- // considering their "data" part for null-ness.
- match (self as *mut u8).guaranteed_eq(null_mut()) {
- None => false,
- Some(res) => res,
+ #[inline]
+ fn runtime_impl(ptr: *mut u8) -> bool {
+ ptr.addr() == 0
}
+
+ #[inline]
+ const fn const_impl(ptr: *mut u8) -> bool {
+ // Compare via a cast to a thin pointer, so fat pointers are only
+ // considering their "data" part for null-ness.
+ match (ptr).guaranteed_eq(null_mut()) {
+ None => false,
+ Some(res) => res,
+ }
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self as *mut u8,), const_impl, runtime_impl) }
}
/// Casts to a pointer of another type.
@@ -197,14 +208,11 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
- unsafe { mem::transmute(self) }
+ unsafe { mem::transmute(self.cast::<()>()) }
}
/// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
@@ -234,12 +242,9 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn expose_addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
- self as usize
+ self.cast::<()>() as usize
}
/// Creates a new pointer with the given address.
@@ -257,10 +262,7 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn with_addr(self, addr: usize) -> Self
- where
- T: Sized,
- {
+ pub fn with_addr(self, addr: usize) -> Self {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the mean-time, this operation is defined to be "as if" it was
@@ -283,10 +285,7 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
- where
- T: Sized,
- {
+ pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
self.with_addr(f(self.addr()))
}
@@ -1110,7 +1109,7 @@ impl<T: ?Sized> *mut T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn sub(self, count: usize) -> Self
where
@@ -1275,7 +1274,7 @@ impl<T: ?Sized> *mut T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
pub const fn wrapping_sub(self, count: usize) -> Self
where
T: Sized,
@@ -1618,26 +1617,6 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- #[cfg(bootstrap)]
- {
- fn rt_impl<T>(p: *mut T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
- }
-
- const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
- usize::MAX
- }
-
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
- }
-
- #[cfg(not(bootstrap))]
{
// SAFETY: `align` has been checked to be a power of 2 above
unsafe { align_offset(self, align) }
@@ -1674,8 +1653,7 @@ impl<T: ?Sized> *mut T {
/// is never aligned if cast to a type with a stricter alignment than the reference's
/// underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
/// #![feature(const_mut_refs)]
@@ -1702,8 +1680,7 @@ impl<T: ?Sized> *mut T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1730,8 +1707,7 @@ impl<T: ?Sized> *mut T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1807,8 +1783,7 @@ impl<T: ?Sized> *mut T {
/// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
/// cannot be stricter aligned than the reference's underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
/// #![feature(const_mut_refs)]
@@ -1834,8 +1809,7 @@ impl<T: ?Sized> *mut T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1860,8 +1834,7 @@ impl<T: ?Sized> *mut T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1885,11 +1858,22 @@ impl<T: ?Sized> *mut T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
- // The cast to `()` is used to
- // 1. deal with fat pointers; and
- // 2. ensure that `align_offset` doesn't actually try to compute an offset.
- self.cast::<()>().align_offset(align) == 0
+ #[inline]
+ fn runtime_impl(ptr: *mut (), align: usize) -> bool {
+ ptr.addr() & (align - 1) == 0
+ }
+
+ #[inline]
+ const fn const_impl(ptr: *mut (), align: usize) -> bool {
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ ptr.align_offset(align) == 0
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl) }
}
}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index c4348169c..8c1a64886 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -268,10 +268,7 @@ impl<T: ?Sized> NonNull<T> {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn addr(self) -> NonZeroUsize
- where
- T: Sized,
- {
+ pub fn addr(self) -> NonZeroUsize {
// SAFETY: The pointer is guaranteed by the type to be non-null,
// meaning that the address will be non-zero.
unsafe { NonZeroUsize::new_unchecked(self.pointer.addr()) }
@@ -286,10 +283,7 @@ impl<T: ?Sized> NonNull<T> {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn with_addr(self, addr: NonZeroUsize) -> Self
- where
- T: Sized,
- {
+ pub fn with_addr(self, addr: NonZeroUsize) -> Self {
// SAFETY: The result of `ptr::from::with_addr` is non-null because `addr` is guaranteed to be non-zero.
unsafe { NonNull::new_unchecked(self.pointer.with_addr(addr.get()) as *mut _) }
}
@@ -303,10 +297,7 @@ impl<T: ?Sized> NonNull<T> {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self
- where
- T: Sized,
- {
+ pub fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self {
self.with_addr(f(self.addr()))
}
@@ -712,7 +703,7 @@ impl<T: ?Sized> const Clone for NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Copy for NonNull<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 3f33c5fd6..f00c40f35 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -209,11 +209,10 @@
//!
//! *It's much nicer!*
//!
-//! Ending the expression with [`?`] will result in the unwrapped
-//! success ([`Ok`]) value, unless the result is [`Err`], in which case
-//! [`Err`] is returned early from the enclosing function.
+//! Ending the expression with [`?`] will result in the [`Ok`]'s unwrapped value, unless the result
+//! is [`Err`], in which case [`Err`] is returned early from the enclosing function.
//!
-//! [`?`] can only be used in functions that return [`Result`] because of the
+//! [`?`] can be used in functions that return [`Result`] because of the
//! early return of [`Err`] that it provides.
//!
//! [`expect`]: Result::expect
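A short example of the behaviour described above (not taken from the patch): `?` yields the `Ok` value, or returns the `Err` early from the enclosing function.

use std::num::ParseIntError;

fn double(s: &str) -> Result<i32, ParseIntError> {
    let n: i32 = s.parse()?; // on failure, `double` returns the Err here
    Ok(n * 2)
}

fn main() {
    assert_eq!(double("21"), Ok(42));
    assert!(double("not a number").is_err());
}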
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 062289767..90ab43d12 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -6,7 +6,7 @@ mod macros;
use crate::cmp;
use crate::cmp::Ordering;
use crate::fmt;
-use crate::intrinsics::{assume, exact_div, unchecked_sub};
+use crate::intrinsics::assume;
use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use crate::marker::{PhantomData, Send, Sized, Sync};
use crate::mem::{self, SizedTypeProperties};
@@ -35,12 +35,6 @@ impl<'a, T> IntoIterator for &'a mut [T] {
}
}
-// Macro helper functions
-#[inline(always)]
-fn size_from_ptr<T>(_: *const T) -> usize {
- mem::size_of::<T>()
-}
-
/// Immutable slice iterator
///
/// This struct is created by the [`iter`] method on [slices].
@@ -65,7 +59,7 @@ fn size_from_ptr<T>(_: *const T) -> usize {
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Iter<'a, T: 'a> {
ptr: NonNull<T>,
- end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a T>,
@@ -186,7 +180,7 @@ impl<T> AsRef<[T]> for Iter<'_, T> {
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct IterMut<'a, T: 'a> {
ptr: NonNull<T>,
- end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a mut T>,
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index ce51d48e3..0fd57b197 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -9,30 +9,20 @@ macro_rules! is_empty {
};
}
-// To get rid of some bounds checks (see `position`), we compute the length in a somewhat
-// unexpected way. (Tested by `codegen/slice-position-bounds-check`.)
macro_rules! len {
($self: ident) => {{
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
let start = $self.ptr;
- let size = size_from_ptr(start.as_ptr());
- if size == 0 {
- // This _cannot_ use `unchecked_sub` because we depend on wrapping
+ if T::IS_ZST {
+ // This _cannot_ use `ptr_sub` because we depend on wrapping
// to represent the length of long ZST slice iterators.
$self.end.addr().wrapping_sub(start.as_ptr().addr())
} else {
- // We know that `start <= end`, so can do better than `offset_from`,
- // which needs to deal in signed. By setting appropriate flags here
- // we can tell LLVM this, which helps it remove bounds checks.
- // SAFETY: By the type invariant, `start <= end`
- let diff = unsafe { unchecked_sub($self.end.addr(), start.as_ptr().addr()) };
- // By also telling LLVM that the pointers are apart by an exact
- // multiple of the type size, it can optimize `len() == 0` down to
- // `start == end` instead of `(end - start) < size`.
- // SAFETY: By the type invariant, the pointers are aligned so the
- // distance between them must be a multiple of pointee size
- unsafe { exact_div(diff, size) }
+ // To get rid of some bounds checks (see `position`), we use ptr_sub instead of
+ // offset_from (Tested by `codegen/slice-position-bounds-check`.)
+ // SAFETY: by the type invariant pointers are aligned and `start <= end`
+ unsafe { $self.end.sub_ptr(start.as_ptr()) }
}
}};
}
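For context, a minimal sketch of the `sub_ptr` call the new `len!` body relies on (assuming nightly `ptr_sub_ptr`; not taken from the patch): it is the unsigned counterpart of `offset_from`, where the caller promises that `self` is not behind `origin`, which is what lets the length computation avoid signed arithmetic.

#![feature(ptr_sub_ptr)]

fn main() {
    let a = [0_u16; 8];
    let start = a.as_ptr();
    // SAFETY: `end` is derived from `start` and stays within the same allocation.
    let end = unsafe { start.add(a.len()) };
    // SAFETY: `end >= start` and both point into the same allocation.
    assert_eq!(unsafe { end.sub_ptr(start) }, a.len());
}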
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index d9281a925..d93a3a57e 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -29,13 +29,19 @@ use crate::slice;
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
+#[unstable(
+ feature = "slice_internals",
+ issue = "none",
+ reason = "exposed from core to be reused in std;"
+)]
+pub mod sort;
+
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
-mod sort;
mod specialize;
#[stable(feature = "rust1", since = "1.0.0")]
@@ -703,7 +709,7 @@ impl<T> [T] {
// Because this function is first compiled in isolation,
// this check tells LLVM that the indexing below is
- // in-bounds. Then after inlining -- once the actual
+ // in-bounds. Then after inlining -- once the actual
// lengths of the slices are known -- it's removed.
let (a, b) = (&mut a[..n], &mut b[..n]);
@@ -781,6 +787,22 @@ impl<T> [T] {
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
+ ///
+    /// There's no `windows_mut`, as its existence would let safe code violate the
+ /// "only one `&mut` at a time to the same thing" rule. However, you can sometimes
+ /// use [`Cell::as_slice_of_cells`](crate::cell::Cell::as_slice_of_cells) in
+ /// conjunction with `windows` to accomplish something similar:
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let mut array = ['R', 'u', 's', 't', ' ', '2', '0', '1', '5'];
+ /// let slice = &mut array[..];
+ /// let slice_of_cells: &[Cell<char>] = Cell::from_mut(slice).as_slice_of_cells();
+ /// for w in slice_of_cells.windows(3) {
+ /// Cell::swap(&w[0], &w[2]);
+ /// }
+ /// assert_eq!(array, ['s', 't', ' ', '2', '0', '1', '5', 'u', 'R']);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
@@ -893,7 +915,7 @@ impl<T> [T] {
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
- assert_ne!(chunk_size, 0);
+ assert_ne!(chunk_size, 0, "chunks cannot have a size of zero");
ChunksExact::new(self, chunk_size)
}
@@ -935,7 +957,7 @@ impl<T> [T] {
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
- assert_ne!(chunk_size, 0);
+ assert_ne!(chunk_size, 0, "chunks cannot have a size of zero");
ChunksExactMut::new(self, chunk_size)
}
@@ -1002,11 +1024,22 @@ impl<T> [T] {
/// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
/// assert_eq!(remainder, &['m']);
/// ```
+ ///
+ /// If you expect the slice to be an exact multiple, you can combine
+ /// `let`-`else` with an empty slice pattern:
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let slice = ['R', 'u', 's', 't'];
+ /// let (chunks, []) = slice.as_chunks::<2>() else {
+ /// panic!("slice didn't have even length")
+ /// };
+ /// assert_eq!(chunks, &[['R', 'u'], ['s', 't']]);
+ /// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
#[must_use]
pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (multiple_of_n, remainder) = self.split_at(len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1037,7 +1070,7 @@ impl<T> [T] {
#[inline]
#[must_use]
pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (remainder, multiple_of_n) = self.split_at(self.len() - len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1076,7 +1109,7 @@ impl<T> [T] {
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
ArrayChunks::new(self)
}
@@ -1155,7 +1188,7 @@ impl<T> [T] {
#[inline]
#[must_use]
pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (multiple_of_n, remainder) = self.split_at_mut(len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1192,7 +1225,7 @@ impl<T> [T] {
#[inline]
#[must_use]
pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1233,11 +1266,11 @@ impl<T> [T] {
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
ArrayChunksMut::new(self)
}
- /// Returns an iterator over overlapping windows of `N` elements of a slice,
+ /// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
@@ -1265,7 +1298,7 @@ impl<T> [T] {
#[unstable(feature = "array_windows", issue = "75027")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "windows cannot have a size of zero");
ArrayWindows::new(self)
}
@@ -2465,7 +2498,7 @@ impl<T> [T] {
let mid = left + size / 2;
// SAFETY: the while condition means `size` is strictly positive, so
- // `size/2 < size`. Thus `left + size/2 < left + size`, which
+ // `size/2 < size`. Thus `left + size/2 < left + size`, which
// coupled with the `left + size <= self.len()` invariant means
// we have `left + size/2 < self.len()`, and this is in-bounds.
let cmp = f(unsafe { self.get_unchecked(mid) });
@@ -3795,7 +3828,7 @@ impl<T> [T] {
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
- /// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
+ /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
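A short example of the partitioning described above, using the stable `partition_point` method (not taken from the patch):

fn main() {
    let v = [7, 15, 3, 5, 4, 12, 6];
    // All odd numbers come first, so the slice is partitioned by `x % 2 != 0`.
    let i = v.partition_point(|&x| x % 2 != 0);
    assert_eq!(i, 4);
    assert!(v[..i].iter().all(|&x| x % 2 != 0));
    assert!(v[i..].iter().all(|&x| x % 2 == 0));
}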
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 87f77b7f2..2181f9a81 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -3,8 +3,11 @@
//! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort,
//! published at: <https://github.com/orlp/pdqsort>
//!
-//! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
+//! Unstable sorting is compatible with core because it doesn't allocate memory, unlike our
//! stable sorting implementation.
+//!
+//! It also contains the core logic of the stable sort used by `slice::sort`, based on
+//! TimSort.
use crate::cmp;
use crate::mem::{self, MaybeUninit, SizedTypeProperties};
@@ -18,9 +21,9 @@ struct CopyOnDrop<T> {
impl<T> Drop for CopyOnDrop<T> {
fn drop(&mut self) {
- // SAFETY: This is a helper class.
- // Please refer to its usage for correctness.
- // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`.
+ // SAFETY: This is a helper class.
+ // Please refer to its usage for correctness.
+        // Namely, one must be sure that `src` and `dst` do not overlap as required by `ptr::copy_nonoverlapping`.
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
@@ -831,6 +834,15 @@ fn partition_at_index_loop<'a, T, F>(
) where
F: FnMut(&T, &T) -> bool,
{
+ // Limit the amount of iterations and fall back to heapsort, similarly to `slice::sort_unstable`.
+ // This lowers the worst case running time from O(n^2) to O(n log n).
+ // FIXME: Investigate whether it would be better to use something like Median of Medians
+ // or Fast Deterministic Selection to guarantee O(n) worst case.
+ let mut limit = usize::BITS - v.len().leading_zeros();
+
+ // True if the last partitioning was reasonably balanced.
+ let mut was_balanced = true;
+
loop {
// For slices of up to this length it's probably faster to simply sort them.
const MAX_INSERTION: usize = 10;
@@ -839,6 +851,18 @@ fn partition_at_index_loop<'a, T, F>(
return;
}
+ if limit == 0 {
+ heapsort(v, is_less);
+ return;
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
+ // some elements around. Hopefully we'll choose a better pivot this time.
+ if !was_balanced {
+ break_patterns(v);
+ limit -= 1;
+ }
+
// Choose a pivot
let (pivot, _) = choose_pivot(v, is_less);
@@ -863,6 +887,7 @@ fn partition_at_index_loop<'a, T, F>(
}
let (mid, _) = partition(v, pivot, is_less);
+ was_balanced = cmp::min(mid, v.len() - mid) >= v.len() / 8;
// Split the slice into `left`, `pivot`, and `right`.
let (left, right) = v.split_at_mut(mid);
@@ -883,6 +908,7 @@ fn partition_at_index_loop<'a, T, F>(
}
}
+/// Reorder the slice such that the element at `index` is at its final sorted position.
pub fn partition_at_index<T, F>(
v: &mut [T],
index: usize,
@@ -927,3 +953,513 @@ where
let pivot = &mut pivot[0];
(left, pivot, right)
}
+
+/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
+///
+/// This is the integral subroutine of insertion sort.
+fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ if v.len() >= 2 && is_less(&v[1], &v[0]) {
+ // SAFETY: Copy tmp back even if panic, and ensure unique observation.
+ unsafe {
+ // There are three ways to implement insertion here:
+ //
+ // 1. Swap adjacent elements until the first one gets to its final destination.
+ // However, this way we copy data around more than is necessary. If elements are big
+ // structures (costly to copy), this method will be slow.
+ //
+ // 2. Iterate until the right place for the first element is found. Then shift the
+ // elements succeeding it to make room for it and finally place it into the
+ // remaining hole. This is a good method.
+ //
+ // 3. Copy the first element into a temporary variable. Iterate until the right place
+ // for it is found. As we go along, copy every traversed element into the slot
+ // preceding it. Finally, copy data from the temporary variable into the remaining
+ // hole. This method is very good. Benchmarks demonstrated slightly better
+ // performance than with the 2nd method.
+ //
+ // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+ let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
+
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
+ ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+ for i in 2..v.len() {
+ if !is_less(&v[i], &*tmp) {
+ break;
+ }
+ ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+ hole.dest = &mut v[i];
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+
+ // When dropped, copies from `src` into `dest`.
+ struct InsertionHole<T> {
+ src: *const T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for InsertionHole<T> {
+ fn drop(&mut self) {
+ // SAFETY: The caller must ensure that src and dest are correctly set.
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+ }
+}
+
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+ let v = v.as_mut_ptr();
+
+ // SAFETY: mid and len must be in-bounds of v.
+ let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
+
+ // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+ // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+ // copying the lesser (or greater) one into `v`.
+ //
+ // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+ // consumed first, then we must copy whatever is left of the shorter run into the remaining
+ // hole in `v`.
+ //
+ // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
+ // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+ // object it initially held exactly once.
+ let mut hole;
+
+ if mid <= len - mid {
+ // The left run is shorter.
+
+ // SAFETY: buf must have enough capacity for `v[..mid]`.
+ unsafe {
+ ptr::copy_nonoverlapping(v, buf, mid);
+ hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
+ }
+
+ // Initially, these pointers point to the beginnings of their arrays.
+ let left = &mut hole.start;
+ let mut right = v_mid;
+ let out = &mut hole.dest;
+
+ while *left < hole.end && right < v_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+
+ // SAFETY: left and right must be valid and part of v same for out.
+ unsafe {
+ let to_copy = if is_less(&*right, &**left) {
+ get_and_increment(&mut right)
+ } else {
+ get_and_increment(left)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+ }
+ }
+ } else {
+ // The right run is shorter.
+
+ // SAFETY: buf must have enough capacity for `v[mid..]`.
+ unsafe {
+ ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+ hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
+ }
+
+ // Initially, these pointers point past the ends of their arrays.
+ let left = &mut hole.dest;
+ let right = &mut hole.end;
+ let mut out = v_end;
+
+ while v < *left && buf < *right {
+ // Consume the greater side.
+ // If equal, prefer the right run to maintain stability.
+
+ // SAFETY: left and right must be valid and part of v same for out.
+ unsafe {
+ let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
+ decrement_and_get(left)
+ } else {
+ decrement_and_get(right)
+ };
+ ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+ }
+ }
+ }
+ // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+ // it will now be copied into the hole in `v`.
+
+ unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+ let old = *ptr;
+
+ // SAFETY: ptr.add(1) must still be a valid pointer and part of `v`.
+ *ptr = unsafe { ptr.add(1) };
+ old
+ }
+
+ unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+ // SAFETY: ptr.sub(1) must still be a valid pointer and part of `v`.
+ *ptr = unsafe { ptr.sub(1) };
+ *ptr
+ }
+
+ // When dropped, copies the range `start..end` into `dest..`.
+ struct MergeHole<T> {
+ start: *mut T,
+ end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for MergeHole<T> {
+ fn drop(&mut self) {
+ // SAFETY: `T` is not a zero-sized type, and these are pointers into a slice's elements.
+ unsafe {
+ let len = self.end.sub_ptr(self.start);
+ ptr::copy_nonoverlapping(self.start, self.dest, len);
+ }
+ }
+ }
+}
+
+/// This merge sort borrows some (but not all) ideas from TimSort, which used to be described in
+/// detail [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt). However Python
+/// has switched to a Powersort based implementation.
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied:
+///
+/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
+/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
+///
+/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
+pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>(
+ v: &mut [T],
+ is_less: &mut CmpF,
+ elem_alloc_fn: ElemAllocF,
+ elem_dealloc_fn: ElemDeallocF,
+ run_alloc_fn: RunAllocF,
+ run_dealloc_fn: RunDeallocF,
+) where
+ CmpF: FnMut(&T, &T) -> bool,
+ ElemAllocF: Fn(usize) -> *mut T,
+ ElemDeallocF: Fn(*mut T, usize),
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+{
+ // Slices of up to this length get sorted using insertion sort.
+ const MAX_INSERTION: usize = 20;
+ // Very short runs are extended using insertion sort to span at least this many elements.
+ const MIN_RUN: usize = 10;
+
+ // The caller should have already checked that.
+ debug_assert!(!T::IS_ZST);
+
+ let len = v.len();
+
+ // Short arrays get sorted in-place via insertion sort to avoid allocations.
+ if len <= MAX_INSERTION {
+ if len >= 2 {
+ for i in (0..len - 1).rev() {
+ insert_head(&mut v[i..], is_less);
+ }
+ }
+ return;
+ }
+
+ // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
+ // shallow copies of the contents of `v` without risking the dtors running on copies if
+ // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
+ // which will always have length at most `len / 2`.
+ let buf = BufGuard::new(len / 2, elem_alloc_fn, elem_dealloc_fn);
+ let buf_ptr = buf.buf_ptr;
+
+ let mut runs = RunVec::new(run_alloc_fn, run_dealloc_fn);
+
+ // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+ // strange decision, but consider the fact that merges more often go in the opposite direction
+ // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+ // backwards. To conclude, identifying runs by traversing backwards improves performance.
+ let mut end = len;
+ while end > 0 {
+ // Find the next natural run, and reverse it if it's strictly descending.
+ let mut start = end - 1;
+ if start > 0 {
+ start -= 1;
+
+            // SAFETY: `v.get_unchecked` must be fed with correct in-bounds indices.
+ unsafe {
+ if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
+ while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+ start -= 1;
+ }
+ v[start..end].reverse();
+ } else {
+ while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
+ {
+ start -= 1;
+ }
+ }
+ }
+ }
+
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ while start > 0 && end - start < MIN_RUN {
+ start -= 1;
+ insert_head(&mut v[start..end], is_less);
+ }
+
+ // Push this run onto the stack.
+ runs.push(TimSortRun { start, len: end - start });
+ end = start;
+
+ // Merge some pairs of adjacent runs to satisfy the invariants.
+ while let Some(r) = collapse(runs.as_slice()) {
+ let left = runs[r + 1];
+ let right = runs[r];
+ // SAFETY: `buf_ptr` must hold enough capacity for the shorter of the two sides, and
+            // neither side may be of length 0.
+ unsafe {
+ merge(&mut v[left.start..right.start + right.len], left.len, buf_ptr, is_less);
+ }
+ runs[r] = TimSortRun { start: left.start, len: left.len + right.len };
+ runs.remove(r + 1);
+ }
+ }
+
+ // Finally, exactly one run must remain in the stack.
+ debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+ // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+ // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+ // algorithm should continue building a new run instead, `None` is returned.
+ //
+ // TimSort is infamous for its buggy implementations, as described here:
+ // http://envisage-project.eu/timsort-specification-and-verification/
+ //
+ // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+ // Enforcing them on just top three is not sufficient to ensure that the invariants will still
+ // hold for *all* runs in the stack.
+ //
+ // This function correctly checks invariants for the top four runs. Additionally, if the top
+ // run starts at index 0, it will always demand a merge operation until the stack is fully
+ // collapsed, in order to complete the sort.
+ #[inline]
+ fn collapse(runs: &[TimSortRun]) -> Option<usize> {
+ let n = runs.len();
+ if n >= 2
+ && (runs[n - 1].start == 0
+ || runs[n - 2].len <= runs[n - 1].len
+ || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
+ || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
+ {
+ if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
+ } else {
+ None
+ }
+ }
+
+ // Extremely basic versions of Vec.
+ // Their use is super limited and by having the code here, it allows reuse between the sort
+ // implementations.
+ struct BufGuard<T, ElemDeallocF>
+ where
+ ElemDeallocF: Fn(*mut T, usize),
+ {
+ buf_ptr: *mut T,
+ capacity: usize,
+ elem_dealloc_fn: ElemDeallocF,
+ }
+
+ impl<T, ElemDeallocF> BufGuard<T, ElemDeallocF>
+ where
+ ElemDeallocF: Fn(*mut T, usize),
+ {
+ fn new<ElemAllocF>(
+ len: usize,
+ elem_alloc_fn: ElemAllocF,
+ elem_dealloc_fn: ElemDeallocF,
+ ) -> Self
+ where
+ ElemAllocF: Fn(usize) -> *mut T,
+ {
+ Self { buf_ptr: elem_alloc_fn(len), capacity: len, elem_dealloc_fn }
+ }
+ }
+
+ impl<T, ElemDeallocF> Drop for BufGuard<T, ElemDeallocF>
+ where
+ ElemDeallocF: Fn(*mut T, usize),
+ {
+ fn drop(&mut self) {
+ (self.elem_dealloc_fn)(self.buf_ptr, self.capacity);
+ }
+ }
+
+ struct RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ buf_ptr: *mut TimSortRun,
+ capacity: usize,
+ len: usize,
+ run_alloc_fn: RunAllocF,
+ run_dealloc_fn: RunDeallocF,
+ }
+
+ impl<RunAllocF, RunDeallocF> RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ fn new(run_alloc_fn: RunAllocF, run_dealloc_fn: RunDeallocF) -> Self {
+ // Most slices can be sorted with at most 16 runs in-flight.
+ const START_RUN_CAPACITY: usize = 16;
+
+ Self {
+ buf_ptr: run_alloc_fn(START_RUN_CAPACITY),
+ capacity: START_RUN_CAPACITY,
+ len: 0,
+ run_alloc_fn,
+ run_dealloc_fn,
+ }
+ }
+
+ fn push(&mut self, val: TimSortRun) {
+ if self.len == self.capacity {
+ let old_capacity = self.capacity;
+ let old_buf_ptr = self.buf_ptr;
+
+ self.capacity = self.capacity * 2;
+ self.buf_ptr = (self.run_alloc_fn)(self.capacity);
+
+ // SAFETY: Both the new and the old buf_ptr were correctly allocated, and
+ // old_buf_ptr holds old_capacity valid elements.
+ unsafe {
+ ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr, old_capacity);
+ }
+
+ (self.run_dealloc_fn)(old_buf_ptr, old_capacity);
+ }
+
+ // SAFETY: The invariant was just checked.
+ unsafe {
+ self.buf_ptr.add(self.len).write(val);
+ }
+ self.len += 1;
+ }
+
+ fn remove(&mut self, index: usize) {
+ if index >= self.len {
+ panic!("Index out of bounds");
+ }
+
+ // SAFETY: buf_ptr needs to be valid and len invariant upheld.
+ unsafe {
+ // the place we are taking from.
+ let ptr = self.buf_ptr.add(index);
+
+ // Shift everything down to fill in that spot.
+ ptr::copy(ptr.add(1), ptr, self.len - index - 1);
+ }
+ self.len -= 1;
+ }
+
+ fn as_slice(&self) -> &[TimSortRun] {
+ // SAFETY: Safe as long as buf_ptr is valid and len invariant was upheld.
+ unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr, self.len) }
+ }
+
+ fn len(&self) -> usize {
+ self.len
+ }
+ }
+
+ impl<RunAllocF, RunDeallocF> core::ops::Index<usize> for RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ type Output = TimSortRun;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ if index < self.len {
+ // SAFETY: buf_ptr and len invariant must be upheld.
+ unsafe {
+ return &*(self.buf_ptr.add(index));
+ }
+ }
+
+ panic!("Index out of bounds");
+ }
+ }
+
+ impl<RunAllocF, RunDeallocF> core::ops::IndexMut<usize> for RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ if index < self.len {
+ // SAFETY: buf_ptr and len invariant must be upheld.
+ unsafe {
+ return &mut *(self.buf_ptr.add(index));
+ }
+ }
+
+ panic!("Index out of bounds");
+ }
+ }
+
+ impl<RunAllocF, RunDeallocF> Drop for RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ fn drop(&mut self) {
+ // As long as TimSortRun is Copy we don't need to drop the runs individually;
+ // freeing the whole allocation is enough.
+ (self.run_dealloc_fn)(self.buf_ptr, self.capacity);
+ }
+ }
+}
+
+/// Internal type used by merge_sort.
+#[derive(Clone, Copy, Debug)]
+pub struct TimSortRun {
+ len: usize,
+ start: usize,
+}
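
A note on the run-collapsing logic added above: the sketch below reproduces the `collapse` invariant check in isolation so it can be exercised on its own. The `Run` struct, the free-standing `collapse` function, and the example values are illustrative only and are not part of the patch.

```rust
// Stand-alone sketch of the run-stack invariant check used by the merge sort above.
#[derive(Clone, Copy)]
struct Run {
    start: usize,
    len: usize,
}

// Returns `Some(r)` when `runs[r]` and `runs[r + 1]` should be merged next,
// or `None` when a new run should be built instead, mirroring the
// top-four-runs invariant described in the patch.
fn collapse(runs: &[Run]) -> Option<usize> {
    let n = runs.len();
    if n >= 2
        && (runs[n - 1].start == 0
            || runs[n - 2].len <= runs[n - 1].len
            || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
            || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
    {
        if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
    } else {
        None
    }
}

fn main() {
    // The run below the top is no longer than the top run, so the invariant is
    // violated and a merge of runs[0] and runs[1] is requested.
    let runs = [Run { start: 10, len: 4 }, Run { start: 4, len: 6 }];
    assert_eq!(collapse(&runs), Some(0));

    // Strictly decreasing lengths and the top run does not reach index 0:
    // no merge is needed yet.
    let runs = [Run { start: 8, len: 8 }, Run { start: 5, len: 3 }];
    assert_eq!(collapse(&runs), None);
}
```
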
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index 24083ee6a..d969475aa 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -585,16 +585,17 @@ where
impl<'a, P: Pattern<'a>> SplitInternal<'a, P> {
#[inline]
fn get_end(&mut self) -> Option<&'a str> {
- if !self.finished && (self.allow_trailing_empty || self.end - self.start > 0) {
+ if !self.finished {
self.finished = true;
- // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
- unsafe {
- let string = self.matcher.haystack().get_unchecked(self.start..self.end);
- Some(string)
+
+ if self.allow_trailing_empty || self.end - self.start > 0 {
+ // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
+ let string = unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) };
+ return Some(string);
}
- } else {
- None
}
+
+ None
}
#[inline]
@@ -716,14 +717,14 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> {
}
#[inline]
- fn as_str(&self) -> &'a str {
+ fn remainder(&self) -> Option<&'a str> {
// `Self::get_end` doesn't change `self.start`
if self.finished {
- return "";
+ return None;
}
// SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
- unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) }
+ Some(unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) })
}
}
@@ -746,44 +747,48 @@ generate_pattern_iterators! {
}
impl<'a, P: Pattern<'a>> Split<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".split(' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
impl<'a, P: Pattern<'a>> RSplit<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".rsplit(' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "Mary had a little");
+ /// assert_eq!(split.remainder(), Some("Mary had a little"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
@@ -806,44 +811,48 @@ generate_pattern_iterators! {
}
impl<'a, P: Pattern<'a>> SplitTerminator<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "A..B..".split_terminator('.');
- /// assert_eq!(split.as_str(), "A..B..");
+ /// assert_eq!(split.remainder(), Some("A..B.."));
/// split.next();
- /// assert_eq!(split.as_str(), ".B..");
+ /// assert_eq!(split.remainder(), Some(".B.."));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
impl<'a, P: Pattern<'a>> RSplitTerminator<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "A..B..".rsplit_terminator('.');
- /// assert_eq!(split.as_str(), "A..B..");
+ /// assert_eq!(split.remainder(), Some("A..B.."));
/// split.next();
- /// assert_eq!(split.as_str(), "A..B");
+ /// assert_eq!(split.remainder(), Some("A..B"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
@@ -905,8 +914,8 @@ impl<'a, P: Pattern<'a>> SplitNInternal<'a, P> {
}
#[inline]
- fn as_str(&self) -> &'a str {
- self.iter.as_str()
+ fn remainder(&self) -> Option<&'a str> {
+ self.iter.remainder()
}
}
@@ -929,44 +938,48 @@ generate_pattern_iterators! {
}
impl<'a, P: Pattern<'a>> SplitN<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".splitn(3, ' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
impl<'a, P: Pattern<'a>> RSplitN<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".rsplitn(3, ' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "Mary had a little");
+ /// assert_eq!(split.remainder(), Some("Mary had a little"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
@@ -1239,22 +1252,22 @@ impl<'a> SplitWhitespace<'a> {
/// # Examples
///
/// ```
- /// #![feature(str_split_whitespace_as_str)]
+ /// #![feature(str_split_whitespace_remainder)]
///
/// let mut split = "Mary had a little lamb".split_whitespace();
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
///
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
///
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "str_split_whitespace_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.inner.iter.as_str()
+ #[unstable(feature = "str_split_whitespace_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.inner.iter.remainder()
}
}
@@ -1290,32 +1303,34 @@ impl<'a> DoubleEndedIterator for SplitAsciiWhitespace<'a> {
impl FusedIterator for SplitAsciiWhitespace<'_> {}
impl<'a> SplitAsciiWhitespace<'a> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_whitespace_as_str)]
+ /// #![feature(str_split_whitespace_remainder)]
///
/// let mut split = "Mary had a little lamb".split_ascii_whitespace();
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
///
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
///
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "str_split_whitespace_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
+ #[unstable(feature = "str_split_whitespace_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
if self.inner.iter.iter.finished {
- return "";
+ return None;
}
// SAFETY: Slice is created from str.
- unsafe { crate::str::from_utf8_unchecked(&self.inner.iter.iter.v) }
+ Some(unsafe { crate::str::from_utf8_unchecked(&self.inner.iter.iter.v) })
}
}
@@ -1358,23 +1373,25 @@ impl<'a, P: Pattern<'a, Searcher: ReverseSearcher<'a>>> DoubleEndedIterator
impl<'a, P: Pattern<'a>> FusedIterator for SplitInclusive<'a, P> {}
impl<'a, P: Pattern<'a>> SplitInclusive<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_inclusive_as_str)]
+ /// #![feature(str_split_inclusive_remainder)]
/// let mut split = "Mary had a little lamb".split_inclusive(' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_inclusive_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_inclusive_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
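
Taken together, the hunks above replace the unstable `as_str` accessors with `remainder`, which distinguishes an exhausted iterator (`None`) from a pending, possibly empty, tail (`Some(..)`). A minimal nightly-only sketch of the new behaviour, based on the `SplitInternal::remainder` implementation shown above and the `str_split_remainder` gate:

```rust
#![feature(str_split_remainder)] // nightly-only, matching the gate used in the diff above

fn main() {
    let mut split = "a,b,".split(',');
    assert_eq!(split.remainder(), Some("a,b,"));

    assert_eq!(split.next(), Some("a"));
    assert_eq!(split.remainder(), Some("b,"));

    assert_eq!(split.next(), Some("b"));
    // A trailing empty item is still pending, so the remainder is `Some("")` rather than `None`.
    assert_eq!(split.remainder(), Some(""));

    assert_eq!(split.next(), Some(""));
    // Only once the iterator has finished does `remainder` return `None`.
    assert_eq!(split.remainder(), None);
}
```
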
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index 45fd2caae..ab2f8520e 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -368,7 +368,7 @@ impl str {
#[inline(always)]
pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
// SAFETY: the cast from `&str` to `&[u8]` is safe since `str`
- // has the same layout as `&[u8]` (only libstd can make this guarantee).
+ // has the same layout as `&[u8]` (only std can make this guarantee).
// The pointer dereference is safe since it comes from a mutable reference which
// is guaranteed to be valid for writes.
unsafe { &mut *(self as *mut str as *mut [u8]) }
@@ -970,8 +970,10 @@ impl str {
/// An iterator over the lines of a string, as string slices.
///
- /// Lines are ended with either a newline (`\n`) or a carriage return with
- /// a line feed (`\r\n`).
+ /// Lines are split at line endings that are either newlines (`\n`) or
+ /// sequences of a carriage return followed by a line feed (`\r\n`).
+ ///
+ /// Line terminators are not included in the lines returned by the iterator.
///
/// The final line ending is optional. A string that ends with a final line
/// ending will return the same lines as an otherwise identical string
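
The reworded `lines` documentation can be summarised with a short stable example: both terminators split lines, the terminators themselves are stripped, and a final line ending adds no extra empty line.

```rust
fn main() {
    let text = "foo\r\nbar\n\nbaz\n";
    let lines: Vec<&str> = text.lines().collect();

    // `\n` and `\r\n` both end a line, the terminators are not included in the
    // returned slices, and the trailing `\n` does not produce a final empty line.
    assert_eq!(lines, ["foo", "bar", "", "baz"]);
}
```
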
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index edc68d6fa..14367eb09 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1786,6 +1786,42 @@ impl<T> AtomicPtr<T> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
+
+ /// Returns a mutable pointer to the underlying pointer.
+ ///
+ /// Doing non-atomic reads and writes on the resulting pointer can be a data race.
+ /// This method is mostly useful for FFI, where the function signature may use
+ /// `*mut *mut T` instead of `&AtomicPtr<T>`.
+ ///
+ /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
+ /// atomic types work with interior mutability. All modifications of an atomic change the value
+ /// through a shared reference, and can do so safely as long as they use atomic operations. Any
+ /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
+ /// restriction: operations on it must be atomic.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// #![feature(atomic_mut_ptr)]
+ /// use std::sync::atomic::AtomicPtr;
+ ///
+ /// extern "C" {
+ /// fn my_atomic_op(arg: *mut *mut u32);
+ /// }
+ ///
+ /// let mut value = 17;
+ /// let atomic = AtomicPtr::new(&mut value);
+ ///
+ /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
+ /// unsafe {
+ /// my_atomic_op(atomic.as_mut_ptr());
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
+ pub fn as_mut_ptr(&self) -> *mut *mut T {
+ self.p.get()
+ }
}
#[cfg(target_has_atomic_load_store = "8")]
@@ -2678,9 +2714,9 @@ macro_rules! atomic_int {
#[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
/// }
///
- #[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")]
+ #[doc = concat!("let atomic = ", stringify!($atomic_type), "::new(1);")]
///
- // SAFETY: Safe as long as `my_atomic_op` is atomic.
+ /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
/// unsafe {
/// my_atomic_op(atomic.as_mut_ptr());
/// }
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
index c65c27500..301ad41c9 100644
--- a/library/core/src/sync/exclusive.rs
+++ b/library/core/src/sync/exclusive.rs
@@ -138,7 +138,7 @@ impl<T: ?Sized> Exclusive<T> {
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().inner) }
}
- /// Build a _mutable_ references to an `Exclusive<T>` from
+ /// Build a _mutable_ reference to an `Exclusive<T>` from
/// a _mutable_ reference to a `T`. This allows you to skip
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
@@ -149,7 +149,7 @@ impl<T: ?Sized> Exclusive<T> {
unsafe { &mut *(r as *mut T as *mut Exclusive<T>) }
}
- /// Build a _pinned mutable_ references to an `Exclusive<T>` from
+ /// Build a _pinned mutable_ reference to an `Exclusive<T>` from
/// a _pinned mutable_ reference to a `T`. This allows you to skip
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
index f1dc4f7b5..25b61c0e6 100644
--- a/library/core/src/task/poll.rs
+++ b/library/core/src/task/poll.rs
@@ -9,7 +9,7 @@ use crate::task::Ready;
/// scheduled to receive a wakeup instead.
#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
-#[cfg_attr(not(bootstrap), lang = "Poll")]
+#[lang = "Poll"]
#[stable(feature = "futures_api", since = "1.36.0")]
pub enum Poll<T> {
/// Represents that a value is immediately ready.
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
index 0cff972df..89adfccd9 100644
--- a/library/core/src/task/wake.rs
+++ b/library/core/src/task/wake.rs
@@ -104,7 +104,7 @@ pub struct RawWakerVTable {
/// pointer.
wake_by_ref: unsafe fn(*const ()),
- /// This function gets called when a [`RawWaker`] gets dropped.
+ /// This function gets called when a [`Waker`] gets dropped.
///
/// The implementation of this function must make sure to release any
/// resources that are associated with this instance of a [`RawWaker`] and
@@ -151,7 +151,7 @@ impl RawWakerVTable {
///
/// # `drop`
///
- /// This function gets called when a [`RawWaker`] gets dropped.
+ /// This function gets called when a [`Waker`] gets dropped.
///
/// The implementation of this function must make sure to release any
/// resources that are associated with this instance of a [`RawWaker`] and
@@ -174,6 +174,7 @@ impl RawWakerVTable {
/// Currently, `Context` only serves to provide access to a [`&Waker`](Waker)
/// which can be used to wake the current task.
#[stable(feature = "futures_api", since = "1.36.0")]
+#[cfg_attr(not(bootstrap), lang = "Context")]
pub struct Context<'a> {
waker: &'a Waker,
// Ensure we future-proof against variance changes by forcing
@@ -181,6 +182,9 @@ pub struct Context<'a> {
// are contravariant while return-position lifetimes are
// covariant).
_marker: PhantomData<fn(&'a ()) -> &'a ()>,
+ // Ensure `Context` is `!Send` and `!Sync` in order to allow
+ // for future `!Send` and / or `!Sync` fields.
+ _marker2: PhantomData<*mut ()>,
}
impl<'a> Context<'a> {
@@ -190,7 +194,7 @@ impl<'a> Context<'a> {
#[must_use]
#[inline]
pub const fn from_waker(waker: &'a Waker) -> Self {
- Context { waker, _marker: PhantomData }
+ Context { waker, _marker: PhantomData, _marker2: PhantomData }
}
/// Returns a reference to the [`Waker`] for the current task.
diff --git a/library/core/src/unicode/mod.rs b/library/core/src/unicode/mod.rs
index 72fa059b7..e1faa407d 100644
--- a/library/core/src/unicode/mod.rs
+++ b/library/core/src/unicode/mod.rs
@@ -17,7 +17,7 @@ mod unicode_data;
#[stable(feature = "unicode_version", since = "1.45.0")]
pub const UNICODE_VERSION: (u8, u8, u8) = unicode_data::UNICODE_VERSION;
-// For use in liballoc, not re-exported in libstd.
+// For use in alloc, not re-exported in std.
pub use unicode_data::{
case_ignorable::lookup as Case_Ignorable, cased::lookup as Cased, conversions,
};
diff --git a/library/core/tests/any.rs b/library/core/tests/any.rs
index e98dac8d1..a8f6b7ebb 100644
--- a/library/core/tests/any.rs
+++ b/library/core/tests/any.rs
@@ -131,7 +131,6 @@ fn distinct_type_names() {
assert_ne!(type_name_of_val(Velocity), type_name_of_val(Velocity(0.0, -9.8)),);
}
-#[cfg(not(bootstrap))]
#[test]
fn dyn_type_name() {
trait Foo {
diff --git a/library/core/tests/char.rs b/library/core/tests/char.rs
index 8542e5c70..ac0b2ca16 100644
--- a/library/core/tests/char.rs
+++ b/library/core/tests/char.rs
@@ -306,6 +306,10 @@ fn test_decode_utf16() {
}
check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]);
check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]);
+ check(&[0xD800], &[Err(0xD800)]);
+ check(&[0xD840, 0xDC00], &[Ok('\u{20000}')]);
+ check(&[0xD840, 0xD840, 0xDC00], &[Err(0xD840), Ok('\u{20000}')]);
+ check(&[0xDC00, 0xD840], &[Err(0xDC00), Err(0xD840)]);
}
#[test]
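
The new test cases above exercise surrogate handling in UTF-16 decoding; a small stand-alone sketch of the same behaviour using the stable `char::decode_utf16` API:

```rust
fn main() {
    // A lone 0xD800 is reported as an error carrying the unpaired surrogate,
    // while 0xD840 0xDC00 is a valid surrogate pair for U+20000.
    let units = [0xD800u16, 0xD840, 0xDC00];
    let decoded: Vec<_> = char::decode_utf16(units).collect();

    assert_eq!(decoded.len(), 2);
    assert!(matches!(decoded[0], Err(ref e) if e.unpaired_surrogate() == 0xD800));
    assert!(matches!(decoded[1], Ok('\u{20000}')));
}
```
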
diff --git a/library/core/tests/lazy.rs b/library/core/tests/lazy.rs
index 70fcc6d2d..c7c3c479b 100644
--- a/library/core/tests/lazy.rs
+++ b/library/core/tests/lazy.rs
@@ -106,6 +106,12 @@ fn lazy_new() {
assert_eq!(called.get(), 1);
}
+// Check that we can infer `T` from the closure's type.
+#[test]
+fn lazy_type_inference() {
+ let _ = LazyCell::new(|| ());
+}
+
#[test]
fn aliasing_in_get() {
let x = OnceCell::new();
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 99d4a40c4..42a26ae16 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -48,7 +48,6 @@
#![feature(is_sorted)]
#![feature(layout_for_ptr)]
#![feature(pattern)]
-#![feature(pin_macro)]
#![feature(sort_internals)]
#![feature(slice_take)]
#![feature(slice_from_ptr_range)]
@@ -155,3 +154,16 @@ mod time;
mod tuple;
mod unicode;
mod waker;
+
+/// Copied from `std::test_helpers::test_rng`, see that function for rationale.
+#[track_caller]
+#[allow(dead_code)] // Not used in all configurations.
+pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+ use core::hash::{BuildHasher, Hash, Hasher};
+ let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ core::panic::Location::caller().hash(&mut hasher);
+ let hc64 = hasher.finish();
+ let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
+ let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+ rand::SeedableRng::from_seed(seed)
+}
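
The seeding scheme in `test_rng` above (hashing the caller's `Location` into a 16-byte seed) can be illustrated without the `rand` dependency. The `seed_for_caller` helper below is a hypothetical stand-in that only demonstrates that repeated calls from the same call site produce the same seed within one process:

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

// Hypothetical helper mirroring the seed derivation in `test_rng` above:
// hash the caller's source location and stretch the result into 16 bytes.
#[track_caller]
fn seed_for_caller(state: &RandomState) -> [u8; 16] {
    let mut hasher = state.build_hasher();
    std::panic::Location::caller().hash(&mut hasher);
    let hc64 = hasher.finish();
    let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
    seed_vec.as_slice().try_into().unwrap()
}

fn main() {
    let state = RandomState::new();
    // All three calls share one source location, so within a single process
    // (a single `RandomState`) they derive the same seed.
    let seeds: Vec<[u8; 16]> = (0..3).map(|_| seed_for_caller(&state)).collect();
    assert!(seeds.windows(2).all(|w| w[0] == w[1]));
}
```
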
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index 1cfb4fd9f..f7740a114 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -77,7 +77,6 @@ fn align_of_val_basic() {
}
#[test]
-#[cfg(not(bootstrap))] // stage 0 doesn't have the fix yet, so the test fails
fn align_of_val_raw_packed() {
#[repr(C, packed)]
struct B {
diff --git a/library/core/tests/num/flt2dec/random.rs b/library/core/tests/num/flt2dec/random.rs
index d09500393..0084c1c81 100644
--- a/library/core/tests/num/flt2dec/random.rs
+++ b/library/core/tests/num/flt2dec/random.rs
@@ -9,8 +9,6 @@ use core::num::flt2dec::MAX_SIG_DIGITS;
use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
use rand::distributions::{Distribution, Uniform};
-use rand::rngs::StdRng;
-use rand::SeedableRng;
pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
match decode(v).1 {
@@ -92,7 +90,7 @@ where
if cfg!(target_os = "emscripten") {
return; // using rng pulls in i128 support, which doesn't work
}
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
let f32_range = Uniform::new(0x0000_0001u32, 0x7f80_0000);
iterate("f32_random_equivalence_test", k, n, f, g, |_| {
let x = f32::from_bits(f32_range.sample(&mut rng));
@@ -108,7 +106,7 @@ where
if cfg!(target_os = "emscripten") {
return; // using rng pulls in i128 support, which doesn't work
}
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
let f64_range = Uniform::new(0x0000_0000_0000_0001u64, 0x7ff0_0000_0000_0000);
iterate("f64_random_equivalence_test", k, n, f, g, |_| {
let x = f64::from_bits(f64_range.sample(&mut rng));
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 90bc83510..80d30f14c 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -359,7 +359,6 @@ fn align_offset_zst() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_zst_const() {
const {
// For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
@@ -397,7 +396,6 @@ fn align_offset_stride_one() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_stride_one_const() {
const {
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
@@ -493,7 +491,6 @@ fn align_offset_various_strides() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_various_strides_const() {
const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
let mut expected = usize::MAX;
@@ -561,7 +558,6 @@ fn align_offset_various_strides_const() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_with_provenance_const() {
const {
// On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
@@ -681,7 +677,6 @@ fn align_offset_issue_103361() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_issue_103361_const() {
#[cfg(target_pointer_width = "64")]
const SIZE: usize = 1 << 47;
@@ -715,7 +710,6 @@ fn is_aligned() {
}
#[test]
-#[cfg(not(bootstrap))]
fn is_aligned_const() {
const {
let data = 42;
@@ -735,18 +729,6 @@ fn is_aligned_const() {
}
#[test]
-#[cfg(bootstrap)]
-fn is_aligned_const() {
- const {
- let data = 42;
- let ptr: *const i32 = &data;
- // The bootstrap compiler always returns false for is_aligned.
- assert!(!ptr.is_aligned());
- assert!(!ptr.is_aligned_to(1));
- }
-}
-
-#[test]
fn offset_from() {
let mut a = [0; 5];
let ptr1: *mut i32 = &mut a[1];
@@ -825,7 +807,7 @@ fn ptr_metadata_bounds() {
}
// "Synthetic" trait impls generated by the compiler like those of `Pointee`
// are not checked for bounds of associated type.
- // So with a buggy libcore we could have both:
+ // So with a buggy core we could have both:
// * `<dyn Display as Pointee>::Metadata == DynMetadata`
// * `DynMetadata: !PartialEq`
// … and cause an ICE here:
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 4e06e0f43..39559cdbb 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -1488,7 +1488,7 @@ mod slice_index {
// optional:
//
// one or more similar inputs for which data[input] succeeds,
- // and the corresponding output as an array. This helps validate
+ // and the corresponding output as an array. This helps validate
// "critical points" where an input range straddles the boundary
// between valid and invalid.
// (such as the input `len..len`, which is just barely valid)
@@ -1805,7 +1805,7 @@ fn brute_force_rotate_test_1() {
fn sort_unstable() {
use core::cmp::Ordering::{Equal, Greater, Less};
use core::slice::heapsort;
- use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};
+ use rand::{seq::SliceRandom, Rng};
// Miri is too slow (but still need to `chain` to make the types match)
let lens = if cfg!(miri) { (2..20).chain(0..0) } else { (2..25).chain(500..510) };
@@ -1813,7 +1813,7 @@ fn sort_unstable() {
let mut v = [0; 600];
let mut tmp = [0; 600];
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
for len in lens {
let v = &mut v[0..len];
@@ -1879,11 +1879,10 @@ fn sort_unstable() {
#[cfg_attr(miri, ignore)] // Miri is too slow
fn select_nth_unstable() {
use core::cmp::Ordering::{Equal, Greater, Less};
- use rand::rngs::StdRng;
use rand::seq::SliceRandom;
- use rand::{Rng, SeedableRng};
+ use rand::Rng;
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
for len in (2..21).chain(500..501) {
let mut orig = vec![0; len];
diff --git a/library/core/tests/str.rs b/library/core/tests/str.rs
index ed939ca71..f5066343a 100644
--- a/library/core/tests/str.rs
+++ b/library/core/tests/str.rs
@@ -1 +1 @@
-// All `str` tests live in liballoc/tests
+// All `str` tests live in library/alloc/tests/str.rs
diff --git a/library/core/tests/task.rs b/library/core/tests/task.rs
index 56be30e92..163b34c96 100644
--- a/library/core/tests/task.rs
+++ b/library/core/tests/task.rs
@@ -1,4 +1,4 @@
-use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+use core::task::{Poll, RawWaker, RawWakerVTable, Waker};
#[test]
fn poll_const() {
@@ -21,9 +21,5 @@ fn waker_const() {
static WAKER: Waker = unsafe { Waker::from_raw(VOID_WAKER) };
- static CONTEXT: Context<'static> = Context::from_waker(&WAKER);
-
- static WAKER_REF: &'static Waker = CONTEXT.waker();
-
- WAKER_REF.wake_by_ref();
+ WAKER.wake_by_ref();
}
diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs
index a05128de4..2975c81f8 100644
--- a/library/core/tests/time.rs
+++ b/library/core/tests/time.rs
@@ -174,6 +174,32 @@ fn div() {
}
#[test]
+fn div_duration_f32() {
+ assert_eq!(Duration::ZERO.div_duration_f32(Duration::MAX), 0.0);
+ assert_eq!(Duration::MAX.div_duration_f32(Duration::ZERO), f32::INFINITY);
+ assert_eq!((Duration::SECOND * 2).div_duration_f32(Duration::SECOND), 2.0);
+ assert!(Duration::ZERO.div_duration_f32(Duration::ZERO).is_nan());
+ // These tests demonstrate it doesn't panic with extreme values.
+ // Accuracy of the computed value is not a huge concern; we know floats don't work well
+ // at these extremes.
+ assert!((Duration::MAX).div_duration_f32(Duration::NANOSECOND) > 10.0f32.powf(28.0));
+ assert!((Duration::NANOSECOND).div_duration_f32(Duration::MAX) < 0.1);
+}
+
+#[test]
+fn div_duration_f64() {
+ assert_eq!(Duration::ZERO.div_duration_f64(Duration::MAX), 0.0);
+ assert_eq!(Duration::MAX.div_duration_f64(Duration::ZERO), f64::INFINITY);
+ assert_eq!((Duration::SECOND * 2).div_duration_f64(Duration::SECOND), 2.0);
+ assert!(Duration::ZERO.div_duration_f64(Duration::ZERO).is_nan());
+ // These tests demonstrate it doesn't panic with extreme values.
+ // Accuracy of the computed value is not a huge concern; we know floats don't work well
+ // at these extremes.
+ assert!((Duration::MAX).div_duration_f64(Duration::NANOSECOND) > 10.0f64.powf(28.0));
+ assert!((Duration::NANOSECOND).div_duration_f64(Duration::MAX) < 0.1);
+}
+
+#[test]
fn checked_div() {
assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
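
For context on the new `div_duration_*` tests: the methods are still unstable here, but the ratio they compute can be spelled out with stable APIs. A brief sketch of the non-extreme case:

```rust
use std::time::Duration;

fn main() {
    let a = Duration::from_millis(1500);
    let b = Duration::from_millis(500);

    // `a.div_duration_f64(b)` is the ratio of the two durations measured in
    // seconds; the stable equivalent written out by hand:
    let ratio = a.as_secs_f64() / b.as_secs_f64();
    assert_eq!(ratio, 3.0);
}
```
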