author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:11:38 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:12:43 +0000
commit | cf94bdc0742c13e2a0cac864c478b8626b266e1b (patch)
tree | 044670aa50cc5e2b4229aa0b6b3df6676730c0a6 /library
parent | Adding debian version 1.65.0+dfsg1-2. (diff)
download | rustc-cf94bdc0742c13e2a0cac864c478b8626b266e1b.tar.xz, rustc-cf94bdc0742c13e2a0cac864c478b8626b266e1b.zip
Merging upstream version 1.66.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library')
285 files changed, 7944 insertions, 4580 deletions
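Notable library API changes carried by this upstream merge include the stabilization of the `map_first_last` methods on `BTreeMap`/`BTreeSet`, the stabilization of `const` `BTreeMap::new`/`BTreeSet::new` (with `len`/`is_empty` moving to a new `const_btree_len` gate), a new stable `TryFrom<Vec<T>>` impl for `Box<[T; N]>`, and the unstable additions `Vec::push_within_capacity` and `String::leak`. The snippet below is a minimal sketch, not part of the diff, that exercises a few of the newly stabilized APIs; it assumes a Rust 1.66 or later toolchain.

```rust
// Minimal sketch (assumption: built with Rust 1.66+); it exercises APIs whose
// stabilization is recorded in the hunks below.
use std::collections::BTreeMap;

fn main() {
    // `BTreeMap::new` is now a stable `const fn` (formerly gated on `const_btree_new`).
    const EMPTY: BTreeMap<u32, &str> = BTreeMap::new();
    assert!(EMPTY.is_empty());

    // `first_key_value`, `pop_first`, `pop_last`, etc. were stabilized under
    // the `map_first_last` feature.
    let mut map = BTreeMap::new();
    map.insert(1, "a");
    map.insert(2, "b");
    assert_eq!(map.first_key_value(), Some((&1, &"a")));
    assert_eq!(map.pop_last(), Some((2, "b")));

    // New in 1.66: converting a `Vec<T>` into a boxed array via `TryFrom`.
    let boxed: Box<[i32; 3]> = vec![1, 2, 3].try_into().unwrap();
    assert_eq!(*boxed, [1, 2, 3]);
}
```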
diff --git a/library/alloc/benches/lib.rs b/library/alloc/benches/lib.rs index 72ac897d4..d418965cd 100644 --- a/library/alloc/benches/lib.rs +++ b/library/alloc/benches/lib.rs @@ -3,7 +3,6 @@ #![cfg(not(target_os = "android"))] #![feature(btree_drain_filter)] #![feature(iter_next_chunk)] -#![feature(map_first_last)] #![feature(repr_simd)] #![feature(slice_partition_dedup)] #![feature(test)] diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index 80b067812..8187517cc 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -28,16 +28,20 @@ extern "Rust" { // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them // like `malloc`, `realloc`, and `free`, respectively. #[rustc_allocator] - #[rustc_allocator_nounwind] + #[cfg_attr(not(bootstrap), rustc_nounwind)] + #[cfg_attr(bootstrap, rustc_allocator_nounwind)] fn __rust_alloc(size: usize, align: usize) -> *mut u8; #[rustc_deallocator] - #[rustc_allocator_nounwind] + #[cfg_attr(not(bootstrap), rustc_nounwind)] + #[cfg_attr(bootstrap, rustc_allocator_nounwind)] fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize); #[rustc_reallocator] - #[rustc_allocator_nounwind] + #[cfg_attr(not(bootstrap), rustc_nounwind)] + #[cfg_attr(bootstrap, rustc_allocator_nounwind)] fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8; #[rustc_allocator_zeroed] - #[rustc_allocator_nounwind] + #[cfg_attr(not(bootstrap), rustc_nounwind)] + #[cfg_attr(bootstrap, rustc_allocator_nounwind)] fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8; } diff --git a/library/alloc/src/borrow.rs b/library/alloc/src/borrow.rs index 904a53bb4..83a138559 100644 --- a/library/alloc/src/borrow.rs +++ b/library/alloc/src/borrow.rs @@ -21,7 +21,6 @@ use Cow::*; impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B> where B: ToOwned, - <B as ToOwned>::Owned: 'a, { fn borrow(&self) -> &B { &**self diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 65e323c9e..d6681a317 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -151,7 +151,6 @@ use core::async_iter::AsyncIterator; use core::borrow; use core::cmp::Ordering; use core::convert::{From, TryFrom}; -#[cfg(not(bootstrap))] use core::error::Error; use core::fmt; use core::future::Future; @@ -176,7 +175,6 @@ use crate::borrow::Cow; use crate::raw_vec::RawVec; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] use crate::string::String; #[cfg(not(no_global_oom_handling))] @@ -1622,6 +1620,22 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> { } } +/// Casts a boxed slice to a boxed array. +/// +/// # Safety +/// +/// `boxed_slice.len()` must be exactly `N`. +unsafe fn boxed_slice_as_array_unchecked<T, A: Allocator, const N: usize>( + boxed_slice: Box<[T], A>, +) -> Box<[T; N], A> { + debug_assert_eq!(boxed_slice.len(), N); + + let (ptr, alloc) = Box::into_raw_with_allocator(boxed_slice); + // SAFETY: Pointer and allocator came from an existing box, + // and our safety condition requires that the length is exactly `N` + unsafe { Box::from_raw_in(ptr as *mut [T; N], alloc) } +} + #[stable(feature = "boxed_slice_try_from", since = "1.43.0")] impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> { type Error = Box<[T]>; @@ -1637,13 +1651,46 @@ impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> { /// `boxed_slice.len()` does not equal `N`. 
fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> { if boxed_slice.len() == N { - Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) }) + Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) }) } else { Err(boxed_slice) } } } +#[cfg(not(no_global_oom_handling))] +#[stable(feature = "boxed_array_try_from_vec", since = "1.66.0")] +impl<T, const N: usize> TryFrom<Vec<T>> for Box<[T; N]> { + type Error = Vec<T>; + + /// Attempts to convert a `Vec<T>` into a `Box<[T; N]>`. + /// + /// Like [`Vec::into_boxed_slice`], this is in-place if `vec.capacity() == N`, + /// but will require a reallocation otherwise. + /// + /// # Errors + /// + /// Returns the original `Vec<T>` in the `Err` variant if + /// `boxed_slice.len()` does not equal `N`. + /// + /// # Examples + /// + /// This can be used with [`vec!`] to create an array on the heap: + /// + /// ``` + /// let state: Box<[f32; 100]> = vec![1.0; 100].try_into().unwrap(); + /// assert_eq!(state.len(), 100); + /// ``` + fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> { + if vec.len() == N { + let boxed_slice = vec.into_boxed_slice(); + Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) }) + } else { + Err(vec) + } + } +} + impl<A: Allocator> Box<dyn Any, A> { /// Attempt to downcast the box to a concrete type. /// @@ -2037,8 +2084,7 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> { * could have a method to project a Pin<T> from it. */ #[stable(feature = "pin", since = "1.33.0")] -#[rustc_const_unstable(feature = "const_box", issue = "92521")] -impl<T: ?Sized, A: Allocator> const Unpin for Box<T, A> where A: 'static {} +impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {} #[unstable(feature = "generator_trait", issue = "43122")] impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A> @@ -2091,7 +2137,6 @@ impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> { } } -#[cfg(not(bootstrap))] impl dyn Error { #[inline] #[stable(feature = "error_downcast", since = "1.3.0")] @@ -2109,7 +2154,6 @@ impl dyn Error { } } -#[cfg(not(bootstrap))] impl dyn Error + Send { #[inline] #[stable(feature = "error_downcast", since = "1.3.0")] @@ -2124,7 +2168,6 @@ impl dyn Error + Send { } } -#[cfg(not(bootstrap))] impl dyn Error + Send + Sync { #[inline] #[stable(feature = "error_downcast", since = "1.3.0")] @@ -2139,7 +2182,6 @@ impl dyn Error + Send + Sync { } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> { @@ -2173,7 +2215,6 @@ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> { } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> { @@ -2213,7 +2254,6 @@ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl From<String> for Box<dyn Error + Send + Sync> { @@ -2258,7 +2298,6 @@ impl From<String> for Box<dyn Error + Send + Sync> { } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "string_box_error", since = "1.6.0")] impl From<String> for Box<dyn Error> { @@ -2281,7 +2320,6 @@ impl From<String> for Box<dyn Error> { } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl<'a> 
From<&str> for Box<dyn Error + Send + Sync + 'a> { @@ -2306,7 +2344,6 @@ impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> { } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "string_box_error", since = "1.6.0")] impl From<&str> for Box<dyn Error> { @@ -2329,7 +2366,6 @@ impl From<&str> for Box<dyn Error> { } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "cow_box_error", since = "1.22.0")] impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> { @@ -2352,7 +2388,6 @@ impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> { } } -#[cfg(not(bootstrap))] #[cfg(not(no_global_oom_handling))] #[stable(feature = "cow_box_error", since = "1.22.0")] impl<'a> From<Cow<'a, str>> for Box<dyn Error> { @@ -2374,7 +2409,6 @@ impl<'a> From<Cow<'a, str>> for Box<dyn Error> { } } -#[cfg(not(bootstrap))] #[stable(feature = "box_error", since = "1.8.0")] impl<T: core::error::Error> core::error::Error for Box<T> { #[allow(deprecated, deprecated_in_future)] diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs index 0a20c74b0..c477c4490 100644 --- a/library/alloc/src/boxed/thin.rs +++ b/library/alloc/src/boxed/thin.rs @@ -2,7 +2,6 @@ // https://github.com/matthieu-m/rfc2580/blob/b58d1d3cba0d4b5e859d3617ea2d0943aaa31329/examples/thin.rs // by matthieu-m use crate::alloc::{self, Layout, LayoutError}; -#[cfg(not(bootstrap))] use core::error::Error; use core::fmt::{self, Debug, Display, Formatter}; use core::marker::PhantomData; @@ -274,7 +273,6 @@ impl<H> WithHeader<H> { } } -#[cfg(not(bootstrap))] #[unstable(feature = "thin_box", issue = "92791")] impl<T: ?Sized + Error> Error for ThinBox<T> { fn source(&self) -> Option<&(dyn Error + 'static)> { diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index cacbd54b6..8a7719347 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -580,7 +580,7 @@ impl<K, V> BTreeMap<K, V> { /// map.insert(1, "a"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] + #[rustc_const_stable(feature = "const_btree_new", since = "1.66.0")] #[must_use] pub const fn new() -> BTreeMap<K, V> { BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(Global), _marker: PhantomData } @@ -703,7 +703,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// Basic usage: /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); @@ -712,7 +711,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// map.insert(2, "a"); /// assert_eq!(map.first_key_value(), Some((&1, &"b"))); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn first_key_value(&self) -> Option<(&K, &V)> where K: Ord, @@ -727,7 +726,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// # Examples /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); @@ -741,7 +739,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// assert_eq!(*map.get(&1).unwrap(), "first"); /// assert_eq!(*map.get(&2).unwrap(), "b"); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn first_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>> where K: 
Ord, @@ -765,7 +763,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// Draining elements in ascending order, while keeping a usable map each iteration. /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); @@ -776,7 +773,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// } /// assert!(map.is_empty()); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn pop_first(&mut self) -> Option<(K, V)> where K: Ord, @@ -792,7 +789,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// Basic usage: /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); @@ -800,7 +796,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// map.insert(2, "a"); /// assert_eq!(map.last_key_value(), Some((&2, &"a"))); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn last_key_value(&self) -> Option<(&K, &V)> where K: Ord, @@ -815,7 +811,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// # Examples /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); @@ -829,7 +824,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// assert_eq!(*map.get(&1).unwrap(), "a"); /// assert_eq!(*map.get(&2).unwrap(), "last"); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn last_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>> where K: Ord, @@ -853,7 +848,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// Draining elements in descending order, while keeping a usable map each iteration. /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); @@ -864,7 +858,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// } /// assert!(map.is_empty()); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn pop_last(&mut self) -> Option<(K, V)> where K: Ord, @@ -1099,6 +1093,9 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// Moves all elements from `other` into `self`, leaving `other` empty. /// + /// If a key from `other` is already present in `self`, the respective + /// value from `self` will be overwritten with the respective value from `other`. + /// /// # Examples /// /// ``` @@ -1107,10 +1104,10 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// let mut a = BTreeMap::new(); /// a.insert(1, "a"); /// a.insert(2, "b"); - /// a.insert(3, "c"); + /// a.insert(3, "c"); // Note: Key (3) also present in b. /// /// let mut b = BTreeMap::new(); - /// b.insert(3, "d"); + /// b.insert(3, "d"); // Note: Key (3) also present in a. /// b.insert(4, "e"); /// b.insert(5, "f"); /// @@ -1121,7 +1118,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// /// assert_eq!(a[&1], "a"); /// assert_eq!(a[&2], "b"); - /// assert_eq!(a[&3], "d"); + /// assert_eq!(a[&3], "d"); // Note: "c" has been overwritten. 
/// assert_eq!(a[&4], "e"); /// assert_eq!(a[&5], "f"); /// ``` @@ -2392,7 +2389,11 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] + #[rustc_const_unstable( + feature = "const_btree_len", + issue = "71835", + implied_by = "const_btree_new" + )] pub const fn len(&self) -> usize { self.length } @@ -2413,7 +2414,11 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] + #[rustc_const_unstable( + feature = "const_btree_len", + issue = "71835", + implied_by = "const_btree_new" + )] pub const fn is_empty(&self) -> bool { self.len() == 0 } diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs index cd7cdc192..370b58864 100644 --- a/library/alloc/src/collections/btree/map/entry.rs +++ b/library/alloc/src/collections/btree/map/entry.rs @@ -133,7 +133,6 @@ impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display } } -#[cfg(not(bootstrap))] #[unstable(feature = "map_try_insert", issue = "82766")] impl<'a, K: core::fmt::Debug + Ord, V: core::fmt::Debug> core::error::Error for crate::collections::btree_map::OccupiedError<'a, K, V> diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs index f1d2d3b30..da766b67a 100644 --- a/library/alloc/src/collections/btree/node.rs +++ b/library/alloc/src/collections/btree/node.rs @@ -206,9 +206,9 @@ impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> { unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {} -unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {} -unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {} -unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::ValMut<'a>, K, V, Type> {} +unsafe impl<K: Sync, V: Sync, Type> Send for NodeRef<marker::Immut<'_>, K, V, Type> {} +unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Mut<'_>, K, V, Type> {} +unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::ValMut<'_>, K, V, Type> {} unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {} unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Dying, K, V, Type> {} diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs index 2cfc08074..4ddb21192 100644 --- a/library/alloc/src/collections/btree/set.rs +++ b/library/alloc/src/collections/btree/set.rs @@ -343,7 +343,7 @@ impl<T> BTreeSet<T> { /// let mut set: BTreeSet<i32> = BTreeSet::new(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] + #[rustc_const_stable(feature = "const_btree_new", since = "1.66.0")] #[must_use] pub const fn new() -> BTreeSet<T> { BTreeSet { map: BTreeMap::new() } @@ -786,7 +786,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// Basic usage: /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeSet; /// /// let mut set = BTreeSet::new(); @@ -797,7 +796,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// assert_eq!(set.first(), Some(&1)); /// ``` #[must_use] - #[unstable(feature = "map_first_last", issue = "62924")] + 
#[stable(feature = "map_first_last", since = "1.66.0")] pub fn first(&self) -> Option<&T> where T: Ord, @@ -813,7 +812,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// Basic usage: /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeSet; /// /// let mut set = BTreeSet::new(); @@ -824,7 +822,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// assert_eq!(set.last(), Some(&2)); /// ``` #[must_use] - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn last(&self) -> Option<&T> where T: Ord, @@ -838,7 +836,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// # Examples /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeSet; /// /// let mut set = BTreeSet::new(); @@ -849,7 +846,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// } /// assert!(set.is_empty()); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn pop_first(&mut self) -> Option<T> where T: Ord, @@ -863,7 +860,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// # Examples /// /// ``` - /// #![feature(map_first_last)] /// use std::collections::BTreeSet; /// /// let mut set = BTreeSet::new(); @@ -874,7 +870,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// } /// assert!(set.is_empty()); /// ``` - #[unstable(feature = "map_first_last", issue = "62924")] + #[stable(feature = "map_first_last", since = "1.66.0")] pub fn pop_last(&mut self) -> Option<T> where T: Ord, @@ -1174,7 +1170,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] + #[rustc_const_unstable( + feature = "const_btree_len", + issue = "71835", + implied_by = "const_btree_new" + )] pub const fn len(&self) -> usize { self.map.len() } @@ -1193,7 +1193,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] + #[rustc_const_unstable( + feature = "const_btree_len", + issue = "71835", + implied_by = "const_btree_new" + )] pub const fn is_empty(&self) -> bool { self.len() == 0 } diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs index 6480fcaf9..f2f5dffc2 100644 --- a/library/alloc/src/collections/linked_list.rs +++ b/library/alloc/src/collections/linked_list.rs @@ -1613,7 +1613,7 @@ impl<'a, T> CursorMut<'a, T> { None } else { // We can't point to the node that we pop. Copying the behavior of - // `remove_current`, we move on the the next node in the sequence. + // `remove_current`, we move on to the next node in the sequence. // If the list is of length 1 then we end pointing to the "ghost" // node at index 0, which is expected. 
if self.list.head == self.current { diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs index 21d0def08..161a37573 100644 --- a/library/alloc/src/collections/mod.rs +++ b/library/alloc/src/collections/mod.rs @@ -153,6 +153,5 @@ trait SpecExtend<I: IntoIterator> { fn spec_extend(&mut self, iter: I); } -#[cfg(not(bootstrap))] #[stable(feature = "try_reserve", since = "1.57.0")] impl core::error::Error for TryReserveError {} diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index e3f4deb08..2a57dad89 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -12,11 +12,17 @@ use core::fmt; use core::hash::{Hash, Hasher}; use core::iter::{repeat_with, FromIterator}; use core::marker::PhantomData; -use core::mem::{self, ManuallyDrop, MaybeUninit}; +use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ops::{Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice; +// This is used in a bunch of intra-doc links. +// FIXME: For some reason, `#[cfg(doc)]` wasn't sufficient, resulting in +// failures in linkchecker even though rustdoc built the docs just fine. +#[allow(unused_imports)] +use core::mem; + use crate::alloc::{Allocator, Global}; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; @@ -177,7 +183,7 @@ impl<T, A: Allocator> VecDeque<T, A> { /// Marginally more convenient #[inline] fn cap(&self) -> usize { - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // For zero sized types, we are always at maximum capacity MAXIMUM_ZST_CAPACITY } else { @@ -3038,7 +3044,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> { /// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated. fn from(mut other: Vec<T, A>) -> Self { let len = other.len(); - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // There's no actual allocation for ZSTs to worry about capacity, // but `VecDeque` can't handle as much length as `Vec`. assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow"); @@ -3124,7 +3130,7 @@ impl<T, const N: usize> From<[T; N]> for VecDeque<T> { fn from(arr: [T; N]) -> Self { let mut deq = VecDeque::with_capacity(N); let arr = ManuallyDrop::new(arr); - if mem::size_of::<T>() != 0 { + if !<T>::IS_ZST { // SAFETY: VecDeque::with_capacity ensures that there is enough capacity. unsafe { ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N); diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs index aede6d54c..11bd4c4dc 100644 --- a/library/alloc/src/ffi/c_str.rs +++ b/library/alloc/src/ffi/c_str.rs @@ -1122,7 +1122,6 @@ impl CStr { } } -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] impl core::error::Error for NulError { #[allow(deprecated)] @@ -1131,11 +1130,9 @@ impl core::error::Error for NulError { } } -#[cfg(not(bootstrap))] #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")] impl core::error::Error for FromVecWithNulError {} -#[cfg(not(bootstrap))] #[stable(feature = "cstring_into", since = "1.7.0")] impl core::error::Error for IntoStringError { #[allow(deprecated)] diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs index ed398b566..799ce9d5d 100644 --- a/library/alloc/src/fmt.rs +++ b/library/alloc/src/fmt.rs @@ -327,7 +327,7 @@ //! - `text` must not contain any `'{'` or `'}'` characters, //! 
- `ws` is any character for which [`char::is_whitespace`] returns `true`, has no semantic //! meaning and is completely optional, -//! - `integer` is a decimal integer that may contain leading zeroes and +//! - `integer` is a decimal integer that may contain leading zeroes and must fit into an `usize` and //! - `identifier` is an `IDENTIFIER_OR_KEYWORD` (not an `IDENTIFIER`) as defined by the [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html). //! //! # Formatting traits diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 8619467c2..ce36b116f 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -69,6 +69,8 @@ any(not(feature = "miri-test-libstd"), test, doctest), no_global_oom_handling, not(no_global_oom_handling), + not(no_rc), + not(no_sync), target_has_atomic = "ptr" ))] #![no_std] @@ -97,7 +99,7 @@ #![feature(coerce_unsized)] #![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))] #![feature(const_box)] -#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))] +#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))] #![feature(const_cow_is_borrowed)] #![feature(const_convert)] #![feature(const_size_of_val)] @@ -109,10 +111,11 @@ #![feature(core_intrinsics)] #![feature(const_eval_select)] #![feature(const_pin)] +#![feature(const_waker)] #![feature(cstr_from_bytes_until_nul)] #![feature(dispatch_from_dyn)] -#![cfg_attr(not(bootstrap), feature(error_generic_member_access))] -#![cfg_attr(not(bootstrap), feature(error_in_core))] +#![feature(error_generic_member_access)] +#![feature(error_in_core)] #![feature(exact_size_is_empty)] #![feature(extend_one)] #![feature(fmt_internals)] @@ -122,20 +125,21 @@ #![feature(iter_advance_by)] #![feature(iter_next_chunk)] #![feature(layout_for_ptr)] -#![feature(maybe_uninit_array_assume_init)] #![feature(maybe_uninit_slice)] #![feature(maybe_uninit_uninit_array)] +#![feature(maybe_uninit_uninit_array_transpose)] #![cfg_attr(test, feature(new_uninit))] #![feature(nonnull_slice_from_raw_parts)] #![feature(pattern)] #![feature(pointer_byte_offsets)] -#![cfg_attr(not(bootstrap), feature(provide_any))] +#![feature(provide_any)] #![feature(ptr_internals)] #![feature(ptr_metadata)] #![feature(ptr_sub_ptr)] #![feature(receiver_trait)] #![feature(saturating_int_impl)] #![feature(set_ptr_value)] +#![feature(sized_type_properties)] #![feature(slice_from_ptr_range)] #![feature(slice_group_by)] #![feature(slice_ptr_get)] @@ -169,7 +173,6 @@ #![cfg_attr(not(test), feature(generator_trait))] #![feature(hashmap_internals)] #![feature(lang_items)] -#![cfg_attr(bootstrap, feature(let_else))] #![feature(min_specialization)] #![feature(negative_impls)] #![feature(never_type)] @@ -224,16 +227,17 @@ mod boxed { } pub mod borrow; pub mod collections; -#[cfg(not(no_global_oom_handling))] +#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))] pub mod ffi; pub mod fmt; +#[cfg(not(no_rc))] pub mod rc; pub mod slice; pub mod str; pub mod string; -#[cfg(target_has_atomic = "ptr")] +#[cfg(all(not(no_rc), not(no_sync), target_has_atomic = "ptr"))] pub mod sync; -#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))] +#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync), target_has_atomic = "ptr"))] pub mod task; #[cfg(test)] mod tests; diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index b0f4529ab..5a10121bb 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -3,7 +3,7 @@ use 
core::alloc::LayoutError; use core::cmp; use core::intrinsics; -use core::mem::{self, ManuallyDrop, MaybeUninit}; +use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ops::Drop; use core::ptr::{self, NonNull, Unique}; use core::slice; @@ -168,7 +168,7 @@ impl<T, A: Allocator> RawVec<T, A> { #[cfg(not(no_global_oom_handling))] fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self { // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. - if mem::size_of::<T>() == 0 || capacity == 0 { + if T::IS_ZST || capacity == 0 { Self::new_in(alloc) } else { // We avoid `unwrap_or_else` here because it bloats the amount of @@ -229,7 +229,7 @@ impl<T, A: Allocator> RawVec<T, A> { /// This will always be `usize::MAX` if `T` is zero-sized. #[inline(always)] pub fn capacity(&self) -> usize { - if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap } + if T::IS_ZST { usize::MAX } else { self.cap } } /// Returns a shared reference to the allocator backing this `RawVec`. @@ -238,7 +238,7 @@ impl<T, A: Allocator> RawVec<T, A> { } fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> { - if mem::size_of::<T>() == 0 || self.cap == 0 { + if T::IS_ZST || self.cap == 0 { None } else { // We have an allocated chunk of memory, so we can bypass runtime @@ -380,7 +380,7 @@ impl<T, A: Allocator> RawVec<T, A> { // This is ensured by the calling contexts. debug_assert!(additional > 0); - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // Since we return a capacity of `usize::MAX` when `elem_size` is // 0, getting to here necessarily means the `RawVec` is overfull. return Err(CapacityOverflow.into()); @@ -406,7 +406,7 @@ impl<T, A: Allocator> RawVec<T, A> { // `grow_amortized`, but this method is usually instantiated less often so // it's less critical. fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // Since we return a capacity of `usize::MAX` when the type size is // 0, getting to here necessarily means the `RawVec` is overfull. return Err(CapacityOverflow.into()); diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index 6d247681c..006d813e5 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -1110,8 +1110,8 @@ impl<T: ?Sized> Rc<T> { #[inline] #[stable(feature = "ptr_eq", since = "1.17.0")] - /// Returns `true` if the two `Rc`s point to the same allocation - /// (in a vein similar to [`ptr::eq`]). + /// Returns `true` if the two `Rc`s point to the same allocation in a vein similar to + /// [`ptr::eq`]. See [that function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers. /// /// # Examples /// @@ -1386,7 +1386,7 @@ impl<T: ?Sized> Rc<T> { Self::allocate_for_layout( Layout::for_value(&*ptr), |layout| Global.allocate(layout), - |mem| mem.with_metadata_of(ptr as *mut RcBox<T>), + |mem| mem.with_metadata_of(ptr as *const RcBox<T>), ) } } @@ -2419,9 +2419,9 @@ impl<T: ?Sized> Weak<T> { } } - /// Returns `true` if the two `Weak`s point to the same allocation (similar to - /// [`ptr::eq`]), or if both don't point to any allocation - /// (because they were created with `Weak::new()`). + /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if + /// both don't point to any allocation (because they were created with `Weak::new()`). See [that + /// function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers. 
/// /// # Notes /// diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index bcd3f49e2..a5e7bf2a1 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -16,9 +16,7 @@ use core::borrow::{Borrow, BorrowMut}; #[cfg(not(no_global_oom_handling))] use core::cmp::Ordering::{self, Less}; #[cfg(not(no_global_oom_handling))] -use core::mem; -#[cfg(not(no_global_oom_handling))] -use core::mem::size_of; +use core::mem::{self, SizedTypeProperties}; #[cfg(not(no_global_oom_handling))] use core::ptr; @@ -205,7 +203,7 @@ impl<T> [T] { where T: Ord, { - merge_sort(self, |a, b| a.lt(b)); + merge_sort(self, T::lt); } /// Sorts the slice with a comparator function. @@ -1018,7 +1016,7 @@ where const MIN_RUN: usize = 10; // Sorting has no meaningful behavior on zero-sized types. - if size_of::<T>() == 0 { + if T::IS_ZST { return; } diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs index f2448396c..c436adf70 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -44,7 +44,6 @@ #[cfg(not(no_global_oom_handling))] use core::char::{decode_utf16, REPLACEMENT_CHARACTER}; -#[cfg(not(bootstrap))] use core::error::Error; use core::fmt; use core::hash; @@ -68,7 +67,7 @@ use core::str::Utf8Chunks; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; use crate::collections::TryReserveError; -use crate::str::{self, Chars, Utf8Error}; +use crate::str::{self, from_utf8_unchecked_mut, Chars, Utf8Error}; #[cfg(not(no_global_oom_handling))] use crate::str::{from_boxed_utf8_unchecked, FromStr}; use crate::vec::Vec; @@ -1850,6 +1849,35 @@ impl String { let slice = self.vec.into_boxed_slice(); unsafe { from_boxed_utf8_unchecked(slice) } } + + /// Consumes and leaks the `String`, returning a mutable reference to the contents, + /// `&'static mut str`. + /// + /// This is mainly useful for data that lives for the remainder of + /// the program's life. Dropping the returned reference will cause a memory + /// leak. + /// + /// It does not reallocate or shrink the `String`, + /// so the leaked allocation may include unused capacity that is not part + /// of the returned slice. + /// + /// # Examples + /// + /// Simple usage: + /// + /// ``` + /// #![feature(string_leak)] + /// + /// let x = String::from("bucket"); + /// let static_ref: &'static mut str = x.leak(); + /// assert_eq!(static_ref, "bucket"); + /// ``` + #[unstable(feature = "string_leak", issue = "102929")] + #[inline] + pub fn leak(self) -> &'static mut str { + let slice = self.vec.leak(); + unsafe { from_utf8_unchecked_mut(slice) } + } } impl FromUtf8Error { @@ -1941,7 +1969,6 @@ impl fmt::Display for FromUtf16Error { } } -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] impl Error for FromUtf8Error { #[allow(deprecated)] @@ -1950,7 +1977,6 @@ impl Error for FromUtf8Error { } } -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] impl Error for FromUtf16Error { #[allow(deprecated)] diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index 4377edeee..81cd77074 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -3,6 +3,10 @@ //! Thread-safe reference-counting pointers. //! //! See the [`Arc<T>`][Arc] documentation for more details. +//! +//! **Note**: This module is only available on platforms that support atomic +//! loads and stores of pointers. This may be detected at compile time using +//! `#[cfg(target_has_atomic = "ptr")]`. use core::any::Any; use core::borrow; @@ -82,6 +86,11 @@ macro_rules! 
acquire { /// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic] /// types. /// +/// **Note**: This type is only available on platforms that support atomic +/// loads and stores of pointers, which includes all platforms that support +/// the `std` crate but not all those which only support [`alloc`](crate). +/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`. +/// /// ## Thread Safety /// /// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference @@ -1108,8 +1117,8 @@ impl<T: ?Sized> Arc<T> { drop(Weak { ptr: self.ptr }); } - /// Returns `true` if the two `Arc`s point to the same allocation - /// (in a vein similar to [`ptr::eq`]). + /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to + /// [`ptr::eq`]. See [that function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers. /// /// # Examples /// @@ -1195,7 +1204,7 @@ impl<T: ?Sized> Arc<T> { Self::allocate_for_layout( Layout::for_value(&*ptr), |layout| Global.allocate(layout), - |mem| mem.with_metadata_of(ptr as *mut ArcInner<T>), + |mem| mem.with_metadata_of(ptr as *const ArcInner<T>), ) } } @@ -1980,33 +1989,26 @@ impl<T: ?Sized> Weak<T> { // We use a CAS loop to increment the strong count instead of a // fetch_add as this function should never take the reference count // from zero to one. - let inner = self.inner()?; - - // Relaxed load because any write of 0 that we can observe - // leaves the field in a permanently zero state (so a - // "stale" read of 0 is fine), and any other value is - // confirmed via the CAS below. - let mut n = inner.strong.load(Relaxed); - - loop { - if n == 0 { - return None; - } - - // See comments in `Arc::clone` for why we do this (for `mem::forget`). - if n > MAX_REFCOUNT { - abort(); - } - + self.inner()? + .strong // Relaxed is fine for the failure case because we don't have any expectations about the new state. // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner // value can be initialized after `Weak` references have already been created. In that case, we // expect to observe the fully initialized value. - match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) { - Ok(_) => return Some(unsafe { Arc::from_inner(self.ptr) }), // null checked above - Err(old) => n = old, - } - } + .fetch_update(Acquire, Relaxed, |n| { + // Any write of 0 we can observe leaves the field in permanently zero state. + if n == 0 { + return None; + } + // See comments in `Arc::clone` for why we do this (for `mem::forget`). + if n > MAX_REFCOUNT { + abort(); + } + Some(n + 1) + }) + .ok() + // null checked above + .map(|_| unsafe { Arc::from_inner(self.ptr) }) } /// Gets the number of strong (`Arc`) pointers pointing to this allocation. @@ -2067,9 +2069,9 @@ impl<T: ?Sized> Weak<T> { } } - /// Returns `true` if the two `Weak`s point to the same allocation (similar to - /// [`ptr::eq`]), or if both don't point to any allocation - /// (because they were created with `Weak::new()`). + /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if + /// both don't point to any allocation (because they were created with `Weak::new()`). See [that + /// function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers. 
/// /// # Notes /// @@ -2764,7 +2766,6 @@ fn data_offset_align(align: usize) -> usize { layout.size() + layout.padding_needed_for(align) } -#[cfg(not(bootstrap))] #[stable(feature = "arc_error", since = "1.52.0")] impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> { #[allow(deprecated, deprecated_in_future)] diff --git a/library/alloc/src/task.rs b/library/alloc/src/task.rs index 528ee4ff1..9d8e309a9 100644 --- a/library/alloc/src/task.rs +++ b/library/alloc/src/task.rs @@ -1,5 +1,11 @@ #![stable(feature = "wake_trait", since = "1.51.0")] + //! Types and Traits for working with asynchronous tasks. +//! +//! **Note**: This module is only available on platforms that support atomic +//! loads and stores of pointers. This may be detected at compile time using +//! `#[cfg(target_has_atomic = "ptr")]`. + use core::mem::ManuallyDrop; use core::task::{RawWaker, RawWakerVTable, Waker}; diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs index 5b73906a1..541f99bcf 100644 --- a/library/alloc/src/vec/drain.rs +++ b/library/alloc/src/vec/drain.rs @@ -1,7 +1,7 @@ use crate::alloc::{Allocator, Global}; use core::fmt; use core::iter::{FusedIterator, TrustedLen}; -use core::mem::{self, ManuallyDrop}; +use core::mem::{self, ManuallyDrop, SizedTypeProperties}; use core::ptr::{self, NonNull}; use core::slice::{self}; @@ -202,7 +202,7 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> { let mut vec = self.vec; - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount. // this can be achieved by manipulating the Vec length instead of moving values out from `iter`. unsafe { diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs index b211421b2..87d61deb1 100644 --- a/library/alloc/src/vec/in_place_collect.rs +++ b/library/alloc/src/vec/in_place_collect.rs @@ -55,6 +55,9 @@ //! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by //! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`). //! +//! If dropping any remaining source item (`T`) panics then [`InPlaceDstBufDrop`] will handle dropping +//! the already collected sink items (`U`) and freeing the allocation. +//! //! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining() //! //! # O(1) collect @@ -135,10 +138,10 @@ //! vec.truncate(write_idx); //! ``` use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce}; -use core::mem::{self, ManuallyDrop}; +use core::mem::{self, ManuallyDrop, SizedTypeProperties}; use core::ptr::{self}; -use super::{InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec}; +use super::{InPlaceDrop, InPlaceDstBufDrop, SpecFromIter, SpecFromIterNested, Vec}; /// Specialization marker for collecting an iterator pipeline into a Vec while reusing the /// source allocation, i.e. executing the pipeline in place. @@ -154,7 +157,7 @@ where default fn from_iter(mut iterator: I) -> Self { // See "Layout constraints" section in the module documentation. 
We rely on const // optimization here since these conditions currently cannot be expressed as trait bounds - if mem::size_of::<T>() == 0 + if T::IS_ZST || mem::size_of::<T>() != mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>() || mem::align_of::<T>() @@ -191,14 +194,17 @@ where ); } - // Drop any remaining values at the tail of the source but prevent drop of the allocation - // itself once IntoIter goes out of scope. - // If the drop panics then we also leak any elements collected into dst_buf. + // The ownership of the allocation and the new `T` values is temporarily moved into `dst_guard`. + // This is safe because `forget_allocation_drop_remaining` immediately forgets the allocation + // before any panic can occur in order to avoid any double free, and then proceeds to drop + // any remaining values at the tail of the source. // // Note: This access to the source wouldn't be allowed by the TrustedRandomIteratorNoCoerce // contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the // module documenttation why this is ok anyway. + let dst_guard = InPlaceDstBufDrop { ptr: dst_buf, len, cap }; src.forget_allocation_drop_remaining(); + mem::forget(dst_guard); let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) }; diff --git a/library/alloc/src/vec/in_place_drop.rs b/library/alloc/src/vec/in_place_drop.rs index 1b1ef9130..25ca33c6a 100644 --- a/library/alloc/src/vec/in_place_drop.rs +++ b/library/alloc/src/vec/in_place_drop.rs @@ -22,3 +22,18 @@ impl<T> Drop for InPlaceDrop<T> { } } } + +// A helper struct for in-place collection that drops the destination allocation and elements, +// to avoid leaking them if some other destructor panics. +pub(super) struct InPlaceDstBufDrop<T> { + pub(super) ptr: *mut T, + pub(super) len: usize, + pub(super) cap: usize, +} + +impl<T> Drop for InPlaceDstBufDrop<T> { + #[inline] + fn drop(&mut self) { + unsafe { super::Vec::from_raw_parts(self.ptr, self.len, self.cap) }; + } +} diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index b4157fd58..02cc7691a 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -8,7 +8,7 @@ use core::iter::{ FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce, }; use core::marker::PhantomData; -use core::mem::{self, ManuallyDrop, MaybeUninit}; +use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; #[cfg(not(no_global_oom_handling))] use core::ops::Deref; use core::ptr::{self, NonNull}; @@ -95,13 +95,16 @@ impl<T, A: Allocator> IntoIter<T, A> { } /// Drops remaining elements and relinquishes the backing allocation. + /// This method guarantees it won't panic before relinquishing + /// the backing allocation. /// /// This is roughly equivalent to the following, but more efficient /// /// ``` /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter(); + /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter()); /// (&mut into_iter).for_each(core::mem::drop); - /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); } + /// std::mem::forget(into_iter); /// ``` /// /// This method is used by in-place iteration, refer to the vec::in_place_collect @@ -118,6 +121,8 @@ impl<T, A: Allocator> IntoIter<T, A> { self.ptr = self.buf.as_ptr(); self.end = self.buf.as_ptr(); + // Dropping the remaining elements can panic, so this needs to be + // done only after updating the other fields. 
unsafe { ptr::drop_in_place(remaining); } @@ -149,7 +154,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { fn next(&mut self) -> Option<T> { if self.ptr == self.end { None - } else if mem::size_of::<T>() == 0 { + } else if T::IS_ZST { // purposefully don't use 'ptr.offset' because for // vectors with 0-size elements this would return the // same pointer. @@ -167,7 +172,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { #[inline] fn size_hint(&self) -> (usize, Option<usize>) { - let exact = if mem::size_of::<T>() == 0 { + let exact = if T::IS_ZST { self.end.addr().wrapping_sub(self.ptr.addr()) } else { unsafe { self.end.sub_ptr(self.ptr) } @@ -179,7 +184,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { fn advance_by(&mut self, n: usize) -> Result<(), usize> { let step_size = self.len().min(n); let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size); - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound // effectively results in unsigned pointers representing positions 0..usize::MAX, // which is valid for ZSTs. @@ -209,7 +214,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { let len = self.len(); - if mem::size_of::<T>() == 0 { + if T::IS_ZST { if len < N { self.forget_remaining_elements(); // Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct @@ -218,7 +223,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { self.ptr = self.ptr.wrapping_byte_add(N); // Safety: ditto - return Ok(unsafe { MaybeUninit::array_assume_init(raw_ary) }); + return Ok(unsafe { raw_ary.transpose().assume_init() }); } if len < N { @@ -236,7 +241,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { return unsafe { ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N); self.ptr = self.ptr.add(N); - Ok(MaybeUninit::array_assume_init(raw_ary)) + Ok(raw_ary.transpose().assume_init()) }; } @@ -253,7 +258,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { // that `T: Copy` so reading elements from the buffer doesn't invalidate // them for `Drop`. unsafe { - if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } + if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } } } } @@ -264,7 +269,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { fn next_back(&mut self) -> Option<T> { if self.end == self.ptr { None - } else if mem::size_of::<T>() == 0 { + } else if T::IS_ZST { // See above for why 'ptr.offset' isn't used self.end = self.end.wrapping_byte_sub(1); @@ -280,7 +285,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { #[inline] fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { let step_size = self.len().min(n); - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // SAFETY: same as for advance_by() self.end = self.end.wrapping_byte_sub(step_size); } else { diff --git a/library/alloc/src/vec/is_zero.rs b/library/alloc/src/vec/is_zero.rs index 2e025c8a4..8e652d676 100644 --- a/library/alloc/src/vec/is_zero.rs +++ b/library/alloc/src/vec/is_zero.rs @@ -160,3 +160,25 @@ unsafe impl<T: IsZero> IsZero for Saturating<T> { self.0.is_zero() } } + +macro_rules! 
impl_for_optional_bool { + ($($t:ty,)+) => {$( + unsafe impl IsZero for $t { + #[inline] + fn is_zero(&self) -> bool { + // SAFETY: This is *not* a stable layout guarantee, but + // inside `core` we're allowed to rely on the current rustc + // behaviour that options of bools will be one byte with + // no padding, so long as they're nested less than 254 deep. + let raw: u8 = unsafe { core::mem::transmute(*self) }; + raw == 0 + } + } + )+}; +} +impl_for_optional_bool! { + Option<bool>, + Option<Option<bool>>, + Option<Option<Option<bool>>>, + // Could go further, but not worth the metadata overhead +} diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 60b36af5e..bbbdc3aa2 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -64,7 +64,7 @@ use core::iter; #[cfg(not(no_global_oom_handling))] use core::iter::FromIterator; use core::marker::PhantomData; -use core::mem::{self, ManuallyDrop, MaybeUninit}; +use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ops::{self, Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice::{self, SliceIndex}; @@ -125,7 +125,7 @@ use self::set_len_on_drop::SetLenOnDrop; mod set_len_on_drop; #[cfg(not(no_global_oom_handling))] -use self::in_place_drop::InPlaceDrop; +use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop}; #[cfg(not(no_global_oom_handling))] mod in_place_drop; @@ -483,15 +483,13 @@ impl<T> Vec<T> { Self::with_capacity_in(capacity, Global) } - /// Creates a `Vec<T>` directly from the raw components of another vector. + /// Creates a `Vec<T>` directly from a pointer, a capacity, and a length. /// /// # Safety /// /// This is highly unsafe, due to the number of invariants that aren't /// checked: /// - /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>` - /// (at least, it's highly likely to be incorrect if it wasn't). /// * `T` needs to have the same alignment as what `ptr` was allocated with. /// (`T` having a less strict alignment is not sufficient, the alignment really /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be @@ -500,6 +498,14 @@ impl<T> Vec<T> { /// to be the same size as the pointer was allocated with. (Because similar to /// alignment, [`dealloc`] must be called with the same layout `size`.) /// * `length` needs to be less than or equal to `capacity`. + /// * The first `length` values must be properly initialized values of type `T`. + /// * `capacity` needs to be the capacity that the pointer was allocated with. + /// * The allocated size in bytes must be no larger than `isize::MAX`. + /// See the safety documentation of [`pointer::offset`]. + /// + /// These requirements are always upheld by any `ptr` that has been allocated + /// via `Vec<T>`. Other allocation sources are allowed if the invariants are + /// upheld. /// /// Violating these may cause problems like corrupting the allocator's /// internal data structures. 
For example it is normally **not** safe @@ -551,6 +557,32 @@ impl<T> Vec<T> { /// assert_eq!(rebuilt, [4, 5, 6]); /// } /// ``` + /// + /// Using memory that was allocated elsewhere: + /// + /// ```rust + /// #![feature(allocator_api)] + /// + /// use std::alloc::{AllocError, Allocator, Global, Layout}; + /// + /// fn main() { + /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen"); + /// + /// let vec = unsafe { + /// let mem = match Global.allocate(layout) { + /// Ok(mem) => mem.cast::<u32>().as_ptr(), + /// Err(AllocError) => return, + /// }; + /// + /// mem.write(1_000_000); + /// + /// Vec::from_raw_parts_in(mem, 1, 16, Global) + /// }; + /// + /// assert_eq!(vec, &[1_000_000]); + /// assert_eq!(vec.capacity(), 16); + /// } + /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self { @@ -641,21 +673,30 @@ impl<T, A: Allocator> Vec<T, A> { Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 } } - /// Creates a `Vec<T, A>` directly from the raw components of another vector. + /// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length, + /// and an allocator. /// /// # Safety /// /// This is highly unsafe, due to the number of invariants that aren't /// checked: /// - /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>` - /// (at least, it's highly likely to be incorrect if it wasn't). - /// * `T` needs to have the same size and alignment as what `ptr` was allocated with. + /// * `T` needs to have the same alignment as what `ptr` was allocated with. /// (`T` having a less strict alignment is not sufficient, the alignment really /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be /// allocated and deallocated with the same layout.) + /// * The size of `T` times the `capacity` (ie. the allocated size in bytes) needs + /// to be the same size as the pointer was allocated with. (Because similar to + /// alignment, [`dealloc`] must be called with the same layout `size`.) /// * `length` needs to be less than or equal to `capacity`. - /// * `capacity` needs to be the capacity that the pointer was allocated with. + /// * The first `length` values must be properly initialized values of type `T`. + /// * `capacity` needs to [*fit*] the layout size that the pointer was allocated with. + /// * The allocated size in bytes must be no larger than `isize::MAX`. + /// See the safety documentation of [`pointer::offset`]. + /// + /// These requirements are always upheld by any `ptr` that has been allocated + /// via `Vec<T, A>`. Other allocation sources are allowed if the invariants are + /// upheld. /// /// Violating these may cause problems like corrupting the allocator's /// internal data structures. 
For example it is **not** safe @@ -673,6 +714,7 @@ impl<T, A: Allocator> Vec<T, A> { /// /// [`String`]: crate::string::String /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc + /// [*fit*]: crate::alloc::Allocator#memory-fitting /// /// # Examples /// @@ -711,6 +753,29 @@ impl<T, A: Allocator> Vec<T, A> { /// assert_eq!(rebuilt, [4, 5, 6]); /// } /// ``` + /// + /// Using memory that was allocated elsewhere: + /// + /// ```rust + /// use std::alloc::{alloc, Layout}; + /// + /// fn main() { + /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen"); + /// let vec = unsafe { + /// let mem = alloc(layout).cast::<u32>(); + /// if mem.is_null() { + /// return; + /// } + /// + /// mem.write(1_000_000); + /// + /// Vec::from_raw_parts(mem, 1, 16) + /// }; + /// + /// assert_eq!(vec, &[1_000_000]); + /// assert_eq!(vec.capacity(), 16); + /// } + /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { @@ -803,13 +868,14 @@ impl<T, A: Allocator> Vec<T, A> { (ptr, len, capacity, alloc) } - /// Returns the number of elements the vector can hold without + /// Returns the total number of elements the vector can hold without /// reallocating. /// /// # Examples /// /// ``` - /// let vec: Vec<i32> = Vec::with_capacity(10); + /// let mut vec: Vec<i32> = Vec::with_capacity(10); + /// vec.push(42); /// assert_eq!(vec.capacity(), 10); /// ``` #[inline] @@ -1774,6 +1840,51 @@ impl<T, A: Allocator> Vec<T, A> { } } + /// Appends an element if there is sufficient spare capacity, otherwise an error is returned + /// with the element. + /// + /// Unlike [`push`] this method will not reallocate when there's insufficient capacity. + /// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity. + /// + /// [`push`]: Vec::push + /// [`reserve`]: Vec::reserve + /// [`try_reserve`]: Vec::try_reserve + /// + /// # Examples + /// + /// A manual, panic-free alternative to [`FromIterator`]: + /// + /// ``` + /// #![feature(vec_push_within_capacity)] + /// + /// use std::collections::TryReserveError; + /// fn from_iter_fallible<T>(iter: impl Iterator<Item=T>) -> Result<Vec<T>, TryReserveError> { + /// let mut vec = Vec::new(); + /// for value in iter { + /// if let Err(value) = vec.push_within_capacity(value) { + /// vec.try_reserve(1)?; + /// // this cannot fail, the previous line either returned or added at least 1 free slot + /// let _ = vec.push_within_capacity(value); + /// } + /// } + /// Ok(vec) + /// } + /// assert_eq!(from_iter_fallible(0..100), Ok(Vec::from_iter(0..100))); + /// ``` + #[inline] + #[unstable(feature = "vec_push_within_capacity", issue = "100486")] + pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> { + if self.len == self.buf.capacity() { + return Err(value); + } + unsafe { + let end = self.as_mut_ptr().add(self.len); + ptr::write(end, value); + self.len += 1; + } + Ok(()) + } + /// Removes the last element from a vector and returns it, or [`None`] if it /// is empty. /// @@ -1889,9 +2000,7 @@ impl<T, A: Allocator> Vec<T, A> { unsafe { // set self.vec length's to start, to be safe in case Drain is leaked self.set_len(start); - // Use the borrow in the IterMut to indicate borrowing behavior of the - // whole Drain iterator (like &mut T). 
- let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start); + let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start); Drain { tail_start: end, tail_len: len - end, @@ -2083,7 +2192,6 @@ impl<T, A: Allocator> Vec<T, A> { /// static_ref[0] += 1; /// assert_eq!(static_ref, &[2, 2, 3]); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_leak", since = "1.47.0")] #[inline] pub fn leak<'a>(self) -> &'a mut [T] @@ -2347,7 +2455,7 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> { #[unstable(feature = "slice_flatten", issue = "95629")] pub fn into_flattened(self) -> Vec<T, A> { let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc(); - let (new_len, new_cap) = if mem::size_of::<T>() == 0 { + let (new_len, new_cap) = if T::IS_ZST { (len.checked_mul(N).expect("vec len overflow"), usize::MAX) } else { // SAFETY: @@ -2677,7 +2785,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> { let mut me = ManuallyDrop::new(self); let alloc = ManuallyDrop::new(ptr::read(me.allocator())); let begin = me.as_mut_ptr(); - let end = if mem::size_of::<T>() == 0 { + let end = if T::IS_ZST { begin.wrapping_byte_add(me.len()) } else { begin.add(me.len()) as *const T diff --git a/library/alloc/tests/autotraits.rs b/library/alloc/tests/autotraits.rs new file mode 100644 index 000000000..8ff5f0abe --- /dev/null +++ b/library/alloc/tests/autotraits.rs @@ -0,0 +1,293 @@ +fn require_sync<T: Sync>(_: T) {} +fn require_send_sync<T: Send + Sync>(_: T) {} + +struct NotSend(*const ()); +unsafe impl Sync for NotSend {} + +#[test] +fn test_btree_map() { + // Tests of this form are prone to https://github.com/rust-lang/rust/issues/64552. + // + // In theory the async block's future would be Send if the value we hold + // across the await point is Send, and Sync if the value we hold across the + // await point is Sync. + // + // We test autotraits in this convoluted way, instead of a straightforward + // `require_send_sync::<TypeIWantToTest>()`, because the interaction with + // generators exposes some current limitations in rustc's ability to prove a + // lifetime bound on the erased generator witness types. See the above link. + // + // A typical way this would surface in real code is: + // + // fn spawn<T: Future + Send>(_: T) {} + // + // async fn f() { + // let map = BTreeMap::<u32, Box<dyn Send + Sync>>::new(); + // for _ in &map { + // async {}.await; + // } + // } + // + // fn main() { + // spawn(f()); + // } + // + // where with some unintentionally overconstrained Send impls in liballoc's + // internals, the future might incorrectly not be Send even though every + // single type involved in the program is Send and Sync. + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::Iter<'_, &u32, &u32>>; + async {}.await; + }); + + // Testing like this would not catch all issues that the above form catches. 
+ require_send_sync(None::<alloc::collections::btree_map::Iter<'_, &u32, &u32>>); + + require_sync(async { + let _v = None::<alloc::collections::btree_map::Iter<'_, u32, NotSend>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::BTreeMap<&u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::< + alloc::collections::btree_map::DrainFilter< + '_, + &u32, + &u32, + fn(&&u32, &mut &u32) -> bool, + >, + >; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::Entry<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::IntoIter<&u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::IntoKeys<&u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::IntoValues<&u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::Iter<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::IterMut<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::Keys<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::OccupiedEntry<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::OccupiedError<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::Range<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::RangeMut<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::VacantEntry<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::Values<'_, &u32, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_map::ValuesMut<'_, &u32, &u32>>; + async {}.await; + }); +} + +#[test] +fn test_btree_set() { + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::BTreeSet<&u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::Difference<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::DrainFilter<'_, &u32, fn(&&u32) -> bool>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::Intersection<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::IntoIter<&u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::Iter<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::Range<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::SymmetricDifference<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::btree_set::Union<'_, &u32>>; + async {}.await; + }); +} + +#[test] +fn test_binary_heap() { + 
require_send_sync(async { + let _v = None::<alloc::collections::binary_heap::BinaryHeap<&u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::binary_heap::Drain<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::binary_heap::DrainSorted<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::binary_heap::IntoIter<&u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::binary_heap::IntoIterSorted<&u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::binary_heap::Iter<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::binary_heap::PeekMut<'_, &u32>>; + async {}.await; + }); +} + +#[test] +fn test_linked_list() { + require_send_sync(async { + let _v = None::<alloc::collections::linked_list::Cursor<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::linked_list::CursorMut<'_, &u32>>; + async {}.await; + }); + + // FIXME + /* + require_send_sync(async { + let _v = + None::<alloc::collections::linked_list::DrainFilter<'_, &u32, fn(&mut &u32) -> bool>>; + async {}.await; + }); + */ + + require_send_sync(async { + let _v = None::<alloc::collections::linked_list::IntoIter<&u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::linked_list::Iter<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::linked_list::IterMut<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::linked_list::LinkedList<&u32>>; + async {}.await; + }); +} + +#[test] +fn test_vec_deque() { + require_send_sync(async { + let _v = None::<alloc::collections::vec_deque::Drain<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::vec_deque::IntoIter<&u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::vec_deque::Iter<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::vec_deque::IterMut<'_, &u32>>; + async {}.await; + }); + + require_send_sync(async { + let _v = None::<alloc::collections::vec_deque::VecDeque<&u32>>; + async {}.await; + }); +} diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs index 490c0d8f7..ffc5ca7a5 100644 --- a/library/alloc/tests/lib.rs +++ b/library/alloc/tests/lib.rs @@ -2,6 +2,7 @@ #![feature(alloc_layout_extra)] #![feature(assert_matches)] #![feature(box_syntax)] +#![feature(btree_drain_filter)] #![feature(cow_is_borrowed)] #![feature(const_box)] #![feature(const_convert)] @@ -14,6 +15,8 @@ #![feature(core_intrinsics)] #![feature(drain_filter)] #![feature(exact_size_is_empty)] +#![feature(linked_list_cursors)] +#![feature(map_try_insert)] #![feature(new_uninit)] #![feature(pattern)] #![feature(trusted_len)] @@ -32,7 +35,7 @@ #![feature(slice_group_by)] #![feature(slice_partition_dedup)] #![feature(string_remove_matches)] -#![feature(const_btree_new)] +#![feature(const_btree_len)] #![feature(const_default_impls)] #![feature(const_trait_impl)] #![feature(const_str_from_utf8)] @@ -41,7 +44,6 @@ #![feature(pointer_is_aligned)] #![feature(slice_flatten)] #![feature(thin_box)] -#![feature(bench_black_box)] #![feature(strict_provenance)] #![feature(once_cell)] 
#![feature(drain_keep_rest)] @@ -50,6 +52,7 @@ use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; mod arc; +mod autotraits; mod borrow; mod boxed; mod btree_set_hash; diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs index f140fc414..e02711870 100644 --- a/library/alloc/tests/vec.rs +++ b/library/alloc/tests/vec.rs @@ -1191,48 +1191,53 @@ fn test_from_iter_specialization_panic_during_iteration_drops() { } #[test] -fn test_from_iter_specialization_panic_during_drop_leaks() { - static mut DROP_COUNTER: usize = 0; +fn test_from_iter_specialization_panic_during_drop_doesnt_leak() { + static mut DROP_COUNTER_OLD: [usize; 5] = [0; 5]; + static mut DROP_COUNTER_NEW: [usize; 2] = [0; 2]; #[derive(Debug)] - enum Droppable { - DroppedTwice(Box<i32>), - PanicOnDrop, - } + struct Old(usize); - impl Drop for Droppable { + impl Drop for Old { fn drop(&mut self) { - match self { - Droppable::DroppedTwice(_) => { - unsafe { - DROP_COUNTER += 1; - } - println!("Dropping!") - } - Droppable::PanicOnDrop => { - if !std::thread::panicking() { - panic!(); - } - } + unsafe { + DROP_COUNTER_OLD[self.0] += 1; + } + + if self.0 == 3 { + panic!(); } + + println!("Dropped Old: {}", self.0); } } - let mut to_free: *mut Droppable = core::ptr::null_mut(); - let mut cap = 0; + #[derive(Debug)] + struct New(usize); + + impl Drop for New { + fn drop(&mut self) { + unsafe { + DROP_COUNTER_NEW[self.0] += 1; + } + + println!("Dropped New: {}", self.0); + } + } let _ = std::panic::catch_unwind(AssertUnwindSafe(|| { - let mut v = vec![Droppable::DroppedTwice(Box::new(123)), Droppable::PanicOnDrop]; - to_free = v.as_mut_ptr(); - cap = v.capacity(); - let _ = v.into_iter().take(0).collect::<Vec<_>>(); + let v = vec![Old(0), Old(1), Old(2), Old(3), Old(4)]; + let _ = v.into_iter().map(|x| New(x.0)).take(2).collect::<Vec<_>>(); })); - assert_eq!(unsafe { DROP_COUNTER }, 1); - // clean up the leak to keep miri happy - unsafe { - drop(Vec::from_raw_parts(to_free, 0, cap)); - } + assert_eq!(unsafe { DROP_COUNTER_OLD[0] }, 1); + assert_eq!(unsafe { DROP_COUNTER_OLD[1] }, 1); + assert_eq!(unsafe { DROP_COUNTER_OLD[2] }, 1); + assert_eq!(unsafe { DROP_COUNTER_OLD[3] }, 1); + assert_eq!(unsafe { DROP_COUNTER_OLD[4] }, 1); + + assert_eq!(unsafe { DROP_COUNTER_NEW[0] }, 1); + assert_eq!(unsafe { DROP_COUNTER_NEW[1] }, 1); } // regression test for issue #85322. 
Peekable previously implemented InPlaceIterable, diff --git a/library/core/benches/iter.rs b/library/core/benches/iter.rs index 0abe20e4c..38887f29a 100644 --- a/library/core/benches/iter.rs +++ b/library/core/benches/iter.rs @@ -1,4 +1,6 @@ use core::iter::*; +use core::mem; +use core::num::Wrapping; use test::{black_box, Bencher}; #[bench] @@ -364,6 +366,13 @@ fn bench_partial_cmp(b: &mut Bencher) { } #[bench] +fn bench_chain_partial_cmp(b: &mut Bencher) { + b.iter(|| { + (0..50000).chain(50000..100000).map(black_box).partial_cmp((0..100000).map(black_box)) + }) +} + +#[bench] fn bench_lt(b: &mut Bencher) { b.iter(|| (0..100000).map(black_box).lt((0..100000).map(black_box))) } @@ -391,3 +400,21 @@ fn bench_trusted_random_access_adapters(b: &mut Bencher) { acc }) } + +/// Exercises the iter::Copied specialization for slice::Iter +#[bench] +fn bench_copied_array_chunks(b: &mut Bencher) { + let v = vec![1u8; 1024]; + + b.iter(|| { + black_box(&v) + .iter() + .copied() + .array_chunks::<{ mem::size_of::<u64>() }>() + .map(|ary| { + let d = u64::from_ne_bytes(ary); + Wrapping(d.rotate_left(7).wrapping_add(1)) + }) + .sum::<Wrapping<u64>>() + }) +} diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs index a6c174d2f..1e462e3fc 100644 --- a/library/core/benches/lib.rs +++ b/library/core/benches/lib.rs @@ -4,6 +4,7 @@ #![feature(int_log)] #![feature(test)] #![feature(trusted_random_access)] +#![feature(iter_array_chunks)] extern crate test; diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs index f03502429..920e559cc 100644 --- a/library/core/src/alloc/layout.rs +++ b/library/core/src/alloc/layout.rs @@ -5,7 +5,6 @@ // Your performance intuition is useless. Run perf. use crate::cmp; -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt; use crate::mem::{self, ValidAlign}; @@ -65,6 +64,7 @@ impl Layout { #[stable(feature = "alloc_layout", since = "1.28.0")] #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")] #[inline] + #[rustc_allow_const_fn_unstable(ptr_alignment_type)] pub const fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutError> { if !align.is_power_of_two() { return Err(LayoutError); @@ -114,6 +114,7 @@ impl Layout { #[rustc_const_stable(feature = "const_alloc_layout_unchecked", since = "1.36.0")] #[must_use] #[inline] + #[rustc_allow_const_fn_unstable(ptr_alignment_type)] pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self { // SAFETY: the caller is required to uphold the preconditions. 
unsafe { Layout { size, align: ValidAlign::new_unchecked(align) } } @@ -134,6 +135,7 @@ impl Layout { #[must_use = "this returns the minimum alignment, \ without modifying the layout"] #[inline] + #[rustc_allow_const_fn_unstable(ptr_alignment_type)] pub const fn align(&self) -> usize { self.align.as_usize() } @@ -463,7 +465,6 @@ pub type LayoutErr = LayoutError; #[derive(Clone, PartialEq, Eq, Debug)] pub struct LayoutError; -#[cfg(not(bootstrap))] #[stable(feature = "alloc_layout", since = "1.28.0")] impl Error for LayoutError {} diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs index 94efa7666..a4bf6a853 100644 --- a/library/core/src/alloc/mod.rs +++ b/library/core/src/alloc/mod.rs @@ -21,7 +21,6 @@ pub use self::layout::LayoutErr; #[stable(feature = "alloc_layout_error", since = "1.50.0")] pub use self::layout::LayoutError; -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt; use crate::ptr::{self, NonNull}; @@ -34,7 +33,6 @@ use crate::ptr::{self, NonNull}; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct AllocError; -#[cfg(not(bootstrap))] #[unstable( feature = "allocator_api", reason = "the precise API and guarantees it provides may be tweaked.", @@ -107,6 +105,7 @@ impl fmt::Display for AllocError { /// /// [*currently allocated*]: #currently-allocated-memory #[unstable(feature = "allocator_api", issue = "32838")] +#[const_trait] pub unsafe trait Allocator { /// Attempts to allocate a block of memory. /// diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs index f4885ed9f..b91c63018 100644 --- a/library/core/src/array/iter.rs +++ b/library/core/src/array/iter.rs @@ -1,10 +1,10 @@ //! Defines the `IntoIter` owned iterator for arrays. use crate::{ - cmp, fmt, + fmt, iter::{self, ExactSizeIterator, FusedIterator, TrustedLen}, mem::{self, MaybeUninit}, - ops::Range, + ops::{IndexRange, Range}, ptr, }; @@ -29,9 +29,10 @@ pub struct IntoIter<T, const N: usize> { /// The elements in `data` that have not been yielded yet. /// /// Invariants: - /// - `alive.start <= alive.end` /// - `alive.end <= N` - alive: Range<usize>, + /// + /// (And the `IndexRange` type requires `alive.start <= alive.end`.) + alive: IndexRange, } // Note: the `#[rustc_skip_array_during_method_dispatch]` on `trait IntoIterator` @@ -69,7 +70,7 @@ impl<T, const N: usize> IntoIterator for [T; N] { // Until then, we can use `mem::transmute_copy` to create a bitwise copy // as a different type, then forget `array` so that it is not dropped. 
unsafe { - let iter = IntoIter { data: mem::transmute_copy(&self), alive: 0..N }; + let iter = IntoIter { data: mem::transmute_copy(&self), alive: IndexRange::zero_to(N) }; mem::forget(self); iter } @@ -103,8 +104,7 @@ impl<T, const N: usize> IntoIter<T, N> { /// /// ``` /// #![feature(array_into_iter_constructors)] - /// - /// #![feature(maybe_uninit_array_assume_init)] + /// #![feature(maybe_uninit_uninit_array_transpose)] /// #![feature(maybe_uninit_uninit_array)] /// use std::array::IntoIter; /// use std::mem::MaybeUninit; @@ -133,7 +133,7 @@ impl<T, const N: usize> IntoIter<T, N> { /// } /// /// // SAFETY: We've initialized all N items - /// unsafe { Ok(MaybeUninit::array_assume_init(buffer)) } + /// unsafe { Ok(buffer.transpose().assume_init()) } /// } /// /// let r: [_; 4] = next_chunk(&mut (10..16)).unwrap(); @@ -147,7 +147,9 @@ impl<T, const N: usize> IntoIter<T, N> { buffer: [MaybeUninit<T>; N], initialized: Range<usize>, ) -> Self { - Self { data: buffer, alive: initialized } + // SAFETY: one of our safety conditions is that the range is canonical. + let alive = unsafe { IndexRange::new_unchecked(initialized.start, initialized.end) }; + Self { data: buffer, alive } } /// Creates an iterator over `T` which returns no elements. @@ -283,16 +285,11 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> { } fn advance_by(&mut self, n: usize) -> Result<(), usize> { - let len = self.len(); - - // The number of elements to drop. Always in-bounds by construction. - let delta = cmp::min(n, len); + let original_len = self.len(); - let range_to_drop = self.alive.start..(self.alive.start + delta); - - // Moving the start marks them as conceptually "dropped", so if anything - // goes bad then our drop impl won't double-free them. - self.alive.start += delta; + // This also moves the start, which marks them as conceptually "dropped", + // so if anything goes bad then our drop impl won't double-free them. + let range_to_drop = self.alive.take_prefix(n); // SAFETY: These elements are currently initialized, so it's fine to drop them. unsafe { @@ -300,7 +297,7 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> { ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice)); } - if n > len { Err(len) } else { Ok(()) } + if n > original_len { Err(original_len) } else { Ok(()) } } } @@ -338,16 +335,11 @@ impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> { } fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { - let len = self.len(); - - // The number of elements to drop. Always in-bounds by construction. - let delta = cmp::min(n, len); - - let range_to_drop = (self.alive.end - delta)..self.alive.end; + let original_len = self.len(); - // Moving the end marks them as conceptually "dropped", so if anything - // goes bad then our drop impl won't double-free them. - self.alive.end -= delta; + // This also moves the end, which marks them as conceptually "dropped", + // so if anything goes bad then our drop impl won't double-free them. + let range_to_drop = self.alive.take_suffix(n); // SAFETY: These elements are currently initialized, so it's fine to drop them. 
unsafe { @@ -355,7 +347,7 @@ impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> { ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice)); } - if n > len { Err(len) } else { Ok(()) } + if n > original_len { Err(original_len) } else { Ok(()) } } } @@ -372,9 +364,7 @@ impl<T, const N: usize> Drop for IntoIter<T, N> { #[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl<T, const N: usize> ExactSizeIterator for IntoIter<T, N> { fn len(&self) -> usize { - // Will never underflow due to the invariant `alive.start <= - // alive.end`. - self.alive.end - self.alive.start + self.alive.len() } fn is_empty(&self) -> bool { self.alive.is_empty() @@ -396,14 +386,15 @@ impl<T: Clone, const N: usize> Clone for IntoIter<T, N> { fn clone(&self) -> Self { // Note, we don't really need to match the exact same alive range, so // we can just clone into offset 0 regardless of where `self` is. - let mut new = Self { data: MaybeUninit::uninit_array(), alive: 0..0 }; + let mut new = Self { data: MaybeUninit::uninit_array(), alive: IndexRange::zero_to(0) }; // Clone all alive elements. for (src, dst) in iter::zip(self.as_slice(), &mut new.data) { // Write a clone into the new array, then update its alive range. // If cloning panics, we'll correctly drop the previous items. dst.write(src.clone()); - new.alive.end += 1; + // This addition cannot overflow as we're iterating a slice + new.alive = IndexRange::zero_to(new.alive.end() + 1); } new diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index 9effb3790..eae0e1c76 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -7,7 +7,6 @@ use crate::borrow::{Borrow, BorrowMut}; use crate::cmp::Ordering; use crate::convert::{Infallible, TryFrom}; -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt; use crate::hash::{self, Hash}; @@ -33,6 +32,10 @@ pub use iter::IntoIter; /// # Example /// /// ```rust +/// // type inference is helping us here, the way `from_fn` knows how many +/// // elements to produce is the length of array down there: only arrays of +/// // equal lengths can be compared, so the const generic parameter `N` is +/// // inferred to be 5, thus creating array of 5 elements. /// let array = core::array::from_fn(|i| i); /// assert_eq!(array, [0, 1, 2, 3, 4]); /// ``` @@ -121,7 +124,6 @@ impl fmt::Display for TryFromSliceError { } } -#[cfg(not(bootstrap))] #[stable(feature = "try_from", since = "1.34.0")] impl Error for TryFromSliceError { #[allow(deprecated)] @@ -184,6 +186,18 @@ impl<T, const N: usize> const BorrowMut<[T]> for [T; N] { } } +/// Tries to create an array `[T; N]` by copying from a slice `&[T]`. Succeeds if +/// `slice.len() == N`. +/// +/// ``` +/// let bytes: [u8; 3] = [1, 0, 2]; +/// +/// let bytes_head: [u8; 2] = <[u8; 2]>::try_from(&bytes[0..2]).unwrap(); +/// assert_eq!(1, u16::from_le_bytes(bytes_head)); +/// +/// let bytes_tail: [u8; 2] = bytes[1..3].try_into().unwrap(); +/// assert_eq!(512, u16::from_le_bytes(bytes_tail)); +/// ``` #[stable(feature = "try_from", since = "1.34.0")] impl<T, const N: usize> TryFrom<&[T]> for [T; N] where @@ -196,6 +210,18 @@ where } } +/// Tries to create an array `[T; N]` by copying from a mutable slice `&mut [T]`. +/// Succeeds if `slice.len() == N`. 
+/// +/// ``` +/// let mut bytes: [u8; 3] = [1, 0, 2]; +/// +/// let bytes_head: [u8; 2] = <[u8; 2]>::try_from(&mut bytes[0..2]).unwrap(); +/// assert_eq!(1, u16::from_le_bytes(bytes_head)); +/// +/// let bytes_tail: [u8; 2] = (&mut bytes[1..3]).try_into().unwrap(); +/// assert_eq!(512, u16::from_le_bytes(bytes_tail)); +/// ``` #[stable(feature = "try_from_mut_slice_to_array", since = "1.59.0")] impl<T, const N: usize> TryFrom<&mut [T]> for [T; N] where @@ -208,6 +234,18 @@ where } } +/// Tries to create an array ref `&[T; N]` from a slice ref `&[T]`. Succeeds if +/// `slice.len() == N`. +/// +/// ``` +/// let bytes: [u8; 3] = [1, 0, 2]; +/// +/// let bytes_head: &[u8; 2] = <&[u8; 2]>::try_from(&bytes[0..2]).unwrap(); +/// assert_eq!(1, u16::from_le_bytes(*bytes_head)); +/// +/// let bytes_tail: &[u8; 2] = bytes[1..3].try_into().unwrap(); +/// assert_eq!(512, u16::from_le_bytes(*bytes_tail)); +/// ``` #[stable(feature = "try_from", since = "1.34.0")] impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] { type Error = TryFromSliceError; @@ -223,6 +261,18 @@ impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] { } } +/// Tries to create a mutable array ref `&mut [T; N]` from a mutable slice ref +/// `&mut [T]`. Succeeds if `slice.len() == N`. +/// +/// ``` +/// let mut bytes: [u8; 3] = [1, 0, 2]; +/// +/// let bytes_head: &mut [u8; 2] = <&mut [u8; 2]>::try_from(&mut bytes[0..2]).unwrap(); +/// assert_eq!(1, u16::from_le_bytes(*bytes_head)); +/// +/// let bytes_tail: &mut [u8; 2] = (&mut bytes[1..3]).try_into().unwrap(); +/// assert_eq!(512, u16::from_le_bytes(*bytes_tail)); +/// ``` #[stable(feature = "try_from", since = "1.34.0")] impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] { type Error = TryFromSliceError; @@ -386,7 +436,8 @@ impl<T: Copy> SpecArrayClone for T { macro_rules! array_impl_default { {$n:expr, $t:ident $($ts:ident)*} => { #[stable(since = "1.4.0", feature = "array_default")] - impl<T> Default for [T; $n] where T: Default { + #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] + impl<T> const Default for [T; $n] where T: ~const Default { fn default() -> [T; $n] { [$t::default(), $($ts::default()),*] } @@ -865,7 +916,7 @@ where mem::forget(guard); // SAFETY: All elements of the array were populated in the loop above. - let output = unsafe { MaybeUninit::array_assume_init(array) }; + let output = unsafe { array.transpose().assume_init() }; Ok(Try::from_output(output)) } diff --git a/library/core/src/bool.rs b/library/core/src/bool.rs index 7667a6508..db1c505ba 100644 --- a/library/core/src/bool.rs +++ b/library/core/src/bool.rs @@ -18,6 +18,18 @@ impl bool { /// assert_eq!(false.then_some(0), None); /// assert_eq!(true.then_some(0), Some(0)); /// ``` + /// + /// ``` + /// let mut a = 0; + /// let mut function_with_side_effects = || { a += 1; }; + /// + /// true.then_some(function_with_side_effects()); + /// false.then_some(function_with_side_effects()); + /// + /// // `a` is incremented twice because the value passed to `then_some` is + /// // evaluated eagerly. 
+ /// assert_eq!(a, 2); + /// ``` #[stable(feature = "bool_to_option", since = "1.62.0")] #[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")] #[inline] @@ -37,6 +49,17 @@ impl bool { /// assert_eq!(false.then(|| 0), None); /// assert_eq!(true.then(|| 0), Some(0)); /// ``` + /// + /// ``` + /// let mut a = 0; + /// + /// true.then(|| { a += 1; }); + /// false.then(|| { a += 1; }); + /// + /// // `a` is incremented once because the closure is evaluated lazily by + /// // `then`. + /// assert_eq!(a, 1); + /// ``` #[stable(feature = "lazy_bool_to_option", since = "1.50.0")] #[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")] #[inline] diff --git a/library/core/src/borrow.rs b/library/core/src/borrow.rs index 8378611eb..fdd56cb4e 100644 --- a/library/core/src/borrow.rs +++ b/library/core/src/borrow.rs @@ -154,6 +154,7 @@ /// [`String`]: ../../std/string/struct.String.html #[stable(feature = "rust1", since = "1.0.0")] #[rustc_diagnostic_item = "Borrow"] +#[const_trait] pub trait Borrow<Borrowed: ?Sized> { /// Immutably borrows from an owned value. /// @@ -184,6 +185,7 @@ pub trait Borrow<Borrowed: ?Sized> { /// an underlying type by providing a mutable reference. See [`Borrow<T>`] /// for more information on borrowing as another type. #[stable(feature = "rust1", since = "1.0.0")] +#[const_trait] pub trait BorrowMut<Borrowed: ?Sized>: Borrow<Borrowed> { /// Mutably borrows from an owned value. /// diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index fb4454c94..7bf32cb0d 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -405,6 +405,7 @@ impl<T> Cell<T> { /// assert_eq!(cell.replace(10), 5); /// assert_eq!(cell.get(), 10); /// ``` + #[inline] #[stable(feature = "move_cell", since = "1.17.0")] pub fn replace(&self, val: T) -> T { // SAFETY: This can cause data races if called from a separate thread, @@ -614,6 +615,7 @@ impl<T, const N: usize> Cell<[T; N]> { /// A mutable memory location with dynamically checked borrow rules /// /// See the [module-level documentation](self) for more. +#[cfg_attr(not(test), rustc_diagnostic_item = "RefCell")] #[stable(feature = "rust1", since = "1.0.0")] pub struct RefCell<T: ?Sized> { borrow: Cell<BorrowFlag>, @@ -1021,15 +1023,18 @@ impl<T: ?Sized> RefCell<T> { /// Returns a mutable reference to the underlying data. /// - /// This call borrows `RefCell` mutably (at compile-time) so there is no - /// need for dynamic checks. + /// Since this method borrows `RefCell` mutably, it is statically guaranteed + /// that no borrows to the underlying data exist. The dynamic checks inherent + /// in [`borrow_mut`] and most other methods of `RefCell` are therefor + /// unnecessary. /// - /// However be cautious: this method expects `self` to be mutable, which is - /// generally not the case when using a `RefCell`. Take a look at the - /// [`borrow_mut`] method instead if `self` isn't mutable. + /// This method can only be called if `RefCell` can be mutably borrowed, + /// which in general is only the case directly after the `RefCell` has + /// been created. In these situations, skipping the aforementioned dynamic + /// borrowing checks may yield better ergonomics and runtime-performance. /// - /// Also, please be aware that this method is only for special circumstances and is usually - /// not what you want. In case of doubt, use [`borrow_mut`] instead. + /// In most situations where `RefCell` is used, it can't be borrowed mutably. 
+ /// Use [`borrow_mut`] to get mutable access to the underlying data then. /// /// [`borrow_mut`]: RefCell::borrow_mut() /// @@ -1811,6 +1816,61 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> { /// /// [`.get_mut()`]: `UnsafeCell::get_mut` /// +/// # Memory layout +/// +/// `UnsafeCell<T>` has the same in-memory representation as its inner type `T`. A consequence +/// of this guarantee is that it is possible to convert between `T` and `UnsafeCell<T>`. +/// Special care has to be taken when converting a nested `T` inside of an `Outer<T>` type +/// to an `Outer<UnsafeCell<T>>` type: this is not sound when the `Outer<T>` type enables [niche] +/// optimizations. For example, the type `Option<NonNull<u8>>` is typically 8 bytes large on +/// 64-bit platforms, but the type `Option<UnsafeCell<NonNull<u8>>>` takes up 16 bytes of space. +/// Therefore this is not a valid conversion, despite `NonNull<u8>` and `UnsafeCell<NonNull<u8>>>` +/// having the same memory layout. This is because `UnsafeCell` disables niche optimizations in +/// order to avoid its interior mutability property from spreading from `T` into the `Outer` type, +/// thus this can cause distortions in the type size in these cases. +/// +/// Note that the only valid way to obtain a `*mut T` pointer to the contents of a +/// _shared_ `UnsafeCell<T>` is through [`.get()`] or [`.raw_get()`]. A `&mut T` reference +/// can be obtained by either dereferencing this pointer or by calling [`.get_mut()`] +/// on an _exclusive_ `UnsafeCell<T>`. Even though `T` and `UnsafeCell<T>` have the +/// same memory layout, the following is not allowed and undefined behavior: +/// +/// ```rust,no_run +/// # use std::cell::UnsafeCell; +/// unsafe fn not_allowed<T>(ptr: &UnsafeCell<T>) -> &mut T { +/// let t = ptr as *const UnsafeCell<T> as *mut T; +/// // This is undefined behavior, because the `*mut T` pointer +/// // was not obtained through `.get()` nor `.raw_get()`: +/// unsafe { &mut *t } +/// } +/// ``` +/// +/// Instead, do this: +/// +/// ```rust +/// # use std::cell::UnsafeCell; +/// // Safety: the caller must ensure that there are no references that +/// // point to the *contents* of the `UnsafeCell`. +/// unsafe fn get_mut<T>(ptr: &UnsafeCell<T>) -> &mut T { +/// unsafe { &mut *ptr.get() } +/// } +/// ``` +/// +/// Coverting in the other direction from a `&mut T` +/// to an `&UnsafeCell<T>` is allowed: +/// +/// ```rust +/// # use std::cell::UnsafeCell; +/// fn get_shared<T>(ptr: &mut T) -> &UnsafeCell<T> { +/// let t = ptr as *mut T as *const UnsafeCell<T>; +/// // SAFETY: `T` and `UnsafeCell<T>` have the same memory layout +/// unsafe { &*t } +/// } +/// ``` +/// +/// [niche]: https://rust-lang.github.io/unsafe-code-guidelines/glossary.html#niche +/// [`.raw_get()`]: `UnsafeCell::raw_get` +/// /// # Examples /// /// Here is an example showcasing how to soundly mutate the contents of an `UnsafeCell<_>` despite diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs index dc8ea66cc..11f1c30f6 100644 --- a/library/core/src/char/decode.rs +++ b/library/core/src/char/decode.rs @@ -1,6 +1,5 @@ //! 
UTF-8 and UTF-16 decoding iterators -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt; @@ -124,7 +123,6 @@ impl fmt::Display for DecodeUtf16Error { } } -#[cfg(not(bootstrap))] #[stable(feature = "decode_utf16", since = "1.9.0")] impl Error for DecodeUtf16Error { #[allow(deprecated)] diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs index b7a63b7c6..bb8359936 100644 --- a/library/core/src/char/methods.rs +++ b/library/core/src/char/methods.rs @@ -597,9 +597,14 @@ impl char { /// Returns the number of 16-bit code units this `char` would need if /// encoded in UTF-16. /// + /// That number of code units is always either 1 or 2, for unicode scalar values in + /// the [basic multilingual plane] or [supplementary planes] respectively. + /// /// See the documentation for [`len_utf8()`] for more explanation of this /// concept. This function is a mirror, but for UTF-16 instead of UTF-8. /// + /// [basic multilingual plane]: http://www.unicode.org/glossary/#basic_multilingual_plane + /// [supplementary planes]: http://www.unicode.org/glossary/#supplementary_planes /// [`len_utf8()`]: #method.len_utf8 /// /// # Examples @@ -1444,6 +1449,38 @@ impl char { matches!(*self, '0'..='9') } + /// Checks if the value is an ASCII octal digit: + /// U+0030 '0' ..= U+0037 '7'. + /// + /// # Examples + /// + /// ``` + /// #![feature(is_ascii_octdigit)] + /// + /// let uppercase_a = 'A'; + /// let a = 'a'; + /// let zero = '0'; + /// let seven = '7'; + /// let nine = '9'; + /// let percent = '%'; + /// let lf = '\n'; + /// + /// assert!(!uppercase_a.is_ascii_octdigit()); + /// assert!(!a.is_ascii_octdigit()); + /// assert!(zero.is_ascii_octdigit()); + /// assert!(seven.is_ascii_octdigit()); + /// assert!(!nine.is_ascii_octdigit()); + /// assert!(!percent.is_ascii_octdigit()); + /// assert!(!lf.is_ascii_octdigit()); + /// ``` + #[must_use] + #[unstable(feature = "is_ascii_octdigit", issue = "101288")] + #[rustc_const_unstable(feature = "is_ascii_octdigit", issue = "101288")] + #[inline] + pub const fn is_ascii_octdigit(&self) -> bool { + matches!(*self, '0'..='7') + } + /// Checks if the value is an ASCII hexadecimal digit: /// /// - U+0030 '0' ..= U+0039 '9', or diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs index 72d63ac4b..b34a71216 100644 --- a/library/core/src/char/mod.rs +++ b/library/core/src/char/mod.rs @@ -38,7 +38,6 @@ pub use self::methods::encode_utf16_raw; #[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")] pub use self::methods::encode_utf8_raw; -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt::{self, Write}; use crate::iter::FusedIterator; @@ -587,6 +586,5 @@ impl fmt::Display for TryFromCharError { } } -#[cfg(not(bootstrap))] #[stable(feature = "u8_from_char", since = "1.59.0")] impl Error for TryFromCharError {} diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index d9f2d3d64..f0fa2e1d2 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -22,6 +22,7 @@ #![stable(feature = "rust1", since = "1.0.0")] +use crate::const_closure::ConstFnMutClosure; use crate::marker::Destruct; use crate::marker::StructuralPartialEq; @@ -204,20 +205,10 @@ use self::Ordering::*; #[stable(feature = "rust1", since = "1.0.0")] #[doc(alias = "==")] #[doc(alias = "!=")] -#[cfg_attr( - bootstrap, - rustc_on_unimplemented( - message = "can't compare `{Self}` with `{Rhs}`", - label = "no implementation for `{Self} == {Rhs}`" - ) -)] -#[cfg_attr( - not(bootstrap), - 
rustc_on_unimplemented( - message = "can't compare `{Self}` with `{Rhs}`", - label = "no implementation for `{Self} == {Rhs}`", - append_const_msg, - ) +#[rustc_on_unimplemented( + message = "can't compare `{Self}` with `{Rhs}`", + label = "no implementation for `{Self} == {Rhs}`", + append_const_msg )] #[const_trait] #[rustc_diagnostic_item = "PartialEq"] @@ -1076,20 +1067,10 @@ impl const PartialOrd for Ordering { #[doc(alias = "<")] #[doc(alias = "<=")] #[doc(alias = ">=")] -#[cfg_attr( - bootstrap, - rustc_on_unimplemented( - message = "can't compare `{Self}` with `{Rhs}`", - label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`", - ) -)] -#[cfg_attr( - not(bootstrap), - rustc_on_unimplemented( - message = "can't compare `{Self}` with `{Rhs}`", - label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`", - append_const_msg, - ) +#[rustc_on_unimplemented( + message = "can't compare `{Self}` with `{Rhs}`", + label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`", + append_const_msg )] #[const_trait] #[rustc_diagnostic_item = "PartialOrd"] @@ -1242,7 +1223,12 @@ pub const fn min<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { +#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] +pub const fn min_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T +where + T: ~const Destruct, + F: ~const Destruct, +{ match compare(&v1, &v2) { Ordering::Less | Ordering::Equal => v1, Ordering::Greater => v2, @@ -1264,8 +1250,24 @@ pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -pub fn min_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T { - min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2))) +#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] +pub const fn min_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T +where + T: ~const Destruct, + F: ~const Destruct, + K: ~const Destruct, +{ + const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>( + f: &mut F, + (v1, v2): (&T, &T), + ) -> Ordering + where + T: ~const Destruct, + K: ~const Destruct, + { + f(v1).cmp(&f(v2)) + } + min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) } /// Compares and returns the maximum of two values. 
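The hunk above only changes how `min_by` and `min_by_key` are implemented so they can also be evaluated in `const` contexts (gated behind the unstable `const_cmp` feature shown in the attributes); their runtime behaviour is unchanged. A minimal sketch of that behaviour using the stable `std::cmp` functions follows; the concrete values are illustrative, not taken from the patch:

```rust
use std::cmp::{min_by, min_by_key};

fn main() {
    // min_by_key compares the keys produced by the closure (here |x| x.abs()).
    assert_eq!(min_by_key(-2, 1, |x: &i32| x.abs()), 1);

    // min_by uses the comparator directly; when it reports Ordering::Equal,
    // the first argument is returned (Less | Equal => v1 in the hunk above).
    assert_eq!(min_by(1, -1, |a: &i32, b: &i32| a.abs().cmp(&b.abs())), 1);
    assert_eq!(min_by(2, -1, |a: &i32, b: &i32| a.abs().cmp(&b.abs())), -1);
}
```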
@@ -1306,7 +1308,12 @@ pub const fn max<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { +#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] +pub const fn max_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T +where + T: ~const Destruct, + F: ~const Destruct, +{ match compare(&v1, &v2) { Ordering::Less | Ordering::Equal => v2, Ordering::Greater => v1, @@ -1328,8 +1335,24 @@ pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T { - max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2))) +#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] +pub const fn max_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T +where + T: ~const Destruct, + F: ~const Destruct, + K: ~const Destruct, +{ + const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>( + f: &mut F, + (v1, v2): (&T, &T), + ) -> Ordering + where + T: ~const Destruct, + K: ~const Destruct, + { + f(v1).cmp(&f(v2)) + } + max_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) } // Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs new file mode 100644 index 000000000..9e9c02093 --- /dev/null +++ b/library/core/src/const_closure.rs @@ -0,0 +1,77 @@ +use crate::marker::Destruct; + +/// Struct representing a closure with mutably borrowed data. +/// +/// Example: +/// ```no_build +/// #![feature(const_mut_refs)] +/// use crate::const_closure::ConstFnMutClosure; +/// const fn imp(state: &mut i32, (arg,): (i32,)) -> i32 { +/// *state += arg; +/// *state +/// } +/// let mut i = 5; +/// let mut cl = ConstFnMutClosure::new(&mut i, imp); +/// +/// assert!(7 == cl(2)); +/// assert!(8 == cl(1)); +/// ``` +pub(crate) struct ConstFnMutClosure<CapturedData, Function> { + /// The Data captured by the Closure. + /// Must be either a (mutable) reference or a tuple of (mutable) references. + pub data: CapturedData, + /// The Function of the Closure, must be: Fn(CapturedData, ClosureArgs) -> ClosureReturn + pub func: Function, +} +impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData, Function> { + /// Function for creating a new closure. + /// + /// `data` is the a mutable borrow of data that is captured from the environment. + /// If you want Data to be a tuple of mutable Borrows, the struct must be constructed manually. + /// + /// `func` is the function of the closure, it gets the data and a tuple of the arguments closure + /// and return the return value of the closure. + pub(crate) const fn new<ClosureArguments, ClosureReturnValue>( + data: &'a mut CapturedData, + func: Function, + ) -> Self + where + Function: ~const Fn(&mut CapturedData, ClosureArguments) -> ClosureReturnValue, + { + Self { data, func } + } +} + +macro_rules! 
impl_fn_mut_tuple { + ($($var:ident)*) => { + #[allow(unused_parens)] + impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const + FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function> + where + Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct, + { + type Output = ClosureReturnValue; + + extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output { + self.call_mut(args) + } + } + #[allow(unused_parens)] + impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const + FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function> + where + Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue, + { + extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output { + #[allow(non_snake_case)] + let ($($var),*) = &mut self.data; + (self.func)(($($var),*), args) + } + } + }; +} +impl_fn_mut_tuple!(A); +impl_fn_mut_tuple!(A B); +impl_fn_mut_tuple!(A B C); +impl_fn_mut_tuple!(A B C D); +impl_fn_mut_tuple!(A B C D E); diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs index 05637c166..33493964b 100644 --- a/library/core/src/convert/mod.rs +++ b/library/core/src/convert/mod.rs @@ -25,6 +25,7 @@ //! # Generic Implementations //! //! - [`AsRef`] and [`AsMut`] auto-dereference if the inner type is a reference +//! (but not generally for all [dereferenceable types][core::ops::Deref]) //! - [`From`]`<U> for T` implies [`Into`]`<T> for U` //! - [`TryFrom`]`<U> for T` implies [`TryInto`]`<T> for U` //! - [`From`] and [`Into`] are reflexive, which means that all types can @@ -34,7 +35,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt; use crate::hash::{Hash, Hasher}; @@ -110,10 +110,12 @@ pub const fn identity<T>(x: T) -> T { /// If you need to do a costly conversion it is better to implement [`From`] with type /// `&T` or write a custom function. /// +/// # Relation to `Borrow` +/// /// `AsRef` has the same signature as [`Borrow`], but [`Borrow`] is different in a few aspects: /// /// - Unlike `AsRef`, [`Borrow`] has a blanket impl for any `T`, and can be used to accept either -/// a reference or a value. +/// a reference or a value. (See also note on `AsRef`'s reflexibility below.) /// - [`Borrow`] also requires that [`Hash`], [`Eq`] and [`Ord`] for a borrowed value are /// equivalent to those of the owned value. For this reason, if you want to /// borrow only a single field of a struct you can implement `AsRef`, but not [`Borrow`]. @@ -123,9 +125,66 @@ pub const fn identity<T>(x: T) -> T { /// /// # Generic Implementations /// -/// - `AsRef` auto-dereferences if the inner type is a reference or a mutable -/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type -/// `&mut Foo` or `&&mut Foo`) +/// `AsRef` auto-dereferences if the inner type is a reference or a mutable reference +/// (e.g.: `foo.as_ref()` will work the same if `foo` has type `&mut Foo` or `&&mut Foo`). +/// +/// Note that due to historic reasons, the above currently does not hold generally for all +/// [dereferenceable types], e.g. `foo.as_ref()` will *not* work the same as +/// `Box::new(foo).as_ref()`. Instead, many smart pointers provide an `as_ref` implementation which +/// simply returns a reference to the [pointed-to value] (but do not perform a cheap +/// reference-to-reference conversion for that value). 
However, [`AsRef::as_ref`] should not be +/// used for the sole purpose of dereferencing; instead ['`Deref` coercion'] can be used: +/// +/// [dereferenceable types]: core::ops::Deref +/// [pointed-to value]: core::ops::Deref::Target +/// ['`Deref` coercion']: core::ops::Deref#more-on-deref-coercion +/// +/// ``` +/// let x = Box::new(5i32); +/// // Avoid this: +/// // let y: &i32 = x.as_ref(); +/// // Better just write: +/// let y: &i32 = &x; +/// ``` +/// +/// Types which implement [`Deref`] should consider implementing `AsRef<T>` as follows: +/// +/// [`Deref`]: core::ops::Deref +/// +/// ``` +/// # use core::ops::Deref; +/// # struct SomeType; +/// # impl Deref for SomeType { +/// # type Target = [u8]; +/// # fn deref(&self) -> &[u8] { +/// # &[] +/// # } +/// # } +/// impl<T> AsRef<T> for SomeType +/// where +/// T: ?Sized, +/// <SomeType as Deref>::Target: AsRef<T>, +/// { +/// fn as_ref(&self) -> &T { +/// self.deref().as_ref() +/// } +/// } +/// ``` +/// +/// # Reflexivity +/// +/// Ideally, `AsRef` would be reflexive, i.e. there would be an `impl<T: ?Sized> AsRef<T> for T` +/// with [`as_ref`] simply returning its argument unchanged. +/// Such a blanket implementation is currently *not* provided due to technical restrictions of +/// Rust's type system (it would be overlapping with another existing blanket implementation for +/// `&T where T: AsRef<U>` which allows `AsRef` to auto-dereference, see "Generic Implementations" +/// above). +/// +/// [`as_ref`]: AsRef::as_ref +/// +/// A trivial implementation of `AsRef<T> for T` must be added explicitly for a particular type `T` +/// where needed or desired. Note, however, that not all types from `std` contain such an +/// implementation, and those cannot be added by external code due to orphan rules. /// /// # Examples /// @@ -155,6 +214,7 @@ pub const fn identity<T>(x: T) -> T { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "AsRef")] +#[const_trait] pub trait AsRef<T: ?Sized> { /// Converts this type into a shared reference of the (usually inferred) input type. #[stable(feature = "rust1", since = "1.0.0")] @@ -172,31 +232,141 @@ pub trait AsRef<T: ?Sized> { /// /// # Generic Implementations /// -/// - `AsMut` auto-dereferences if the inner type is a mutable reference -/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo` -/// or `&mut &mut Foo`) +/// `AsMut` auto-dereferences if the inner type is a mutable reference +/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo` or `&mut &mut Foo`). +/// +/// Note that due to historic reasons, the above currently does not hold generally for all +/// [mutably dereferenceable types], e.g. `foo.as_mut()` will *not* work the same as +/// `Box::new(foo).as_mut()`. Instead, many smart pointers provide an `as_mut` implementation which +/// simply returns a reference to the [pointed-to value] (but do not perform a cheap +/// reference-to-reference conversion for that value). 
However, [`AsMut::as_mut`] should not be +/// used for the sole purpose of mutable dereferencing; instead ['`Deref` coercion'] can be used: +/// +/// [mutably dereferenceable types]: core::ops::DerefMut +/// [pointed-to value]: core::ops::Deref::Target +/// ['`Deref` coercion']: core::ops::DerefMut#more-on-deref-coercion +/// +/// ``` +/// let mut x = Box::new(5i32); +/// // Avoid this: +/// // let y: &mut i32 = x.as_mut(); +/// // Better just write: +/// let y: &mut i32 = &mut x; +/// ``` +/// +/// Types which implement [`DerefMut`] should consider to add an implementation of `AsMut<T>` as +/// follows: +/// +/// [`DerefMut`]: core::ops::DerefMut +/// +/// ``` +/// # use core::ops::{Deref, DerefMut}; +/// # struct SomeType; +/// # impl Deref for SomeType { +/// # type Target = [u8]; +/// # fn deref(&self) -> &[u8] { +/// # &[] +/// # } +/// # } +/// # impl DerefMut for SomeType { +/// # fn deref_mut(&mut self) -> &mut [u8] { +/// # &mut [] +/// # } +/// # } +/// impl<T> AsMut<T> for SomeType +/// where +/// <SomeType as Deref>::Target: AsMut<T>, +/// { +/// fn as_mut(&mut self) -> &mut T { +/// self.deref_mut().as_mut() +/// } +/// } +/// ``` +/// +/// # Reflexivity +/// +/// Ideally, `AsMut` would be reflexive, i.e. there would be an `impl<T: ?Sized> AsMut<T> for T` +/// with [`as_mut`] simply returning its argument unchanged. +/// Such a blanket implementation is currently *not* provided due to technical restrictions of +/// Rust's type system (it would be overlapping with another existing blanket implementation for +/// `&mut T where T: AsMut<U>` which allows `AsMut` to auto-dereference, see "Generic +/// Implementations" above). +/// +/// [`as_mut`]: AsMut::as_mut +/// +/// A trivial implementation of `AsMut<T> for T` must be added explicitly for a particular type `T` +/// where needed or desired. Note, however, that not all types from `std` contain such an +/// implementation, and those cannot be added by external code due to orphan rules. /// /// # Examples /// -/// Using `AsMut` as trait bound for a generic function we can accept all mutable references -/// that can be converted to type `&mut T`. Because [`Box<T>`] implements `AsMut<T>` we can -/// write a function `add_one` that takes all arguments that can be converted to `&mut u64`. -/// Because [`Box<T>`] implements `AsMut<T>`, `add_one` accepts arguments of type -/// `&mut Box<u64>` as well: +/// Using `AsMut` as trait bound for a generic function, we can accept all mutable references that +/// can be converted to type `&mut T`. Unlike [dereference], which has a single [target type], +/// there can be multiple implementations of `AsMut` for a type. In particular, `Vec<T>` implements +/// both `AsMut<Vec<T>>` and `AsMut<[T]>`. +/// +/// In the following, the example functions `caesar` and `null_terminate` provide a generic +/// interface which work with any type that can be converted by cheap mutable-to-mutable conversion +/// into a byte slice (`[u8]`) or byte vector (`Vec<u8>`), respectively. 
+/// +/// [dereference]: core::ops::DerefMut +/// [target type]: core::ops::Deref::Target /// /// ``` -/// fn add_one<T: AsMut<u64>>(num: &mut T) { -/// *num.as_mut() += 1; +/// struct Document { +/// info: String, +/// content: Vec<u8>, /// } /// -/// let mut boxed_num = Box::new(0); -/// add_one(&mut boxed_num); -/// assert_eq!(*boxed_num, 1); +/// impl<T: ?Sized> AsMut<T> for Document +/// where +/// Vec<u8>: AsMut<T>, +/// { +/// fn as_mut(&mut self) -> &mut T { +/// self.content.as_mut() +/// } +/// } +/// +/// fn caesar<T: AsMut<[u8]>>(data: &mut T, key: u8) { +/// for byte in data.as_mut() { +/// *byte = byte.wrapping_add(key); +/// } +/// } +/// +/// fn null_terminate<T: AsMut<Vec<u8>>>(data: &mut T) { +/// // Using a non-generic inner function, which contains most of the +/// // functionality, helps to minimize monomorphization overhead. +/// fn doit(data: &mut Vec<u8>) { +/// let len = data.len(); +/// if len == 0 || data[len-1] != 0 { +/// data.push(0); +/// } +/// } +/// doit(data.as_mut()); +/// } +/// +/// fn main() { +/// let mut v: Vec<u8> = vec![1, 2, 3]; +/// caesar(&mut v, 5); +/// assert_eq!(v, [6, 7, 8]); +/// null_terminate(&mut v); +/// assert_eq!(v, [6, 7, 8, 0]); +/// let mut doc = Document { +/// info: String::from("Example"), +/// content: vec![17, 19, 8], +/// }; +/// caesar(&mut doc, 1); +/// assert_eq!(doc.content, [18, 20, 9]); +/// null_terminate(&mut doc); +/// assert_eq!(doc.content, [18, 20, 9, 0]); +/// } /// ``` /// -/// [`Box<T>`]: ../../std/boxed/struct.Box.html +/// Note, however, that APIs don't need to be generic. In many cases taking a `&mut [u8]` or +/// `&mut Vec<u8>`, for example, is the better choice (callers need to pass the correct type then). #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "AsMut")] +#[const_trait] pub trait AsMut<T: ?Sized> { /// Converts this type into a mutable reference of the (usually inferred) input type. #[stable(feature = "rust1", since = "1.0.0")] @@ -273,6 +443,7 @@ pub trait AsMut<T: ?Sized> { /// [`Vec`]: ../../std/vec/struct.Vec.html #[rustc_diagnostic_item = "Into"] #[stable(feature = "rust1", since = "1.0.0")] +#[const_trait] pub trait Into<T>: Sized { /// Converts this type into the (usually inferred) input type. #[must_use] @@ -368,12 +539,13 @@ pub trait Into<T>: Sized { all(_Self = "&str", T = "std::string::String"), note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix", ))] +#[const_trait] pub trait From<T>: Sized { /// Converts to this type from the input type. #[lang = "from"] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn from(_: T) -> Self; + fn from(value: T) -> Self; } /// An attempted conversion that consumes `self`, which may or may not be @@ -392,6 +564,7 @@ pub trait From<T>: Sized { /// [`Into`], see there for details. #[rustc_diagnostic_item = "TryInto"] #[stable(feature = "try_from", since = "1.34.0")] +#[const_trait] pub trait TryInto<T>: Sized { /// The type returned in the event of a conversion error. 
#[stable(feature = "try_from", since = "1.34.0")] @@ -436,7 +609,7 @@ pub trait TryInto<T>: Sized { /// /// fn try_from(value: i32) -> Result<Self, Self::Error> { /// if value <= 0 { -/// Err("GreaterThanZero only accepts value superior than zero!") +/// Err("GreaterThanZero only accepts values greater than zero!") /// } else { /// Ok(GreaterThanZero(value)) /// } @@ -468,6 +641,7 @@ pub trait TryInto<T>: Sized { /// [`try_from`]: TryFrom::try_from #[rustc_diagnostic_item = "TryFrom"] #[stable(feature = "try_from", since = "1.34.0")] +#[const_trait] pub trait TryFrom<T>: Sized { /// The type returned in the event of a conversion error. #[stable(feature = "try_from", since = "1.34.0")] @@ -718,7 +892,6 @@ impl fmt::Display for Infallible { } } -#[cfg(not(bootstrap))] #[stable(feature = "str_parse_error2", since = "1.8.0")] impl Error for Infallible { fn description(&self) -> &str { diff --git a/library/core/src/default.rs b/library/core/src/default.rs index b53cd6074..a5b4e9655 100644 --- a/library/core/src/default.rs +++ b/library/core/src/default.rs @@ -99,6 +99,7 @@ /// ``` #[cfg_attr(not(test), rustc_diagnostic_item = "Default")] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(not(bootstrap), const_trait)] pub trait Default: Sized { /// Returns the "default value" for a type. /// diff --git a/library/core/src/error.rs b/library/core/src/error.rs index 4a8efe15e..2738b4994 100644 --- a/library/core/src/error.rs +++ b/library/core/src/error.rs @@ -493,8 +493,8 @@ impl Error for crate::char::ParseCharError { } } -#[unstable(feature = "duration_checked_float", issue = "83400")] -impl Error for crate::time::FromFloatSecsError {} +#[stable(feature = "duration_checked_float", since = "1.66.0")] +impl Error for crate::time::TryFromFloatSecsError {} #[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] impl Error for crate::ffi::FromBytesWithNulError { diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs index 21f80ec02..8923f548a 100644 --- a/library/core/src/ffi/c_str.rs +++ b/library/core/src/ffi/c_str.rs @@ -221,9 +221,7 @@ impl CStr { /// # Examples /// /// ```ignore (extern-declaration) - /// # fn main() { - /// use std::ffi::CStr; - /// use std::os::raw::c_char; + /// use std::ffi::{c_char, CStr}; /// /// extern "C" { /// fn my_string() -> *const c_char; @@ -233,14 +231,26 @@ impl CStr { /// let slice = CStr::from_ptr(my_string()); /// println!("string returned: {}", slice.to_str().unwrap()); /// } - /// # } + /// ``` + /// + /// ``` + /// #![feature(const_cstr_methods)] + /// + /// use std::ffi::{c_char, CStr}; + /// + /// const HELLO_PTR: *const c_char = { + /// const BYTES: &[u8] = b"Hello, world!\0"; + /// BYTES.as_ptr().cast() + /// }; + /// const HELLO: &CStr = unsafe { CStr::from_ptr(HELLO_PTR) }; /// ``` /// /// [valid]: core::ptr#safety #[inline] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - pub unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr { + #[rustc_const_unstable(feature = "const_cstr_methods", issue = "101719")] + pub const unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr { // SAFETY: The caller has provided a pointer that points to a valid C // string with a NUL terminator of size less than `isize::MAX`, whose // content remain valid and doesn't change for the lifetime of the @@ -252,13 +262,29 @@ impl CStr { // // The cast from c_char to u8 is ok because a c_char is always one byte. unsafe { - extern "C" { - /// Provided by libc or compiler_builtins. 
- fn strlen(s: *const c_char) -> usize; + const fn strlen_ct(s: *const c_char) -> usize { + let mut len = 0; + + // SAFETY: Outer caller has provided a pointer to a valid C string. + while unsafe { *s.add(len) } != 0 { + len += 1; + } + + len } - let len = strlen(ptr); - let ptr = ptr as *const u8; - CStr::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr, len as usize + 1)) + + fn strlen_rt(s: *const c_char) -> usize { + extern "C" { + /// Provided by libc or compiler_builtins. + fn strlen(s: *const c_char) -> usize; + } + + // SAFETY: Outer caller has provided a pointer to a valid C string. + unsafe { strlen(s) } + } + + let len = intrinsics::const_eval_select((ptr,), strlen_ct, strlen_rt); + Self::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr.cast(), len + 1)) } } @@ -474,6 +500,34 @@ impl CStr { self.inner.as_ptr() } + /// Returns `true` if `self.to_bytes()` has a length of 0. + /// + /// # Examples + /// + /// ``` + /// #![feature(cstr_is_empty)] + /// + /// use std::ffi::CStr; + /// # use std::ffi::FromBytesWithNulError; + /// + /// # fn main() { test().unwrap(); } + /// # fn test() -> Result<(), FromBytesWithNulError> { + /// let cstr = CStr::from_bytes_with_nul(b"foo\0")?; + /// assert!(!cstr.is_empty()); + /// + /// let empty_cstr = CStr::from_bytes_with_nul(b"\0")?; + /// assert!(empty_cstr.is_empty()); + /// # Ok(()) + /// # } + /// ``` + #[inline] + #[unstable(feature = "cstr_is_empty", issue = "102444")] + pub const fn is_empty(&self) -> bool { + // SAFETY: We know there is at least one byte; for empty strings it + // is the NUL terminator. + (unsafe { self.inner.get_unchecked(0) }) == &0 + } + /// Converts this C string to a byte slice. /// /// The returned slice will **not** contain the trailing nul terminator that this C diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs index 905212eb3..c8d285505 100644 --- a/library/core/src/fmt/mod.rs +++ b/library/core/src/fmt/mod.rs @@ -709,12 +709,19 @@ pub use macros::Debug; /// Format trait for an empty format, `{}`. /// +/// Implementing this trait for a type will automatically implement the +/// [`ToString`][tostring] trait for the type, allowing the usage +/// of the [`.to_string()`][tostring_function] method. Prefer implementing +/// the `Display` trait for a type, rather than [`ToString`][tostring]. +/// /// `Display` is similar to [`Debug`], but `Display` is for user-facing /// output, and so cannot be derived. /// /// For more information on formatters, see [the module-level documentation][module]. /// /// [module]: ../../std/fmt/index.html +/// [tostring]: ../../std/string/trait.ToString.html +/// [tostring_function]: ../../std/string/trait.ToString.html#tymethod.to_string /// /// # Examples /// @@ -2603,7 +2610,7 @@ impl Debug for () { #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Debug for PhantomData<T> { fn fmt(&self, f: &mut Formatter<'_>) -> Result { - f.debug_struct("PhantomData").finish() + write!(f, "PhantomData<{}>", crate::any::type_name::<T>()) } } diff --git a/library/core/src/fmt/num.rs b/library/core/src/fmt/num.rs index 25789d37c..d8365ae9b 100644 --- a/library/core/src/fmt/num.rs +++ b/library/core/src/fmt/num.rs @@ -211,7 +211,7 @@ macro_rules! 
impl_Display { fn $name(mut n: $u, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result { // 2^128 is about 3*10^38, so 39 gives an extra byte of space let mut buf = [MaybeUninit::<u8>::uninit(); 39]; - let mut curr = buf.len() as isize; + let mut curr = buf.len(); let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf); let lut_ptr = DEC_DIGITS_LUT.as_ptr(); @@ -228,7 +228,7 @@ macro_rules! impl_Display { // eagerly decode 4 characters at a time while n >= 10000 { - let rem = (n % 10000) as isize; + let rem = (n % 10000) as usize; n /= 10000; let d1 = (rem / 100) << 1; @@ -238,29 +238,29 @@ macro_rules! impl_Display { // We are allowed to copy to `buf_ptr[curr..curr + 3]` here since // otherwise `curr < 0`. But then `n` was originally at least `10000^10` // which is `10^40 > 2^128 > n`. - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d2), buf_ptr.add(curr + 2), 2); } // if we reach here numbers are <= 9999, so at most 4 chars long - let mut n = n as isize; // possibly reduce 64bit math + let mut n = n as usize; // possibly reduce 64bit math // decode 2 more chars, if > 2 chars if n >= 100 { let d1 = (n % 100) << 1; n /= 100; curr -= 2; - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2); } // decode last 1 or 2 chars if n < 10 { curr -= 1; - *buf_ptr.offset(curr) = (n as u8) + b'0'; + *buf_ptr.add(curr) = (n as u8) + b'0'; } else { let d1 = n << 1; curr -= 2; - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2); } } @@ -268,7 +268,7 @@ macro_rules! impl_Display { // UTF-8 since `DEC_DIGITS_LUT` is let buf_slice = unsafe { str::from_utf8_unchecked( - slice::from_raw_parts(buf_ptr.offset(curr), buf.len() - curr as usize)) + slice::from_raw_parts(buf_ptr.add(curr), buf.len() - curr)) }; f.pad_integral(is_nonnegative, "", buf_slice) } @@ -339,18 +339,18 @@ macro_rules! impl_Exp { // Since `curr` always decreases by the number of digits copied, this means // that `curr >= 0`. let mut buf = [MaybeUninit::<u8>::uninit(); 40]; - let mut curr = buf.len() as isize; //index for buf + let mut curr = buf.len(); //index for buf let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf); let lut_ptr = DEC_DIGITS_LUT.as_ptr(); // decode 2 chars at a time while n >= 100 { - let d1 = ((n % 100) as isize) << 1; + let d1 = ((n % 100) as usize) << 1; curr -= 2; // SAFETY: `d1 <= 198`, so we can copy from `lut_ptr[d1..d1 + 2]` since // `DEC_DIGITS_LUT` has a length of 200. unsafe { - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1), buf_ptr.add(curr), 2); } n /= 100; exponent += 2; @@ -362,7 +362,7 @@ macro_rules! impl_Exp { curr -= 1; // SAFETY: Safe since `40 > curr >= 0` (see comment) unsafe { - *buf_ptr.offset(curr) = (n as u8 % 10_u8) + b'0'; + *buf_ptr.add(curr) = (n as u8 % 10_u8) + b'0'; } n /= 10; exponent += 1; @@ -372,7 +372,7 @@ macro_rules! impl_Exp { curr -= 1; // SAFETY: Safe since `40 > curr >= 0` unsafe { - *buf_ptr.offset(curr) = b'.'; + *buf_ptr.add(curr) = b'.'; } } @@ -380,10 +380,10 @@ macro_rules! 
impl_Exp { let buf_slice = unsafe { // decode last character curr -= 1; - *buf_ptr.offset(curr) = (n as u8) + b'0'; + *buf_ptr.add(curr) = (n as u8) + b'0'; let len = buf.len() - curr as usize; - slice::from_raw_parts(buf_ptr.offset(curr), len) + slice::from_raw_parts(buf_ptr.add(curr), len) }; // stores 'e' (or 'E') and the up to 2-digit exponent @@ -392,13 +392,13 @@ macro_rules! impl_Exp { // SAFETY: In either case, `exp_buf` is written within bounds and `exp_ptr[..len]` // is contained within `exp_buf` since `len <= 3`. let exp_slice = unsafe { - *exp_ptr.offset(0) = if upper { b'E' } else { b'e' }; + *exp_ptr.add(0) = if upper { b'E' } else { b'e' }; let len = if exponent < 10 { - *exp_ptr.offset(1) = (exponent as u8) + b'0'; + *exp_ptr.add(1) = (exponent as u8) + b'0'; 2 } else { let off = exponent << 1; - ptr::copy_nonoverlapping(lut_ptr.offset(off), exp_ptr.offset(1), 2); + ptr::copy_nonoverlapping(lut_ptr.add(off), exp_ptr.add(1), 2); 3 }; slice::from_raw_parts(exp_ptr, len) @@ -479,7 +479,7 @@ mod imp { impl_Exp!(i128, u128 as u128 via to_u128 named exp_u128); /// Helper function for writing a u64 into `buf` going from last to first, with `curr`. -fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut isize) { +fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut usize) { let buf_ptr = MaybeUninit::slice_as_mut_ptr(buf); let lut_ptr = DEC_DIGITS_LUT.as_ptr(); assert!(*curr > 19); @@ -505,14 +505,14 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu *curr -= 16; - ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d5 as isize), buf_ptr.offset(*curr + 8), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d6 as isize), buf_ptr.offset(*curr + 10), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d7 as isize), buf_ptr.offset(*curr + 12), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d8 as isize), buf_ptr.offset(*curr + 14), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr + 0), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d2 as usize), buf_ptr.add(*curr + 2), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d3 as usize), buf_ptr.add(*curr + 4), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d4 as usize), buf_ptr.add(*curr + 6), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d5 as usize), buf_ptr.add(*curr + 8), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d6 as usize), buf_ptr.add(*curr + 10), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d7 as usize), buf_ptr.add(*curr + 12), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d8 as usize), buf_ptr.add(*curr + 14), 2); } if n >= 1e8 as u64 { let to_parse = n % 1e8 as u64; @@ -525,10 +525,10 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu let d4 = ((to_parse / 1e0 as u64) % 100) << 1; *curr -= 8; - ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1 
as usize), buf_ptr.add(*curr + 0), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d2 as usize), buf_ptr.add(*curr + 2), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d3 as usize), buf_ptr.add(*curr + 4), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d4 as usize), buf_ptr.add(*curr + 6), 2); } // `n` < 1e8 < (1 << 32) let mut n = n as u32; @@ -540,8 +540,8 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu let d2 = (to_parse % 100) << 1; *curr -= 4; - ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr + 0), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d2 as usize), buf_ptr.add(*curr + 2), 2); } // `n` < 1e4 < (1 << 16) @@ -550,17 +550,17 @@ fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], cu let d1 = (n % 100) << 1; n /= 100; *curr -= 2; - ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr), 2); } // decode last 1 or 2 chars if n < 10 { *curr -= 1; - *buf_ptr.offset(*curr) = (n as u8) + b'0'; + *buf_ptr.add(*curr) = (n as u8) + b'0'; } else { let d1 = n << 1; *curr -= 2; - ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2); + ptr::copy_nonoverlapping(lut_ptr.add(d1 as usize), buf_ptr.add(*curr), 2); } } } @@ -593,21 +593,21 @@ impl fmt::Display for i128 { fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result { // 2^128 is about 3*10^38, so 39 gives an extra byte of space let mut buf = [MaybeUninit::<u8>::uninit(); 39]; - let mut curr = buf.len() as isize; + let mut curr = buf.len(); let (n, rem) = udiv_1e19(n); parse_u64_into(rem, &mut buf, &mut curr); if n != 0 { // 0 pad up to point - let target = (buf.len() - 19) as isize; + let target = buf.len() - 19; // SAFETY: Guaranteed that we wrote at most 19 bytes, and there must be space // remaining since it has length 39 unsafe { ptr::write_bytes( - MaybeUninit::slice_as_mut_ptr(&mut buf).offset(target), + MaybeUninit::slice_as_mut_ptr(&mut buf).add(target), b'0', - (curr - target) as usize, + curr - target, ); } curr = target; @@ -616,16 +616,16 @@ fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::R parse_u64_into(rem, &mut buf, &mut curr); // Should this following branch be annotated with unlikely? if n != 0 { - let target = (buf.len() - 38) as isize; + let target = buf.len() - 38; // The raw `buf_ptr` pointer is only valid until `buf` is used the next time, // buf `buf` is not used in this scope so we are good. let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf); // SAFETY: At this point we wrote at most 38 bytes, pad up to that point, // There can only be at most 1 digit remaining. 
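Editor's note: the formatting changes above replace `offset` calls with signed indices by `add` with `usize` indices. A tiny, purely illustrative check of the equivalence:

```rust
fn main() {
    let digits = *b"0123456789";
    let ptr = digits.as_ptr();

    // `add(n)` is the unsigned counterpart of `offset(n as isize)`,
    // which lets the surrounding code keep its indices as `usize`.
    unsafe {
        assert_eq!(*ptr.add(3), *ptr.offset(3));
        assert_eq!(*ptr.add(3), b'3');
    }
}
```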
unsafe { - ptr::write_bytes(buf_ptr.offset(target), b'0', (curr - target) as usize); + ptr::write_bytes(buf_ptr.add(target), b'0', curr - target); curr = target - 1; - *buf_ptr.offset(curr) = (n as u8) + b'0'; + *buf_ptr.add(curr) = (n as u8) + b'0'; } } } @@ -634,8 +634,8 @@ fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::R // UTF-8 since `DEC_DIGITS_LUT` is let buf_slice = unsafe { str::from_utf8_unchecked(slice::from_raw_parts( - MaybeUninit::slice_as_mut_ptr(&mut buf).offset(curr), - buf.len() - curr as usize, + MaybeUninit::slice_as_mut_ptr(&mut buf).add(curr), + buf.len() - curr, )) }; f.pad_integral(is_nonnegative, "", buf_slice) diff --git a/library/core/src/future/ready.rs b/library/core/src/future/ready.rs index 48f20f90a..a07b63fb6 100644 --- a/library/core/src/future/ready.rs +++ b/library/core/src/future/ready.rs @@ -24,6 +24,30 @@ impl<T> Future for Ready<T> { } } +impl<T> Ready<T> { + /// Consumes the `Ready`, returning the wrapped value. + /// + /// # Panics + /// + /// Will panic if this [`Ready`] was already polled to completion. + /// + /// # Examples + /// + /// ``` + /// #![feature(ready_into_inner)] + /// use std::future; + /// + /// let a = future::ready(1); + /// assert_eq!(a.into_inner(), 1); + /// ``` + #[unstable(feature = "ready_into_inner", issue = "101196")] + #[must_use] + #[inline] + pub fn into_inner(self) -> T { + self.0.expect("Called `into_inner()` on `Ready` after completion") + } +} + /// Creates a future that is immediately ready with a value. /// /// Futures created through this function are functionally similar to those diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs index 764e27962..c53175ba4 100644 --- a/library/core/src/hint.rs +++ b/library/core/src/hint.rs @@ -100,7 +100,10 @@ use crate::intrinsics; pub const unsafe fn unreachable_unchecked() -> ! { // SAFETY: the safety contract for `intrinsics::unreachable` must // be upheld by the caller. - unsafe { intrinsics::unreachable() } + unsafe { + intrinsics::assert_unsafe_precondition!("hint::unreachable_unchecked must never be reached", () => false); + intrinsics::unreachable() + } } /// Emits a machine instruction to signal the processor that it is running in @@ -217,7 +220,7 @@ pub fn spin_loop() { /// /// [`std::convert::identity`]: crate::convert::identity #[inline] -#[unstable(feature = "bench_black_box", issue = "64102")] +#[stable(feature = "bench_black_box", since = "1.66.0")] #[rustc_const_unstable(feature = "const_black_box", issue = "none")] pub const fn black_box<T>(dummy: T) -> T { crate::intrinsics::black_box(dummy) diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index 11c75e2c9..1dc79afe8 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -54,8 +54,6 @@ )] #![allow(missing_docs)] -#[cfg(bootstrap)] -use crate::marker::Destruct; use crate::marker::DiscriminantKind; use crate::mem; @@ -790,6 +788,7 @@ extern "rust-intrinsic" { /// uninitialized at that point in the control flow. /// /// This intrinsic should not be used outside of the compiler. + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn rustc_peek<T>(_: T) -> T; /// Aborts the execution of the process. @@ -807,6 +806,7 @@ extern "rust-intrinsic" { /// On Unix, the /// process will probably terminate with a signal like `SIGABRT`, `SIGILL`, `SIGTRAP`, `SIGSEGV` or /// `SIGBUS`. The precise behaviour is not guaranteed and not stable. 
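Editor's note: the hunk above stabilizes `hint::black_box` in 1.66. A minimal usage sketch on a 1.66+ toolchain:

```rust
use std::hint::black_box;

fn main() {
    // `black_box` is an identity function the optimizer is asked to treat as
    // opaque; after the stabilization above it needs no feature gate.
    let n = black_box(21);
    assert_eq!(n * 2, 42);
}
```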
+ #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn abort() -> !; /// Informs the optimizer that this point in the code is not reachable, @@ -845,6 +845,7 @@ extern "rust-intrinsic" { /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_likely", issue = "none")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn likely(b: bool) -> bool; /// Hints to the compiler that branch condition is likely to be false. @@ -859,6 +860,7 @@ extern "rust-intrinsic" { /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_likely", issue = "none")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn unlikely(b: bool) -> bool; /// Executes a breakpoint trap, for inspection by a debugger. @@ -878,6 +880,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is [`core::mem::size_of`]. #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn size_of<T>() -> usize; /// The minimum alignment of a type. @@ -889,6 +892,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is [`core::mem::align_of`]. #[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn min_align_of<T>() -> usize; /// The preferred alignment of a type. /// @@ -917,6 +921,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is [`core::any::type_name`]. #[rustc_const_unstable(feature = "const_type_name", issue = "63084")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn type_name<T: ?Sized>() -> &'static str; /// Gets an identifier which is globally unique to the specified type. This @@ -930,6 +935,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is [`core::any::TypeId::of`]. #[rustc_const_unstable(feature = "const_type_id", issue = "77125")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn type_id<T: ?Sized + 'static>() -> u64; /// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited: @@ -937,6 +943,7 @@ extern "rust-intrinsic" { /// /// This intrinsic does not have a stable counterpart. #[rustc_const_stable(feature = "const_assert_type", since = "1.59.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn assert_inhabited<T>(); /// A guard for unsafe functions that cannot ever be executed if `T` does not permit @@ -944,6 +951,7 @@ extern "rust-intrinsic" { /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn assert_zero_valid<T>(); /// A guard for unsafe functions that cannot ever be executed if `T` has invalid @@ -951,6 +959,7 @@ extern "rust-intrinsic" { /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn assert_uninit_valid<T>(); /// Gets a reference to a static `Location` indicating where it was called. @@ -962,6 +971,7 @@ extern "rust-intrinsic" { /// /// Consider using [`core::panic::Location::caller`] instead. 
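Editor's note: the intrinsics above are compiler-internal; the new `rustc_safe_intrinsic` attribute only changes how the compiler classifies them. User code continues to reach them through stable wrappers, for example:

```rust
use std::any::type_name;
use std::mem::{align_of, size_of};

fn main() {
    // Stable counterparts of the intrinsics annotated above.
    assert_eq!(size_of::<u64>(), 8);
    assert_eq!(align_of::<u8>(), 1);
    // The exact output of `type_name` is not guaranteed, so only check loosely.
    assert!(type_name::<Option<i32>>().contains("Option"));
}
```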
#[rustc_const_unstable(feature = "const_caller_location", issue = "76156")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn caller_location() -> &'static crate::panic::Location<'static>; /// Moves a value out of scope without running drop glue. @@ -974,6 +984,7 @@ extern "rust-intrinsic" { /// Therefore, implementations must not require the user to uphold /// any safety invariants. #[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn forget<T: ?Sized>(_: T); /// Reinterprets the bits of a value of one type as another type. @@ -983,14 +994,14 @@ extern "rust-intrinsic" { /// `transmute` is semantically equivalent to a bitwise move of one type /// into another. It copies the bits from the source value into the /// destination value, then forgets the original. Note that source and destination - /// are passed by-value, which means if `T` or `U` contain padding, that padding + /// are passed by-value, which means if `Src` or `Dst` contain padding, that padding /// is *not* guaranteed to be preserved by `transmute`. /// /// Both the argument and the result must be [valid](../../nomicon/what-unsafe-does.html) at /// their given type. Violating this condition leads to [undefined behavior][ub]. The compiler /// will generate code *assuming that you, the programmer, ensure that there will never be /// undefined behavior*. It is therefore your responsibility to guarantee that every value - /// passed to `transmute` is valid at both types `T` and `U`. Failing to uphold this condition + /// passed to `transmute` is valid at both types `Src` and `Dst`. Failing to uphold this condition /// may lead to unexpected and unstable compilation results. This makes `transmute` **incredibly /// unsafe**. `transmute` should be the absolute last resort. /// @@ -1001,7 +1012,7 @@ extern "rust-intrinsic" { /// /// Because `transmute` is a by-value operation, alignment of the *transmuted values /// themselves* is not a concern. As with any other function, the compiler already ensures - /// both `T` and `U` are properly aligned. However, when transmuting values that *point + /// both `Src` and `Dst` are properly aligned. However, when transmuting values that *point /// elsewhere* (such as pointers, references, boxes…), the caller has to ensure proper /// alignment of the pointed-to values. /// @@ -1237,7 +1248,7 @@ extern "rust-intrinsic" { #[rustc_allowed_through_unstable_modules] #[rustc_const_stable(feature = "const_transmute", since = "1.56.0")] #[rustc_diagnostic_item = "transmute"] - pub fn transmute<T, U>(e: T) -> U; + pub fn transmute<Src, Dst>(src: Src) -> Dst; /// Returns `true` if the actual type given as `T` requires drop /// glue; returns `false` if the actual type provided for `T` @@ -1253,6 +1264,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop). #[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn needs_drop<T: ?Sized>() -> bool; /// Calculates the offset from a pointer. @@ -1297,7 +1309,7 @@ extern "rust-intrinsic" { /// any safety invariants. /// /// Consider using [`pointer::mask`] instead. 
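Editor's note: the documentation above renames `transmute`'s type parameters from `T`/`U` to `Src`/`Dst`; call sites are unchanged. A minimal, sound illustration (illustrative only, and the safe `f32::to_bits` remains the idiomatic spelling):

```rust
use std::mem::transmute;

fn main() {
    // Same size, and every bit pattern is valid for the destination type,
    // so this particular transmute is sound.
    let bits: u32 = unsafe { transmute::<f32, u32>(1.0_f32) };
    assert_eq!(bits, 1.0_f32.to_bits());
}
```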
- #[cfg(not(bootstrap))] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn ptr_mask<T>(ptr: *const T, mask: usize) -> *const T; /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with @@ -1489,6 +1501,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is /// [`f32::min`] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn minnumf32(x: f32, y: f32) -> f32; /// Returns the minimum of two `f64` values. /// @@ -1499,6 +1512,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is /// [`f64::min`] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn minnumf64(x: f64, y: f64) -> f64; /// Returns the maximum of two `f32` values. /// @@ -1509,6 +1523,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is /// [`f32::max`] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn maxnumf32(x: f32, y: f32) -> f32; /// Returns the maximum of two `f64` values. /// @@ -1519,6 +1534,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is /// [`f64::max`] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn maxnumf64(x: f64, y: f64) -> f64; /// Copies the sign from `y` to `x` for `f32` values. @@ -1639,6 +1655,7 @@ extern "rust-intrinsic" { /// primitives via the `count_ones` method. For example, /// [`u32::count_ones`] #[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn ctpop<T: Copy>(x: T) -> T; /// Returns the number of leading unset bits (zeroes) in an integer type `T`. @@ -1676,6 +1693,7 @@ extern "rust-intrinsic" { /// assert_eq!(num_leading, 16); /// ``` #[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn ctlz<T: Copy>(x: T) -> T; /// Like `ctlz`, but extra-unsafe as it returns `undef` when @@ -1732,6 +1750,7 @@ extern "rust-intrinsic" { /// assert_eq!(num_trailing, 16); /// ``` #[rustc_const_stable(feature = "const_cttz", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn cttz<T: Copy>(x: T) -> T; /// Like `cttz`, but extra-unsafe as it returns `undef` when @@ -1764,6 +1783,7 @@ extern "rust-intrinsic" { /// primitives via the `swap_bytes` method. For example, /// [`u32::swap_bytes`] #[rustc_const_stable(feature = "const_bswap", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn bswap<T: Copy>(x: T) -> T; /// Reverses the bits in an integer type `T`. @@ -1777,6 +1797,7 @@ extern "rust-intrinsic" { /// primitives via the `reverse_bits` method. For example, /// [`u32::reverse_bits`] #[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn bitreverse<T: Copy>(x: T) -> T; /// Performs checked integer addition. @@ -1790,6 +1811,7 @@ extern "rust-intrinsic" { /// primitives via the `overflowing_add` method. For example, /// [`u32::overflowing_add`] #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool); /// Performs checked integer subtraction @@ -1803,6 +1825,7 @@ extern "rust-intrinsic" { /// primitives via the `overflowing_sub` method. 
For example, /// [`u32::overflowing_sub`] #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool); /// Performs checked integer multiplication @@ -1816,6 +1839,7 @@ extern "rust-intrinsic" { /// primitives via the `overflowing_mul` method. For example, /// [`u32::overflowing_mul`] #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool); /// Performs an exact division, resulting in undefined behavior where @@ -1890,6 +1914,7 @@ extern "rust-intrinsic" { /// primitives via the `rotate_left` method. For example, /// [`u32::rotate_left`] #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn rotate_left<T: Copy>(x: T, y: T) -> T; /// Performs rotate right. @@ -1903,6 +1928,7 @@ extern "rust-intrinsic" { /// primitives via the `rotate_right` method. For example, /// [`u32::rotate_right`] #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn rotate_right<T: Copy>(x: T, y: T) -> T; /// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits. @@ -1916,6 +1942,7 @@ extern "rust-intrinsic" { /// primitives via the `wrapping_add` method. For example, /// [`u32::wrapping_add`] #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn wrapping_add<T: Copy>(a: T, b: T) -> T; /// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits. /// @@ -1928,6 +1955,7 @@ extern "rust-intrinsic" { /// primitives via the `wrapping_sub` method. For example, /// [`u32::wrapping_sub`] #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T; /// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits. /// @@ -1940,6 +1968,7 @@ extern "rust-intrinsic" { /// primitives via the `wrapping_mul` method. For example, /// [`u32::wrapping_mul`] #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T; /// Computes `a + b`, saturating at numeric bounds. @@ -1953,6 +1982,7 @@ extern "rust-intrinsic" { /// primitives via the `saturating_add` method. For example, /// [`u32::saturating_add`] #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn saturating_add<T: Copy>(a: T, b: T) -> T; /// Computes `a - b`, saturating at numeric bounds. /// @@ -1965,6 +1995,7 @@ extern "rust-intrinsic" { /// primitives via the `saturating_sub` method. For example, /// [`u32::saturating_sub`] #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn saturating_sub<T: Copy>(a: T, b: T) -> T; /// Returns the value of the discriminant for the variant in 'v'; @@ -1977,6 +2008,7 @@ extern "rust-intrinsic" { /// /// The stabilized version of this intrinsic is [`core::mem::discriminant`]. 
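Editor's note: stable counterparts of the arithmetic and discriminant intrinsics annotated above, for reference:

```rust
use std::mem::discriminant;

fn main() {
    assert_eq!(u8::MAX.wrapping_add(1), 0);            // wrapping_add
    assert_eq!(250_u8.saturating_add(10), u8::MAX);    // saturating_add
    assert_eq!(0b0001_u8.rotate_left(1), 0b0010);      // rotate_left

    // discriminant_value, via `mem::discriminant`.
    assert_eq!(discriminant(&Some(1)), discriminant(&Some(2)));
    assert_ne!(discriminant(&Some(1)), discriminant(&None::<i32>));
}
```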
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant; /// Returns the number of variants of the type `T` cast to a `usize`; @@ -1989,6 +2021,7 @@ extern "rust-intrinsic" { /// /// The to-be-stabilized version of this intrinsic is [`mem::variant_count`]. #[rustc_const_unstable(feature = "variant_count", issue = "73662")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn variant_count<T>() -> usize; /// Rust's "try catch" construct which invokes the function pointer `try_fn` @@ -2022,17 +2055,9 @@ extern "rust-intrinsic" { /// Therefore, implementations must not require the user to uphold /// any safety invariants. #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] - #[cfg(not(bootstrap))] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8; - #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] - #[cfg(bootstrap)] - pub fn ptr_guaranteed_eq<T>(ptr: *const T, other: *const T) -> bool; - - #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] - #[cfg(bootstrap)] - pub fn ptr_guaranteed_ne<T>(ptr: *const T, other: *const T) -> bool; - /// Allocates a block of memory at compile time. /// At runtime, just returns a null pointer. /// @@ -2081,6 +2106,7 @@ extern "rust-intrinsic" { /// /// [`std::hint::black_box`]: crate::hint::black_box #[rustc_const_unstable(feature = "const_black_box", issue = "none")] + #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)] pub fn black_box<T>(dummy: T) -> T; /// `ptr` must point to a vtable. @@ -2143,7 +2169,6 @@ extern "rust-intrinsic" { /// `unreachable_unchecked` is actually being reached. The bug is in *crate A*, /// which violates the principle that a `const fn` must behave the same at /// compile-time and at run-time. The unsafe code in crate B is fine. - #[cfg(not(bootstrap))] #[rustc_const_unstable(feature = "const_eval_select", issue = "none")] pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET where @@ -2178,15 +2203,17 @@ extern "rust-intrinsic" { /// the occasional mistake, and this check should help them figure things out. #[allow_internal_unstable(const_eval_select)] // permit this to be called in stably-const fn macro_rules! assert_unsafe_precondition { - ($([$($tt:tt)*])?($($i:ident:$ty:ty),*$(,)?) => $e:expr) => { + ($name:expr, $([$($tt:tt)*])?($($i:ident:$ty:ty),*$(,)?) => $e:expr) => { if cfg!(debug_assertions) { // allow non_snake_case to allow capturing const generics #[allow(non_snake_case)] #[inline(always)] fn runtime$(<$($tt)*>)?($($i:$ty),*) { if !$e { - // abort instead of panicking to reduce impact on code size - ::core::intrinsics::abort(); + // don't unwind to reduce impact on code size + ::core::panicking::panic_str_nounwind( + concat!("unsafe precondition(s) violated: ", $name) + ); } } #[allow(non_snake_case)] @@ -2204,6 +2231,16 @@ pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool { !ptr.is_null() && ptr.is_aligned() } +/// Checks whether an allocation of `len` instances of `T` exceeds +/// the maximum allowed allocation size. 
+pub(crate) fn is_valid_allocation_size<T>(len: usize) -> bool { + let max_len = const { + let size = crate::mem::size_of::<T>(); + if size == 0 { usize::MAX } else { isize::MAX as usize / size } + }; + len <= max_len +} + /// Checks whether the regions of memory starting at `src` and `dst` of size /// `count * size_of::<T>()` do *not* overlap. pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool { @@ -2216,16 +2253,6 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) - diff >= size } -#[cfg(bootstrap)] -pub const fn ptr_guaranteed_cmp(a: *const (), b: *const ()) -> u8 { - match (ptr_guaranteed_eq(a, b), ptr_guaranteed_ne(a, b)) { - (false, false) => 2, - (true, false) => 1, - (false, true) => 0, - (true, true) => unreachable!(), - } -} - /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source /// and destination must *not* overlap. /// @@ -2325,7 +2352,10 @@ pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: us // SAFETY: the safety contract for `copy_nonoverlapping` must be // upheld by the caller. unsafe { - assert_unsafe_precondition!([T](src: *const T, dst: *mut T, count: usize) => + assert_unsafe_precondition!( + "ptr::copy_nonoverlapping requires that both pointer arguments are aligned and non-null \ + and the specified memory ranges do not overlap", + [T](src: *const T, dst: *mut T, count: usize) => is_aligned_and_not_null(src) && is_aligned_and_not_null(dst) && is_nonoverlapping(src, dst, count) @@ -2411,8 +2441,11 @@ pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) { // SAFETY: the safety contract for `copy` must be upheld by the caller. unsafe { - assert_unsafe_precondition!([T](src: *const T, dst: *mut T) => - is_aligned_and_not_null(src) && is_aligned_and_not_null(dst)); + assert_unsafe_precondition!( + "ptr::copy requires that both pointer arguments are aligned aligned and non-null", + [T](src: *const T, dst: *mut T) => + is_aligned_and_not_null(src) && is_aligned_and_not_null(dst) + ); copy(src, dst, count) } } @@ -2480,49 +2513,10 @@ pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) { // SAFETY: the safety contract for `write_bytes` must be upheld by the caller. 
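Editor's note: the new messages above spell out the preconditions that the debug-only assertions check. A call that satisfies them, for illustration:

```rust
use std::ptr;

fn main() {
    let src = [1_u8, 2, 3, 4];
    let mut dst = [0_u8; 4];

    // Both pointers are aligned and non-null, and the two ranges do not
    // overlap, so the precondition checks added above are satisfied.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(dst, src);
}
```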
unsafe { - assert_unsafe_precondition!([T](dst: *mut T) => is_aligned_and_not_null(dst)); + assert_unsafe_precondition!( + "ptr::write_bytes requires that the destination pointer is aligned and non-null", + [T](dst: *mut T) => is_aligned_and_not_null(dst) + ); write_bytes(dst, val, count) } } - -#[cfg(bootstrap)] -#[unstable( - feature = "const_eval_select", - issue = "none", - reason = "const_eval_select will never be stable" -)] -#[rustc_const_unstable(feature = "const_eval_select", issue = "none")] -#[lang = "const_eval_select"] -#[rustc_do_not_const_check] -#[inline] -pub const unsafe fn const_eval_select<ARG, F, G, RET>( - arg: ARG, - _called_in_const: F, - called_at_rt: G, -) -> RET -where - F: ~const FnOnce<ARG, Output = RET>, - G: FnOnce<ARG, Output = RET> + ~const Destruct, -{ - called_at_rt.call_once(arg) -} - -#[cfg(bootstrap)] -#[unstable( - feature = "const_eval_select", - issue = "none", - reason = "const_eval_select will never be stable" -)] -#[rustc_const_unstable(feature = "const_eval_select", issue = "none")] -#[lang = "const_eval_select_ct"] -pub const unsafe fn const_eval_select_ct<ARG, F, G, RET>( - arg: ARG, - called_in_const: F, - _called_at_rt: G, -) -> RET -where - F: ~const FnOnce<ARG, Output = RET>, - G: FnOnce<ARG, Output = RET> + ~const Destruct, -{ - called_in_const.call_once(arg) -} diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs index 9b479a9f8..d4fb88610 100644 --- a/library/core/src/iter/adapters/array_chunks.rs +++ b/library/core/src/iter/adapters/array_chunks.rs @@ -1,6 +1,6 @@ use crate::array; use crate::iter::{ByRefSized, FusedIterator, Iterator}; -use crate::ops::{ControlFlow, NeverShortCircuit, Try}; +use crate::ops::{ControlFlow, Try}; /// An iterator over `N` elements of the iterator at a time. /// @@ -82,13 +82,7 @@ where } } - fn fold<B, F>(mut self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0 - } + impl_fold_via_try_fold! { fold -> try_fold } } #[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")] @@ -126,13 +120,7 @@ where try { acc } } - fn rfold<B, F>(mut self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.try_rfold(init, NeverShortCircuit::wrap_mut_2(f)).0 - } + impl_fold_via_try_fold! { rfold -> try_rfold } } impl<I, const N: usize> ArrayChunks<I, N> diff --git a/library/core/src/iter/adapters/by_ref_sized.rs b/library/core/src/iter/adapters/by_ref_sized.rs index 477e7117c..1945e402f 100644 --- a/library/core/src/iter/adapters/by_ref_sized.rs +++ b/library/core/src/iter/adapters/by_ref_sized.rs @@ -1,4 +1,7 @@ -use crate::ops::{NeverShortCircuit, Try}; +use crate::{ + const_closure::ConstFnMutClosure, + ops::{NeverShortCircuit, Try}, +}; /// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics. /// @@ -36,12 +39,13 @@ impl<I: Iterator> Iterator for ByRefSized<'_, I> { } #[inline] - fn fold<B, F>(self, init: B, f: F) -> B + fn fold<B, F>(self, init: B, mut f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `fold` needs ownership, so this can't forward directly. 
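Editor's note: the adapter changes above route `fold` through `try_fold` (via `impl_fold_via_try_fold!` and `ConstFnMutClosure`). For an infallible accumulator the two are observably equivalent, which is what makes the refactor behaviour-preserving:

```rust
fn main() {
    let v = [1, 2, 3, 4];

    let by_fold: i32 = v.iter().fold(0, |acc, &x| acc + x);
    let by_try_fold: i32 = v
        .iter()
        .try_fold(0, |acc, &x| Ok::<i32, ()>(acc + x))
        .unwrap();

    assert_eq!(by_fold, by_try_fold);
}
```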
- I::try_fold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 + I::try_fold(self.0, init, ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp)) + .0 } #[inline] @@ -72,12 +76,17 @@ impl<I: DoubleEndedIterator> DoubleEndedIterator for ByRefSized<'_, I> { } #[inline] - fn rfold<B, F>(self, init: B, f: F) -> B + fn rfold<B, F>(self, init: B, mut f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `rfold` needs ownership, so this can't forward directly. - I::try_rfold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 + I::try_rfold( + self.0, + init, + ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp), + ) + .0 } #[inline] diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs index f9bfd77d7..62d3afb81 100644 --- a/library/core/src/iter/adapters/copied.rs +++ b/library/core/src/iter/adapters/copied.rs @@ -2,7 +2,10 @@ use crate::iter::adapters::{ zip::try_get_unchecked, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; use crate::iter::{FusedIterator, TrustedLen}; +use crate::mem::MaybeUninit; +use crate::mem::SizedTypeProperties; use crate::ops::Try; +use crate::{array, ptr}; /// An iterator that copies the elements of an underlying iterator. /// @@ -44,6 +47,15 @@ where self.it.next().copied() } + fn next_chunk<const N: usize>( + &mut self, + ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> + where + Self: Sized, + { + <I as SpecNextChunk<'_, N, T>>::spec_next_chunk(&mut self.it) + } + fn size_hint(&self) -> (usize, Option<usize>) { self.it.size_hint() } @@ -166,3 +178,65 @@ where T: Copy, { } + +trait SpecNextChunk<'a, const N: usize, T: 'a>: Iterator<Item = &'a T> +where + T: Copy, +{ + fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>>; +} + +impl<'a, const N: usize, I, T: 'a> SpecNextChunk<'a, N, T> for I +where + I: Iterator<Item = &'a T>, + T: Copy, +{ + default fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>> { + array::iter_next_chunk(&mut self.map(|e| *e)) + } +} + +impl<'a, const N: usize, T: 'a> SpecNextChunk<'a, N, T> for crate::slice::Iter<'a, T> +where + T: Copy, +{ + fn spec_next_chunk(&mut self) -> Result<[T; N], array::IntoIter<T, N>> { + let mut raw_array = MaybeUninit::uninit_array(); + + let len = self.len(); + + if T::IS_ZST { + if len < N { + let _ = self.advance_by(len); + // SAFETY: ZSTs can be conjured ex nihilo; only the amount has to be correct + return Err(unsafe { array::IntoIter::new_unchecked(raw_array, 0..len) }); + } + + let _ = self.advance_by(N); + // SAFETY: ditto + return Ok(unsafe { MaybeUninit::array_assume_init(raw_array) }); + } + + if len < N { + // SAFETY: `len` indicates that this many elements are available and we just checked that + // it fits into the array. + unsafe { + ptr::copy_nonoverlapping( + self.as_ref().as_ptr(), + raw_array.as_mut_ptr() as *mut T, + len, + ); + let _ = self.advance_by(len); + return Err(array::IntoIter::new_unchecked(raw_array, 0..len)); + } + } + + // SAFETY: `len` is larger than the array size. Copy a fixed amount here to fully initialize + // the array. 
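Editor's note: the specialization above fast-paths pulling fixed-size arrays out of a copied slice iterator. `Iterator::next_chunk` is still unstable, so this stand-in uses stable `chunks_exact` plus `try_into` merely to show the shape of the operation:

```rust
fn main() {
    let data = [1_u8, 2, 3, 4, 5, 6];

    // Fixed-size chunks out of a slice of `Copy` elements.
    let mut chunks = data.chunks_exact(2);
    let first: [u8; 2] = chunks.next().unwrap().try_into().unwrap();
    let second: [u8; 2] = chunks.next().unwrap().try_into().unwrap();

    assert_eq!(first, [1, 2]);
    assert_eq!(second, [3, 4]);
}
```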
+ unsafe { + ptr::copy_nonoverlapping(self.as_ref().as_ptr(), raw_array.as_mut_ptr() as *mut T, N); + let _ = self.advance_by(N); + Ok(MaybeUninit::array_assume_init(raw_array)) + } + } +} diff --git a/library/core/src/iter/adapters/map_while.rs b/library/core/src/iter/adapters/map_while.rs index 1e8d6bf3e..fbdeca4d4 100644 --- a/library/core/src/iter/adapters/map_while.rs +++ b/library/core/src/iter/adapters/map_while.rs @@ -64,19 +64,7 @@ where .into_try() } - #[inline] - fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } + impl_fold_via_try_fold! { fold -> try_fold } } #[unstable(issue = "none", feature = "inplace_iteration")] diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs index bf4fabad3..8cc2b7cec 100644 --- a/library/core/src/iter/adapters/mod.rs +++ b/library/core/src/iter/adapters/mod.rs @@ -1,5 +1,5 @@ use crate::iter::{InPlaceIterable, Iterator}; -use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, NeverShortCircuit, Residual, Try}; +use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try}; mod array_chunks; mod by_ref_sized; @@ -203,13 +203,7 @@ where .into_try() } - fn fold<B, F>(mut self, init: B, fold: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0 - } + impl_fold_via_try_fold! { fold -> try_fold } } #[unstable(issue = "none", feature = "inplace_iteration")] diff --git a/library/core/src/iter/adapters/scan.rs b/library/core/src/iter/adapters/scan.rs index 80bfd2231..62470512c 100644 --- a/library/core/src/iter/adapters/scan.rs +++ b/library/core/src/iter/adapters/scan.rs @@ -74,19 +74,7 @@ where self.iter.try_fold(init, scan(state, f, fold)).into_try() } - #[inline] - fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } + impl_fold_via_try_fold! { fold -> try_fold } } #[unstable(issue = "none", feature = "inplace_iteration")] diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs index dbf0ae9ec..c6334880d 100644 --- a/library/core/src/iter/adapters/skip.rs +++ b/library/core/src/iter/adapters/skip.rs @@ -206,17 +206,7 @@ where if n == 0 { try { init } } else { self.iter.try_rfold(init, check(n, fold)).into_try() } } - fn rfold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok<Acc, T>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, T) -> Result<Acc, !> { - move |acc, x| Ok(f(acc, x)) - } - - self.try_rfold(init, ok(fold)).unwrap() - } + impl_fold_via_try_fold! 
{ rfold -> try_rfold } #[inline] fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs index 2962e0104..58a0b9d7b 100644 --- a/library/core/src/iter/adapters/take.rs +++ b/library/core/src/iter/adapters/take.rs @@ -98,19 +98,7 @@ where } } - #[inline] - fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } + impl_fold_via_try_fold! { fold -> try_fold } #[inline] #[rustc_inherit_overflow_checks] diff --git a/library/core/src/iter/adapters/take_while.rs b/library/core/src/iter/adapters/take_while.rs index ded216da9..ec66dc3ae 100644 --- a/library/core/src/iter/adapters/take_while.rs +++ b/library/core/src/iter/adapters/take_while.rs @@ -94,19 +94,7 @@ where } } - #[inline] - fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc - where - Self: Sized, - Fold: FnMut(Acc, Self::Item) -> Acc, - { - #[inline] - fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(fold)).unwrap() - } + impl_fold_via_try_fold! { fold -> try_fold } } #[stable(feature = "fused", since = "1.26.0")] diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs index 9514466bd..ef0f39782 100644 --- a/library/core/src/iter/mod.rs +++ b/library/core/src/iter/mod.rs @@ -352,6 +352,29 @@ #![stable(feature = "rust1", since = "1.0.0")] +// This needs to be up here in order to be usable in the child modules +macro_rules! impl_fold_via_try_fold { + (fold -> try_fold) => { + impl_fold_via_try_fold! { @internal fold -> try_fold } + }; + (rfold -> try_rfold) => { + impl_fold_via_try_fold! { @internal rfold -> try_rfold } + }; + (@internal $fold:ident -> $try_fold:ident) => { + #[inline] + fn $fold<AAA, FFF>(mut self, init: AAA, mut fold: FFF) -> AAA + where + FFF: FnMut(AAA, Self::Item) -> AAA, + { + use crate::const_closure::ConstFnMutClosure; + use crate::ops::NeverShortCircuit; + + let fold = ConstFnMutClosure::new(&mut fold, NeverShortCircuit::wrap_mut_2_imp); + self.$try_fold(init, fold).0 + } + }; +} + #[stable(feature = "rust1", since = "1.0.0")] pub use self::traits::Iterator; diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs index f7aeee8c9..ac7b389b1 100644 --- a/library/core/src/iter/range.rs +++ b/library/core/src/iter/range.rs @@ -1150,19 +1150,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> { self.spec_try_fold(init, f) } - #[inline] - fn fold<B, F>(mut self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - #[inline] - fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> { - move |acc, x| Ok(f(acc, x)) - } - - self.try_fold(init, ok(f)).unwrap() - } + impl_fold_via_try_fold! { fold -> try_fold } #[inline] fn last(mut self) -> Option<A> { @@ -1230,19 +1218,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> { self.spec_try_rfold(init, f) } - #[inline] - fn rfold<B, F>(mut self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - #[inline] - fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> { - move |acc, x| Ok(f(acc, x)) - } - - self.try_rfold(init, ok(f)).unwrap() - } + impl_fold_via_try_fold! 
{ rfold -> try_rfold } } // Safety: See above implementation for `ops::Range<A>` diff --git a/library/core/src/iter/traits/collect.rs b/library/core/src/iter/traits/collect.rs index 12ca508be..e099700e3 100644 --- a/library/core/src/iter/traits/collect.rs +++ b/library/core/src/iter/traits/collect.rs @@ -228,6 +228,7 @@ pub trait FromIterator<A>: Sized { #[rustc_diagnostic_item = "IntoIterator"] #[rustc_skip_array_during_method_dispatch] #[stable(feature = "rust1", since = "1.0.0")] +#[const_trait] pub trait IntoIterator { /// The type of the elements being iterated over. #[stable(feature = "rust1", since = "1.0.0")] @@ -263,7 +264,7 @@ pub trait IntoIterator { #[rustc_const_unstable(feature = "const_intoiterator_identity", issue = "90603")] #[stable(feature = "rust1", since = "1.0.0")] -impl<I: ~const Iterator> const IntoIterator for I { +impl<I: Iterator> const IntoIterator for I { type Item = I::Item; type IntoIter = I; diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index b2d08f4b0..789a87968 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -692,7 +692,7 @@ pub trait Iterator { /// assert_eq!(it.next(), Some(NotClone(99))); // The separator. /// assert_eq!(it.next(), Some(NotClone(1))); // The next element from `v`. /// assert_eq!(it.next(), Some(NotClone(99))); // The separator. - /// assert_eq!(it.next(), Some(NotClone(2))); // The last element from from `v`. + /// assert_eq!(it.next(), Some(NotClone(2))); // The last element from `v`. /// assert_eq!(it.next(), None); // The iterator is finished. /// ``` /// @@ -2431,22 +2431,13 @@ pub trait Iterator { /// /// # Example /// - /// Find the maximum value: - /// /// ``` - /// fn find_max<I>(iter: I) -> Option<I::Item> - /// where I: Iterator, - /// I::Item: Ord, - /// { - /// iter.reduce(|accum, item| { - /// if accum >= item { accum } else { item } - /// }) - /// } - /// let a = [10, 20, 5, -23, 0]; - /// let b: [u32; 0] = []; + /// let reduced: i32 = (1..10).reduce(|acc, e| acc + e).unwrap(); + /// assert_eq!(reduced, 45); /// - /// assert_eq!(find_max(a.iter()), Some(&20)); - /// assert_eq!(find_max(b.iter()), None); + /// // Which is equivalent to doing it with `fold`: + /// let folded: i32 = (1..10).fold(0, |acc, e| acc + e); + /// assert_eq!(reduced, folded); /// ``` #[inline] #[stable(feature = "iterator_fold_self", since = "1.51.0")] @@ -2906,14 +2897,14 @@ pub trait Iterator { /// Stopping at the first `true`: /// /// ``` - /// let a = [1, 2, 3]; + /// let a = [-1, 2, 3, 4]; /// /// let mut iter = a.iter(); /// - /// assert_eq!(iter.rposition(|&x| x == 2), Some(1)); + /// assert_eq!(iter.rposition(|&x| x >= 2), Some(3)); /// /// // we can still use `iter`, as there are more elements. 
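Editor's note: the `cmp_by` rewrite above funnels element-wise comparison through a shared helper. Its observable behaviour is exercised by the stable `Iterator::cmp`: lexicographic comparison with length as the tie-breaker.

```rust
use std::cmp::Ordering;

fn main() {
    assert_eq!([1, 2, 3].iter().cmp([1, 2, 4].iter()), Ordering::Less);
    // A strict prefix compares as less than the longer iterator.
    assert_eq!([1, 2].iter().cmp([1, 2, 0].iter()), Ordering::Less);
    assert_eq!([1, 2, 3].iter().cmp([1, 2, 3].iter()), Ordering::Equal);
}
```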
- /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), Some(&-1)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -3461,36 +3452,27 @@ pub trait Iterator { /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] - fn cmp_by<I, F>(mut self, other: I, mut cmp: F) -> Ordering + fn cmp_by<I, F>(self, other: I, cmp: F) -> Ordering where Self: Sized, I: IntoIterator, F: FnMut(Self::Item, I::Item) -> Ordering, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => { - if other.next().is_none() { - return Ordering::Equal; - } else { - return Ordering::Less; - } - } - Some(val) => val, - }; - - let y = match other.next() { - None => return Ordering::Greater, - Some(val) => val, - }; - - match cmp(x, y) { - Ordering::Equal => (), - non_eq => return non_eq, + #[inline] + fn compare<X, Y, F>(mut cmp: F) -> impl FnMut(X, Y) -> ControlFlow<Ordering> + where + F: FnMut(X, Y) -> Ordering, + { + move |x, y| match cmp(x, y) { + Ordering::Equal => ControlFlow::CONTINUE, + non_eq => ControlFlow::Break(non_eq), } } + + match iter_compare(self, other.into_iter(), compare(cmp)) { + ControlFlow::Continue(ord) => ord, + ControlFlow::Break(ord) => ord, + } } /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those @@ -3546,36 +3528,27 @@ pub trait Iterator { /// ); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] - fn partial_cmp_by<I, F>(mut self, other: I, mut partial_cmp: F) -> Option<Ordering> + fn partial_cmp_by<I, F>(self, other: I, partial_cmp: F) -> Option<Ordering> where Self: Sized, I: IntoIterator, F: FnMut(Self::Item, I::Item) -> Option<Ordering>, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => { - if other.next().is_none() { - return Some(Ordering::Equal); - } else { - return Some(Ordering::Less); - } - } - Some(val) => val, - }; - - let y = match other.next() { - None => return Some(Ordering::Greater), - Some(val) => val, - }; - - match partial_cmp(x, y) { - Some(Ordering::Equal) => (), - non_eq => return non_eq, + #[inline] + fn compare<X, Y, F>(mut partial_cmp: F) -> impl FnMut(X, Y) -> ControlFlow<Option<Ordering>> + where + F: FnMut(X, Y) -> Option<Ordering>, + { + move |x, y| match partial_cmp(x, y) { + Some(Ordering::Equal) => ControlFlow::CONTINUE, + non_eq => ControlFlow::Break(non_eq), } } + + match iter_compare(self, other.into_iter(), compare(partial_cmp)) { + ControlFlow::Continue(ord) => Some(ord), + ControlFlow::Break(ord) => ord, + } } /// Determines if the elements of this [`Iterator`] are equal to those of @@ -3613,29 +3586,26 @@ pub trait Iterator { /// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y)); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] - fn eq_by<I, F>(mut self, other: I, mut eq: F) -> bool + fn eq_by<I, F>(self, other: I, eq: F) -> bool where Self: Sized, I: IntoIterator, F: FnMut(Self::Item, I::Item) -> bool, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => return other.next().is_none(), - Some(val) => val, - }; - - let y = match other.next() { - None => return false, - Some(val) => val, - }; - - if !eq(x, y) { - return false; + #[inline] + fn compare<X, Y, F>(mut eq: F) -> impl FnMut(X, Y) -> ControlFlow<()> + where + F: FnMut(X, Y) -> bool, + { + move |x, y| { + if eq(x, y) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } } } + + match 
iter_compare(self, other.into_iter(), compare(eq)) { + ControlFlow::Continue(ord) => ord == Ordering::Equal, + ControlFlow::Break(()) => false, + } } /// Determines if the elements of this [`Iterator`] are unequal to those of @@ -3860,6 +3830,46 @@ pub trait Iterator { } } +/// Compares two iterators element-wise using the given function. +/// +/// If `ControlFlow::CONTINUE` is returned from the function, the comparison moves on to the next +/// elements of both iterators. Returning `ControlFlow::Break(x)` short-circuits the iteration and +/// returns `ControlFlow::Break(x)`. If one of the iterators runs out of elements, +/// `ControlFlow::Continue(ord)` is returned where `ord` is the result of comparing the lengths of +/// the iterators. +/// +/// Isolates the logic shared by ['cmp_by'](Iterator::cmp_by), +/// ['partial_cmp_by'](Iterator::partial_cmp_by), and ['eq_by'](Iterator::eq_by). +#[inline] +fn iter_compare<A, B, F, T>(mut a: A, mut b: B, f: F) -> ControlFlow<T, Ordering> +where + A: Iterator, + B: Iterator, + F: FnMut(A::Item, B::Item) -> ControlFlow<T>, +{ + #[inline] + fn compare<'a, B, X, T>( + b: &'a mut B, + mut f: impl FnMut(X, B::Item) -> ControlFlow<T> + 'a, + ) -> impl FnMut(X) -> ControlFlow<ControlFlow<T, Ordering>> + 'a + where + B: Iterator, + { + move |x| match b.next() { + None => ControlFlow::Break(ControlFlow::Continue(Ordering::Greater)), + Some(y) => f(x, y).map_break(ControlFlow::Break), + } + } + + match a.try_for_each(compare(&mut b, f)) { + ControlFlow::Continue(()) => ControlFlow::Continue(match b.next() { + None => Ordering::Equal, + Some(_) => Ordering::Less, + }), + ControlFlow::Break(x) => x, + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator + ?Sized> Iterator for &mut I { type Item = I::Item; diff --git a/library/core/src/lazy.rs b/library/core/src/lazy.rs deleted file mode 100644 index f8c06c3f9..000000000 --- a/library/core/src/lazy.rs +++ /dev/null @@ -1 +0,0 @@ -//! Lazy values and one-time initialization of static data. 
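Editor's note: the deleted `core::lazy` module's functionality lives on under `core::cell` / `std::sync` behind the same `once_cell` feature gate. A minimal sketch, assuming a nightly toolchain of the same era:

```rust
#![feature(once_cell)]

use std::cell::OnceCell;

fn main() {
    // One-time initialization, formerly exposed through the `lazy` module.
    let cell: OnceCell<u32> = OnceCell::new();
    assert!(cell.get().is_none());
    cell.set(92).unwrap();
    assert_eq!(cell.get(), Some(&92));
}
```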
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 5621d15c1..659409557 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -114,6 +114,7 @@ #![feature(const_fmt_arguments_new)] #![feature(const_heap)] #![feature(const_convert)] +#![feature(const_index_range_slice_index)] #![feature(const_inherent_unchecked_arith)] #![feature(const_int_unchecked_arith)] #![feature(const_intrinsic_forget)] @@ -137,17 +138,21 @@ #![feature(const_size_of_val)] #![feature(const_slice_from_raw_parts_mut)] #![feature(const_slice_ptr_len)] +#![feature(const_slice_split_at_mut)] #![feature(const_str_from_utf8_unchecked_mut)] #![feature(const_swap)] #![feature(const_trait_impl)] +#![feature(const_try)] #![feature(const_type_id)] #![feature(const_type_name)] #![feature(const_default_impls)] #![feature(const_unicode_case_lookup)] #![feature(const_unsafecell_get_mut)] +#![feature(const_waker)] #![feature(core_panic)] #![feature(duration_consts_float)] #![feature(maybe_uninit_uninit_array)] +#![feature(ptr_alignment_type)] #![feature(ptr_metadata)] #![feature(slice_ptr_get)] #![feature(slice_split_at_unchecked)] @@ -160,6 +165,7 @@ #![feature(const_slice_index)] #![feature(const_is_char_boundary)] #![feature(const_cstr_methods)] +#![feature(is_ascii_octdigit)] // // Language features: #![feature(abi_unadjusted)] @@ -168,6 +174,7 @@ #![feature(allow_internal_unstable)] #![feature(associated_type_bounds)] #![feature(auto_traits)] +#![feature(c_unwind)] #![feature(cfg_sanitize)] #![feature(cfg_target_has_atomic)] #![feature(cfg_target_has_atomic_equal_alignment)] @@ -185,13 +192,13 @@ #![feature(extern_types)] #![feature(fundamental)] #![feature(if_let_guard)] +#![feature(inline_const)] #![feature(intra_doc_pointers)] #![feature(intrinsics)] #![feature(lang_items)] #![feature(link_llvm_intrinsics)] #![feature(macro_metavar_expr)] #![feature(min_specialization)] -#![feature(mixed_integer_ops)] #![feature(must_not_suspend)] #![feature(negative_impls)] #![feature(never_type)] @@ -205,12 +212,14 @@ #![feature(simd_ffi)] #![feature(staged_api)] #![feature(stmt_expr_attributes)] +#![feature(target_feature_11)] #![feature(trait_alias)] #![feature(transparent_unions)] #![feature(try_blocks)] #![feature(unboxed_closures)] #![feature(unsized_fn_params)] #![feature(asm_const)] +#![feature(const_transmute_copy)] // // Target features: #![feature(arm_target_feature)] @@ -220,6 +229,7 @@ #![feature(hexagon_target_feature)] #![feature(mips_target_feature)] #![feature(powerpc_target_feature)] +#![feature(riscv_target_feature)] #![feature(rtm_target_feature)] #![feature(sse4a_target_feature)] #![feature(tbm_target_feature)] @@ -306,7 +316,6 @@ pub mod clone; pub mod cmp; pub mod convert; pub mod default; -#[cfg(not(bootstrap))] pub mod error; pub mod marker; pub mod ops; @@ -323,8 +332,6 @@ pub mod cell; pub mod char; pub mod ffi; pub mod iter; -#[unstable(feature = "once_cell", issue = "74465")] -pub mod lazy; pub mod option; pub mod panic; pub mod panicking; @@ -353,6 +360,8 @@ mod bool; mod tuple; mod unit; +mod const_closure; + #[stable(feature = "core_primitive", since = "1.43.0")] pub mod primitive; diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index b8239ed88..ae4ebf444 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -44,6 +44,12 @@ impl<T: ?Sized> !Send for *const T {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> !Send for *mut T {} +// Most instances arise automatically, but this instance is needed to link up `T: Sync` with +// 
`&T: Send` (and it also removes the unsound default instance `T Send` -> `&T: Send` that would +// otherwise exist). +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<T: Sync + ?Sized> Send for &T {} + /// Types with a constant size known at compile time. /// /// All type parameters have an implicit bound of `Sized`. The special syntax @@ -81,6 +87,7 @@ impl<T: ?Sized> !Send for *mut T {} /// ``` /// /// [trait object]: ../../book/ch17-02-trait-objects.html +#[doc(alias = "?", alias = "?Sized")] #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sized"] #[rustc_on_unimplemented( @@ -482,64 +489,6 @@ impl<T: ?Sized> !Sync for *const T {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> !Sync for *mut T {} -macro_rules! impls { - ($t: ident) => { - #[stable(feature = "rust1", since = "1.0.0")] - impl<T: ?Sized> Hash for $t<T> { - #[inline] - fn hash<H: Hasher>(&self, _: &mut H) {} - } - - #[stable(feature = "rust1", since = "1.0.0")] - impl<T: ?Sized> cmp::PartialEq for $t<T> { - fn eq(&self, _other: &$t<T>) -> bool { - true - } - } - - #[stable(feature = "rust1", since = "1.0.0")] - impl<T: ?Sized> cmp::Eq for $t<T> {} - - #[stable(feature = "rust1", since = "1.0.0")] - impl<T: ?Sized> cmp::PartialOrd for $t<T> { - fn partial_cmp(&self, _other: &$t<T>) -> Option<cmp::Ordering> { - Option::Some(cmp::Ordering::Equal) - } - } - - #[stable(feature = "rust1", since = "1.0.0")] - impl<T: ?Sized> cmp::Ord for $t<T> { - fn cmp(&self, _other: &$t<T>) -> cmp::Ordering { - cmp::Ordering::Equal - } - } - - #[stable(feature = "rust1", since = "1.0.0")] - impl<T: ?Sized> Copy for $t<T> {} - - #[stable(feature = "rust1", since = "1.0.0")] - impl<T: ?Sized> Clone for $t<T> { - fn clone(&self) -> Self { - Self - } - } - - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] - impl<T: ?Sized> const Default for $t<T> { - fn default() -> Self { - Self - } - } - - #[unstable(feature = "structural_match", issue = "31434")] - impl<T: ?Sized> StructuralPartialEq for $t<T> {} - - #[unstable(feature = "structural_match", issue = "31434")] - impl<T: ?Sized> StructuralEq for $t<T> {} - }; -} - /// Zero-sized type used to mark things that "act like" they own a `T`. /// /// Adding a `PhantomData<T>` field to your type tells the compiler that your @@ -677,15 +626,60 @@ macro_rules! impls { #[stable(feature = "rust1", since = "1.0.0")] pub struct PhantomData<T: ?Sized>; -impls! 
{ PhantomData } +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Hash for PhantomData<T> { + #[inline] + fn hash<H: Hasher>(&self, _: &mut H) {} +} -mod impls { - #[stable(feature = "rust1", since = "1.0.0")] - unsafe impl<T: Sync + ?Sized> Send for &T {} - #[stable(feature = "rust1", since = "1.0.0")] - unsafe impl<T: Send + ?Sized> Send for &mut T {} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> cmp::PartialEq for PhantomData<T> { + fn eq(&self, _other: &PhantomData<T>) -> bool { + true + } } +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> cmp::Eq for PhantomData<T> {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> cmp::PartialOrd for PhantomData<T> { + fn partial_cmp(&self, _other: &PhantomData<T>) -> Option<cmp::Ordering> { + Option::Some(cmp::Ordering::Equal) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> cmp::Ord for PhantomData<T> { + fn cmp(&self, _other: &PhantomData<T>) -> cmp::Ordering { + cmp::Ordering::Equal + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Copy for PhantomData<T> {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Clone for PhantomData<T> { + fn clone(&self) -> Self { + Self + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] +impl<T: ?Sized> const Default for PhantomData<T> { + fn default() -> Self { + Self + } +} + +#[unstable(feature = "structural_match", issue = "31434")] +impl<T: ?Sized> StructuralPartialEq for PhantomData<T> {} + +#[unstable(feature = "structural_match", issue = "31434")] +impl<T: ?Sized> StructuralEq for PhantomData<T> {} + /// Compiler-internal trait used to indicate the type of enum discriminants. /// /// This trait is automatically implemented for every type and does not add any @@ -798,6 +792,7 @@ impl<T: ?Sized> Unpin for *mut T {} #[unstable(feature = "const_trait_impl", issue = "67792")] #[lang = "destruct"] #[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)] +#[const_trait] pub trait Destruct {} /// A marker for tuple types. @@ -805,7 +800,7 @@ pub trait Destruct {} /// The implementation of this trait is built-in and cannot be implemented /// for any user type. #[unstable(feature = "tuple_trait", issue = "none")] -#[cfg_attr(not(bootstrap), lang = "tuple_trait")] +#[lang = "tuple_trait"] #[rustc_on_unimplemented(message = "`{Self}` is not a tuple")] pub trait Tuple {} diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs index 2490c0767..7757c95de 100644 --- a/library/core/src/mem/maybe_uninit.rs +++ b/library/core/src/mem/maybe_uninit.rs @@ -146,7 +146,6 @@ use crate::slice; /// /// ``` /// use std::mem::MaybeUninit; -/// use std::ptr; /// /// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is /// // safe because the type we are claiming to have initialized here is a @@ -162,7 +161,7 @@ use crate::slice; /// /// // For each item in the array, drop if we allocated it. /// for elem in &mut data[0..data_len] { -/// unsafe { ptr::drop_in_place(elem.as_mut_ptr()); } +/// unsafe { elem.assume_init_drop(); } /// } /// ``` /// @@ -647,7 +646,7 @@ impl<T> MaybeUninit<T> { /// implements the [`Copy`] trait or not. 
When using multiple copies of the /// data (by calling `assume_init_read` multiple times, or first calling /// `assume_init_read` and then [`assume_init`]), it is your responsibility - /// to ensure that that data may indeed be duplicated. + /// to ensure that data may indeed be duplicated. /// /// [inv]: #initialization-invariant /// [`assume_init`]: MaybeUninit::assume_init @@ -1284,3 +1283,42 @@ impl<T> MaybeUninit<T> { } } } + +impl<T, const N: usize> MaybeUninit<[T; N]> { + /// Transposes a `MaybeUninit<[T; N]>` into a `[MaybeUninit<T>; N]`. + /// + /// # Examples + /// + /// ``` + /// #![feature(maybe_uninit_uninit_array_transpose)] + /// # use std::mem::MaybeUninit; + /// + /// let data: [MaybeUninit<u8>; 1000] = MaybeUninit::uninit().transpose(); + /// ``` + #[unstable(feature = "maybe_uninit_uninit_array_transpose", issue = "96097")] + #[inline] + pub const fn transpose(self) -> [MaybeUninit<T>; N] { + // SAFETY: T and MaybeUninit<T> have the same layout + unsafe { super::transmute_copy(&ManuallyDrop::new(self)) } + } +} + +impl<T, const N: usize> [MaybeUninit<T>; N] { + /// Transposes a `[MaybeUninit<T>; N]` into a `MaybeUninit<[T; N]>`. + /// + /// # Examples + /// + /// ``` + /// #![feature(maybe_uninit_uninit_array_transpose)] + /// # use std::mem::MaybeUninit; + /// + /// let data = [MaybeUninit::<u8>::uninit(); 1000]; + /// let data: MaybeUninit<[u8; 1000]> = data.transpose(); + /// ``` + #[unstable(feature = "maybe_uninit_uninit_array_transpose", issue = "96097")] + #[inline] + pub const fn transpose(self) -> MaybeUninit<[T; N]> { + // SAFETY: T and MaybeUninit<T> have the same layout + unsafe { super::transmute_copy(&ManuallyDrop::new(self)) } + } +} diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index d2dd2941d..9195da5a4 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -21,11 +21,10 @@ mod maybe_uninit; #[stable(feature = "maybe_uninit", since = "1.36.0")] pub use maybe_uninit::MaybeUninit; -mod valid_align; -// For now this type is left crate-local. It could potentially make sense to expose -// it publicly, as it would be a nice parameter type for methods which need to take -// alignment as a parameter, such as `Layout::padding_needed_for`. -pub(crate) use valid_align::ValidAlign; +// FIXME: This is left here for now to avoid complications around pending reverts. +// Once <https://github.com/rust-lang/rust/issues/101899> is fully resolved, +// this should be removed and the references in `alloc::Layout` updated. +pub(crate) use ptr::Alignment as ValidAlign; mod transmutability; #[unstable(feature = "transmutability", issue = "99571")] @@ -1009,18 +1008,18 @@ pub fn copy<T: Copy>(x: &T) -> T { *x } -/// Interprets `src` as having type `&U`, and then reads `src` without moving +/// Interprets `src` as having type `&Dst`, and then reads `src` without moving /// the contained value. /// -/// This function will unsafely assume the pointer `src` is valid for [`size_of::<U>`][size_of] -/// bytes by transmuting `&T` to `&U` and then reading the `&U` (except that this is done in a way -/// that is correct even when `&U` has stricter alignment requirements than `&T`). It will also -/// unsafely create a copy of the contained value instead of moving out of `src`. 
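For context on the two `transpose` methods added to `MaybeUninit` in the `maybe_uninit.rs` hunk above (converting between `MaybeUninit<[T; N]>` and `[MaybeUninit<T>; N]`), here is a minimal nightly sketch of the element-by-element initialization pattern they enable. It is not taken from the diff; the feature gate name is the one the diff's own doctests use.

```
#![feature(maybe_uninit_uninit_array_transpose)]
use std::mem::MaybeUninit;

fn main() {
    // One uninitialized array, viewed as per-element `MaybeUninit`s.
    let arr: MaybeUninit<[u8; 4]> = MaybeUninit::uninit();
    let mut elems: [MaybeUninit<u8>; 4] = arr.transpose();
    for (i, e) in elems.iter_mut().enumerate() {
        e.write(i as u8);
    }
    // Transpose back and assume the whole array is initialized in one step.
    // SAFETY: every element was written in the loop above.
    let arr: [u8; 4] = unsafe { elems.transpose().assume_init() };
    assert_eq!(arr, [0, 1, 2, 3]);
}
```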
+/// This function will unsafely assume the pointer `src` is valid for [`size_of::<Dst>`][size_of] +/// bytes by transmuting `&Src` to `&Dst` and then reading the `&Dst` (except that this is done +/// in a way that is correct even when `&Dst` has stricter alignment requirements than `&Src`). +/// It will also unsafely create a copy of the contained value instead of moving out of `src`. /// -/// It is not a compile-time error if `T` and `U` have different sizes, but it -/// is highly encouraged to only invoke this function where `T` and `U` have the -/// same size. This function triggers [undefined behavior][ub] if `U` is larger than -/// `T`. +/// It is not a compile-time error if `Src` and `Dst` have different sizes, but it +/// is highly encouraged to only invoke this function where `Src` and `Dst` have the +/// same size. This function triggers [undefined behavior][ub] if `Dst` is larger than +/// `Src`. /// /// [ub]: ../../reference/behavior-considered-undefined.html /// @@ -1053,19 +1052,22 @@ pub fn copy<T: Copy>(x: &T) -> T { #[must_use] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_transmute_copy", issue = "83165")] -pub const unsafe fn transmute_copy<T, U>(src: &T) -> U { - assert!(size_of::<T>() >= size_of::<U>(), "cannot transmute_copy if U is larger than T"); +pub const unsafe fn transmute_copy<Src, Dst>(src: &Src) -> Dst { + assert!( + size_of::<Src>() >= size_of::<Dst>(), + "cannot transmute_copy if Dst is larger than Src" + ); - // If U has a higher alignment requirement, src might not be suitably aligned. - if align_of::<U>() > align_of::<T>() { + // If Dst has a higher alignment requirement, src might not be suitably aligned. + if align_of::<Dst>() > align_of::<Src>() { // SAFETY: `src` is a reference which is guaranteed to be valid for reads. // The caller must guarantee that the actual transmutation is safe. - unsafe { ptr::read_unaligned(src as *const T as *const U) } + unsafe { ptr::read_unaligned(src as *const Src as *const Dst) } } else { // SAFETY: `src` is a reference which is guaranteed to be valid for reads. - // We just checked that `src as *const U` was properly aligned. + // We just checked that `src as *const Dst` was properly aligned. // The caller must guarantee that the actual transmutation is safe. - unsafe { ptr::read(src as *const T as *const U) } + unsafe { ptr::read(src as *const Src as *const Dst) } } } @@ -1178,3 +1180,44 @@ pub const fn discriminant<T>(v: &T) -> Discriminant<T> { pub const fn variant_count<T>() -> usize { intrinsics::variant_count::<T>() } + +/// Provides associated constants for various useful properties of types, +/// to give them a canonical form in our code and make them easier to read. +/// +/// This is here only to simplify all the ZST checks we need in the library. +/// It's not on a stabilization track right now. +#[doc(hidden)] +#[unstable(feature = "sized_type_properties", issue = "none")] +pub trait SizedTypeProperties: Sized { + /// `true` if this type requires no storage. + /// `false` if its [size](size_of) is greater than zero. + /// + /// # Examples + /// + /// ``` + /// #![feature(sized_type_properties)] + /// use core::mem::SizedTypeProperties; + /// + /// fn do_something_with<T>() { + /// if T::IS_ZST { + /// // ... special approach ... + /// } else { + /// // ... the normal thing ... 
+ /// } + /// } + /// + /// struct MyUnit; + /// assert!(MyUnit::IS_ZST); + /// + /// // For negative checks, consider using UFCS to emphasize the negation + /// assert!(!<i32>::IS_ZST); + /// // As it can sometimes hide in the type otherwise + /// assert!(!String::IS_ZST); + /// ``` + #[doc(hidden)] + #[unstable(feature = "sized_type_properties", issue = "none")] + const IS_ZST: bool = size_of::<Self>() == 0; +} +#[doc(hidden)] +#[unstable(feature = "sized_type_properties", issue = "none")] +impl<T> SizedTypeProperties for T {} diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs index 87a378631..3b98efff2 100644 --- a/library/core/src/mem/transmutability.rs +++ b/library/core/src/mem/transmutability.rs @@ -4,7 +4,7 @@ /// any value of type `Self` are safely transmutable into a value of type `Dst`, in a given `Context`, /// notwithstanding whatever safety checks you have asked the compiler to [`Assume`] are satisfied. #[unstable(feature = "transmutability", issue = "99571")] -#[cfg_attr(not(bootstrap), lang = "transmute_trait")] +#[lang = "transmute_trait"] #[rustc_on_unimplemented( message = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`.", label = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`." @@ -17,7 +17,7 @@ where /// What transmutation safety conditions shall the compiler assume that *you* are checking? #[unstable(feature = "transmutability", issue = "99571")] -#[cfg_attr(not(bootstrap), lang = "transmute_opts")] +#[lang = "transmute_opts"] #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub struct Assume { /// When `true`, the compiler assumes that *you* are ensuring (either dynamically or statically) that diff --git a/library/core/src/num/dec2flt/lemire.rs b/library/core/src/num/dec2flt/lemire.rs index 75405f471..9f7594460 100644 --- a/library/core/src/num/dec2flt/lemire.rs +++ b/library/core/src/num/dec2flt/lemire.rs @@ -6,7 +6,7 @@ use crate::num::dec2flt::table::{ LARGEST_POWER_OF_FIVE, POWER_OF_FIVE_128, SMALLEST_POWER_OF_FIVE, }; -/// Compute a float using an extended-precision representation. +/// Compute w * 10^q using an extended-precision float representation. /// /// Fast conversion of a the significant digits and decimal exponent /// a float to an extended representation with a binary float. This @@ -76,7 +76,7 @@ pub fn compute_float<F: RawFloat>(q: i64, mut w: u64) -> BiasedFp { return BiasedFp { f: mantissa, e: power2 }; } // Need to handle rounding ties. Normally, we need to round up, - // but if we fall right in between and and we have an even basis, we + // but if we fall right in between and we have an even basis, we // need to round down. // // This will only occur if: diff --git a/library/core/src/num/error.rs b/library/core/src/num/error.rs index 1f6b40e5d..768dd8781 100644 --- a/library/core/src/num/error.rs +++ b/library/core/src/num/error.rs @@ -1,7 +1,6 @@ //! Error types for conversion to integral types. 
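The `mem::transmute_copy` hunk above only renames the type parameters to `Src`/`Dst` and reworks the documentation; behaviour is unchanged. For orientation, a small usage sketch on stable Rust (the `Pair` type and values are illustrative, not from the diff):

```
use std::mem;

#[repr(C)]
struct Pair {
    a: u32,
    b: u32,
}

fn main() {
    let src = Pair { a: 1, b: 2 };
    // Read the leading `u32` out of `src` without moving it.
    // SAFETY: `Pair` is `#[repr(C)]` and starts with a `u32`, and
    // `size_of::<u32>() <= size_of::<Pair>()`.
    let first: u32 = unsafe { mem::transmute_copy(&src) };
    assert_eq!(first, 1);
}
```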
use crate::convert::Infallible; -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt; @@ -147,7 +146,6 @@ impl fmt::Display for ParseIntError { } } -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] impl Error for ParseIntError { #[allow(deprecated)] @@ -156,7 +154,6 @@ impl Error for ParseIntError { } } -#[cfg(not(bootstrap))] #[stable(feature = "try_from", since = "1.34.0")] impl Error for TryFromIntError { #[allow(deprecated)] diff --git a/library/core/src/num/flt2dec/strategy/grisu.rs b/library/core/src/num/flt2dec/strategy/grisu.rs index a4cb51c62..ed3e0edaf 100644 --- a/library/core/src/num/flt2dec/strategy/grisu.rs +++ b/library/core/src/num/flt2dec/strategy/grisu.rs @@ -253,7 +253,6 @@ pub fn format_shortest_opt<'a>( let delta1frac = delta1 & ((1 << e) - 1); // render integral parts, while checking for the accuracy at each step. - let mut kappa = max_kappa as i16; let mut ten_kappa = max_ten_kappa; // 10^kappa let mut remainder = plus1int; // digits yet to be rendered loop { @@ -290,12 +289,10 @@ pub fn format_shortest_opt<'a>( // the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`. if i > max_kappa as usize { debug_assert_eq!(ten_kappa, 1); - debug_assert_eq!(kappa, 0); break; } // restore invariants - kappa -= 1; ten_kappa /= 10; remainder = r; } @@ -338,7 +335,6 @@ pub fn format_shortest_opt<'a>( } // restore invariants - kappa -= 1; remainder = r; } diff --git a/library/core/src/num/int_log10.rs b/library/core/src/num/int_log10.rs index cc26c04a5..80472528f 100644 --- a/library/core/src/num/int_log10.rs +++ b/library/core/src/num/int_log10.rs @@ -1,5 +1,5 @@ /// These functions compute the integer logarithm of their type, assuming -/// that someone has already checked that the the value is strictly positive. +/// that someone has already checked that the value is strictly positive. // 0 < val <= u8::MAX #[inline] diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index e7deb728d..914dca61b 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -464,12 +464,11 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_unsigned(2), Some(3));")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add_unsigned(3), None);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -533,12 +532,11 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_sub_unsigned(2), Some(-1));")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).checked_sub_unsigned(3), None);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -654,7 +652,6 @@ macro_rules! 
int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1));")] #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_rem(-1), None);")] @@ -706,7 +703,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_neg(), Some(-5));")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_neg(), None);")] /// ``` @@ -822,7 +818,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!((-5", stringify!($SelfT), ").checked_abs(), Some(5));")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_abs(), None);")] /// ``` @@ -874,7 +869,7 @@ macro_rules! int_impl { // Deal with the final bit of the exponent separately, since // squaring the base afterwards is not necessary and may cause a // needless overflow. - Some(try_opt!(acc.checked_mul(base))) + acc.checked_mul(base) } /// Saturating integer addition. Computes `self + rhs`, saturating at the numeric @@ -907,12 +902,11 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_unsigned(2), 3);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_add_unsigned(100), ", stringify!($SelfT), "::MAX);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -954,12 +948,11 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".saturating_sub_unsigned(127), -27);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_sub_unsigned(100), ", stringify!($SelfT), "::MIN);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -1030,7 +1023,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!(10", stringify!($SelfT), ".saturating_mul(12), 120);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.saturating_mul(10), ", stringify!($SelfT), "::MAX);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_mul(10), ", stringify!($SelfT), "::MIN);")] @@ -1089,7 +1081,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!((-4", stringify!($SelfT), ").saturating_pow(3), -64);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(2), ", stringify!($SelfT), "::MAX);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(3), ", stringify!($SelfT), "::MIN);")] @@ -1135,12 +1126,11 @@ macro_rules! 
int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(100", stringify!($SelfT), ".wrapping_add_unsigned(27), 127);")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_add_unsigned(2), ", stringify!($SelfT), "::MIN + 1);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] @@ -1176,12 +1166,11 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".wrapping_sub_unsigned(127), -127);")] #[doc = concat!("assert_eq!((-2", stringify!($SelfT), ").wrapping_sub_unsigned(", stringify!($UnsignedT), "::MAX), -1);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] @@ -1504,7 +1493,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (", stringify!($SelfT), "::MIN, true));")] /// ``` @@ -1574,13 +1562,12 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_unsigned(2), (3, false));")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN).overflowing_add_unsigned(", stringify!($UnsignedT), "::MAX), (", stringify!($SelfT), "::MAX, false));")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).overflowing_add_unsigned(3), (", stringify!($SelfT), "::MIN, true));")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -1600,7 +1587,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));")] /// ``` @@ -1658,13 +1644,12 @@ macro_rules! 
int_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_sub_unsigned(2), (-1, false));")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX).overflowing_sub_unsigned(", stringify!($UnsignedT), "::MAX), (", stringify!($SelfT), "::MIN, false));")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 2).overflowing_sub_unsigned(3), (", stringify!($SelfT), "::MAX, true));")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -1711,7 +1696,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false));")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div(-1), (", stringify!($SelfT), "::MIN, true));")] /// ``` @@ -1774,7 +1758,6 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false));")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem(-1), (0, true));")] /// ``` @@ -2287,9 +2270,8 @@ macro_rules! int_impl { /// /// # Panics /// - /// When the number is negative, zero, or if the base is not at least 2; it - /// panics in debug mode and the return value is 0 in release - /// mode. + /// This function will panic if `self` is less than or equal to zero, + /// or if `base` is less then 2. /// /// # Examples /// @@ -2302,27 +2284,16 @@ macro_rules! int_impl { without modifying the original"] #[inline] #[track_caller] - #[rustc_inherit_overflow_checks] - #[allow(arithmetic_overflow)] pub const fn ilog(self, base: Self) -> u32 { - match self.checked_ilog(base) { - Some(n) => n, - None => { - // In debug builds, trigger a panic on None. - // This should optimize completely out in release builds. - let _ = Self::MAX + 1; - - 0 - }, - } + assert!(base >= 2, "base of integer logarithm must be at least 2"); + self.checked_ilog(base).expect("argument of integer logarithm must be positive") } /// Returns the base 2 logarithm of the number, rounded down. /// /// # Panics /// - /// When the number is negative or zero it panics in debug mode and the return value - /// is 0 in release mode. + /// This function will panic if `self` is less than or equal to zero. /// /// # Examples /// @@ -2335,27 +2306,15 @@ macro_rules! int_impl { without modifying the original"] #[inline] #[track_caller] - #[rustc_inherit_overflow_checks] - #[allow(arithmetic_overflow)] pub const fn ilog2(self) -> u32 { - match self.checked_ilog2() { - Some(n) => n, - None => { - // In debug builds, trigger a panic on None. - // This should optimize completely out in release builds. - let _ = Self::MAX + 1; - - 0 - }, - } + self.checked_ilog2().expect("argument of integer logarithm must be positive") } /// Returns the base 10 logarithm of the number, rounded down. /// /// # Panics /// - /// When the number is negative or zero it panics in debug mode and the return value - /// is 0 in release mode. + /// This function will panic if `self` is less than or equal to zero. /// /// # Example /// @@ -2368,19 +2327,8 @@ macro_rules! 
int_impl { without modifying the original"] #[inline] #[track_caller] - #[rustc_inherit_overflow_checks] - #[allow(arithmetic_overflow)] pub const fn ilog10(self) -> u32 { - match self.checked_ilog10() { - Some(n) => n, - None => { - // In debug builds, trigger a panic on None. - // This should optimize completely out in release builds. - let _ = Self::MAX + 1; - - 0 - }, - } + self.checked_ilog10().expect("argument of integer logarithm must be positive") } /// Returns the logarithm of the number with respect to an arbitrary base, diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index ab17aa0c8..311c5fa5b 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -3,7 +3,6 @@ #![stable(feature = "rust1", since = "1.0.0")] use crate::ascii; -#[cfg(not(bootstrap))] use crate::error::Error; use crate::intrinsics; use crate::mem; @@ -59,7 +58,6 @@ pub use wrapping::Wrapping; #[cfg(not(no_fp_fmt_parse))] pub use dec2flt::ParseFloatError; -#[cfg(not(bootstrap))] #[cfg(not(no_fp_fmt_parse))] #[stable(feature = "rust1", since = "1.0.0")] impl Error for ParseFloatError { @@ -113,6 +111,9 @@ macro_rules! widening_impl { /// This returns the low-order (wrapping) bits and the high-order (overflow) bits /// of the result as two separate values, in that order. /// + /// If you also need to add a carry to the wide result, then you want + /// [`Self::carrying_mul`] instead. + /// /// # Examples /// /// Basic usage: @@ -148,6 +149,8 @@ macro_rules! widening_impl { /// additional amount of overflow. This allows for chaining together multiple /// multiplications to create "big integers" which represent larger values. /// + /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead. + /// /// # Examples /// /// Basic usage: @@ -167,6 +170,31 @@ macro_rules! widening_impl { )] /// ``` /// + /// This is the core operation needed for scalar multiplication when + /// implementing it for wider-than-native types. + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// fn scalar_mul_eq(little_endian_digits: &mut Vec<u16>, multiplicand: u16) { + /// let mut carry = 0; + /// for d in little_endian_digits.iter_mut() { + /// (*d, carry) = d.carrying_mul(multiplicand, carry); + /// } + /// if carry != 0 { + /// little_endian_digits.push(carry); + /// } + /// } + /// + /// let mut v = vec![10, 20]; + /// scalar_mul_eq(&mut v, 3); + /// assert_eq!(v, [30, 60]); + /// + /// assert_eq!(0x87654321_u64 * 0xFEED, 0x86D3D159E38D); + /// let mut v = vec![0x4321, 0x8765]; + /// scalar_mul_eq(&mut v, 0xFEED); + /// assert_eq!(v, [0xE38D, 0xD159, 0x86D3]); + /// ``` + /// /// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul), /// except that it gives the value of the overflow instead of just whether one happened: /// @@ -594,6 +622,38 @@ impl u8 { matches!(*self, b'0'..=b'9') } + /// Checks if the value is an ASCII octal digit: + /// U+0030 '0' ..= U+0037 '7'. 
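The `mixed_integer_ops` hunks above drop the `#![feature(...)]` lines from the doctests because these methods are stabilized in 1.66. A quick sketch of the signed-side `*_unsigned` methods, assuming a 1.66-or-newer toolchain (the values are illustrative, not from the diff):

```
fn main() {
    // Add or subtract an unsigned value to/from a signed one without casts.
    assert_eq!((-3_i32).checked_add_unsigned(5), Some(2));
    assert_eq!(i32::MAX.checked_add_unsigned(1), None);
    assert_eq!(i32::MIN.saturating_sub_unsigned(10), i32::MIN);
    assert_eq!(0_i32.wrapping_sub_unsigned(u32::MAX), 1);
}
```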
+ /// + /// # Examples + /// + /// ``` + /// #![feature(is_ascii_octdigit)] + /// + /// let uppercase_a = b'A'; + /// let a = b'a'; + /// let zero = b'0'; + /// let seven = b'7'; + /// let nine = b'9'; + /// let percent = b'%'; + /// let lf = b'\n'; + /// + /// assert!(!uppercase_a.is_ascii_octdigit()); + /// assert!(!a.is_ascii_octdigit()); + /// assert!(zero.is_ascii_octdigit()); + /// assert!(seven.is_ascii_octdigit()); + /// assert!(!nine.is_ascii_octdigit()); + /// assert!(!percent.is_ascii_octdigit()); + /// assert!(!lf.is_ascii_octdigit()); + /// ``` + #[must_use] + #[unstable(feature = "is_ascii_octdigit", issue = "101288")] + #[rustc_const_unstable(feature = "is_ascii_octdigit", issue = "101288")] + #[inline] + pub const fn is_ascii_octdigit(&self) -> bool { + matches!(*self, b'0'..=b'7') + } + /// Checks if the value is an ASCII hexadecimal digit: /// /// - U+0030 '0' ..= U+0039 '9', or @@ -948,8 +1008,8 @@ impl usize { /// assert_eq!(num.classify(), FpCategory::Normal); /// assert_eq!(inf.classify(), FpCategory::Infinite); /// assert_eq!(zero.classify(), FpCategory::Zero); -/// assert_eq!(nan.classify(), FpCategory::Nan); /// assert_eq!(sub.classify(), FpCategory::Subnormal); +/// assert_eq!(nan.classify(), FpCategory::Nan); /// ``` #[derive(Copy, Clone, PartialEq, Eq, Debug)] #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index 532a09736..6b6f3417f 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -56,7 +56,10 @@ macro_rules! nonzero_integers { pub const unsafe fn new_unchecked(n: $Int) -> Self { // SAFETY: this is guaranteed to be safe by the caller. unsafe { - core::intrinsics::assert_unsafe_precondition!((n: $Int) => n != 0); + core::intrinsics::assert_unsafe_precondition!( + concat!(stringify!($Ty), "::new_unchecked requires a non-zero argument"), + (n: $Int) => n != 0 + ); Self(n) } } @@ -721,6 +724,160 @@ macro_rules! nonzero_signed_operations { // SAFETY: absolute value of nonzero cannot yield zero values. unsafe { $Uty::new_unchecked(self.get().unsigned_abs()) } } + + /// Returns `true` if `self` is negative and `false` if the + /// number is positive. + /// + /// # Example + /// + /// ``` + /// #![feature(nonzero_negation_ops)] + /// + #[doc = concat!("# use std::num::", stringify!($Ty), ";")] + /// # fn main() { test().unwrap(); } + /// # fn test() -> Option<()> { + #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")] + #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")] + /// + /// assert!(neg_five.is_negative()); + /// assert!(!pos_five.is_negative()); + /// # Some(()) + /// # } + /// ``` + #[must_use] + #[inline] + #[unstable(feature = "nonzero_negation_ops", issue = "102443")] + pub const fn is_negative(self) -> bool { + self.get().is_negative() + } + + /// Checked negation. Computes `-self`, returning `None` if `self == i32::MIN`. 
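The `nonzero.rs` hunk above only improves the debug-assertion message emitted when `new_unchecked` is misused with zero. For context, a tiny sketch of the checked and unchecked constructors (not from the diff):

```
use std::num::NonZeroU32;

fn main() {
    // The checked constructor rejects zero at runtime...
    assert!(NonZeroU32::new(0).is_none());
    // ...while the unchecked one makes non-zero-ness the caller's obligation.
    // SAFETY: 42 is non-zero.
    let n = unsafe { NonZeroU32::new_unchecked(42) };
    assert_eq!(n.get(), 42);
}
```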
+ /// + /// # Example + /// + /// ``` + /// #![feature(nonzero_negation_ops)] + /// + #[doc = concat!("# use std::num::", stringify!($Ty), ";")] + /// # fn main() { test().unwrap(); } + /// # fn test() -> Option<()> { + #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")] + #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")] + #[doc = concat!("let min = ", stringify!($Ty), "::new(", + stringify!($Int), "::MIN)?;")] + /// + /// assert_eq!(pos_five.checked_neg(), Some(neg_five)); + /// assert_eq!(min.checked_neg(), None); + /// # Some(()) + /// # } + /// ``` + #[inline] + #[unstable(feature = "nonzero_negation_ops", issue = "102443")] + pub const fn checked_neg(self) -> Option<$Ty> { + if let Some(result) = self.get().checked_neg() { + // SAFETY: negation of nonzero cannot yield zero values. + return Some(unsafe { $Ty::new_unchecked(result) }); + } + None + } + + /// Negates self, overflowing if this is equal to the minimum value. + /// + #[doc = concat!("See [`", stringify!($Int), "::overflowing_neg`]")] + /// for documentation on overflow behaviour. + /// + /// # Example + /// + /// ``` + /// #![feature(nonzero_negation_ops)] + /// + #[doc = concat!("# use std::num::", stringify!($Ty), ";")] + /// # fn main() { test().unwrap(); } + /// # fn test() -> Option<()> { + #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")] + #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")] + #[doc = concat!("let min = ", stringify!($Ty), "::new(", + stringify!($Int), "::MIN)?;")] + /// + /// assert_eq!(pos_five.overflowing_neg(), (neg_five, false)); + /// assert_eq!(min.overflowing_neg(), (min, true)); + /// # Some(()) + /// # } + /// ``` + #[inline] + #[unstable(feature = "nonzero_negation_ops", issue = "102443")] + pub const fn overflowing_neg(self) -> ($Ty, bool) { + let (result, overflow) = self.get().overflowing_neg(); + // SAFETY: negation of nonzero cannot yield zero values. + ((unsafe { $Ty::new_unchecked(result) }), overflow) + } + + /// Saturating negation. Computes `-self`, returning `MAX` if + /// `self == i32::MIN` instead of overflowing. + /// + /// # Example + /// + /// ``` + /// #![feature(nonzero_negation_ops)] + /// + #[doc = concat!("# use std::num::", stringify!($Ty), ";")] + /// # fn main() { test().unwrap(); } + /// # fn test() -> Option<()> { + #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")] + #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")] + #[doc = concat!("let min = ", stringify!($Ty), "::new(", + stringify!($Int), "::MIN)?;")] + #[doc = concat!("let min_plus_one = ", stringify!($Ty), "::new(", + stringify!($Int), "::MIN + 1)?;")] + #[doc = concat!("let max = ", stringify!($Ty), "::new(", + stringify!($Int), "::MAX)?;")] + /// + /// assert_eq!(pos_five.saturating_neg(), neg_five); + /// assert_eq!(min.saturating_neg(), max); + /// assert_eq!(max.saturating_neg(), min_plus_one); + /// # Some(()) + /// # } + /// ``` + #[inline] + #[unstable(feature = "nonzero_negation_ops", issue = "102443")] + pub const fn saturating_neg(self) -> $Ty { + if let Some(result) = self.checked_neg() { + return result; + } + $Ty::MAX + } + + /// Wrapping (modular) negation. Computes `-self`, wrapping around at the boundary + /// of the type. + /// + #[doc = concat!("See [`", stringify!($Int), "::wrapping_neg`]")] + /// for documentation on overflow behaviour. 
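A condensed nightly sketch combining the `NonZero*` negation helpers introduced above (`is_negative`, `checked_neg`, `saturating_neg`); the per-method doctests in the hunk cover each one individually, so this only shows how they fit together:

```
#![feature(nonzero_negation_ops)]
use std::num::NonZeroI32;

fn main() {
    let five = NonZeroI32::new(5).unwrap();
    let min = NonZeroI32::new(i32::MIN).unwrap();

    assert!(!five.is_negative());
    assert_eq!(five.checked_neg(), NonZeroI32::new(-5));
    // `MIN` has no representable negation, so the checked form is `None`
    // and the saturating form clamps to the maximum value.
    assert_eq!(min.checked_neg(), None);
    assert_eq!(min.saturating_neg(), NonZeroI32::new(i32::MAX).unwrap());
}
```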
+ /// + /// # Example + /// + /// ``` + /// #![feature(nonzero_negation_ops)] + /// + #[doc = concat!("# use std::num::", stringify!($Ty), ";")] + /// # fn main() { test().unwrap(); } + /// # fn test() -> Option<()> { + #[doc = concat!("let pos_five = ", stringify!($Ty), "::new(5)?;")] + #[doc = concat!("let neg_five = ", stringify!($Ty), "::new(-5)?;")] + #[doc = concat!("let min = ", stringify!($Ty), "::new(", + stringify!($Int), "::MIN)?;")] + /// + /// assert_eq!(pos_five.wrapping_neg(), neg_five); + /// assert_eq!(min.wrapping_neg(), min); + /// # Some(()) + /// # } + /// ``` + #[inline] + #[unstable(feature = "nonzero_negation_ops", issue = "102443")] + pub const fn wrapping_neg(self) -> $Ty { + let result = self.get().wrapping_neg(); + // SAFETY: negation of nonzero cannot yield zero values. + unsafe { $Ty::new_unchecked(result) } + } } )+ } diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 46fd7f2d0..335cc5124 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -474,13 +474,12 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_signed(2), Some(3));")] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_add_signed(-2), None);")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add_signed(3), None);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -693,8 +692,7 @@ macro_rules! uint_impl { /// /// # Panics /// - /// When the number is zero, or if the base is not at least 2; - /// it panics in debug mode and the return value is 0 in release mode. + /// This function will panic if `self` is zero, or if `base` is less then 2. /// /// # Examples /// @@ -707,27 +705,16 @@ macro_rules! uint_impl { without modifying the original"] #[inline] #[track_caller] - #[rustc_inherit_overflow_checks] - #[allow(arithmetic_overflow)] pub const fn ilog(self, base: Self) -> u32 { - match self.checked_ilog(base) { - Some(n) => n, - None => { - // In debug builds, trigger a panic on None. - // This should optimize completely out in release builds. - let _ = Self::MAX + 1; - - 0 - }, - } + assert!(base >= 2, "base of integer logarithm must be at least 2"); + self.checked_ilog(base).expect("argument of integer logarithm must be positive") } /// Returns the base 2 logarithm of the number, rounded down. /// /// # Panics /// - /// When the number is zero it panics in debug mode and - /// the return value is 0 in release mode. + /// This function will panic if `self` is zero. /// /// # Examples /// @@ -740,27 +727,15 @@ macro_rules! uint_impl { without modifying the original"] #[inline] #[track_caller] - #[rustc_inherit_overflow_checks] - #[allow(arithmetic_overflow)] pub const fn ilog2(self) -> u32 { - match self.checked_ilog2() { - Some(n) => n, - None => { - // In debug builds, trigger a panic on None. - // This should optimize completely out in release builds. - let _ = Self::MAX + 1; - - 0 - }, - } + self.checked_ilog2().expect("argument of integer logarithm must be positive") } /// Returns the base 10 logarithm of the number, rounded down. 
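With the hunks above, `ilog`, `ilog2`, and `ilog10` now panic on out-of-domain input in every build profile, instead of returning 0 in release builds. These methods are still feature-gated at the version of this diff and were stabilized in a later release; assuming a toolchain where they are available, the new contract looks like this (values illustrative):

```
fn main() {
    assert_eq!(243_u32.ilog(3), 5);
    assert_eq!(1024_u32.ilog2(), 10);
    // Zero is now always a panic, in debug and release builds alike;
    // use the checked variant when the input may be out of domain.
    assert_eq!(0_u32.checked_ilog2(), None);
}
```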
/// /// # Panics /// - /// When the number is zero it panics in debug mode and the - /// return value is 0 in release mode. + /// This function will panic if `self` is zero. /// /// # Example /// @@ -773,19 +748,8 @@ macro_rules! uint_impl { without modifying the original"] #[inline] #[track_caller] - #[rustc_inherit_overflow_checks] - #[allow(arithmetic_overflow)] pub const fn ilog10(self) -> u32 { - match self.checked_ilog10() { - Some(n) => n, - None => { - // In debug builds, trigger a panic on None. - // This should optimize completely out in release builds. - let _ = Self::MAX + 1; - - 0 - }, - } + self.checked_ilog10().expect("argument of integer logarithm must be positive") } /// Returns the logarithm of the number with respect to an arbitrary base, @@ -1026,7 +990,7 @@ macro_rules! uint_impl { // squaring the base afterwards is not necessary and may cause a // needless overflow. - Some(try_opt!(acc.checked_mul(base))) + acc.checked_mul(base) } /// Saturating integer addition. Computes `self + rhs`, saturating at @@ -1057,13 +1021,12 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_signed(2), 3);")] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".saturating_add_signed(-2), 0);")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).saturating_add_signed(4), ", stringify!($SelfT), "::MAX);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -1198,13 +1161,12 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_add_signed(2), 3);")] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_add_signed(-2), ", stringify!($SelfT), "::MAX);")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).wrapping_add_signed(4), 1);")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -1494,7 +1456,6 @@ macro_rules! uint_impl { /// Basic usage /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));")] #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (0, true));")] /// ``` @@ -1508,37 +1469,42 @@ macro_rules! uint_impl { (a as Self, b) } - /// Calculates `self + rhs + carry` without the ability to overflow. + /// Calculates `self` + `rhs` + `carry` and returns a tuple containing + /// the sum and the output carry. /// - /// Performs "ternary addition" which takes in an extra bit to add, and may return an - /// additional bit of overflow. This allows for chaining together multiple additions - /// to create "big integers" which represent larger values. + /// Performs "ternary addition" of two integer operands and a carry-in + /// bit, and returns an output integer and a carry-out bit. 
This allows + /// chaining together multiple additions to create a wider addition, and + /// can be useful for bignum addition. /// #[doc = concat!("This can be thought of as a ", stringify!($BITS), "-bit \"full adder\", in the electronics sense.")] /// - /// # Examples + /// If the input carry is false, this method is equivalent to + /// [`overflowing_add`](Self::overflowing_add), and the output carry is + /// equal to the overflow flag. Note that although carry and overflow + /// flags are similar for unsigned integers, they are different for + /// signed integers. /// - /// Basic usage + /// # Examples /// /// ``` /// #![feature(bigint_helper_methods)] - #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, false), (7, false));")] - #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, true), (8, false));")] - #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), (0, true));")] - #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(0, true), (0, true));")] - #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, true), (1, true));")] - #[doc = concat!("assert_eq!(", - stringify!($SelfT), "::MAX.carrying_add(", stringify!($SelfT), "::MAX, true), ", - "(", stringify!($SelfT), "::MAX, true));" - )] - /// ``` /// - /// If `carry` is false, this method is equivalent to [`overflowing_add`](Self::overflowing_add): + #[doc = concat!("// 3 MAX (a = 3 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")] + #[doc = concat!("// + 5 7 (b = 5 × 2^", stringify!($BITS), " + 7)")] + /// // --------- + #[doc = concat!("// 9 6 (sum = 9 × 2^", stringify!($BITS), " + 6)")] /// - /// ``` - /// #![feature(bigint_helper_methods)] - #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".carrying_add(2, false), 5_", stringify!($SelfT), ".overflowing_add(2));")] - #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), ", stringify!($SelfT), "::MAX.overflowing_add(1));")] + #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (3, ", stringify!($SelfT), "::MAX);")] + #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (5, 7);")] + /// let carry0 = false; + /// + /// let (sum0, carry1) = a0.carrying_add(b0, carry0); + /// assert_eq!(carry1, true); + /// let (sum1, carry2) = a1.carrying_add(b1, carry1); + /// assert_eq!(carry2, false); + /// + /// assert_eq!((sum1, sum0), (9, 6)); /// ``` #[unstable(feature = "bigint_helper_methods", issue = "85532")] #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] @@ -1564,13 +1530,12 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// # #![feature(mixed_integer_ops)] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_signed(2), (3, false));")] #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".overflowing_add_signed(-2), (", stringify!($SelfT), "::MAX, true));")] #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MAX - 2).overflowing_add_signed(4), (1, true));")] /// ``` - #[unstable(feature = "mixed_integer_ops", issue = "87840")] - #[rustc_const_unstable(feature = "mixed_integer_ops", issue = "87840")] + #[stable(feature = "mixed_integer_ops", since = "1.66.0")] + #[rustc_const_stable(feature = "mixed_integer_ops", since = "1.66.0")] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] @@ -1590,7 +1555,6 @@ macro_rules! 
uint_impl { /// Basic usage /// /// ``` - /// #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));")] #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));")] /// ``` @@ -1604,22 +1568,35 @@ macro_rules! uint_impl { (a as Self, b) } - /// Calculates `self - rhs - borrow` without the ability to overflow. + /// Calculates `self` − `rhs` − `borrow` and returns a tuple + /// containing the difference and the output borrow. /// - /// Performs "ternary subtraction" which takes in an extra bit to subtract, and may return - /// an additional bit of overflow. This allows for chaining together multiple subtractions - /// to create "big integers" which represent larger values. + /// Performs "ternary subtraction" by subtracting both an integer + /// operand and a borrow-in bit from `self`, and returns an output + /// integer and a borrow-out bit. This allows chaining together multiple + /// subtractions to create a wider subtraction, and can be useful for + /// bignum subtraction. /// /// # Examples /// - /// Basic usage - /// /// ``` /// #![feature(bigint_helper_methods)] - #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, false), (3, false));")] - #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, true), (2, false));")] - #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, false), (", stringify!($SelfT), "::MAX, true));")] - #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, true), (", stringify!($SelfT), "::MAX - 1, true));")] + /// + #[doc = concat!("// 9 6 (a = 9 × 2^", stringify!($BITS), " + 6)")] + #[doc = concat!("// - 5 7 (b = 5 × 2^", stringify!($BITS), " + 7)")] + /// // --------- + #[doc = concat!("// 3 MAX (diff = 3 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")] + /// + #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (9, 6);")] + #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($SelfT), ") = (5, 7);")] + /// let borrow0 = false; + /// + /// let (diff0, borrow1) = a0.borrowing_sub(b0, borrow0); + /// assert_eq!(borrow1, true); + /// let (diff1, borrow2) = a1.borrowing_sub(b1, borrow1); + /// assert_eq!(borrow2, false); + /// + #[doc = concat!("assert_eq!((diff1, diff0), (3, ", stringify!($SelfT), "::MAX));")] /// ``` #[unstable(feature = "bigint_helper_methods", issue = "85532")] #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs index e367be8c1..75c52d3ec 100644 --- a/library/core/src/ops/arith.rs +++ b/library/core/src/ops/arith.rs @@ -65,38 +65,15 @@ /// ``` #[lang = "add"] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr( - bootstrap, - rustc_on_unimplemented( - on( - all(_Self = "{integer}", Rhs = "{float}"), - message = "cannot add a float to an integer", - ), - on( - all(_Self = "{float}", Rhs = "{integer}"), - message = "cannot add an integer to a float", - ), - message = "cannot add `{Rhs}` to `{Self}`", - label = "no implementation for `{Self} + {Rhs}`" - ) -)] -#[cfg_attr( - not(bootstrap), - rustc_on_unimplemented( - on( - all(_Self = "{integer}", Rhs = "{float}"), - message = "cannot add a float to an integer", - ), - on( - all(_Self = "{float}", Rhs = "{integer}"), - message = "cannot add an integer to a float", - ), - message = "cannot add `{Rhs}` to `{Self}`", - label = "no implementation for `{Self} + {Rhs}`", - 
append_const_msg, - ) +#[rustc_on_unimplemented( + on(all(_Self = "{integer}", Rhs = "{float}"), message = "cannot add a float to an integer",), + on(all(_Self = "{float}", Rhs = "{integer}"), message = "cannot add an integer to a float",), + message = "cannot add `{Rhs}` to `{Self}`", + label = "no implementation for `{Self} + {Rhs}`", + append_const_msg )] #[doc(alias = "+")] +#[const_trait] pub trait Add<Rhs = Self> { /// The resulting type after applying the `+` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -201,9 +178,11 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( message = "cannot subtract `{Rhs}` from `{Self}`", - label = "no implementation for `{Self} - {Rhs}`" + label = "no implementation for `{Self} - {Rhs}`", + append_const_msg )] #[doc(alias = "-")] +#[const_trait] pub trait Sub<Rhs = Self> { /// The resulting type after applying the `-` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -333,6 +312,7 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } label = "no implementation for `{Self} * {Rhs}`" )] #[doc(alias = "*")] +#[const_trait] pub trait Mul<Rhs = Self> { /// The resulting type after applying the `*` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -466,6 +446,7 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } label = "no implementation for `{Self} / {Rhs}`" )] #[doc(alias = "/")] +#[const_trait] pub trait Div<Rhs = Self> { /// The resulting type after applying the `/` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -568,6 +549,7 @@ div_impl_float! { f32 f64 } label = "no implementation for `{Self} % {Rhs}`" )] #[doc(alias = "%")] +#[const_trait] pub trait Rem<Rhs = Self> { /// The resulting type after applying the `%` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -682,6 +664,7 @@ rem_impl_float! { f32 f64 } #[lang = "neg"] #[stable(feature = "rust1", since = "1.0.0")] #[doc(alias = "-")] +#[const_trait] pub trait Neg { /// The resulting type after applying the `-` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -755,6 +738,7 @@ neg_impl! { isize i8 i16 i32 i64 i128 f32 f64 } )] #[doc(alias = "+")] #[doc(alias = "+=")] +#[const_trait] pub trait AddAssign<Rhs = Self> { /// Performs the `+=` operation. /// @@ -822,6 +806,7 @@ add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } )] #[doc(alias = "-")] #[doc(alias = "-=")] +#[const_trait] pub trait SubAssign<Rhs = Self> { /// Performs the `-=` operation. /// @@ -880,6 +865,7 @@ sub_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } )] #[doc(alias = "*")] #[doc(alias = "*=")] +#[const_trait] pub trait MulAssign<Rhs = Self> { /// Performs the `*=` operation. /// @@ -938,6 +924,7 @@ mul_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } )] #[doc(alias = "/")] #[doc(alias = "/=")] +#[const_trait] pub trait DivAssign<Rhs = Self> { /// Performs the `/=` operation. /// @@ -999,6 +986,7 @@ div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } )] #[doc(alias = "%")] #[doc(alias = "%=")] +#[const_trait] pub trait RemAssign<Rhs = Self> { /// Performs the `%=` operation. 
/// diff --git a/library/core/src/ops/bit.rs b/library/core/src/ops/bit.rs index 7c664226f..327009801 100644 --- a/library/core/src/ops/bit.rs +++ b/library/core/src/ops/bit.rs @@ -31,6 +31,7 @@ #[lang = "not"] #[stable(feature = "rust1", since = "1.0.0")] #[doc(alias = "!")] +#[const_trait] pub trait Not { /// The resulting type after applying the `!` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -143,6 +144,7 @@ impl const Not for ! { message = "no implementation for `{Self} & {Rhs}`", label = "no implementation for `{Self} & {Rhs}`" )] +#[const_trait] pub trait BitAnd<Rhs = Self> { /// The resulting type after applying the `&` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -244,6 +246,7 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } message = "no implementation for `{Self} | {Rhs}`", label = "no implementation for `{Self} | {Rhs}`" )] +#[const_trait] pub trait BitOr<Rhs = Self> { /// The resulting type after applying the `|` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -345,6 +348,7 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } message = "no implementation for `{Self} ^ {Rhs}`", label = "no implementation for `{Self} ^ {Rhs}`" )] +#[const_trait] pub trait BitXor<Rhs = Self> { /// The resulting type after applying the `^` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -445,6 +449,7 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } message = "no implementation for `{Self} << {Rhs}`", label = "no implementation for `{Self} << {Rhs}`" )] +#[const_trait] pub trait Shl<Rhs = Self> { /// The resulting type after applying the `<<` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -564,6 +569,7 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 } message = "no implementation for `{Self} >> {Rhs}`", label = "no implementation for `{Self} >> {Rhs}`" )] +#[const_trait] pub trait Shr<Rhs = Self> { /// The resulting type after applying the `>>` operator. #[stable(feature = "rust1", since = "1.0.0")] @@ -692,6 +698,7 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize } message = "no implementation for `{Self} &= {Rhs}`", label = "no implementation for `{Self} &= {Rhs}`" )] +#[const_trait] pub trait BitAndAssign<Rhs = Self> { /// Performs the `&=` operation. /// @@ -764,6 +771,7 @@ bitand_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } message = "no implementation for `{Self} |= {Rhs}`", label = "no implementation for `{Self} |= {Rhs}`" )] +#[const_trait] pub trait BitOrAssign<Rhs = Self> { /// Performs the `|=` operation. /// @@ -836,6 +844,7 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } message = "no implementation for `{Self} ^= {Rhs}`", label = "no implementation for `{Self} ^= {Rhs}`" )] +#[const_trait] pub trait BitXorAssign<Rhs = Self> { /// Performs the `^=` operation. /// @@ -906,6 +915,7 @@ bitxor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } message = "no implementation for `{Self} <<= {Rhs}`", label = "no implementation for `{Self} <<= {Rhs}`" )] +#[const_trait] pub trait ShlAssign<Rhs = Self> { /// Performs the `<<=` operation. /// @@ -989,6 +999,7 @@ shl_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize } message = "no implementation for `{Self} >>= {Rhs}`", label = "no implementation for `{Self} >>= {Rhs}`" )] +#[const_trait] pub trait ShrAssign<Rhs = Self> { /// Performs the `>>=` operation. 
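The `#[const_trait]` attributes added throughout `ops/arith.rs` and `ops/bit.rs` above are what allow nightly code to write `impl const` for these operator traits. A rough sketch of the intent under `const_trait_impl` follows; the exact nightly syntax and gates were still in flux at this point, so treat it as illustrative rather than as the diff's own example, and the `Meters` type is invented for the sketch:

```
#![feature(const_trait_impl)]
use std::ops::Add;

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Meters(u32);

// Possible only because `Add` is now marked `#[const_trait]`.
impl const Add for Meters {
    type Output = Meters;
    fn add(self, rhs: Meters) -> Meters {
        Meters(self.0 + rhs.0)
    }
}

// The `+` below is evaluated at compile time through the impl above.
const TOTAL: Meters = Meters(2) + Meters(3);

fn main() {
    assert_eq!(TOTAL, Meters(5));
}
```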
/// diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs index b1f5559dc..72ebe653c 100644 --- a/library/core/src/ops/control_flow.rs +++ b/library/core/src/ops/control_flow.rs @@ -95,7 +95,8 @@ pub enum ControlFlow<B, C = ()> { } #[unstable(feature = "try_trait_v2", issue = "84277")] -impl<B, C> ops::Try for ControlFlow<B, C> { +#[rustc_const_unstable(feature = "const_convert", issue = "88674")] +impl<B, C> const ops::Try for ControlFlow<B, C> { type Output = C; type Residual = ControlFlow<B, convert::Infallible>; @@ -114,7 +115,8 @@ impl<B, C> ops::Try for ControlFlow<B, C> { } #[unstable(feature = "try_trait_v2", issue = "84277")] -impl<B, C> ops::FromResidual for ControlFlow<B, C> { +#[rustc_const_unstable(feature = "const_convert", issue = "88674")] +impl<B, C> const ops::FromResidual for ControlFlow<B, C> { #[inline] fn from_residual(residual: ControlFlow<B, convert::Infallible>) -> Self { match residual { @@ -124,7 +126,8 @@ impl<B, C> ops::FromResidual for ControlFlow<B, C> { } #[unstable(feature = "try_trait_v2_residual", issue = "91285")] -impl<B, C> ops::Residual<C> for ControlFlow<B, convert::Infallible> { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl<B, C> const ops::Residual<C> for ControlFlow<B, convert::Infallible> { type TryType = ControlFlow<B, C>; } diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs index d68932402..4f4c99c4a 100644 --- a/library/core/src/ops/deref.rs +++ b/library/core/src/ops/deref.rs @@ -61,6 +61,7 @@ #[doc(alias = "&*")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_diagnostic_item = "Deref"] +#[cfg_attr(not(bootstrap), const_trait)] pub trait Deref { /// The resulting type after dereferencing. #[stable(feature = "rust1", since = "1.0.0")] @@ -169,6 +170,7 @@ impl<T: ?Sized> const Deref for &mut T { #[lang = "deref_mut"] #[doc(alias = "*")] #[stable(feature = "rust1", since = "1.0.0")] +#[const_trait] pub trait DerefMut: Deref { /// Mutably dereferences the value. #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/core/src/ops/drop.rs b/library/core/src/ops/drop.rs index de9ddb852..a2c3d978c 100644 --- a/library/core/src/ops/drop.rs +++ b/library/core/src/ops/drop.rs @@ -134,6 +134,7 @@ /// these types cannot have destructors. #[lang = "drop"] #[stable(feature = "rust1", since = "1.0.0")] +#[const_trait] pub trait Drop { /// Executes the destructor for this type. /// diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs index 8fdf22cf6..2e0a752c8 100644 --- a/library/core/src/ops/function.rs +++ b/library/core/src/ops/function.rs @@ -71,6 +71,7 @@ )] #[fundamental] // so that regex can rely that `&str: !FnMut` #[must_use = "closures are lazy and do nothing unless called"] +#[cfg_attr(not(bootstrap), const_trait)] pub trait Fn<Args>: FnMut<Args> { /// Performs the call operation. #[unstable(feature = "fn_traits", issue = "29625")] @@ -158,6 +159,7 @@ pub trait Fn<Args>: FnMut<Args> { )] #[fundamental] // so that regex can rely that `&str: !FnMut` #[must_use = "closures are lazy and do nothing unless called"] +#[cfg_attr(not(bootstrap), const_trait)] pub trait FnMut<Args>: FnOnce<Args> { /// Performs the call operation. 
#[unstable(feature = "fn_traits", issue = "29625")] @@ -237,6 +239,7 @@ pub trait FnMut<Args>: FnOnce<Args> { )] #[fundamental] // so that regex can rely that `&str: !FnMut` #[must_use = "closures are lazy and do nothing unless called"] +#[cfg_attr(not(bootstrap), const_trait)] pub trait FnOnce<Args> { /// The returned type after the call operator is used. #[lang = "fn_once_output"] diff --git a/library/core/src/ops/generator.rs b/library/core/src/ops/generator.rs index 3ebd6f8cd..fee4beb1e 100644 --- a/library/core/src/ops/generator.rs +++ b/library/core/src/ops/generator.rs @@ -83,7 +83,6 @@ pub trait Generator<R = ()> { /// `return` statement or implicitly as the last expression of a generator /// literal. For example futures would use this as `Result<T, E>` as it /// represents a completed future. - #[cfg_attr(bootstrap, lang = "generator_return")] type Return; /// Resumes the execution of this generator. diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs index e2e569cb7..dd4e3ac1c 100644 --- a/library/core/src/ops/index.rs +++ b/library/core/src/ops/index.rs @@ -55,6 +55,7 @@ #[doc(alias = "]")] #[doc(alias = "[")] #[doc(alias = "[]")] +#[cfg_attr(not(bootstrap), const_trait)] pub trait Index<Idx: ?Sized> { /// The returned type after indexing. #[stable(feature = "rust1", since = "1.0.0")] @@ -163,6 +164,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind #[doc(alias = "[")] #[doc(alias = "]")] #[doc(alias = "[]")] +#[cfg_attr(not(bootstrap), const_trait)] pub trait IndexMut<Idx: ?Sized>: Index<Idx> { /// Performs the mutable indexing (`container[index]`) operation. /// diff --git a/library/core/src/ops/index_range.rs b/library/core/src/ops/index_range.rs new file mode 100644 index 000000000..3e06776d2 --- /dev/null +++ b/library/core/src/ops/index_range.rs @@ -0,0 +1,171 @@ +use crate::intrinsics::{assert_unsafe_precondition, unchecked_add, unchecked_sub}; +use crate::iter::{FusedIterator, TrustedLen}; + +/// Like a `Range<usize>`, but with a safety invariant that `start <= end`. +/// +/// This means that `end - start` cannot overflow, allowing some μoptimizations. +/// +/// (Normal `Range` code needs to handle degenerate ranges like `10..0`, +/// which takes extra checks compared to only handling the canonical form.) +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct IndexRange { + start: usize, + end: usize, +} + +impl IndexRange { + /// # Safety + /// - `start <= end` + #[inline] + pub const unsafe fn new_unchecked(start: usize, end: usize) -> Self { + // SAFETY: comparisons on usize are pure + unsafe { + assert_unsafe_precondition!( + "IndexRange::new_unchecked requires `start <= end`", + (start: usize, end: usize) => start <= end + ) + }; + IndexRange { start, end } + } + + #[inline] + pub const fn zero_to(end: usize) -> Self { + IndexRange { start: 0, end } + } + + #[inline] + pub const fn start(&self) -> usize { + self.start + } + + #[inline] + pub const fn end(&self) -> usize { + self.end + } + + #[inline] + pub const fn len(&self) -> usize { + // SAFETY: By invariant, this cannot wrap + unsafe { unchecked_sub(self.end, self.start) } + } + + /// # Safety + /// - Can only be called when `start < end`, aka when `len > 0`. 
+ #[inline] + unsafe fn next_unchecked(&mut self) -> usize { + debug_assert!(self.start < self.end); + + let value = self.start; + // SAFETY: The range isn't empty, so this cannot overflow + self.start = unsafe { unchecked_add(value, 1) }; + value + } + + /// # Safety + /// - Can only be called when `start < end`, aka when `len > 0`. + #[inline] + unsafe fn next_back_unchecked(&mut self) -> usize { + debug_assert!(self.start < self.end); + + // SAFETY: The range isn't empty, so this cannot overflow + let value = unsafe { unchecked_sub(self.end, 1) }; + self.end = value; + value + } + + /// Removes the first `n` items from this range, returning them as an `IndexRange`. + /// If there are fewer than `n`, then the whole range is returned and + /// `self` is left empty. + /// + /// This is designed to help implement `Iterator::advance_by`. + #[inline] + pub fn take_prefix(&mut self, n: usize) -> Self { + let mid = if n <= self.len() { + // SAFETY: We just checked that this will be between start and end, + // and thus the addition cannot overflow. + unsafe { unchecked_add(self.start, n) } + } else { + self.end + }; + let prefix = Self { start: self.start, end: mid }; + self.start = mid; + prefix + } + + /// Removes the last `n` items from this range, returning them as an `IndexRange`. + /// If there are fewer than `n`, then the whole range is returned and + /// `self` is left empty. + /// + /// This is designed to help implement `Iterator::advance_back_by`. + #[inline] + pub fn take_suffix(&mut self, n: usize) -> Self { + let mid = if n <= self.len() { + // SAFETY: We just checked that this will be between start and end, + // and thus the addition cannot overflow. + unsafe { unchecked_sub(self.end, n) } + } else { + self.start + }; + let suffix = Self { start: mid, end: self.end }; + self.end = mid; + suffix + } +} + +impl Iterator for IndexRange { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option<usize> { + if self.len() > 0 { + // SAFETY: We just checked that the range is non-empty + unsafe { Some(self.next_unchecked()) } + } else { + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + let len = self.len(); + (len, Some(len)) + } + + #[inline] + fn advance_by(&mut self, n: usize) -> Result<(), usize> { + let original_len = self.len(); + self.take_prefix(n); + if n > original_len { Err(original_len) } else { Ok(()) } + } +} + +impl DoubleEndedIterator for IndexRange { + #[inline] + fn next_back(&mut self) -> Option<usize> { + if self.len() > 0 { + // SAFETY: We just checked that the range is non-empty + unsafe { Some(self.next_back_unchecked()) } + } else { + None + } + } + + #[inline] + fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { + let original_len = self.len(); + self.take_suffix(n); + if n > original_len { Err(original_len) } else { Ok(()) } + } +} + +impl ExactSizeIterator for IndexRange { + #[inline] + fn len(&self) -> usize { + self.len() + } +} + +// SAFETY: Because we only deal in `usize`, our `len` is always perfect. 
+unsafe impl TrustedLen for IndexRange {} + +impl FusedIterator for IndexRange {} diff --git a/library/core/src/ops/mod.rs b/library/core/src/ops/mod.rs index 31c1a1d09..a5e5b13b3 100644 --- a/library/core/src/ops/mod.rs +++ b/library/core/src/ops/mod.rs @@ -146,6 +146,7 @@ mod drop; mod function; mod generator; mod index; +mod index_range; mod range; mod try_trait; mod unsize; @@ -178,6 +179,8 @@ pub use self::index::{Index, IndexMut}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::range::{Range, RangeFrom, RangeFull, RangeTo}; +pub(crate) use self::index_range::IndexRange; + #[stable(feature = "inclusive_range", since = "1.26.0")] pub use self::range::{Bound, RangeBounds, RangeInclusive, RangeToInclusive}; diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs index 10f041344..84a690468 100644 --- a/library/core/src/ops/try_trait.rs +++ b/library/core/src/ops/try_trait.rs @@ -128,7 +128,8 @@ use crate::ops::ControlFlow; )] #[doc(alias = "?")] #[lang = "Try"] -pub trait Try: FromResidual { +#[const_trait] +pub trait Try: ~const FromResidual { /// The type of the value produced by `?` when *not* short-circuiting. #[unstable(feature = "try_trait_v2", issue = "84277")] type Output; @@ -222,7 +223,7 @@ pub trait Try: FromResidual { /// Every `Try` type needs to be recreatable from its own associated /// `Residual` type, but can also have additional `FromResidual` implementations /// to support interconversion with other `Try` types. -#[cfg_attr(not(bootstrap), rustc_on_unimplemented( +#[rustc_on_unimplemented( on( all( from_desugaring = "QuestionMark", @@ -301,89 +302,10 @@ pub trait Try: FromResidual { label = "cannot use the `?` operator in {ItemContext} that returns `{Self}`", parent_label = "this function should return `Result` or `Option` to accept `?`" ), -))] -#[cfg_attr(bootstrap, rustc_on_unimplemented( - on( - all( - from_desugaring = "QuestionMark", - _Self = "std::result::Result<T, E>", - R = "std::option::Option<std::convert::Infallible>" - ), - message = "the `?` operator can only be used on `Result`s, not `Option`s, \ - in {ItemContext} that returns `Result`", - label = "use `.ok_or(...)?` to provide an error compatible with `{Self}`", - enclosing_scope = "this function returns a `Result`" - ), - on( - all( - from_desugaring = "QuestionMark", - _Self = "std::result::Result<T, E>", - ), - // There's a special error message in the trait selection code for - // `From` in `?`, so this is not shown for result-in-result errors, - // and thus it can be phrased more strongly than `ControlFlow`'s. - message = "the `?` operator can only be used on `Result`s \ - in {ItemContext} that returns `Result`", - label = "this `?` produces `{R}`, which is incompatible with `{Self}`", - enclosing_scope = "this function returns a `Result`" - ), - on( - all( - from_desugaring = "QuestionMark", - _Self = "std::option::Option<T>", - R = "std::result::Result<T, E>", - ), - message = "the `?` operator can only be used on `Option`s, not `Result`s, \ - in {ItemContext} that returns `Option`", - label = "use `.ok()?` if you want to discard the `{R}` error information", - enclosing_scope = "this function returns an `Option`" - ), - on( - all( - from_desugaring = "QuestionMark", - _Self = "std::option::Option<T>", - ), - // `Option`-in-`Option` always works, as there's only one possible - // residual, so this can also be phrased strongly. 
- message = "the `?` operator can only be used on `Option`s \ - in {ItemContext} that returns `Option`", - label = "this `?` produces `{R}`, which is incompatible with `{Self}`", - enclosing_scope = "this function returns an `Option`" - ), - on( - all( - from_desugaring = "QuestionMark", - _Self = "std::ops::ControlFlow<B, C>", - R = "std::ops::ControlFlow<B, C>", - ), - message = "the `?` operator in {ItemContext} that returns `ControlFlow<B, _>` \ - can only be used on other `ControlFlow<B, _>`s (with the same Break type)", - label = "this `?` produces `{R}`, which is incompatible with `{Self}`", - enclosing_scope = "this function returns a `ControlFlow`", - note = "unlike `Result`, there's no `From`-conversion performed for `ControlFlow`" - ), - on( - all( - from_desugaring = "QuestionMark", - _Self = "std::ops::ControlFlow<B, C>", - // `R` is not a `ControlFlow`, as that case was matched previously - ), - message = "the `?` operator can only be used on `ControlFlow`s \ - in {ItemContext} that returns `ControlFlow`", - label = "this `?` produces `{R}`, which is incompatible with `{Self}`", - enclosing_scope = "this function returns a `ControlFlow`", - ), - on( - all(from_desugaring = "QuestionMark"), - message = "the `?` operator can only be used in {ItemContext} \ - that returns `Result` or `Option` \ - (or another type that implements `{FromResidual}`)", - label = "cannot use the `?` operator in {ItemContext} that returns `{Self}`", - enclosing_scope = "this function should return `Result` or `Option` to accept `?`" - ), -))] +)] #[rustc_diagnostic_item = "FromResidual"] #[unstable(feature = "try_trait_v2", issue = "84277")] +#[const_trait] pub trait FromResidual<R = <Self as Try>::Residual> { /// Constructs the type from a compatible `Residual` type. /// @@ -436,10 +358,11 @@ where /// and in the other direction, /// `<Result<Infallible, E> as Residual<T>>::TryType = Result<T, E>`. #[unstable(feature = "try_trait_v2_residual", issue = "91285")] +#[const_trait] pub trait Residual<O> { /// The "return" type of this meta-function. #[unstable(feature = "try_trait_v2_residual", issue = "91285")] - type TryType: Try<Output = O, Residual = Self>; + type TryType: ~const Try<Output = O, Residual = Self>; } #[unstable(feature = "pub_crate_should_not_need_unstable_attr", issue = "none")] @@ -456,16 +379,19 @@ pub(crate) type ChangeOutputType<T, V> = <<T as Try>::Residual as Residual<V>>:: pub(crate) struct NeverShortCircuit<T>(pub T); impl<T> NeverShortCircuit<T> { - /// Wrap a binary `FnMut` to return its result wrapped in a `NeverShortCircuit`. + /// Implementation for building `ConstFnMutClosure` for wrapping the output of a ~const FnMut in a `NeverShortCircuit`. 
#[inline] - pub fn wrap_mut_2<A, B>(mut f: impl FnMut(A, B) -> T) -> impl FnMut(A, B) -> Self { - move |a, b| NeverShortCircuit(f(a, b)) + pub const fn wrap_mut_2_imp<A, B, F: ~const FnMut(A, B) -> T>( + f: &mut F, + (a, b): (A, B), + ) -> NeverShortCircuit<T> { + NeverShortCircuit(f(a, b)) } } pub(crate) enum NeverShortCircuitResidual {} -impl<T> Try for NeverShortCircuit<T> { +impl<T> const Try for NeverShortCircuit<T> { type Output = T; type Residual = NeverShortCircuitResidual; @@ -480,14 +406,14 @@ impl<T> Try for NeverShortCircuit<T> { } } -impl<T> FromResidual for NeverShortCircuit<T> { +impl<T> const FromResidual for NeverShortCircuit<T> { #[inline] fn from_residual(never: NeverShortCircuitResidual) -> Self { match never {} } } -impl<T> Residual<T> for NeverShortCircuitResidual { +impl<T> const Residual<T> for NeverShortCircuitResidual { type TryType = NeverShortCircuit<T>; } diff --git a/library/core/src/option.rs b/library/core/src/option.rs index 934175863..f284b4359 100644 --- a/library/core/src/option.rs +++ b/library/core/src/option.rs @@ -559,22 +559,25 @@ impl<T> Option<T> { /// # Examples /// /// ``` - /// #![feature(is_some_with)] + /// #![feature(is_some_and)] /// /// let x: Option<u32> = Some(2); - /// assert_eq!(x.is_some_and(|&x| x > 1), true); + /// assert_eq!(x.is_some_and(|x| x > 1), true); /// /// let x: Option<u32> = Some(0); - /// assert_eq!(x.is_some_and(|&x| x > 1), false); + /// assert_eq!(x.is_some_and(|x| x > 1), false); /// /// let x: Option<u32> = None; - /// assert_eq!(x.is_some_and(|&x| x > 1), false); + /// assert_eq!(x.is_some_and(|x| x > 1), false); /// ``` #[must_use] #[inline] - #[unstable(feature = "is_some_with", issue = "93050")] - pub fn is_some_and(&self, f: impl FnOnce(&T) -> bool) -> bool { - matches!(self, Some(x) if f(x)) + #[unstable(feature = "is_some_and", issue = "93050")] + pub fn is_some_and(self, f: impl FnOnce(T) -> bool) -> bool { + match self { + None => false, + Some(x) => f(x), + } } /// Returns `true` if the option is a [`None`] value. @@ -834,19 +837,12 @@ impl<T> Option<T> { /// /// # Examples /// - /// Converts a string to an integer, turning poorly-formed strings - /// into 0 (the default value for integers). [`parse`] converts - /// a string to any other type that implements [`FromStr`], returning - /// [`None`] on error. 
- /// /// ``` - /// let good_year_from_input = "1909"; - /// let bad_year_from_input = "190blarg"; - /// let good_year = good_year_from_input.parse().ok().unwrap_or_default(); - /// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default(); + /// let x: Option<u32> = None; + /// let y: Option<u32> = Some(12); /// - /// assert_eq!(1909, good_year); - /// assert_eq!(0, bad_year); + /// assert_eq!(x.unwrap_or_default(), 0); + /// assert_eq!(y.unwrap_or_default(), 12); /// ``` /// /// [default value]: Default::default @@ -1717,8 +1713,6 @@ impl<T, U> Option<(T, U)> { /// # Examples /// /// ``` - /// #![feature(unzip_option)] - /// /// let x = Some((1, "hi")); /// let y = None::<(u8, u32)>; /// @@ -1726,8 +1720,13 @@ impl<T, U> Option<(T, U)> { /// assert_eq!(y.unzip(), (None, None)); /// ``` #[inline] - #[unstable(feature = "unzip_option", issue = "87800", reason = "recently added")] - pub const fn unzip(self) -> (Option<T>, Option<U>) { + #[stable(feature = "unzip_option", since = "1.66.0")] + #[rustc_const_unstable(feature = "const_option", issue = "67441")] + pub const fn unzip(self) -> (Option<T>, Option<U>) + where + T: ~const Destruct, + U: ~const Destruct, + { match self { Some((a, b)) => (Some(a), Some(b)), None => (None, None), @@ -2321,7 +2320,8 @@ impl<T> ops::FromResidual<ops::Yeet<()>> for Option<T> { } #[unstable(feature = "try_trait_v2_residual", issue = "91285")] -impl<T> ops::Residual<T> for Option<convert::Infallible> { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl<T> const ops::Residual<T> for Option<convert::Infallible> { type TryType = Option<T>; } diff --git a/library/core/src/panic/location.rs b/library/core/src/panic/location.rs index 8eefd9ff2..6dcf23dde 100644 --- a/library/core/src/panic/location.rs +++ b/library/core/src/panic/location.rs @@ -123,8 +123,9 @@ impl<'a> Location<'a> { /// ``` #[must_use] #[stable(feature = "panic_hooks", since = "1.10.0")] + #[rustc_const_unstable(feature = "const_location_fields", issue = "102911")] #[inline] - pub fn file(&self) -> &str { + pub const fn file(&self) -> &str { self.file } @@ -147,8 +148,9 @@ impl<'a> Location<'a> { /// ``` #[must_use] #[stable(feature = "panic_hooks", since = "1.10.0")] + #[rustc_const_unstable(feature = "const_location_fields", issue = "102911")] #[inline] - pub fn line(&self) -> u32 { + pub const fn line(&self) -> u32 { self.line } @@ -171,8 +173,9 @@ impl<'a> Location<'a> { /// ``` #[must_use] #[stable(feature = "panic_col", since = "1.25.0")] + #[rustc_const_unstable(feature = "const_location_fields", issue = "102911")] #[inline] - pub fn column(&self) -> u32 { + pub const fn column(&self) -> u32 { self.col } } diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs index d4afe0f53..a9de7c94e 100644 --- a/library/core/src/panicking.rs +++ b/library/core/src/panicking.rs @@ -29,6 +29,73 @@ use crate::fmt; use crate::panic::{Location, PanicInfo}; +// First we define the two main entry points that all panics go through. +// In the end both are just convenience wrappers around `panic_impl`. + +/// The entry point for panicking with a formatted message. +/// +/// This is designed to reduce the amount of code required at the call +/// site as much as possible (so that `panic!()` has as low an impact +/// on (e.g.) the inlining of other functions as possible), by moving +/// the actual formatting into this shared place. +#[cold] +// If panic_immediate_abort, inline the abort call, +// otherwise avoid inlining because of it is cold path. 
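Editor's note: the `Location` accessors constified in the panic/location.rs hunk above are the ones normally reached through `#[track_caller]`; the const-ness is still gated on `const_location_fields`, but plain runtime use is stable:

use std::panic::Location;

#[track_caller]
fn where_am_i() -> &'static Location<'static> {
    Location::caller()
}

fn main() {
    let loc = where_am_i();
    // `file()`, `line()` and `column()` are the accessors made `const fn` above.
    println!("called from {}:{}:{}", loc.file(), loc.line(), loc.column());
}
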
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))] +#[cfg_attr(feature = "panic_immediate_abort", inline)] +#[track_caller] +#[lang = "panic_fmt"] // needed for const-evaluated panics +#[rustc_do_not_const_check] // hooked by const-eval +#[rustc_const_unstable(feature = "core_panic", issue = "none")] +pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! { + if cfg!(feature = "panic_immediate_abort") { + super::intrinsics::abort() + } + + // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call + // that gets resolved to the `#[panic_handler]` function. + extern "Rust" { + #[lang = "panic_impl"] + fn panic_impl(pi: &PanicInfo<'_>) -> !; + } + + let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), true); + + // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call. + unsafe { panic_impl(&pi) } +} + +/// Like panic_fmt, but without unwinding and track_caller to reduce the impact on codesize. +/// Also just works on `str`, as a `fmt::Arguments` needs more space to be passed. +#[cold] +#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))] +#[cfg_attr(feature = "panic_immediate_abort", inline)] +#[cfg_attr(not(bootstrap), rustc_nounwind)] +#[cfg_attr(bootstrap, rustc_allocator_nounwind)] +pub fn panic_str_nounwind(msg: &'static str) -> ! { + if cfg!(feature = "panic_immediate_abort") { + super::intrinsics::abort() + } + + // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call + // that gets resolved to the `#[panic_handler]` function. + extern "Rust" { + #[lang = "panic_impl"] + fn panic_impl(pi: &PanicInfo<'_>) -> !; + } + + // PanicInfo with the `can_unwind` flag set to false forces an abort. + let pieces = [msg]; + let fmt = fmt::Arguments::new_v1(&pieces, &[]); + let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false); + + // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call. + unsafe { panic_impl(&pi) } +} + +// Next we define a bunch of higher-level wrappers that all bottom out in the two core functions +// above. + /// The underlying implementation of libcore's `panic!` macro when no formatting is used. #[cold] // never inline unless panic_immediate_abort to avoid code @@ -84,62 +151,17 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { panic!("index out of bounds: the len is {len} but the index is {index}") } -// This function is called directly by the codegen backend, and must not have -// any extra arguments (including those synthesized by track_caller). +/// Panic because we cannot unwind out of a function. +/// +/// This function is called directly by the codegen backend, and must not have +/// any extra arguments (including those synthesized by track_caller). #[cold] #[inline(never)] #[lang = "panic_no_unwind"] // needed by codegen for panic in nounwind function +#[cfg_attr(not(bootstrap), rustc_nounwind)] +#[cfg_attr(bootstrap, rustc_allocator_nounwind)] fn panic_no_unwind() -> ! { - if cfg!(feature = "panic_immediate_abort") { - super::intrinsics::abort() - } - - // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call - // that gets resolved to the `#[panic_handler]` function. - extern "Rust" { - #[lang = "panic_impl"] - fn panic_impl(pi: &PanicInfo<'_>) -> !; - } - - // PanicInfo with the `can_unwind` flag set to false forces an abort. 
- let fmt = format_args!("panic in a function that cannot unwind"); - let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false); - - // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call. - unsafe { panic_impl(&pi) } -} - -/// The entry point for panicking with a formatted message. -/// -/// This is designed to reduce the amount of code required at the call -/// site as much as possible (so that `panic!()` has as low an impact -/// on (e.g.) the inlining of other functions as possible), by moving -/// the actual formatting into this shared place. -#[cold] -// If panic_immediate_abort, inline the abort call, -// otherwise avoid inlining because of it is cold path. -#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))] -#[cfg_attr(feature = "panic_immediate_abort", inline)] -#[track_caller] -#[lang = "panic_fmt"] // needed for const-evaluated panics -#[rustc_do_not_const_check] // hooked by const-eval -#[rustc_const_unstable(feature = "core_panic", issue = "none")] -pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! { - if cfg!(feature = "panic_immediate_abort") { - super::intrinsics::abort() - } - - // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call - // that gets resolved to the `#[panic_handler]` function. - extern "Rust" { - #[lang = "panic_impl"] - fn panic_impl(pi: &PanicInfo<'_>) -> !; - } - - let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), true); - - // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call. - unsafe { panic_impl(&pi) } + panic_str_nounwind("panic in a function that cannot unwind") } /// This function is used instead of panic_fmt in const eval. diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs index 242f44ade..331714a99 100644 --- a/library/core/src/primitive_docs.rs +++ b/library/core/src/primitive_docs.rs @@ -611,7 +611,19 @@ mod prim_pointer {} /// /// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on /// an array. Indeed, this provides most of the API for working with arrays. -/// Slices have a dynamic size and do not coerce to arrays. +/// +/// Slices have a dynamic size and do not coerce to arrays. Instead, use +/// `slice.try_into().unwrap()` or `<ArrayType>::try_from(slice).unwrap()`. +/// +/// Array's `try_from(slice)` implementations (and the corresponding `slice.try_into()` +/// array implementations) succeed if the input slice length is the same as the result +/// array length. They optimize especially well when the optimizer can easily determine +/// the slice length, e.g. `<[u8; 4]>::try_from(&slice[4..8]).unwrap()`. Array implements +/// [TryFrom](crate::convert::TryFrom) returning: +/// +/// - `[T; N]` copies from the slice's elements +/// - `&[T; N]` references the original slice's elements +/// - `&mut [T; N]` references the original slice's elements /// /// You can move elements out of an array with a [slice pattern]. If you want /// one element, see [`mem::replace`]. 
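Editor's note: the array documentation added above lists three `TryFrom` slice conversions; besides the owned `[T; N]` copy shown in the doc example that follows, the borrowed forms avoid copying and are often what you want. A small stable example:

use std::convert::TryFrom;

fn main() {
    let data = [10u8, 20, 30, 40, 50];
    // Borrow a window of the slice as a fixed-size array reference.
    let window: &[u8; 3] = <&[u8; 3]>::try_from(&data[1..4]).unwrap();
    assert_eq!(window, &[20, 30, 40]);
    // A length mismatch is reported as an `Err` rather than a panic.
    assert!(<&[u8; 4]>::try_from(&data[1..4]).is_err());
}
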
@@ -640,6 +652,15 @@ mod prim_pointer {} /// for x in &array { } /// ``` /// +/// You can use `<ArrayType>::try_from(slice)` or `slice.try_into()` to get an array from +/// a slice: +/// +/// ``` +/// let bytes: [u8; 3] = [1, 0, 2]; +/// assert_eq!(1, u16::from_le_bytes(<[u8; 2]>::try_from(&bytes[0..2]).unwrap())); +/// assert_eq!(512, u16::from_le_bytes(bytes[1..3].try_into().unwrap())); +/// ``` +/// /// You can use a [slice pattern] to move elements out of an array: /// /// ``` diff --git a/library/core/src/mem/valid_align.rs b/library/core/src/ptr/alignment.rs index 32b2afb72..1390e09dd 100644 --- a/library/core/src/mem/valid_align.rs +++ b/library/core/src/ptr/alignment.rs @@ -1,4 +1,4 @@ -use crate::convert::TryFrom; +use crate::convert::{TryFrom, TryInto}; use crate::intrinsics::assert_unsafe_precondition; use crate::num::NonZeroUsize; use crate::{cmp, fmt, hash, mem, num}; @@ -8,16 +8,62 @@ use crate::{cmp, fmt, hash, mem, num}; /// /// Note that particularly large alignments, while representable in this type, /// are likely not to be supported by actual allocators and linkers. -#[derive(Copy, Clone)] +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +#[derive(Copy, Clone, Eq, PartialEq)] #[repr(transparent)] -pub(crate) struct ValidAlign(ValidAlignEnum); +pub struct Alignment(AlignmentEnum); -// ValidAlign is `repr(usize)`, but via extra steps. -const _: () = assert!(mem::size_of::<ValidAlign>() == mem::size_of::<usize>()); -const _: () = assert!(mem::align_of::<ValidAlign>() == mem::align_of::<usize>()); +// Alignment is `repr(usize)`, but via extra steps. +const _: () = assert!(mem::size_of::<Alignment>() == mem::size_of::<usize>()); +const _: () = assert!(mem::align_of::<Alignment>() == mem::align_of::<usize>()); -impl ValidAlign { - /// Creates a `ValidAlign` from a power-of-two `usize`. +fn _alignment_can_be_structurally_matched(a: Alignment) -> bool { + matches!(a, Alignment::MIN) +} + +impl Alignment { + /// The smallest possible alignment, 1. + /// + /// All addresses are always aligned at least this much. + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_alignment_type)] + /// use std::ptr::Alignment; + /// + /// assert_eq!(Alignment::MIN.as_usize(), 1); + /// ``` + #[unstable(feature = "ptr_alignment_type", issue = "102070")] + pub const MIN: Self = Self(AlignmentEnum::_Align1Shl0); + + /// Returns the alignment for a type. + /// + /// This provides the same numerical value as [`mem::align_of`], + /// but in an `Alignment` instead of a `usize. + #[unstable(feature = "ptr_alignment_type", issue = "102070")] + #[inline] + pub const fn of<T>() -> Self { + // SAFETY: rustc ensures that type alignment is always a power of two. + unsafe { Alignment::new_unchecked(mem::align_of::<T>()) } + } + + /// Creates an `Alignment` from a `usize`, or returns `None` if it's + /// not a power of two. + /// + /// Note that `0` is not a power of two, nor a valid alignment. + #[unstable(feature = "ptr_alignment_type", issue = "102070")] + #[inline] + pub const fn new(align: usize) -> Option<Self> { + if align.is_power_of_two() { + // SAFETY: Just checked it only has one bit set + Some(unsafe { Self::new_unchecked(align) }) + } else { + None + } + } + + /// Creates an `Alignment` from a power-of-two `usize`. /// /// # Safety /// @@ -25,101 +71,120 @@ impl ValidAlign { /// /// Equivalently, it must be `1 << exp` for some `exp` in `0..usize::BITS`. /// It must *not* be zero. 
+ #[unstable(feature = "ptr_alignment_type", issue = "102070")] + #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")] #[inline] - pub(crate) const unsafe fn new_unchecked(align: usize) -> Self { + pub const unsafe fn new_unchecked(align: usize) -> Self { // SAFETY: Precondition passed to the caller. - unsafe { assert_unsafe_precondition!((align: usize) => align.is_power_of_two()) }; + unsafe { + assert_unsafe_precondition!( + "Alignment::new_unchecked requires a power of two", + (align: usize) => align.is_power_of_two() + ) + }; // SAFETY: By precondition, this must be a power of two, and // our variants encompass all possible powers of two. - unsafe { mem::transmute::<usize, ValidAlign>(align) } + unsafe { mem::transmute::<usize, Alignment>(align) } } + /// Returns the alignment as a [`usize`] + #[unstable(feature = "ptr_alignment_type", issue = "102070")] + #[rustc_const_unstable(feature = "ptr_alignment_type", issue = "102070")] #[inline] - pub(crate) const fn as_usize(self) -> usize { + pub const fn as_usize(self) -> usize { self.0 as usize } + /// Returns the alignment as a [`NonZeroUsize`] + #[unstable(feature = "ptr_alignment_type", issue = "102070")] #[inline] - pub(crate) const fn as_nonzero(self) -> NonZeroUsize { + pub const fn as_nonzero(self) -> NonZeroUsize { // SAFETY: All the discriminants are non-zero. unsafe { NonZeroUsize::new_unchecked(self.as_usize()) } } - /// Returns the base 2 logarithm of the alignment. + /// Returns the base-2 logarithm of the alignment. /// /// This is always exact, as `self` represents a power of two. + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_alignment_type)] + /// use std::ptr::Alignment; + /// + /// assert_eq!(Alignment::of::<u8>().log2(), 0); + /// assert_eq!(Alignment::new(1024).unwrap().log2(), 10); + /// ``` + #[unstable(feature = "ptr_alignment_type", issue = "102070")] #[inline] - pub(crate) fn log2(self) -> u32 { + pub fn log2(self) -> u32 { self.as_nonzero().trailing_zeros() } - - /// Returns the alignment for a type. - #[inline] - pub(crate) fn of<T>() -> Self { - // SAFETY: rustc ensures that type alignment is always a power of two. 
- unsafe { ValidAlign::new_unchecked(mem::align_of::<T>()) } - } } -impl fmt::Debug for ValidAlign { +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl fmt::Debug for Alignment { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?} (1 << {:?})", self.as_nonzero(), self.log2()) } } -impl TryFrom<NonZeroUsize> for ValidAlign { +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl TryFrom<NonZeroUsize> for Alignment { type Error = num::TryFromIntError; #[inline] - fn try_from(align: NonZeroUsize) -> Result<ValidAlign, Self::Error> { - if align.is_power_of_two() { - // SAFETY: Just checked for power-of-two - unsafe { Ok(ValidAlign::new_unchecked(align.get())) } - } else { - Err(num::TryFromIntError(())) - } + fn try_from(align: NonZeroUsize) -> Result<Alignment, Self::Error> { + align.get().try_into() } } -impl TryFrom<usize> for ValidAlign { +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl TryFrom<usize> for Alignment { type Error = num::TryFromIntError; #[inline] - fn try_from(align: usize) -> Result<ValidAlign, Self::Error> { - if align.is_power_of_two() { - // SAFETY: Just checked for power-of-two - unsafe { Ok(ValidAlign::new_unchecked(align)) } - } else { - Err(num::TryFromIntError(())) - } + fn try_from(align: usize) -> Result<Alignment, Self::Error> { + Self::new(align).ok_or(num::TryFromIntError(())) } } -impl cmp::Eq for ValidAlign {} +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl From<Alignment> for NonZeroUsize { + #[inline] + fn from(align: Alignment) -> NonZeroUsize { + align.as_nonzero() + } +} -impl cmp::PartialEq for ValidAlign { +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl From<Alignment> for usize { #[inline] - fn eq(&self, other: &Self) -> bool { - self.as_nonzero() == other.as_nonzero() + fn from(align: Alignment) -> usize { + align.as_usize() } } -impl cmp::Ord for ValidAlign { +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl cmp::Ord for Alignment { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_nonzero().cmp(&other.as_nonzero()) } } -impl cmp::PartialOrd for ValidAlign { +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl cmp::PartialOrd for Alignment { #[inline] fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } -impl hash::Hash for ValidAlign { +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +impl hash::Hash for Alignment { #[inline] fn hash<H: hash::Hasher>(&self, state: &mut H) { self.as_nonzero().hash(state) @@ -127,15 +192,15 @@ impl hash::Hash for ValidAlign { } #[cfg(target_pointer_width = "16")] -type ValidAlignEnum = ValidAlignEnum16; +type AlignmentEnum = AlignmentEnum16; #[cfg(target_pointer_width = "32")] -type ValidAlignEnum = ValidAlignEnum32; +type AlignmentEnum = AlignmentEnum32; #[cfg(target_pointer_width = "64")] -type ValidAlignEnum = ValidAlignEnum64; +type AlignmentEnum = AlignmentEnum64; -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Eq, PartialEq)] #[repr(u16)] -enum ValidAlignEnum16 { +enum AlignmentEnum16 { _Align1Shl0 = 1 << 0, _Align1Shl1 = 1 << 1, _Align1Shl2 = 1 << 2, @@ -154,9 +219,9 @@ enum ValidAlignEnum16 { _Align1Shl15 = 1 << 15, } -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Eq, PartialEq)] #[repr(u32)] -enum ValidAlignEnum32 { +enum AlignmentEnum32 { _Align1Shl0 = 1 << 0, _Align1Shl1 = 1 << 1, _Align1Shl2 = 1 << 2, @@ -191,9 +256,9 @@ enum ValidAlignEnum32 { _Align1Shl31 = 1 << 31, } -#[derive(Copy, Clone)] 
+#[derive(Copy, Clone, Eq, PartialEq)] #[repr(u64)] -enum ValidAlignEnum64 { +enum AlignmentEnum64 { _Align1Shl0 = 1 << 0, _Align1Shl1 = 1 << 1, _Align1Shl2 = 1 << 2, diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 43e883b8b..5a083227b 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -568,7 +568,6 @@ impl<T: ?Sized> *const T { /// /// For non-`Sized` pointees this operation changes only the data pointer, /// leaving the metadata untouched. - #[cfg(not(bootstrap))] #[unstable(feature = "ptr_mask", issue = "98290")] #[must_use = "returns a new pointer rather than modifying its argument"] #[inline(always)] @@ -695,7 +694,7 @@ impl<T: ?Sized> *const T { /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. /// /// This computes the same value that [`offset_from`](#method.offset_from) - /// would compute, but with the added precondition that that the offset is + /// would compute, but with the added precondition that the offset is /// guaranteed to be non-negative. This method is equivalent to /// `usize::from(self.offset_from(origin)).unwrap_unchecked()`, /// but it provides slightly more information to the optimizer, which can @@ -762,7 +761,10 @@ impl<T: ?Sized> *const T { // SAFETY: The comparison has no side-effects, and the intrinsic // does this check internally in the CTFE implementation. unsafe { - assert_unsafe_precondition!([T](this: *const T, origin: *const T) => this >= origin) + assert_unsafe_precondition!( + "ptr::sub_ptr requires `this >= origin`", + [T](this: *const T, origin: *const T) => this >= origin + ) }; let pointee_size = mem::size_of::<T>(); @@ -803,7 +805,7 @@ impl<T: ?Sized> *const T { /// Returns whether two pointers are guaranteed to be inequal. /// - /// At runtime this function behaves like `Some(self == other)`. + /// At runtime this function behaves like `Some(self != other)`. /// However, in some contexts (e.g., compile-time evaluation), /// it is not always possible to determine inequality of two pointers, so this function may /// spuriously return `None` for pointers that later actually turn out to have its inequality known. diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs index 8865c834c..caa10f181 100644 --- a/library/core/src/ptr/metadata.rs +++ b/library/core/src/ptr/metadata.rs @@ -135,16 +135,16 @@ pub const fn from_raw_parts_mut<T: ?Sized>( } #[repr(C)] -pub(crate) union PtrRepr<T: ?Sized> { - pub(crate) const_ptr: *const T, - pub(crate) mut_ptr: *mut T, - pub(crate) components: PtrComponents<T>, +union PtrRepr<T: ?Sized> { + const_ptr: *const T, + mut_ptr: *mut T, + components: PtrComponents<T>, } #[repr(C)] -pub(crate) struct PtrComponents<T: ?Sized> { - pub(crate) data_address: *const (), - pub(crate) metadata: <T as Pointee>::Metadata, +struct PtrComponents<T: ?Sized> { + data_address: *const (), + metadata: <T as Pointee>::Metadata, } // Manual impl needed to avoid `T: Copy` bound. 
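Editor's note: taken together, the hunks above turn the internal `ValidAlign` into the public (but still unstable) `ptr::Alignment` type. A minimal nightly sketch behind the `ptr_alignment_type` gate, using only the methods shown in this diff:

#![feature(ptr_alignment_type)]

use std::mem;
use std::ptr::Alignment;

fn main() {
    // `of::<T>()` mirrors `mem::align_of::<T>()`, just strongly typed.
    assert_eq!(Alignment::of::<u64>().as_usize(), mem::align_of::<u64>());
    // `new` accepts only powers of two.
    assert_eq!(Alignment::new(16).map(|a| a.log2()), Some(4));
    assert_eq!(Alignment::new(12), None);
    assert_eq!(Alignment::MIN.as_usize(), 1);
}
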
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index e976abed7..565c38d22 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -377,6 +377,10 @@ use crate::intrinsics::{ use crate::mem::{self, MaybeUninit}; +mod alignment; +#[unstable(feature = "ptr_alignment_type", issue = "102070")] +pub use alignment::Alignment; + #[stable(feature = "rust1", since = "1.0.0")] #[doc(inline)] pub use crate::intrinsics::copy_nonoverlapping; @@ -390,7 +394,6 @@ pub use crate::intrinsics::copy; pub use crate::intrinsics::write_bytes; mod metadata; -pub(crate) use metadata::PtrRepr; #[unstable(feature = "ptr_metadata", issue = "81513")] pub use metadata::{from_raw_parts, from_raw_parts_mut, metadata, DynMetadata, Pointee, Thin}; @@ -578,12 +581,21 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T { /// Convert an address back to a pointer, picking up a previously 'exposed' provenance. /// /// This is equivalent to `addr as *const T`. The provenance of the returned pointer is that of *any* -/// pointer that was previously passed to [`expose_addr`][pointer::expose_addr] or a `ptr as usize` -/// cast. If there is no previously 'exposed' provenance that justifies the way this pointer will be -/// used, the program has undefined behavior. Note that there is no algorithm that decides which -/// provenance will be used. You can think of this as "guessing" the right provenance, and the guess -/// will be "maximally in your favor", in the sense that if there is any way to avoid undefined -/// behavior, then that is the guess that will be taken. +/// pointer that was previously exposed by passing it to [`expose_addr`][pointer::expose_addr], +/// or a `ptr as usize` cast. In addition, memory which is outside the control of the Rust abstract +/// machine (MMIO registers, for example) is always considered to be exposed, so long as this memory +/// is disjoint from memory that will be used by the abstract machine such as the stack, heap, +/// and statics. +/// +/// If there is no 'exposed' provenance that justifies the way this pointer will be used, +/// the program has undefined behavior. In particular, the aliasing rules still apply: pointers +/// and references that have been invalidated due to aliasing accesses cannot be used any more, +/// even if they have been exposed! +/// +/// Note that there is no algorithm that decides which provenance will be used. You can think of this +/// as "guessing" the right provenance, and the guess will be "maximally in your favor", in the sense +/// that if there is any way to avoid undefined behavior (while upholding all aliasing requirements), +/// then that is the guess that will be taken. /// /// On platforms with multiple address spaces, it is your responsibility to ensure that the /// address makes sense in the address space that this pointer will be used with. @@ -886,7 +898,10 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) { // SAFETY: the caller must guarantee that `x` and `y` are // valid for writes and properly aligned. 
unsafe { - assert_unsafe_precondition!([T](x: *mut T, y: *mut T, count: usize) => + assert_unsafe_precondition!( + "ptr::swap_nonoverlapping requires that both pointer arguments are aligned and non-null \ + and the specified memory ranges do not overlap", + [T](x: *mut T, y: *mut T, count: usize) => is_aligned_and_not_null(x) && is_aligned_and_not_null(y) && is_nonoverlapping(x, y, count) @@ -983,7 +998,10 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T { // and cannot overlap `src` since `dst` must point to a distinct // allocated object. unsafe { - assert_unsafe_precondition!([T](dst: *mut T) => is_aligned_and_not_null(dst)); + assert_unsafe_precondition!( + "ptr::replace requires that the pointer argument is aligned and non-null", + [T](dst: *mut T) => is_aligned_and_not_null(dst) + ); mem::swap(&mut *dst, &mut src); // cannot overlap } src @@ -1114,6 +1132,10 @@ pub const unsafe fn read<T>(src: *const T) -> T { // Also, since we just wrote a valid value into `tmp`, it is guaranteed // to be properly initialized. unsafe { + assert_unsafe_precondition!( + "ptr::read requires that the pointer argument is aligned and non-null", + [T](src: *const T) => is_aligned_and_not_null(src) + ); copy_nonoverlapping(src, tmp.as_mut_ptr(), 1); tmp.assume_init() } @@ -1307,6 +1329,10 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) { // `dst` cannot overlap `src` because the caller has mutable access // to `dst` while `src` is owned by this function. unsafe { + assert_unsafe_precondition!( + "ptr::write requires that the pointer argument is aligned and non-null", + [T](dst: *mut T) => is_aligned_and_not_null(dst) + ); copy_nonoverlapping(&src as *const T, dst, 1); intrinsics::forget(src); } @@ -1470,7 +1496,10 @@ pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) { pub unsafe fn read_volatile<T>(src: *const T) -> T { // SAFETY: the caller must uphold the safety contract for `volatile_load`. unsafe { - assert_unsafe_precondition!([T](src: *const T) => is_aligned_and_not_null(src)); + assert_unsafe_precondition!( + "ptr::read_volatile requires that the pointer argument is aligned and non-null", + [T](src: *const T) => is_aligned_and_not_null(src) + ); intrinsics::volatile_load(src) } } @@ -1541,7 +1570,10 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T { pub unsafe fn write_volatile<T>(dst: *mut T, src: T) { // SAFETY: the caller must uphold the safety contract for `volatile_store`. unsafe { - assert_unsafe_precondition!([T](dst: *mut T) => is_aligned_and_not_null(dst)); + assert_unsafe_precondition!( + "ptr::write_volatile requires that the pointer argument is aligned and non-null", + [T](dst: *mut T) => is_aligned_and_not_null(dst) + ); intrinsics::volatile_store(dst, src); } } @@ -1728,6 +1760,12 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize { /// by their address rather than comparing the values they point to /// (which is what the `PartialEq for &T` implementation does). /// +/// When comparing wide pointers, both the address and the metadata are tested for equality. +/// However, note that comparing trait object pointers (`*const dyn Trait`) is unrealiable: pointers +/// to values of the same underlying type can compare inequal (because vtables are duplicated in +/// multiple codegen units), and pointers to values of *different* underlying type can compare equal +/// (since identical vtables can be deduplicated within a codegen unit). 
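Editor's note: returning to the provenance rules spelled out for `from_exposed_addr` above, the stable `as` casts it describes behave the same way, so the rule can be illustrated without any unstable API. A minimal sketch:

fn main() {
    let x = 42u32;
    // The `ptr as usize` cast exposes the pointer's provenance...
    let addr = &x as *const u32 as usize;
    // ...and the `usize as ptr` cast may pick that exposed provenance back up.
    let p = addr as *const u32;
    // SAFETY: `addr` came from a pointer to `x` whose provenance was exposed,
    // and `x` is still live, so reading through `p` is defined behaviour.
    assert_eq!(unsafe { *p }, 42);
}
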
+/// /// # Examples /// /// ``` @@ -1754,41 +1792,6 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize { /// assert!(!std::ptr::eq(&a[..2], &a[..3])); /// assert!(!std::ptr::eq(&a[0..2], &a[1..3])); /// ``` -/// -/// Traits are also compared by their implementation: -/// -/// ``` -/// #[repr(transparent)] -/// struct Wrapper { member: i32 } -/// -/// trait Trait {} -/// impl Trait for Wrapper {} -/// impl Trait for i32 {} -/// -/// let wrapper = Wrapper { member: 10 }; -/// -/// // Pointers have equal addresses. -/// assert!(std::ptr::eq( -/// &wrapper as *const Wrapper as *const u8, -/// &wrapper.member as *const i32 as *const u8 -/// )); -/// -/// // Objects have equal addresses, but `Trait` has different implementations. -/// assert!(!std::ptr::eq( -/// &wrapper as &dyn Trait, -/// &wrapper.member as &dyn Trait, -/// )); -/// assert!(!std::ptr::eq( -/// &wrapper as &dyn Trait as *const dyn Trait, -/// &wrapper.member as &dyn Trait as *const dyn Trait, -/// )); -/// -/// // Converting the reference to a `*const u8` compares by address. -/// assert!(std::ptr::eq( -/// &wrapper as &dyn Trait as *const dyn Trait as *const u8, -/// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8, -/// )); -/// ``` #[stable(feature = "ptr_eq", since = "1.17.0")] #[inline] pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool { @@ -1856,9 +1859,16 @@ macro_rules! maybe_fnptr_doc { // Impls for function pointers macro_rules! fnptr_impls_safety_abi { ($FnTy: ty, $($Arg: ident),*) => { + fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* } + }; + (@c_unwind $FnTy: ty, $($Arg: ident),*) => { + #[cfg(not(bootstrap))] + fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* } + }; + (#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => { maybe_fnptr_doc! { $($Arg)* @ - #[stable(feature = "fnptr_impls", since = "1.4.0")] + #[$meta] impl<Ret, $($Arg),*> PartialEq for $FnTy { #[inline] fn eq(&self, other: &Self) -> bool { @@ -1869,13 +1879,13 @@ macro_rules! fnptr_impls_safety_abi { maybe_fnptr_doc! { $($Arg)* @ - #[stable(feature = "fnptr_impls", since = "1.4.0")] + #[$meta] impl<Ret, $($Arg),*> Eq for $FnTy {} } maybe_fnptr_doc! { $($Arg)* @ - #[stable(feature = "fnptr_impls", since = "1.4.0")] + #[$meta] impl<Ret, $($Arg),*> PartialOrd for $FnTy { #[inline] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { @@ -1886,7 +1896,7 @@ macro_rules! fnptr_impls_safety_abi { maybe_fnptr_doc! { $($Arg)* @ - #[stable(feature = "fnptr_impls", since = "1.4.0")] + #[$meta] impl<Ret, $($Arg),*> Ord for $FnTy { #[inline] fn cmp(&self, other: &Self) -> Ordering { @@ -1897,7 +1907,7 @@ macro_rules! fnptr_impls_safety_abi { maybe_fnptr_doc! { $($Arg)* @ - #[stable(feature = "fnptr_impls", since = "1.4.0")] + #[$meta] impl<Ret, $($Arg),*> hash::Hash for $FnTy { fn hash<HH: hash::Hasher>(&self, state: &mut HH) { state.write_usize(*self as usize) @@ -1907,7 +1917,7 @@ macro_rules! fnptr_impls_safety_abi { maybe_fnptr_doc! { $($Arg)* @ - #[stable(feature = "fnptr_impls", since = "1.4.0")] + #[$meta] impl<Ret, $($Arg),*> fmt::Pointer for $FnTy { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::pointer_fmt_inner(*self as usize, f) @@ -1917,7 +1927,7 @@ macro_rules! fnptr_impls_safety_abi { maybe_fnptr_doc! 
{ $($Arg)* @ - #[stable(feature = "fnptr_impls", since = "1.4.0")] + #[$meta] impl<Ret, $($Arg),*> fmt::Debug for $FnTy { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::pointer_fmt_inner(*self as usize, f) @@ -1932,16 +1942,22 @@ macro_rules! fnptr_impls_args { fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ } fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ } fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ } + fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } }; () => { // No variadic functions with 0 parameters fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! { extern "C" fn() -> Ret, } + fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, } + fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn() -> Ret, } }; } diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index e277b8181..6764002bc 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -80,10 +80,14 @@ impl<T: ?Sized> *mut T { #[unstable(feature = "set_ptr_value", issue = "75091")] #[must_use = "returns a new pointer rather than modifying its argument"] #[inline] - pub fn with_metadata_of<U>(self, mut val: *mut U) -> *mut U + pub fn with_metadata_of<U>(self, val: *const U) -> *mut U where U: ?Sized, { + // Prepare in the type system that we will replace the pointer value with a mutable + // pointer, taking the mutable provenance from the `self` pointer. + let mut val = val as *mut U; + // Pointer to the pointer value within the value. let target = &mut val as *mut *mut U as *mut *mut u8; // SAFETY: In case of a thin pointer, this operations is identical // to a simple assignment. In case of a fat pointer, with the current @@ -584,7 +588,6 @@ impl<T: ?Sized> *mut T { /// /// For non-`Sized` pointees this operation changes only the data pointer, /// leaving the metadata untouched. - #[cfg(not(bootstrap))] #[unstable(feature = "ptr_mask", issue = "98290")] #[must_use = "returns a new pointer rather than modifying its argument"] #[inline(always)] @@ -727,7 +730,7 @@ impl<T: ?Sized> *mut T { /// Returns whether two pointers are guaranteed to be inequal. /// - /// At runtime this function behaves like `Some(self == other)`. + /// At runtime this function behaves like `Some(self != other)`. /// However, in some contexts (e.g., compile-time evaluation), /// it is not always possible to determine inequality of two pointers, so this function may /// spuriously return `None` for pointers that later actually turn out to have its inequality known. @@ -868,7 +871,7 @@ impl<T: ?Sized> *mut T { /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. 
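Editor's note: the function-pointer macro hunks above extend the `PartialEq`/`Ord`/`Hash`/`Debug`/`Pointer` impls to the `extern "C-unwind"` ABI, which at this point still sits behind the `c_unwind` feature. A nightly-only sketch (gate name as of this release):

#![feature(c_unwind)]

extern "C-unwind" fn bump(x: u32) -> u32 {
    x + 1
}

fn main() {
    // With the new impls, `extern "C-unwind"` fn pointers can be compared,
    // hashed and printed just like `extern "C"` ones.
    let f: extern "C-unwind" fn(u32) -> u32 = bump;
    let g: extern "C-unwind" fn(u32) -> u32 = bump;
    assert_eq!(f(41), 42);
    assert!(f == g);
    println!("{:p}", f);
}
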
/// /// This computes the same value that [`offset_from`](#method.offset_from) - /// would compute, but with the added precondition that that the offset is + /// would compute, but with the added precondition that the offset is /// guaranteed to be non-negative. This method is equivalent to /// `usize::from(self.offset_from(origin)).unwrap_unchecked()`, /// but it provides slightly more information to the optimizer, which can diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs index f3ef094cb..c18264d13 100644 --- a/library/core/src/ptr/non_null.rs +++ b/library/core/src/ptr/non_null.rs @@ -2,6 +2,7 @@ use crate::cmp::Ordering; use crate::convert::From; use crate::fmt; use crate::hash; +use crate::intrinsics::assert_unsafe_precondition; use crate::marker::Unsize; use crate::mem::{self, MaybeUninit}; use crate::num::NonZeroUsize; @@ -195,7 +196,10 @@ impl<T: ?Sized> NonNull<T> { #[inline] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { // SAFETY: the caller must guarantee that `ptr` is non-null. - unsafe { NonNull { pointer: ptr as _ } } + unsafe { + assert_unsafe_precondition!("NonNull::new_unchecked requires that the pointer is non-null", [T: ?Sized](ptr: *mut T) => !ptr.is_null()); + NonNull { pointer: ptr as _ } + } } /// Creates a new `NonNull` if `ptr` is non-null. diff --git a/library/core/src/result.rs b/library/core/src/result.rs index 76eaa191f..3f33c5fd6 100644 --- a/library/core/src/result.rs +++ b/library/core/src/result.rs @@ -548,22 +548,25 @@ impl<T, E> Result<T, E> { /// # Examples /// /// ``` - /// #![feature(is_some_with)] + /// #![feature(is_some_and)] /// /// let x: Result<u32, &str> = Ok(2); - /// assert_eq!(x.is_ok_and(|&x| x > 1), true); + /// assert_eq!(x.is_ok_and(|x| x > 1), true); /// /// let x: Result<u32, &str> = Ok(0); - /// assert_eq!(x.is_ok_and(|&x| x > 1), false); + /// assert_eq!(x.is_ok_and(|x| x > 1), false); /// /// let x: Result<u32, &str> = Err("hey"); - /// assert_eq!(x.is_ok_and(|&x| x > 1), false); + /// assert_eq!(x.is_ok_and(|x| x > 1), false); /// ``` #[must_use] #[inline] - #[unstable(feature = "is_some_with", issue = "93050")] - pub fn is_ok_and(&self, f: impl FnOnce(&T) -> bool) -> bool { - matches!(self, Ok(x) if f(x)) + #[unstable(feature = "is_some_and", issue = "93050")] + pub fn is_ok_and(self, f: impl FnOnce(T) -> bool) -> bool { + match self { + Err(_) => false, + Ok(x) => f(x), + } } /// Returns `true` if the result is [`Err`]. @@ -592,7 +595,7 @@ impl<T, E> Result<T, E> { /// # Examples /// /// ``` - /// #![feature(is_some_with)] + /// #![feature(is_some_and)] /// use std::io::{Error, ErrorKind}; /// /// let x: Result<u32, Error> = Err(Error::new(ErrorKind::NotFound, "!")); @@ -606,9 +609,12 @@ impl<T, E> Result<T, E> { /// ``` #[must_use] #[inline] - #[unstable(feature = "is_some_with", issue = "93050")] - pub fn is_err_and(&self, f: impl FnOnce(&E) -> bool) -> bool { - matches!(self, Err(x) if f(x)) + #[unstable(feature = "is_some_and", issue = "93050")] + pub fn is_err_and(self, f: impl FnOnce(E) -> bool) -> bool { + match self { + Ok(_) => false, + Err(e) => f(e), + } } ///////////////////////////////////////////////////////////////////////// @@ -2066,9 +2072,6 @@ impl<A, E, V: FromIterator<A>> FromIterator<Result<A, E>> for Result<V, E> { /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16. 
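Editor's note: the `FromIterator` body below is what backs short-circuiting `collect` into a `Result`; for reference, the stable user-facing behaviour it implements:

use std::num::ParseIntError;

fn main() {
    // Collecting an iterator of `Result`s stops at the first `Err`.
    let bad: Result<Vec<i32>, ParseIntError> =
        ["1", "2", "x", "4"].iter().map(|s| s.parse()).collect();
    assert!(bad.is_err());

    let good: Result<Vec<i32>, ParseIntError> =
        ["1", "2", "3"].iter().map(|s| s.parse()).collect();
    assert_eq!(good, Ok(vec![1, 2, 3]));
}
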
#[inline] fn from_iter<I: IntoIterator<Item = Result<A, E>>>(iter: I) -> Result<V, E> { - // FIXME(#11084): This could be replaced with Iterator::scan when this - // performance bug is closed. - iter::try_process(iter.into_iter(), |i| i.collect()) } } @@ -2116,6 +2119,7 @@ impl<T, E, F: From<E>> ops::FromResidual<ops::Yeet<E>> for Result<T, F> { } #[unstable(feature = "try_trait_v2_residual", issue = "91285")] -impl<T, E> ops::Residual<T> for Result<convert::Infallible, E> { +#[rustc_const_unstable(feature = "const_try", issue = "74935")] +impl<T, E> const ops::Residual<T> for Result<convert::Infallible, E> { type TryType = Result<T, E>; } diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs index 3403a5a86..6d2f7330d 100644 --- a/library/core/src/slice/index.rs +++ b/library/core/src/slice/index.rs @@ -139,6 +139,8 @@ mod private_slice_index { impl Sealed for ops::RangeToInclusive<usize> {} #[stable(feature = "slice_index_with_ops_bound_pair", since = "1.53.0")] impl Sealed for (ops::Bound<usize>, ops::Bound<usize>) {} + + impl Sealed for ops::IndexRange {} } /// A helper trait used for indexing operations. @@ -158,6 +160,7 @@ mod private_slice_index { message = "the type `{T}` cannot be indexed by `{Self}`", label = "slice indices are of type `usize` or ranges of `usize`" )] +#[const_trait] pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed { /// The output type returned by methods. #[stable(feature = "slice_get_slice", since = "1.28.0")] @@ -229,7 +232,10 @@ unsafe impl<T> const SliceIndex<[T]> for usize { // `self` is in bounds of `slice` so `self` cannot overflow an `isize`, // so the call to `add` is safe. unsafe { - assert_unsafe_precondition!([T](this: usize, slice: *const [T]) => this < slice.len()); + assert_unsafe_precondition!( + "slice::get_unchecked requires that the index is within the slice", + [T](this: usize, slice: *const [T]) => this < slice.len() + ); slice.as_ptr().add(self) } } @@ -239,7 +245,10 @@ unsafe impl<T> const SliceIndex<[T]> for usize { let this = self; // SAFETY: see comments for `get_unchecked` above. unsafe { - assert_unsafe_precondition!([T](this: usize, slice: *mut [T]) => this < slice.len()); + assert_unsafe_precondition!( + "slice::get_unchecked_mut requires that the index is within the slice", + [T](this: usize, slice: *mut [T]) => this < slice.len() + ); slice.as_mut_ptr().add(self) } } @@ -257,6 +266,83 @@ unsafe impl<T> const SliceIndex<[T]> for usize { } } +/// Because `IndexRange` guarantees `start <= end`, fewer checks are needed here +/// than there are for a general `Range<usize>` (which might be `100..3`). +#[rustc_const_unstable(feature = "const_index_range_slice_index", issue = "none")] +unsafe impl<T> const SliceIndex<[T]> for ops::IndexRange { + type Output = [T]; + + #[inline] + fn get(self, slice: &[T]) -> Option<&[T]> { + if self.end() <= slice.len() { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { Some(&*self.get_unchecked(slice)) } + } else { + None + } + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { + if self.end() <= slice.len() { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { Some(&mut *self.get_unchecked_mut(slice)) } + } else { + None + } + } + + #[inline] + unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] { + let end = self.end(); + // SAFETY: the caller guarantees that `slice` is not dangling, so it + // cannot be longer than `isize::MAX`. 
They also guarantee that + // `self` is in bounds of `slice` so `self` cannot overflow an `isize`, + // so the call to `add` is safe. + + unsafe { + assert_unsafe_precondition!( + "slice::get_unchecked requires that the index is within the slice", + [T](end: usize, slice: *const [T]) => end <= slice.len() + ); + ptr::slice_from_raw_parts(slice.as_ptr().add(self.start()), self.len()) + } + } + + #[inline] + unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] { + let end = self.end(); + // SAFETY: see comments for `get_unchecked` above. + unsafe { + assert_unsafe_precondition!( + "slice::get_unchecked_mut requires that the index is within the slice", + [T](end: usize, slice: *mut [T]) => end <= slice.len() + ); + ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start()), self.len()) + } + } + + #[inline] + fn index(self, slice: &[T]) -> &[T] { + if self.end() <= slice.len() { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { &*self.get_unchecked(slice) } + } else { + slice_end_index_len_fail(self.end(), slice.len()) + } + } + + #[inline] + fn index_mut(self, slice: &mut [T]) -> &mut [T] { + if self.end() <= slice.len() { + // SAFETY: `self` is checked to be valid and in bounds above. + unsafe { &mut *self.get_unchecked_mut(slice) } + } else { + slice_end_index_len_fail(self.end(), slice.len()) + } + } +} + #[stable(feature = "slice_get_slice_impls", since = "1.15.0")] #[rustc_const_unstable(feature = "const_slice_index", issue = "none")] unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> { @@ -291,8 +377,11 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> { // so the call to `add` is safe. unsafe { - assert_unsafe_precondition!([T](this: ops::Range<usize>, slice: *const [T]) => - this.end >= this.start && this.end <= slice.len()); + assert_unsafe_precondition!( + "slice::get_unchecked requires that the range is within the slice", + [T](this: ops::Range<usize>, slice: *const [T]) => + this.end >= this.start && this.end <= slice.len() + ); ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start) } } @@ -302,8 +391,11 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> { let this = ops::Range { start: self.start, end: self.end }; // SAFETY: see comments for `get_unchecked` above. 
unsafe { - assert_unsafe_precondition!([T](this: ops::Range<usize>, slice: *mut [T]) => - this.end >= this.start && this.end <= slice.len()); + assert_unsafe_precondition!( + "slice::get_unchecked_mut requires that the range is within the slice", + [T](this: ops::Range<usize>, slice: *mut [T]) => + this.end >= this.start && this.end <= slice.len() + ); ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start) } } diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs index 395c56784..8a8962828 100644 --- a/library/core/src/slice/iter.rs +++ b/library/core/src/slice/iter.rs @@ -9,7 +9,7 @@ use crate::fmt; use crate::intrinsics::{assume, exact_div, unchecked_sub}; use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; use crate::marker::{PhantomData, Send, Sized, Sync}; -use crate::mem; +use crate::mem::{self, SizedTypeProperties}; use crate::num::NonZeroUsize; use crate::ptr::NonNull; @@ -91,11 +91,8 @@ impl<'a, T> Iter<'a, T> { unsafe { assume(!ptr.is_null()); - let end = if mem::size_of::<T>() == 0 { - ptr.wrapping_byte_add(slice.len()) - } else { - ptr.add(slice.len()) - }; + let end = + if T::IS_ZST { ptr.wrapping_byte_add(slice.len()) } else { ptr.add(slice.len()) }; Self { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: PhantomData } } @@ -127,6 +124,7 @@ impl<'a, T> Iter<'a, T> { /// ``` #[must_use] #[stable(feature = "iter_to_slice", since = "1.4.0")] + #[inline] pub fn as_slice(&self) -> &'a [T] { self.make_slice() } @@ -146,6 +144,7 @@ iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, { #[stable(feature = "rust1", since = "1.0.0")] impl<T> Clone for Iter<'_, T> { + #[inline] fn clone(&self) -> Self { Iter { ptr: self.ptr, end: self.end, _marker: self._marker } } @@ -153,6 +152,7 @@ impl<T> Clone for Iter<'_, T> { #[stable(feature = "slice_iter_as_ref", since = "1.13.0")] impl<T> AsRef<[T]> for Iter<'_, T> { + #[inline] fn as_ref(&self) -> &[T] { self.as_slice() } @@ -227,11 +227,8 @@ impl<'a, T> IterMut<'a, T> { unsafe { assume(!ptr.is_null()); - let end = if mem::size_of::<T>() == 0 { - ptr.wrapping_byte_add(slice.len()) - } else { - ptr.add(slice.len()) - }; + let end = + if T::IS_ZST { ptr.wrapping_byte_add(slice.len()) } else { ptr.add(slice.len()) }; Self { ptr: NonNull::new_unchecked(ptr), end, _marker: PhantomData } } @@ -303,6 +300,7 @@ impl<'a, T> IterMut<'a, T> { /// ``` #[must_use] #[stable(feature = "slice_iter_mut_as_slice", since = "1.53.0")] + #[inline] pub fn as_slice(&self) -> &[T] { self.make_slice() } @@ -351,6 +349,7 @@ impl<'a, T> IterMut<'a, T> { #[stable(feature = "slice_iter_mut_as_slice", since = "1.53.0")] impl<T> AsRef<[T]> for IterMut<'_, T> { + #[inline] fn as_ref(&self) -> &[T] { self.as_slice() } diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs index 6c9e7574e..ce51d48e3 100644 --- a/library/core/src/slice/iter/macros.rs +++ b/library/core/src/slice/iter/macros.rs @@ -100,7 +100,7 @@ macro_rules! iterator { // Unsafe because the offset must not exceed `self.len()`. #[inline(always)] unsafe fn pre_dec_end(&mut self, offset: usize) -> * $raw_mut T { - if mem::size_of::<T>() == 0 { + if T::IS_ZST { zst_shrink!(self, offset); self.ptr.as_ptr() } else { @@ -140,7 +140,7 @@ macro_rules! iterator { // since we check if the iterator is empty first. 
unsafe { assume(!self.ptr.as_ptr().is_null()); - if mem::size_of::<T>() != 0 { + if !<T>::IS_ZST { assume(!self.end.is_null()); } if is_empty!(self) { @@ -166,7 +166,7 @@ macro_rules! iterator { fn nth(&mut self, n: usize) -> Option<$elem> { if n >= len!(self) { // This iterator is now empty. - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // We have to do it this way as `ptr` may never be 0, but `end` // could be (due to wrapping). self.end = self.ptr.as_ptr(); @@ -355,7 +355,7 @@ macro_rules! iterator { // empty first. unsafe { assume(!self.ptr.as_ptr().is_null()); - if mem::size_of::<T>() != 0 { + if !<T>::IS_ZST { assume(!self.end.is_null()); } if is_empty!(self) { diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index 7de1f48e6..c848c2e18 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -141,8 +141,8 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> { // SAFETY: offset starts at len - suffix.len(), as long as it is greater than // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes. unsafe { - let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); - let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); + let u = *(ptr.add(offset - 2 * chunk_bytes) as *const Chunk); + let v = *(ptr.add(offset - chunk_bytes) as *const Chunk); // Break if there is a matching byte. let zu = contains_zero_byte(u ^ repeated_x); diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index 6a7150d29..4f1bb1734 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -9,7 +9,7 @@ use crate::cmp::Ordering::{self, Greater, Less}; use crate::intrinsics::{assert_unsafe_precondition, exact_div}; use crate::marker::Copy; -use crate::mem; +use crate::mem::{self, SizedTypeProperties}; use crate::num::NonZeroUsize; use crate::ops::{Bound, FnMut, OneSidedRange, Range, RangeBounds}; use crate::option::Option; @@ -123,18 +123,11 @@ impl<T> [T] { #[lang = "slice_len_fn"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")] + #[rustc_allow_const_fn_unstable(ptr_metadata)] #[inline] #[must_use] - // SAFETY: const sound because we transmute out the length field as a usize (which it must be) pub const fn len(&self) -> usize { - // FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable. - // As of this writing this causes a "Const-stable functions can only call other - // const-stable functions" error. - - // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T - // and PtrComponents<T> have the same memory layouts. Only std can make this - // guarantee. - unsafe { crate::ptr::PtrRepr { const_ptr: self }.components.metadata } + ptr::metadata(self) } /// Returns `true` if the slice has a length of 0. 
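[Editorial note] The `<[T]>::len` hunk above replaces the `PtrRepr` union trick with `ptr::metadata`, which reads the element count straight out of the fat pointer. A minimal sketch of that relationship, assuming a nightly toolchain since `ptr_metadata` is still unstable (this example is editorial, not part of the patch):

```rust
// Editorial sketch, not from the patch: for a slice, the fat-pointer
// metadata is the element count, which is exactly what `<[T]>::len` returns.
#![feature(ptr_metadata)] // unstable as of this diff, nightly only

fn main() {
    let s: &[u8] = &[1, 2, 3, 4];
    assert_eq!(core::ptr::metadata(s as *const [u8]), s.len());
}
```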
@@ -660,7 +653,10 @@ impl<T> [T] { let ptr = this.as_mut_ptr(); // SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()` unsafe { - assert_unsafe_precondition!([T](a: usize, b: usize, this: &mut [T]) => a < this.len() && b < this.len()); + assert_unsafe_precondition!( + "slice::swap_unchecked requires that the indices are within the slice", + [T](a: usize, b: usize, this: &mut [T]) => a < this.len() && b < this.len() + ); ptr::swap(ptr.add(a), ptr.add(b)); } } @@ -976,7 +972,10 @@ impl<T> [T] { let this = self; // SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length let new_len = unsafe { - assert_unsafe_precondition!([T](this: &[T], N: usize) => N != 0 && this.len() % N == 0); + assert_unsafe_precondition!( + "slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks", + [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0 + ); exact_div(self.len(), N) }; // SAFETY: We cast a slice of `new_len * N` elements into @@ -1116,7 +1115,10 @@ impl<T> [T] { let this = &*self; // SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length let new_len = unsafe { - assert_unsafe_precondition!([T](this: &[T], N: usize) => N != 0 && this.len() % N == 0); + assert_unsafe_precondition!( + "slice::as_chunks_unchecked_mut requires `N != 0` and the slice to split exactly into `N`-element chunks", + [T](this: &[T], N: usize) => N != 0 && this.len() % N == 0 + ); exact_div(this.len(), N) }; // SAFETY: We cast a slice of `new_len * N` elements into @@ -1580,7 +1582,8 @@ impl<T> [T] { #[inline] #[track_caller] #[must_use] - pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { + #[rustc_const_unstable(feature = "const_slice_split_at_mut", issue = "101804")] + pub const fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { assert!(mid <= self.len()); // SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which // fulfills the requirements of `from_raw_parts_mut`. @@ -1679,9 +1682,10 @@ impl<T> [T] { /// assert_eq!(v, [1, 2, 3, 4, 5, 6]); /// ``` #[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")] + #[rustc_const_unstable(feature = "const_slice_split_at_mut", issue = "101804")] #[inline] #[must_use] - pub unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) { + pub const unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) { let len = self.len(); let ptr = self.as_mut_ptr(); @@ -1690,7 +1694,10 @@ impl<T> [T] { // `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference // is fine. unsafe { - assert_unsafe_precondition!((mid: usize, len: usize) => mid <= len); + assert_unsafe_precondition!( + "slice::split_at_mut_unchecked requires the index to be within the slice", + (mid: usize, len: usize) => mid <= len + ); (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) } } @@ -2074,7 +2081,7 @@ impl<T> [T] { SplitN::new(self.split(pred), n) } - /// Returns an iterator over subslices separated by elements that match + /// Returns an iterator over mutable subslices separated by elements that match /// `pred`, limited to returning at most `n` items. The matched element is /// not contained in the subslices. 
/// @@ -2357,6 +2364,28 @@ impl<T> [T] { /// assert!(match r { Ok(1..=4) => true, _ => false, }); /// ``` /// + /// If you want to find that whole *range* of matching items, rather than + /// an arbitrary matching one, that can be done using [`partition_point`]: + /// ``` + /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; + /// + /// let low = s.partition_point(|x| x < &1); + /// assert_eq!(low, 1); + /// let high = s.partition_point(|x| x <= &1); + /// assert_eq!(high, 5); + /// let r = s.binary_search(&1); + /// assert!((low..high).contains(&r.unwrap())); + /// + /// assert!(s[..low].iter().all(|&x| x < 1)); + /// assert!(s[low..high].iter().all(|&x| x == 1)); + /// assert!(s[high..].iter().all(|&x| x > 1)); + /// + /// // For something not found, the "range" of equal items is empty + /// assert_eq!(s.partition_point(|x| x < &11), 9); + /// assert_eq!(s.partition_point(|x| x <= &11), 9); + /// assert_eq!(s.binary_search(&11), Err(9)); + /// ``` + /// /// If you want to insert an item to a sorted vector, while maintaining /// sort order, consider using [`partition_point`]: /// @@ -2424,15 +2453,20 @@ impl<T> [T] { where F: FnMut(&'a T) -> Ordering, { + // INVARIANTS: + // - 0 <= left <= left + size = right <= self.len() + // - f returns Less for everything in self[..left] + // - f returns Greater for everything in self[right..] let mut size = self.len(); let mut left = 0; let mut right = size; while left < right { let mid = left + size / 2; - // SAFETY: the call is made safe by the following invariants: - // - `mid >= 0` - // - `mid < size`: `mid` is limited by `[left; right)` bound. + // SAFETY: the while condition means `size` is strictly positive, so + // `size/2 < size`. Thus `left + size/2 < left + size`, which + // coupled with the `left + size <= self.len()` invariant means + // we have `left + size/2 < self.len()`, and this is in-bounds. let cmp = f(unsafe { self.get_unchecked(mid) }); // The reason why we use if/else control flow rather than match @@ -2450,6 +2484,10 @@ impl<T> [T] { size = right - left; } + + // SAFETY: directly true from the overall invariant. + // Note that this is `<=`, unlike the assume in the `Ok` path. + unsafe { crate::intrinsics::assume(left <= self.len()) }; Err(left) } @@ -2540,7 +2578,7 @@ impl<T> [T] { where T: Ord, { - sort::quicksort(self, |a, b| a.lt(b)); + sort::quicksort(self, T::lt); } /// Sorts the slice with a comparator function, but might not preserve the order of equal @@ -2643,9 +2681,10 @@ impl<T> [T] { /// less than or equal to any value at a position `j > index`. Additionally, this reordering is /// unstable (i.e. any number of equal elements may end up at position `index`), in-place /// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also/ known as "kth - /// element" in other libraries. It returns a triplet of the following values: all elements less - /// than the one at the given index, the value at the given index, and all elements greater than - /// the one at the given index. + /// element" in other libraries. It returns a triplet of the following from the reordered slice: + /// the subslice prior to `index`, the element at `index`, and the subslice after `index`; + /// accordingly, the values in those two subslices will respectively all be less-than-or-equal-to + /// and greater-than-or-equal-to the value of the element at `index`. 
/// /// # Current implementation /// @@ -2679,8 +2718,7 @@ impl<T> [T] { where T: Ord, { - let mut f = |a: &T, b: &T| a.lt(b); - sort::partition_at_index(self, index, &mut f) + sort::partition_at_index(self, index, T::lt) } /// Reorder the slice with a comparator function such that the element at `index` is at its @@ -2690,10 +2728,11 @@ impl<T> [T] { /// less than or equal to any value at a position `j > index` using the comparator function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. It returns a triplet of the following - /// values: all elements less than the one at the given index, the value at the given index, - /// and all elements greater than the one at the given index, using the provided comparator - /// function. + /// is also known as "kth element" in other libraries. It returns a triplet of the following from + /// the slice reordered according to the provided comparator function: the subslice prior to + /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in + /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to + /// the value of the element at `index`. /// /// # Current implementation /// @@ -2731,8 +2770,7 @@ impl<T> [T] { where F: FnMut(&T, &T) -> Ordering, { - let mut f = |a: &T, b: &T| compare(a, b) == Less; - sort::partition_at_index(self, index, &mut f) + sort::partition_at_index(self, index, |a: &T, b: &T| compare(a, b) == Less) } /// Reorder the slice with a key extraction function such that the element at `index` is at its @@ -2742,10 +2780,11 @@ impl<T> [T] { /// less than or equal to any value at a position `j > index` using the key extraction function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. It returns a triplet of the following - /// values: all elements less than the one at the given index, the value at the given index, and - /// all elements greater than the one at the given index, using the provided key extraction - /// function. + /// is also known as "kth element" in other libraries. It returns a triplet of the following from + /// the slice reordered according to the provided key extraction function: the subslice prior to + /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in + /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to + /// the value of the element at `index`. /// /// # Current implementation /// @@ -2784,8 +2823,7 @@ impl<T> [T] { F: FnMut(&T) -> K, K: Ord, { - let mut g = |a: &T, b: &T| f(a).lt(&f(b)); - sort::partition_at_index(self, index, &mut g) + sort::partition_at_index(self, index, |a: &T, b: &T| f(a).lt(&f(b))) } /// Moves all consecutive repeated elements to the end of the slice according to the @@ -3459,7 +3497,7 @@ impl<T> [T] { #[must_use] pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) { // Note that most of this function will be constant-evaluated, - if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 { + if U::IS_ZST || T::IS_ZST { // handle ZSTs specially, which is – don't handle them at all. 
return (self, &[], &[]); } @@ -3520,7 +3558,7 @@ impl<T> [T] { #[must_use] pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) { // Note that most of this function will be constant-evaluated, - if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 { + if U::IS_ZST || T::IS_ZST { // handle ZSTs specially, which is – don't handle them at all. return (self, &mut [], &mut []); } @@ -3776,6 +3814,16 @@ impl<T> [T] { /// assert!(v[i..].iter().all(|&x| !(x < 5))); /// ``` /// + /// If all elements of the slice match the predicate, including if the slice + /// is empty, then the length of the slice will be returned: + /// + /// ``` + /// let a = [2, 4, 8]; + /// assert_eq!(a.partition_point(|x| x < &100), a.len()); + /// let a: [i32; 0] = []; + /// assert_eq!(a.partition_point(|x| x < &100), 0); + /// ``` + /// /// If you want to insert an item to a sorted vector, while maintaining /// sort order: /// @@ -4066,7 +4114,7 @@ impl<T, const N: usize> [[T; N]] { /// ``` #[unstable(feature = "slice_flatten", issue = "95629")] pub fn flatten(&self) -> &[T] { - let len = if crate::mem::size_of::<T>() == 0 { + let len = if T::IS_ZST { self.len().checked_mul(N).expect("slice len overflow") } else { // SAFETY: `self.len() * N` cannot overflow because `self` is @@ -4104,7 +4152,7 @@ impl<T, const N: usize> [[T; N]] { /// ``` #[unstable(feature = "slice_flatten", issue = "95629")] pub fn flatten_mut(&mut self) -> &mut [T] { - let len = if crate::mem::size_of::<T>() == 0 { + let len = if T::IS_ZST { self.len().checked_mul(N).expect("slice len overflow") } else { // SAFETY: `self.len() * N` cannot overflow because `self` is diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs index f1e8bc79b..052fd34d0 100644 --- a/library/core/src/slice/raw.rs +++ b/library/core/src/slice/raw.rs @@ -1,7 +1,9 @@ //! Free functions to create `&[T]` and `&mut [T]`. use crate::array; -use crate::intrinsics::{assert_unsafe_precondition, is_aligned_and_not_null}; +use crate::intrinsics::{ + assert_unsafe_precondition, is_aligned_and_not_null, is_valid_allocation_size, +}; use crate::ops::Range; use crate::ptr; @@ -90,9 +92,10 @@ use crate::ptr; pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] { // SAFETY: the caller must uphold the safety contract for `from_raw_parts`. unsafe { - assert_unsafe_precondition!([T](data: *const T, len: usize) => - is_aligned_and_not_null(data) - && crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize + assert_unsafe_precondition!( + "slice::from_raw_parts requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`", + [T](data: *const T, len: usize) => is_aligned_and_not_null(data) + && is_valid_allocation_size::<T>(len) ); &*ptr::slice_from_raw_parts(data, len) } @@ -134,9 +137,10 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] { // SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`. 
unsafe { - assert_unsafe_precondition!([T](data: *mut T, len: usize) => - is_aligned_and_not_null(data) - && crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize + assert_unsafe_precondition!( + "slice::from_raw_parts_mut requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`", + [T](data: *mut T, len: usize) => is_aligned_and_not_null(data) + && is_valid_allocation_size::<T>(len) ); &mut *ptr::slice_from_raw_parts_mut(data, len) } @@ -188,6 +192,10 @@ pub const fn from_mut<T>(s: &mut T) -> &mut [T] { /// /// Note that a range created from [`slice::as_ptr_range`] fulfills these requirements. /// +/// # Panics +/// +/// This function panics if `T` is a Zero-Sized Type (“ZST”). +/// /// # Caveat /// /// The lifetime for the returned slice is inferred from its usage. To @@ -219,9 +227,15 @@ pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] { unsafe { from_raw_parts(range.start, range.end.sub_ptr(range.start)) } } -/// Performs the same functionality as [`from_ptr_range`], except that a +/// Forms a mutable slice from a pointer range. +/// +/// This is the same functionality as [`from_ptr_range`], except that a /// mutable slice is returned. /// +/// This function is useful for interacting with foreign interfaces which +/// use two pointers to refer to a range of elements in memory, as is +/// common in C++. +/// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: @@ -247,6 +261,18 @@ pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] { /// /// Note that a range created from [`slice::as_mut_ptr_range`] fulfills these requirements. /// +/// # Panics +/// +/// This function panics if `T` is a Zero-Sized Type (“ZST”). +/// +/// # Caveat +/// +/// The lifetime for the returned slice is inferred from its usage. To +/// prevent accidental misuse, it's suggested to tie the lifetime to whichever +/// source lifetime is safe in the context, such as by providing a helper +/// function taking the lifetime of a host value for the slice, or by explicit +/// annotation. +/// /// # Examples /// /// ``` diff --git a/library/core/src/slice/rotate.rs b/library/core/src/slice/rotate.rs index 4589c6c0f..fa8c238f8 100644 --- a/library/core/src/slice/rotate.rs +++ b/library/core/src/slice/rotate.rs @@ -1,5 +1,5 @@ use crate::cmp; -use crate::mem::{self, MaybeUninit}; +use crate::mem::{self, MaybeUninit, SizedTypeProperties}; use crate::ptr; /// Rotates the range `[mid-left, mid+right)` such that the element at `mid` becomes the first @@ -63,7 +63,7 @@ use crate::ptr; /// when `left < right` the swapping happens from the left instead. pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize) { type BufType = [usize; 32]; - if mem::size_of::<T>() == 0 { + if T::IS_ZST { return; } loop { diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs index c6c03c0b0..87f77b7f2 100644 --- a/library/core/src/slice/sort.rs +++ b/library/core/src/slice/sort.rs @@ -7,7 +7,7 @@ //! stable sorting implementation. use crate::cmp; -use crate::mem::{self, MaybeUninit}; +use crate::mem::{self, MaybeUninit, SizedTypeProperties}; use crate::ptr; /// When dropped, copies from `src` into `dest`. @@ -813,7 +813,7 @@ where F: FnMut(&T, &T) -> bool, { // Sorting has no meaningful behavior on zero-sized types. 
- if mem::size_of::<T>() == 0 { + if T::IS_ZST { return; } @@ -898,7 +898,7 @@ where panic!("partition_at_index index {} greater than length of slice {}", index, v.len()); } - if mem::size_of::<T>() == 0 { + if T::IS_ZST { // Sorting has no meaningful behavior on zero-sized types. Do nothing. } else if index == v.len() - 1 { // Find max element and place it in the last position of the array. We're free to use diff --git a/library/core/src/str/error.rs b/library/core/src/str/error.rs index 343889b69..a11b5add4 100644 --- a/library/core/src/str/error.rs +++ b/library/core/src/str/error.rs @@ -1,6 +1,5 @@ //! Defines utf8 error type. -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt; @@ -124,7 +123,6 @@ impl fmt::Display for Utf8Error { } } -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] impl Error for Utf8Error { #[allow(deprecated)] @@ -148,7 +146,6 @@ impl fmt::Display for ParseBoolError { } } -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] impl Error for ParseBoolError { #[allow(deprecated)] diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs index f673aa2a4..fbc0fc397 100644 --- a/library/core/src/str/mod.rs +++ b/library/core/src/str/mod.rs @@ -2642,5 +2642,4 @@ impl_fn_for_zst! { } #[stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(bootstrap))] impl !crate::error::Error for &str {} diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs index 031fb8e8b..ec2cb429e 100644 --- a/library/core/src/str/pattern.rs +++ b/library/core/src/str/pattern.rs @@ -267,7 +267,7 @@ pub unsafe trait Searcher<'a> { /// The index ranges returned by this trait are not required /// to exactly match those of the forward search in reverse. /// -/// For the reason why this trait is marked unsafe, see them +/// For the reason why this trait is marked unsafe, see the /// parent trait [`Searcher`]. pub unsafe trait ReverseSearcher<'a>: Searcher<'a> { /// Performs the next search step starting from the back. 
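[Editorial note] The `str/error.rs` hunk above only drops the `cfg(bootstrap)` gates; `Utf8Error` and `ParseBoolError` keep their long-stable `Error` impls, so both still convert into `Box<dyn Error>` via `?`. A small illustrative example (editorial, not part of the patch):

```rust
// Editorial sketch: both error types implement `std::error::Error`,
// so `?` can box them into a `dyn Error` as usual.
use std::error::Error;

fn parse_flag(bytes: &[u8]) -> Result<bool, Box<dyn Error>> {
    let s = std::str::from_utf8(bytes)?; // Utf8Error -> Box<dyn Error>
    Ok(s.parse::<bool>()?)               // ParseBoolError -> Box<dyn Error>
}

fn main() {
    assert_eq!(parse_flag(b"true").unwrap(), true);
    assert!(parse_flag(b"\xFF").is_err()); // invalid UTF-8
    assert!(parse_flag(b"yes").is_err());  // not "true"/"false"
}
```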
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs index e9649fc91..d3ed811b1 100644 --- a/library/core/src/str/traits.rs +++ b/library/core/src/str/traits.rs @@ -507,7 +507,6 @@ unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> { /// /// ``` /// use std::str::FromStr; -/// use std::num::ParseIntError; /// /// #[derive(Debug, PartialEq)] /// struct Point { @@ -515,18 +514,21 @@ unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> { /// y: i32 /// } /// +/// #[derive(Debug, PartialEq, Eq)] +/// struct ParsePointError; +/// /// impl FromStr for Point { -/// type Err = ParseIntError; +/// type Err = ParsePointError; /// /// fn from_str(s: &str) -> Result<Self, Self::Err> { /// let (x, y) = s /// .strip_prefix('(') /// .and_then(|s| s.strip_suffix(')')) /// .and_then(|s| s.split_once(',')) -/// .unwrap(); +/// .ok_or(ParsePointError)?; /// -/// let x_fromstr = x.parse::<i32>()?; -/// let y_fromstr = y.parse::<i32>()?; +/// let x_fromstr = x.parse::<i32>().map_err(|_| ParsePointError)?; +/// let y_fromstr = y.parse::<i32>().map_err(|_| ParsePointError)?; /// /// Ok(Point { x: x_fromstr, y: y_fromstr }) /// } @@ -538,6 +540,8 @@ unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> { /// // Implicit calls, through parse /// assert_eq!("(1,2)".parse(), expected); /// assert_eq!("(1,2)".parse::<Point>(), expected); +/// // Invalid input string +/// assert!(Point::from_str("(1 2)").is_err()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub trait FromStr: Sized { @@ -573,8 +577,8 @@ impl FromStr for bool { /// Parse a `bool` from a string. /// - /// Yields a `Result<bool, ParseBoolError>`, because `s` may or may not - /// actually be parseable. + /// The only accepted values are `"true"` and `"false"`. Any other input + /// will return an error. /// /// # Examples /// diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 3c96290fc..edc68d6fa 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -294,7 +294,7 @@ impl AtomicBool { /// ``` /// use std::sync::atomic::AtomicBool; /// - /// let atomic_true = AtomicBool::new(true); + /// let atomic_true = AtomicBool::new(true); /// let atomic_false = AtomicBool::new(false); /// ``` #[inline] @@ -955,6 +955,14 @@ impl AtomicBool { /// **Note:** This method is only available on platforms that support atomic /// operations on `u8`. /// + /// # Considerations + /// + /// This method is not magic; it is not provided by the hardware. + /// It is implemented in terms of [`AtomicBool::compare_exchange_weak`], and suffers from the same drawbacks. + /// In particular, this method will not circumvent the [ABA Problem]. 
+ /// + /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem + /// /// # Examples /// /// ```rust @@ -1171,7 +1179,7 @@ impl<T> AtomicPtr<T> { /// use std::sync::atomic::{AtomicPtr, Ordering}; /// /// let ptr = &mut 5; - /// let some_ptr = AtomicPtr::new(ptr); + /// let some_ptr = AtomicPtr::new(ptr); /// /// let value = some_ptr.load(Ordering::Relaxed); /// ``` @@ -1198,7 +1206,7 @@ impl<T> AtomicPtr<T> { /// use std::sync::atomic::{AtomicPtr, Ordering}; /// /// let ptr = &mut 5; - /// let some_ptr = AtomicPtr::new(ptr); + /// let some_ptr = AtomicPtr::new(ptr); /// /// let other_ptr = &mut 10; /// @@ -1230,7 +1238,7 @@ impl<T> AtomicPtr<T> { /// use std::sync::atomic::{AtomicPtr, Ordering}; /// /// let ptr = &mut 5; - /// let some_ptr = AtomicPtr::new(ptr); + /// let some_ptr = AtomicPtr::new(ptr); /// /// let other_ptr = &mut 10; /// @@ -1282,9 +1290,9 @@ impl<T> AtomicPtr<T> { /// use std::sync::atomic::{AtomicPtr, Ordering}; /// /// let ptr = &mut 5; - /// let some_ptr = AtomicPtr::new(ptr); + /// let some_ptr = AtomicPtr::new(ptr); /// - /// let other_ptr = &mut 10; + /// let other_ptr = &mut 10; /// /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed); /// ``` @@ -1325,9 +1333,9 @@ impl<T> AtomicPtr<T> { /// use std::sync::atomic::{AtomicPtr, Ordering}; /// /// let ptr = &mut 5; - /// let some_ptr = AtomicPtr::new(ptr); + /// let some_ptr = AtomicPtr::new(ptr); /// - /// let other_ptr = &mut 10; + /// let other_ptr = &mut 10; /// /// let value = some_ptr.compare_exchange(ptr, other_ptr, /// Ordering::SeqCst, Ordering::Relaxed); @@ -1422,6 +1430,14 @@ impl<T> AtomicPtr<T> { /// **Note:** This method is only available on platforms that support atomic /// operations on pointers. /// + /// # Considerations + /// + /// This method is not magic; it is not provided by the hardware. + /// It is implemented in terms of [`AtomicPtr::compare_exchange_weak`], and suffers from the same drawbacks. + /// In particular, this method will not circumvent the [ABA Problem]. + /// + /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem + /// /// # Examples /// /// ```rust @@ -1626,8 +1642,8 @@ impl<T> AtomicPtr<T> { /// and the argument `val`, and stores a pointer with provenance of the /// current pointer and the resulting address. /// - /// This is equivalent equivalent to using [`map_addr`] to atomically - /// perform `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged + /// This is equivalent to using [`map_addr`] to atomically perform + /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged /// pointer schemes to atomically set tag bits. /// /// **Caveat**: This operation returns the previous value. To compute the @@ -1677,8 +1693,8 @@ impl<T> AtomicPtr<T> { /// pointer, and the argument `val`, and stores a pointer with provenance of /// the current pointer and the resulting address. /// - /// This is equivalent equivalent to using [`map_addr`] to atomically - /// perform `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged + /// This is equivalent to using [`map_addr`] to atomically perform + /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged /// pointer schemes to atomically unset tag bits. /// /// **Caveat**: This operation returns the previous value. To compute the @@ -1727,8 +1743,8 @@ impl<T> AtomicPtr<T> { /// pointer, and the argument `val`, and stores a pointer with provenance of /// the current pointer and the resulting address. 
/// - /// This is equivalent equivalent to using [`map_addr`] to atomically - /// perform `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged + /// This is equivalent to using [`map_addr`] to atomically perform + /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged /// pointer schemes to atomically toggle tag bits. /// /// **Caveat**: This operation returns the previous value. To compute the @@ -2510,6 +2526,16 @@ macro_rules! atomic_int { /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// + /// # Considerations + /// + /// This method is not magic; it is not provided by the hardware. + /// It is implemented in terms of + #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange_weak`],")] + /// and suffers from the same drawbacks. + /// In particular, this method will not circumvent the [ABA Problem]. + /// + /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem + /// /// # Examples /// /// ```rust diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs index a7519ab5a..c65c27500 100644 --- a/library/core/src/sync/exclusive.rs +++ b/library/core/src/sync/exclusive.rs @@ -100,6 +100,7 @@ impl<T: Sized> Exclusive<T> { /// Wrap a value in an `Exclusive` #[unstable(feature = "exclusive_wrapper", issue = "98407")] #[must_use] + #[inline] pub const fn new(t: T) -> Self { Self { inner: t } } @@ -107,6 +108,7 @@ impl<T: Sized> Exclusive<T> { /// Unwrap the value contained in the `Exclusive` #[unstable(feature = "exclusive_wrapper", issue = "98407")] #[must_use] + #[inline] pub const fn into_inner(self) -> T { self.inner } @@ -116,6 +118,7 @@ impl<T: ?Sized> Exclusive<T> { /// Get exclusive access to the underlying value. #[unstable(feature = "exclusive_wrapper", issue = "98407")] #[must_use] + #[inline] pub const fn get_mut(&mut self) -> &mut T { &mut self.inner } @@ -128,6 +131,7 @@ impl<T: ?Sized> Exclusive<T> { /// produce _pinned_ access to the underlying value. #[unstable(feature = "exclusive_wrapper", issue = "98407")] #[must_use] + #[inline] pub const fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { // SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned // `Pin::map_unchecked_mut` is not const, so we do this conversion manually @@ -139,6 +143,7 @@ impl<T: ?Sized> Exclusive<T> { /// building an `Exclusive` with [`Exclusive::new`]. #[unstable(feature = "exclusive_wrapper", issue = "98407")] #[must_use] + #[inline] pub const fn from_mut(r: &'_ mut T) -> &'_ mut Exclusive<T> { // SAFETY: repr is ≥ C, so refs have the same layout; and `Exclusive` properties are `&mut`-agnostic unsafe { &mut *(r as *mut T as *mut Exclusive<T>) } @@ -149,6 +154,7 @@ impl<T: ?Sized> Exclusive<T> { /// building an `Exclusive` with [`Exclusive::new`]. 
#[unstable(feature = "exclusive_wrapper", issue = "98407")] #[must_use] + #[inline] pub const fn from_pin_mut(r: Pin<&'_ mut T>) -> Pin<&'_ mut Exclusive<T>> { // SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned // `Pin::map_unchecked_mut` is not const, so we do this conversion manually @@ -158,6 +164,7 @@ impl<T: ?Sized> Exclusive<T> { #[unstable(feature = "exclusive_wrapper", issue = "98407")] impl<T> From<T> for Exclusive<T> { + #[inline] fn from(t: T) -> Self { Self::new(t) } @@ -166,7 +173,7 @@ impl<T> From<T> for Exclusive<T> { #[unstable(feature = "exclusive_wrapper", issue = "98407")] impl<T: Future + ?Sized> Future for Exclusive<T> { type Output = T::Output; - + #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { self.get_pin_mut().poll(cx) } diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs index 60ecc9c0b..0cff972df 100644 --- a/library/core/src/task/wake.rs +++ b/library/core/src/task/wake.rs @@ -186,17 +186,19 @@ pub struct Context<'a> { impl<'a> Context<'a> { /// Create a new `Context` from a [`&Waker`](Waker). #[stable(feature = "futures_api", since = "1.36.0")] + #[rustc_const_unstable(feature = "const_waker", issue = "102012")] #[must_use] #[inline] - pub fn from_waker(waker: &'a Waker) -> Self { + pub const fn from_waker(waker: &'a Waker) -> Self { Context { waker, _marker: PhantomData } } /// Returns a reference to the [`Waker`] for the current task. #[stable(feature = "futures_api", since = "1.36.0")] + #[rustc_const_unstable(feature = "const_waker", issue = "102012")] #[must_use] #[inline] - pub fn waker(&self) -> &'a Waker { + pub const fn waker(&self) -> &'a Waker { &self.waker } } @@ -311,7 +313,8 @@ impl Waker { #[inline] #[must_use] #[stable(feature = "futures_api", since = "1.36.0")] - pub unsafe fn from_raw(waker: RawWaker) -> Waker { + #[rustc_const_unstable(feature = "const_waker", issue = "102012")] + pub const unsafe fn from_raw(waker: RawWaker) -> Waker { Waker { waker } } diff --git a/library/core/src/time.rs b/library/core/src/time.rs index 4f29ecc0f..ba1cb6efa 100644 --- a/library/core/src/time.rs +++ b/library/core/src/time.rs @@ -29,6 +29,20 @@ const NANOS_PER_MICRO: u32 = 1_000; const MILLIS_PER_SEC: u64 = 1_000; const MICROS_PER_SEC: u64 = 1_000_000; +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(transparent)] +#[rustc_layout_scalar_valid_range_start(0)] +#[rustc_layout_scalar_valid_range_end(999_999_999)] +struct Nanoseconds(u32); + +impl Default for Nanoseconds { + #[inline] + fn default() -> Self { + // SAFETY: 0 is within the valid range + unsafe { Nanoseconds(0) } + } +} + /// A `Duration` type to represent a span of time, typically used for system /// timeouts. /// @@ -71,7 +85,7 @@ const MICROS_PER_SEC: u64 = 1_000_000; #[cfg_attr(not(test), rustc_diagnostic_item = "Duration")] pub struct Duration { secs: u64, - nanos: u32, // Always 0 <= nanos < NANOS_PER_SEC + nanos: Nanoseconds, // Always 0 <= nanos < NANOS_PER_SEC } impl Duration { @@ -188,7 +202,8 @@ impl Duration { None => panic!("overflow in Duration::new"), }; let nanos = nanos % NANOS_PER_SEC; - Duration { secs, nanos } + // SAFETY: nanos % NANOS_PER_SEC < NANOS_PER_SEC, therefore nanos is within the valid range + Duration { secs, nanos: unsafe { Nanoseconds(nanos) } } } /// Creates a new `Duration` from the specified number of whole seconds. 
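[Editorial note] The `Nanoseconds` newtype introduced above restricts the nanosecond field to `0..=999_999_999`, which gives `Duration` a layout niche that enums can reuse. A hedged way to observe the effect (exact sizes are a layout detail of the toolchain, not a guarantee):

```rust
// Editorial sketch: with the niche from `Nanoseconds`, `Option<Duration>`
// is expected to be the same size as `Duration` (16 bytes on this version);
// layout is toolchain-dependent, so this only prints the sizes.
use std::mem::size_of;
use std::time::Duration;

fn main() {
    println!("Duration:         {} bytes", size_of::<Duration>());
    println!("Option<Duration>: {} bytes", size_of::<Option<Duration>>());
}
```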
@@ -208,7 +223,7 @@ impl Duration { #[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] pub const fn from_secs(secs: u64) -> Duration { - Duration { secs, nanos: 0 } + Duration::new(secs, 0) } /// Creates a new `Duration` from the specified number of milliseconds. @@ -228,10 +243,7 @@ impl Duration { #[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] pub const fn from_millis(millis: u64) -> Duration { - Duration { - secs: millis / MILLIS_PER_SEC, - nanos: ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI, - } + Duration::new(millis / MILLIS_PER_SEC, ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI) } /// Creates a new `Duration` from the specified number of microseconds. @@ -251,10 +263,7 @@ impl Duration { #[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] pub const fn from_micros(micros: u64) -> Duration { - Duration { - secs: micros / MICROS_PER_SEC, - nanos: ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO, - } + Duration::new(micros / MICROS_PER_SEC, ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO) } /// Creates a new `Duration` from the specified number of nanoseconds. @@ -274,10 +283,7 @@ impl Duration { #[inline] #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")] pub const fn from_nanos(nanos: u64) -> Duration { - Duration { - secs: nanos / (NANOS_PER_SEC as u64), - nanos: (nanos % (NANOS_PER_SEC as u64)) as u32, - } + Duration::new(nanos / (NANOS_PER_SEC as u64), (nanos % (NANOS_PER_SEC as u64)) as u32) } /// Returns true if this `Duration` spans no time. @@ -301,7 +307,7 @@ impl Duration { #[rustc_const_stable(feature = "duration_zero", since = "1.53.0")] #[inline] pub const fn is_zero(&self) -> bool { - self.secs == 0 && self.nanos == 0 + self.secs == 0 && self.nanos.0 == 0 } /// Returns the number of _whole_ seconds contained by this `Duration`. @@ -352,7 +358,7 @@ impl Duration { #[must_use] #[inline] pub const fn subsec_millis(&self) -> u32 { - self.nanos / NANOS_PER_MILLI + self.nanos.0 / NANOS_PER_MILLI } /// Returns the fractional part of this `Duration`, in whole microseconds. @@ -375,7 +381,7 @@ impl Duration { #[must_use] #[inline] pub const fn subsec_micros(&self) -> u32 { - self.nanos / NANOS_PER_MICRO + self.nanos.0 / NANOS_PER_MICRO } /// Returns the fractional part of this `Duration`, in nanoseconds. @@ -398,7 +404,7 @@ impl Duration { #[must_use] #[inline] pub const fn subsec_nanos(&self) -> u32 { - self.nanos + self.nanos.0 } /// Returns the total number of whole milliseconds contained by this `Duration`. @@ -416,7 +422,7 @@ impl Duration { #[must_use] #[inline] pub const fn as_millis(&self) -> u128 { - self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128 + self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos.0 / NANOS_PER_MILLI) as u128 } /// Returns the total number of whole microseconds contained by this `Duration`. @@ -434,7 +440,7 @@ impl Duration { #[must_use] #[inline] pub const fn as_micros(&self) -> u128 { - self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128 + self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos.0 / NANOS_PER_MICRO) as u128 } /// Returns the total number of nanoseconds contained by this `Duration`. 
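[Editorial note] The constructors above now go through `Duration::new`, which normalizes any nanosecond carry into whole seconds instead of building the struct fields directly. A short worked example of that normalization (editorial, stable API only):

```rust
// Editorial sketch: `Duration::new` carries nanoseconds >= 10^9 into seconds,
// so the delegating constructors produce the same normalized values as before.
use std::time::Duration;

fn main() {
    assert_eq!(Duration::new(1, 1_500_000_000), Duration::new(2, 500_000_000));
    assert_eq!(Duration::from_millis(2_500), Duration::new(2, 500_000_000));
    assert_eq!(Duration::from_micros(1_000_002), Duration::new(1, 2_000));
    assert_eq!(Duration::from_nanos(1_000_000_123), Duration::new(1, 123));
}
```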
@@ -452,7 +458,7 @@ impl Duration { #[must_use] #[inline] pub const fn as_nanos(&self) -> u128 { - self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128 + self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos.0 as u128 } /// Checked `Duration` addition. Computes `self + other`, returning [`None`] @@ -475,7 +481,7 @@ impl Duration { #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] pub const fn checked_add(self, rhs: Duration) -> Option<Duration> { if let Some(mut secs) = self.secs.checked_add(rhs.secs) { - let mut nanos = self.nanos + rhs.nanos; + let mut nanos = self.nanos.0 + rhs.nanos.0; if nanos >= NANOS_PER_SEC { nanos -= NANOS_PER_SEC; if let Some(new_secs) = secs.checked_add(1) { @@ -485,7 +491,7 @@ impl Duration { } } debug_assert!(nanos < NANOS_PER_SEC); - Some(Duration { secs, nanos }) + Some(Duration::new(secs, nanos)) } else { None } @@ -535,16 +541,16 @@ impl Duration { #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] pub const fn checked_sub(self, rhs: Duration) -> Option<Duration> { if let Some(mut secs) = self.secs.checked_sub(rhs.secs) { - let nanos = if self.nanos >= rhs.nanos { - self.nanos - rhs.nanos + let nanos = if self.nanos.0 >= rhs.nanos.0 { + self.nanos.0 - rhs.nanos.0 } else if let Some(sub_secs) = secs.checked_sub(1) { secs = sub_secs; - self.nanos + NANOS_PER_SEC - rhs.nanos + self.nanos.0 + NANOS_PER_SEC - rhs.nanos.0 } else { return None; }; debug_assert!(nanos < NANOS_PER_SEC); - Some(Duration { secs, nanos }) + Some(Duration::new(secs, nanos)) } else { None } @@ -593,13 +599,13 @@ impl Duration { #[rustc_const_stable(feature = "duration_consts_2", since = "1.58.0")] pub const fn checked_mul(self, rhs: u32) -> Option<Duration> { // Multiply nanoseconds as u64, because it cannot overflow that way. - let total_nanos = self.nanos as u64 * rhs as u64; + let total_nanos = self.nanos.0 as u64 * rhs as u64; let extra_secs = total_nanos / (NANOS_PER_SEC as u64); let nanos = (total_nanos % (NANOS_PER_SEC as u64)) as u32; if let Some(s) = self.secs.checked_mul(rhs as u64) { if let Some(secs) = s.checked_add(extra_secs) { debug_assert!(nanos < NANOS_PER_SEC); - return Some(Duration { secs, nanos }); + return Some(Duration::new(secs, nanos)); } } None @@ -653,9 +659,9 @@ impl Duration { let secs = self.secs / (rhs as u64); let carry = self.secs - secs * (rhs as u64); let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64); - let nanos = self.nanos / rhs + (extra_nanos as u32); + let nanos = self.nanos.0 / rhs + (extra_nanos as u32); debug_assert!(nanos < NANOS_PER_SEC); - Some(Duration { secs, nanos }) + Some(Duration::new(secs, nanos)) } else { None } @@ -677,7 +683,7 @@ impl Duration { #[inline] #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")] pub const fn as_secs_f64(&self) -> f64 { - (self.secs as f64) + (self.nanos as f64) / (NANOS_PER_SEC as f64) + (self.secs as f64) + (self.nanos.0 as f64) / (NANOS_PER_SEC as f64) } /// Returns the number of seconds contained by this `Duration` as `f32`. @@ -696,7 +702,7 @@ impl Duration { #[inline] #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")] pub const fn as_secs_f32(&self) -> f32 { - (self.secs as f32) + (self.nanos as f32) / (NANOS_PER_SEC as f32) + (self.secs as f32) + (self.nanos.0 as f32) / (NANOS_PER_SEC as f32) } /// Creates a new `Duration` from the specified number of seconds represented @@ -987,13 +993,13 @@ macro_rules! 
sum_durations { for entry in $iter { total_secs = total_secs.checked_add(entry.secs).expect("overflow in iter::sum over durations"); - total_nanos = match total_nanos.checked_add(entry.nanos as u64) { + total_nanos = match total_nanos.checked_add(entry.nanos.0 as u64) { Some(n) => n, None => { total_secs = total_secs .checked_add(total_nanos / NANOS_PER_SEC as u64) .expect("overflow in iter::sum over durations"); - (total_nanos % NANOS_PER_SEC as u64) + entry.nanos as u64 + (total_nanos % NANOS_PER_SEC as u64) + entry.nanos.0 as u64 } }; } @@ -1001,7 +1007,7 @@ macro_rules! sum_durations { .checked_add(total_nanos / NANOS_PER_SEC as u64) .expect("overflow in iter::sum over durations"); total_nanos = total_nanos % NANOS_PER_SEC as u64; - Duration { secs: total_secs, nanos: total_nanos as u32 } + Duration::new(total_secs, total_nanos as u32) }}; } @@ -1037,7 +1043,7 @@ impl fmt::Debug for Duration { /// to the formatter's `width`, if specified. fn fmt_decimal( f: &mut fmt::Formatter<'_>, - mut integer_part: u64, + integer_part: u64, mut fractional_part: u32, mut divisor: u32, prefix: &str, @@ -1069,7 +1075,7 @@ impl fmt::Debug for Duration { // normal floating point numbers. However, we only need to do work // when rounding up. This happens if the first digit of the // remaining ones is >= 5. - if fractional_part > 0 && fractional_part >= divisor * 5 { + let integer_part = if fractional_part > 0 && fractional_part >= divisor * 5 { // Round up the number contained in the buffer. We go through // the buffer backwards and keep track of the carry. let mut rev_pos = pos; @@ -1093,9 +1099,18 @@ impl fmt::Debug for Duration { // the whole buffer to '0's and need to increment the integer // part. if carry { - integer_part += 1; + // If `integer_part == u64::MAX` and precision < 9, any + // carry of the overflow during rounding of the + // `fractional_part` into the `integer_part` will cause the + // `integer_part` itself to overflow. Avoid this by using an + // `Option<u64>`, with `None` representing `u64::MAX + 1`. + integer_part.checked_add(1) + } else { + Some(integer_part) } - } + } else { + Some(integer_part) + }; // Determine the end of the buffer: if precision is set, we just // use as many digits from the buffer (capped to 9). If it isn't @@ -1105,7 +1120,12 @@ impl fmt::Debug for Duration { // This closure emits the formatted duration without emitting any // padding (padding is calculated below). let emit_without_padding = |f: &mut fmt::Formatter<'_>| { - write!(f, "{}{}", prefix, integer_part)?; + if let Some(integer_part) = integer_part { + write!(f, "{}{}", prefix, integer_part)?; + } else { + // u64::MAX + 1 == 18446744073709551616 + write!(f, "{}18446744073709551616", prefix)?; + } // Write the decimal point and the fractional part (if any). if end > 0 { @@ -1135,12 +1155,17 @@ impl fmt::Debug for Duration { // 2. The postfix: can be "µs" so we have to count UTF8 characters. let mut actual_w = prefix.len() + postfix.chars().count(); // 3. The integer part: - if let Some(log) = integer_part.checked_ilog10() { - // integer_part is > 0, so has length log10(x)+1 - actual_w += 1 + log as usize; + if let Some(integer_part) = integer_part { + if let Some(log) = integer_part.checked_ilog10() { + // integer_part is > 0, so has length log10(x)+1 + actual_w += 1 + log as usize; + } else { + // integer_part is 0, so has length 1. + actual_w += 1; + } } else { - // integer_part is 0, so has length 1. - actual_w += 1; + // integer_part is u64::MAX + 1, so has length 20 + actual_w += 20; } // 4. 
The fractional part (if any): if end > 0 { @@ -1166,27 +1191,27 @@ impl fmt::Debug for Duration { let prefix = if f.sign_plus() { "+" } else { "" }; if self.secs > 0 { - fmt_decimal(f, self.secs, self.nanos, NANOS_PER_SEC / 10, prefix, "s") - } else if self.nanos >= NANOS_PER_MILLI { + fmt_decimal(f, self.secs, self.nanos.0, NANOS_PER_SEC / 10, prefix, "s") + } else if self.nanos.0 >= NANOS_PER_MILLI { fmt_decimal( f, - (self.nanos / NANOS_PER_MILLI) as u64, - self.nanos % NANOS_PER_MILLI, + (self.nanos.0 / NANOS_PER_MILLI) as u64, + self.nanos.0 % NANOS_PER_MILLI, NANOS_PER_MILLI / 10, prefix, "ms", ) - } else if self.nanos >= NANOS_PER_MICRO { + } else if self.nanos.0 >= NANOS_PER_MICRO { fmt_decimal( f, - (self.nanos / NANOS_PER_MICRO) as u64, - self.nanos % NANOS_PER_MICRO, + (self.nanos.0 / NANOS_PER_MICRO) as u64, + self.nanos.0 % NANOS_PER_MICRO, NANOS_PER_MICRO / 10, prefix, "µs", ) } else { - fmt_decimal(f, self.nanos as u64, 0, 1, prefix, "ns") + fmt_decimal(f, self.nanos.0 as u64, 0, 1, prefix, "ns") } } } @@ -1200,7 +1225,6 @@ impl fmt::Debug for Duration { /// # Example /// /// ``` -/// #![feature(duration_checked_float)] /// use std::time::Duration; /// /// if let Err(e) = Duration::try_from_secs_f32(-1.0) { @@ -1208,33 +1232,33 @@ impl fmt::Debug for Duration { /// } /// ``` #[derive(Debug, Clone, PartialEq, Eq)] -#[unstable(feature = "duration_checked_float", issue = "83400")] -pub struct FromFloatSecsError { - kind: FromFloatSecsErrorKind, +#[stable(feature = "duration_checked_float", since = "1.66.0")] +pub struct TryFromFloatSecsError { + kind: TryFromFloatSecsErrorKind, } -impl FromFloatSecsError { +impl TryFromFloatSecsError { const fn description(&self) -> &'static str { match self.kind { - FromFloatSecsErrorKind::Negative => { + TryFromFloatSecsErrorKind::Negative => { "can not convert float seconds to Duration: value is negative" } - FromFloatSecsErrorKind::OverflowOrNan => { + TryFromFloatSecsErrorKind::OverflowOrNan => { "can not convert float seconds to Duration: value is either too big or NaN" } } } } -#[unstable(feature = "duration_checked_float", issue = "83400")] -impl fmt::Display for FromFloatSecsError { +#[stable(feature = "duration_checked_float", since = "1.66.0")] +impl fmt::Display for TryFromFloatSecsError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.description().fmt(f) } } #[derive(Debug, Clone, PartialEq, Eq)] -enum FromFloatSecsErrorKind { +enum TryFromFloatSecsErrorKind { // Value is negative. Negative, // Value is either too big to be represented as `Duration` or `NaN`. @@ -1254,8 +1278,8 @@ macro_rules! try_from_secs { const MANT_MASK: $bits_ty = (1 << $mant_bits) - 1; const EXP_MASK: $bits_ty = (1 << $exp_bits) - 1; - if $secs.is_sign_negative() { - return Err(FromFloatSecsError { kind: FromFloatSecsErrorKind::Negative }); + if $secs < 0.0 { + return Err(TryFromFloatSecsError { kind: TryFromFloatSecsErrorKind::Negative }); } let bits = $secs.to_bits(); @@ -1314,10 +1338,10 @@ macro_rules! 
try_from_secs { let secs = u64::from(mant) << (exp - $mant_bits); (secs, 0) } else { - return Err(FromFloatSecsError { kind: FromFloatSecsErrorKind::OverflowOrNan }); + return Err(TryFromFloatSecsError { kind: TryFromFloatSecsErrorKind::OverflowOrNan }); }; - Ok(Duration { secs, nanos }) + Ok(Duration::new(secs, nanos)) }}; } @@ -1330,8 +1354,6 @@ impl Duration { /// /// # Examples /// ``` - /// #![feature(duration_checked_float)] - /// /// use std::time::Duration; /// /// let res = Duration::try_from_secs_f32(0.0); @@ -1379,9 +1401,10 @@ impl Duration { /// let res = Duration::try_from_secs_f32(val); /// assert_eq!(res, Ok(Duration::new(1, 2_929_688))); /// ``` - #[unstable(feature = "duration_checked_float", issue = "83400")] + #[stable(feature = "duration_checked_float", since = "1.66.0")] + #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")] #[inline] - pub const fn try_from_secs_f32(secs: f32) -> Result<Duration, FromFloatSecsError> { + pub const fn try_from_secs_f32(secs: f32) -> Result<Duration, TryFromFloatSecsError> { try_from_secs!( secs = secs, mantissa_bits = 23, @@ -1400,8 +1423,6 @@ impl Duration { /// /// # Examples /// ``` - /// #![feature(duration_checked_float)] - /// /// use std::time::Duration; /// /// let res = Duration::try_from_secs_f64(0.0); @@ -1457,9 +1478,10 @@ impl Duration { /// let res = Duration::try_from_secs_f64(val); /// assert_eq!(res, Ok(Duration::new(1, 2_929_688))); /// ``` - #[unstable(feature = "duration_checked_float", issue = "83400")] + #[stable(feature = "duration_checked_float", since = "1.66.0")] + #[rustc_const_unstable(feature = "duration_consts_float", issue = "72440")] #[inline] - pub const fn try_from_secs_f64(secs: f64) -> Result<Duration, FromFloatSecsError> { + pub const fn try_from_secs_f64(secs: f64) -> Result<Duration, TryFromFloatSecsError> { try_from_secs!( secs = secs, mantissa_bits = 52, diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs index aa8a2425b..fc91fe468 100644 --- a/library/core/src/tuple.rs +++ b/library/core/src/tuple.rs @@ -93,7 +93,8 @@ macro_rules! tuple_impls { maybe_tuple_doc! 
{ $($T)+ @ #[stable(feature = "rust1", since = "1.0.0")] - impl<$($T:Default),+> Default for ($($T,)+) { + #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] + impl<$($T: ~const Default),+> const Default for ($($T,)+) { #[inline] fn default() -> ($($T,)+) { ($({ let x: $T = Default::default(); x},)+) diff --git a/library/core/src/unicode/printable.rs b/library/core/src/unicode/printable.rs index 31cf88a41..ffb18a5ba 100644 --- a/library/core/src/unicode/printable.rs +++ b/library/core/src/unicode/printable.rs @@ -54,7 +54,7 @@ pub(crate) fn is_printable(x: char) -> bool { if 0x2a6e0 <= x && x < 0x2a700 { return false; } - if 0x2b739 <= x && x < 0x2b740 { + if 0x2b73a <= x && x < 0x2b740 { return false; } if 0x2b81e <= x && x < 0x2b820 { @@ -69,7 +69,10 @@ pub(crate) fn is_printable(x: char) -> bool { if 0x2fa1e <= x && x < 0x30000 { return false; } - if 0x3134b <= x && x < 0xe0100 { + if 0x3134b <= x && x < 0x31350 { + return false; + } + if 0x323b0 <= x && x < 0xe0100 { return false; } if 0xe01f0 <= x && x < 0x110000 { @@ -92,7 +95,7 @@ const SINGLETONS0U: &[(u8, u8)] = &[ (0x0b, 25), (0x0c, 26), (0x0d, 16), - (0x0e, 13), + (0x0e, 12), (0x0f, 4), (0x10, 3), (0x12, 18), @@ -142,24 +145,24 @@ const SINGLETONS0L: &[u8] = &[ 0xe4, 0xe5, 0xf0, 0x0d, 0x11, 0x45, 0x49, 0x64, 0x65, 0x80, 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5, 0xd7, 0xf0, 0xf1, 0x83, 0x85, 0x8b, 0xa4, 0xa6, - 0xbe, 0xbf, 0xc5, 0xc7, 0xce, 0xcf, 0xda, 0xdb, - 0x48, 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, - 0x4e, 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, - 0x8f, 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7, - 0xd7, 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, - 0xfe, 0xff, 0x80, 0x6d, 0x71, 0xde, 0xdf, 0x0e, - 0x1f, 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e, - 0xae, 0xaf, 0x7f, 0xbb, 0xbc, 0x16, 0x17, 0x1e, - 0x1f, 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, - 0x5e, 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, - 0xf0, 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75, - 0x96, 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf, - 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98, - 0x30, 0x8f, 0x1f, 0xd2, 0xd4, 0xce, 0xff, 0x4e, - 0x4f, 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27, - 0x2f, 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f, - 0x42, 0x45, 0x90, 0x91, 0x53, 0x67, 0x75, 0xc8, - 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff, + 0xbe, 0xbf, 0xc5, 0xc7, 0xcf, 0xda, 0xdb, 0x48, + 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, 0x4e, + 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, 0x8f, + 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7, 0xd7, + 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, 0xfe, + 0xff, 0x80, 0x6d, 0x71, 0xde, 0xdf, 0x0e, 0x1f, + 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e, 0xae, + 0xaf, 0x7f, 0xbb, 0xbc, 0x16, 0x17, 0x1e, 0x1f, + 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, 0x5e, + 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, 0xf0, + 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75, 0x96, + 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf, 0xc7, + 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98, 0x30, + 0x8f, 0x1f, 0xd2, 0xd4, 0xce, 0xff, 0x4e, 0x4f, + 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27, 0x2f, + 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f, 0x42, + 0x45, 0x90, 0x91, 0x53, 0x67, 0x75, 0xc8, 0xc9, + 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff, ]; #[rustfmt::skip] const SINGLETONS1U: &[(u8, u8)] = &[ @@ -184,10 +187,12 @@ const SINGLETONS1U: &[(u8, u8)] = &[ (0x19, 13), (0x1c, 5), (0x1d, 8), + (0x1f, 1), (0x24, 1), (0x6a, 4), (0x6b, 2), (0xaf, 3), + (0xb1, 2), (0xbc, 2), (0xcf, 2), (0xd1, 2), @@ -203,7 +208,7 @@ const SINGLETONS1U: &[(u8, u8)] = &[ (0xee, 32), (0xf0, 4), (0xf8, 2), - 
(0xfa, 2), + (0xfa, 3), (0xfb, 1), ]; #[rustfmt::skip] @@ -220,18 +225,19 @@ const SINGLETONS1L: &[u8] = &[ 0x0a, 0x0b, 0x14, 0x17, 0x36, 0x39, 0x3a, 0xa8, 0xa9, 0xd8, 0xd9, 0x09, 0x37, 0x90, 0x91, 0xa8, 0x07, 0x0a, 0x3b, 0x3e, 0x66, 0x69, 0x8f, 0x92, - 0x6f, 0x5f, 0xbf, 0xee, 0xef, 0x5a, 0x62, 0xf4, - 0xfc, 0xff, 0x9a, 0x9b, 0x2e, 0x2f, 0x27, 0x28, - 0x55, 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7, 0xa8, - 0xad, 0xba, 0xbc, 0xc4, 0x06, 0x0b, 0x0c, 0x15, - 0x1d, 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7, 0xcc, - 0xcd, 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25, 0x3e, - 0x3f, 0xe7, 0xec, 0xef, 0xff, 0xc5, 0xc6, 0x04, - 0x20, 0x23, 0x25, 0x26, 0x28, 0x33, 0x38, 0x3a, - 0x48, 0x4a, 0x4c, 0x50, 0x53, 0x55, 0x56, 0x58, - 0x5a, 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, 0x6b, - 0x73, 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, 0xaf, - 0xb0, 0xc0, 0xd0, 0xae, 0xaf, 0x6e, 0x6f, 0x93, + 0x11, 0x6f, 0x5f, 0xbf, 0xee, 0xef, 0x5a, 0x62, + 0xf4, 0xfc, 0xff, 0x53, 0x54, 0x9a, 0x9b, 0x2e, + 0x2f, 0x27, 0x28, 0x55, 0x9d, 0xa0, 0xa1, 0xa3, + 0xa4, 0xa7, 0xa8, 0xad, 0xba, 0xbc, 0xc4, 0x06, + 0x0b, 0x0c, 0x15, 0x1d, 0x3a, 0x3f, 0x45, 0x51, + 0xa6, 0xa7, 0xcc, 0xcd, 0xa0, 0x07, 0x19, 0x1a, + 0x22, 0x25, 0x3e, 0x3f, 0xe7, 0xec, 0xef, 0xff, + 0xc5, 0xc6, 0x04, 0x20, 0x23, 0x25, 0x26, 0x28, + 0x33, 0x38, 0x3a, 0x48, 0x4a, 0x4c, 0x50, 0x53, + 0x55, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x63, + 0x65, 0x66, 0x6b, 0x73, 0x78, 0x7d, 0x7f, 0x8a, + 0xa4, 0xaa, 0xaf, 0xb0, 0xc0, 0xd0, 0xae, 0xaf, + 0x6e, 0x6f, 0xbe, 0x93, ]; #[rustfmt::skip] const NORMAL0: &[u8] = &[ @@ -272,7 +278,7 @@ const NORMAL0: &[u8] = &[ 0x1b, 0x07, 0x57, 0x07, 0x02, 0x06, - 0x16, 0x0d, + 0x17, 0x0c, 0x50, 0x04, 0x43, 0x03, 0x2d, 0x03, @@ -424,8 +430,8 @@ const NORMAL1: &[u8] = &[ 0x33, 0x07, 0x2e, 0x08, 0x0a, 0x81, 0x26, - 0x52, 0x4e, - 0x28, 0x08, + 0x52, 0x4b, + 0x2b, 0x08, 0x2a, 0x16, 0x1a, 0x26, 0x1c, 0x14, @@ -438,7 +444,7 @@ const NORMAL1: &[u8] = &[ 0x48, 0x08, 0x27, 0x09, 0x75, 0x0b, - 0x3f, 0x41, + 0x42, 0x3e, 0x2a, 0x06, 0x3b, 0x05, 0x0a, 0x06, @@ -464,7 +470,8 @@ const NORMAL1: &[u8] = &[ 0x45, 0x1b, 0x48, 0x08, 0x53, 0x0d, - 0x49, 0x81, 0x07, + 0x49, 0x07, + 0x0a, 0x80, 0xf6, 0x46, 0x0a, 0x1d, 0x03, 0x47, 0x49, @@ -473,14 +480,17 @@ const NORMAL1: &[u8] = &[ 0x0a, 0x06, 0x39, 0x07, 0x0a, 0x81, 0x36, - 0x19, 0x80, 0xb7, + 0x19, 0x07, + 0x3b, 0x03, + 0x1c, 0x56, 0x01, 0x0f, 0x32, 0x0d, 0x83, 0x9b, 0x66, 0x75, 0x0b, 0x80, 0xc4, 0x8a, 0x4c, 0x63, 0x0d, - 0x84, 0x2f, 0x8f, 0xd1, + 0x84, 0x30, 0x10, + 0x16, 0x8f, 0xaa, 0x82, 0x47, 0xa1, 0xb9, 0x82, 0x39, 0x07, 0x2a, 0x04, @@ -498,8 +508,9 @@ const NORMAL1: &[u8] = &[ 0x97, 0xf8, 0x08, 0x84, 0xd6, 0x2a, 0x09, 0xa2, 0xe7, - 0x81, 0x33, 0x2d, - 0x03, 0x11, + 0x81, 0x33, 0x0f, + 0x01, 0x1d, + 0x06, 0x0e, 0x04, 0x08, 0x81, 0x8c, 0x89, 0x04, 0x6b, 0x05, @@ -511,21 +522,26 @@ const NORMAL1: &[u8] = &[ 0x80, 0xf6, 0x0a, 0x73, 0x08, 0x70, 0x15, - 0x46, 0x80, 0x9a, + 0x46, 0x7a, + 0x14, 0x0c, 0x14, 0x0c, 0x57, 0x09, 0x19, 0x80, 0x87, 0x81, 0x47, 0x03, 0x85, 0x42, 0x0f, 0x15, 0x84, 0x50, - 0x1f, 0x80, 0xe1, - 0x2b, 0x80, 0xd5, + 0x1f, 0x06, + 0x06, 0x80, 0xd5, + 0x2b, 0x05, + 0x3e, 0x21, + 0x01, 0x70, 0x2d, 0x03, 0x1a, 0x04, 0x02, 0x81, 0x40, 0x1f, 0x11, 0x3a, 0x05, - 0x01, 0x84, 0xe0, + 0x01, 0x81, 0xd0, + 0x2a, 0x82, 0xe6, 0x80, 0xf7, 0x29, 0x4c, 0x04, 0x0a, 0x04, @@ -546,11 +562,11 @@ const NORMAL1: &[u8] = &[ 0x09, 0x07, 0x02, 0x0e, 0x06, 0x80, 0x9a, - 0x83, 0xd8, 0x05, - 0x10, 0x03, + 0x83, 0xd8, 0x04, + 0x11, 0x03, 0x0d, 0x03, - 0x74, 0x0c, - 0x59, 0x07, + 0x77, 0x04, + 0x5f, 0x06, 0x0c, 0x04, 0x01, 0x0f, 0x0c, 0x04, @@ -559,15 
+575,12 @@ const NORMAL1: &[u8] = &[ 0x28, 0x08, 0x22, 0x4e, 0x81, 0x54, 0x0c, - 0x15, 0x03, - 0x05, 0x03, - 0x07, 0x09, 0x1d, 0x03, - 0x0b, 0x05, - 0x06, 0x0a, - 0x0a, 0x06, - 0x08, 0x08, - 0x07, 0x09, + 0x09, 0x07, + 0x36, 0x08, + 0x0e, 0x04, + 0x09, 0x07, + 0x09, 0x07, 0x80, 0xcb, 0x25, 0x0a, 0x84, 0x06, ]; diff --git a/library/core/src/unicode/unicode_data.rs b/library/core/src/unicode/unicode_data.rs index 7301da2af..bd69ca520 100644 --- a/library/core/src/unicode/unicode_data.rs +++ b/library/core/src/unicode/unicode_data.rs @@ -99,21 +99,21 @@ fn skip_search<const SOR: usize, const OFFSETS: usize>( offset_idx % 2 == 1 } -pub const UNICODE_VERSION: (u8, u8, u8) = (14, 0, 0); +pub const UNICODE_VERSION: (u8, u8, u8) = (15, 0, 0); #[rustfmt::skip] pub mod alphabetic { - static SHORT_OFFSET_RUNS: [u32; 51] = [ - 706, 33559113, 876615277, 956309270, 1166025910, 1314925568, 1319120901, 1398813696, - 1449151936, 1451271309, 1455465997, 1463867300, 1652619520, 1663105646, 1665203518, - 1711342208, 1797326647, 1891700352, 2044795904, 2397118176, 2485199770, 2495688592, - 2506175535, 2512471040, 2514568775, 2516674560, 2518772281, 2520870464, 2552334328, - 2583792854, 2587996144, 2594287907, 2608968444, 2621553664, 2623656960, 2644629158, - 2722225920, 2770461328, 2808211424, 2816601600, 2850156848, 2988572672, 3001198304, - 3003299641, 3007499938, 3015896033, 3020093440, 3022191134, 3024289792, 3026391883, - 3029603147, + static SHORT_OFFSET_RUNS: [u32; 53] = [ + 706, 33559113, 872420973, 952114966, 1161831606, 1310731264, 1314926597, 1394619392, + 1444957632, 1447077005, 1451271693, 1459672996, 1648425216, 1658911342, 1661009214, + 1707147904, 1793132343, 1887506048, 2040601600, 2392923872, 2481005466, 2504077200, + 2514564144, 2520859648, 2527151687, 2529257472, 2531355193, 2533453376, 2564917240, + 2596375766, 2600579056, 2606870819, 2621551356, 2642525184, 2644628480, 2665600678, + 2743197440, 2791432848, 2841765072, 2850154464, 2854350336, 2887905584, 3026321408, + 3038947040, 3041048378, 3045248674, 3053644769, 3057842176, 3059939870, 3062038528, + 3064140619, 3066241968, 3071550384, ]; - static OFFSETS: [u8; 1445] = [ + static OFFSETS: [u8; 1465] = [ 65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 0, 4, 12, 14, 5, 7, 1, 1, 1, 86, 1, 42, 5, 1, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 2, 1, 6, 41, 39, 14, 1, 1, 1, 2, 1, 2, 1, 1, 8, 27, 4, 4, 29, 11, 5, 56, 1, 7, 14, 102, 1, 8, 4, 8, 4, 3, 10, @@ -123,50 +123,51 @@ pub mod alphabetic { 2, 1, 2, 4, 5, 4, 2, 2, 2, 4, 1, 7, 4, 1, 1, 17, 6, 11, 3, 1, 9, 1, 3, 1, 22, 1, 7, 1, 2, 1, 5, 3, 9, 1, 3, 1, 2, 3, 1, 15, 4, 21, 4, 4, 3, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2, 2, 2, 2, 9, 2, 4, 2, 1, 5, 13, 1, 16, 2, 1, 6, 3, 3, 1, 4, 3, 2, 1, 1, 1, 2, 3, 2, 3, 3, 3, - 12, 4, 5, 3, 3, 1, 3, 3, 1, 6, 1, 40, 4, 1, 8, 1, 3, 1, 23, 1, 16, 3, 8, 1, 3, 1, 3, 8, 2, - 1, 3, 2, 1, 2, 4, 28, 4, 1, 8, 1, 3, 1, 23, 1, 10, 1, 5, 3, 8, 1, 3, 1, 3, 8, 2, 6, 2, 1, 4, - 13, 2, 13, 13, 1, 3, 1, 41, 2, 8, 1, 3, 1, 3, 1, 1, 5, 4, 7, 5, 22, 6, 1, 3, 1, 18, 3, 24, - 1, 9, 1, 1, 2, 7, 8, 6, 1, 1, 1, 8, 18, 2, 13, 58, 5, 7, 6, 1, 51, 2, 1, 1, 1, 5, 1, 24, 1, - 1, 1, 19, 1, 3, 2, 5, 1, 1, 6, 1, 14, 4, 32, 1, 63, 8, 1, 36, 4, 17, 6, 16, 1, 36, 67, 55, - 1, 1, 2, 5, 16, 64, 10, 4, 2, 38, 1, 1, 5, 1, 2, 43, 1, 0, 1, 4, 2, 7, 1, 1, 1, 4, 2, 41, 1, - 4, 2, 33, 1, 4, 2, 7, 1, 1, 1, 4, 2, 15, 1, 57, 1, 4, 2, 67, 37, 16, 16, 86, 2, 6, 3, 0, 2, - 17, 1, 26, 5, 75, 3, 11, 7, 20, 11, 21, 12, 20, 12, 13, 1, 3, 1, 2, 12, 52, 2, 19, 14, 1, 4, - 1, 67, 
89, 7, 43, 5, 70, 10, 31, 1, 12, 4, 9, 23, 30, 2, 5, 11, 44, 4, 26, 54, 28, 4, 63, 2, - 20, 50, 1, 23, 2, 11, 3, 49, 52, 1, 15, 1, 8, 51, 42, 2, 4, 10, 44, 1, 11, 14, 55, 22, 3, - 10, 36, 2, 9, 7, 43, 2, 3, 41, 4, 1, 6, 1, 2, 3, 1, 5, 192, 39, 14, 11, 0, 2, 6, 2, 38, 2, - 6, 2, 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13, 5, 3, 1, - 7, 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4, 1, 11, 2, - 4, 5, 5, 4, 1, 17, 41, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1, 2, 56, 7, 1, 16, 23, - 9, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 32, 47, 1, 0, 3, 25, 9, 7, 5, 2, 5, 4, - 86, 6, 3, 1, 90, 1, 4, 5, 43, 1, 94, 17, 32, 48, 16, 0, 0, 64, 0, 67, 46, 2, 0, 3, 16, 10, - 2, 20, 47, 5, 8, 3, 113, 39, 9, 2, 103, 2, 64, 5, 2, 1, 1, 1, 5, 24, 20, 1, 33, 24, 52, 12, - 68, 1, 1, 44, 6, 3, 1, 1, 3, 10, 33, 5, 35, 13, 29, 3, 51, 1, 12, 15, 1, 16, 16, 10, 5, 1, - 55, 9, 14, 18, 23, 3, 69, 1, 1, 1, 1, 24, 3, 2, 16, 2, 4, 11, 6, 2, 6, 2, 6, 9, 7, 1, 7, 1, - 43, 1, 14, 6, 123, 21, 0, 12, 23, 4, 49, 0, 0, 2, 106, 38, 7, 12, 5, 5, 12, 1, 13, 1, 5, 1, - 1, 1, 2, 1, 2, 1, 108, 33, 0, 18, 64, 2, 54, 40, 12, 116, 5, 1, 135, 36, 26, 6, 26, 11, 89, - 3, 6, 2, 6, 2, 6, 2, 3, 35, 12, 1, 26, 1, 19, 1, 2, 1, 15, 2, 14, 34, 123, 69, 53, 0, 29, 3, + 12, 4, 5, 3, 3, 1, 3, 3, 1, 6, 1, 40, 13, 1, 3, 1, 23, 1, 16, 3, 8, 1, 3, 1, 3, 8, 2, 1, 3, + 2, 1, 2, 4, 28, 4, 1, 8, 1, 3, 1, 23, 1, 10, 1, 5, 3, 8, 1, 3, 1, 3, 8, 2, 6, 2, 1, 4, 13, + 3, 12, 13, 1, 3, 1, 41, 2, 8, 1, 3, 1, 3, 1, 1, 5, 4, 7, 5, 22, 6, 1, 3, 1, 18, 3, 24, 1, 9, + 1, 1, 2, 7, 8, 6, 1, 1, 1, 8, 18, 2, 13, 58, 5, 7, 6, 1, 51, 2, 1, 1, 1, 5, 1, 24, 1, 1, 1, + 19, 1, 3, 2, 5, 1, 1, 6, 1, 14, 4, 32, 1, 63, 8, 1, 36, 4, 19, 4, 16, 1, 36, 67, 55, 1, 1, + 2, 5, 16, 64, 10, 4, 2, 38, 1, 1, 5, 1, 2, 43, 1, 0, 1, 4, 2, 7, 1, 1, 1, 4, 2, 41, 1, 4, 2, + 33, 1, 4, 2, 7, 1, 1, 1, 4, 2, 15, 1, 57, 1, 4, 2, 67, 37, 16, 16, 86, 2, 6, 3, 0, 2, 17, 1, + 26, 5, 75, 3, 11, 7, 20, 11, 21, 12, 20, 12, 13, 1, 3, 1, 2, 12, 52, 2, 19, 14, 1, 4, 1, 67, + 89, 7, 43, 5, 70, 10, 31, 1, 12, 4, 9, 23, 30, 2, 5, 11, 44, 4, 26, 54, 28, 4, 63, 2, 20, + 50, 1, 23, 2, 11, 3, 49, 52, 1, 15, 1, 8, 51, 42, 2, 4, 10, 44, 1, 11, 14, 55, 22, 3, 10, + 36, 2, 9, 7, 43, 2, 3, 41, 4, 1, 6, 1, 2, 3, 1, 5, 192, 39, 14, 11, 0, 2, 6, 2, 38, 2, 6, 2, + 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13, 5, 3, 1, 7, + 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4, 1, 11, 2, 4, + 5, 5, 4, 1, 17, 41, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1, 2, 56, 7, 1, 16, 23, 9, + 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 32, 47, 1, 0, 3, 25, 9, 7, 5, 2, 5, 4, 86, + 6, 3, 1, 90, 1, 4, 5, 43, 1, 94, 17, 32, 48, 16, 0, 0, 64, 0, 67, 46, 2, 0, 3, 16, 10, 2, + 20, 47, 5, 8, 3, 113, 39, 9, 2, 103, 2, 64, 5, 2, 1, 1, 1, 5, 24, 20, 1, 33, 24, 52, 12, 68, + 1, 1, 44, 6, 3, 1, 1, 3, 10, 33, 5, 35, 13, 29, 3, 51, 1, 12, 15, 1, 16, 16, 10, 5, 1, 55, + 9, 14, 18, 23, 3, 69, 1, 1, 1, 1, 24, 3, 2, 16, 2, 4, 11, 6, 2, 6, 2, 6, 9, 7, 1, 7, 1, 43, + 1, 14, 6, 123, 21, 0, 12, 23, 4, 49, 0, 0, 2, 106, 38, 7, 12, 5, 5, 12, 1, 13, 1, 5, 1, 1, + 1, 2, 1, 2, 1, 108, 33, 0, 18, 64, 2, 54, 40, 12, 116, 5, 1, 135, 36, 26, 6, 26, 11, 89, 3, + 6, 2, 6, 2, 6, 2, 3, 35, 12, 1, 26, 1, 19, 1, 2, 1, 15, 2, 14, 34, 123, 69, 53, 0, 29, 3, 49, 47, 32, 13, 30, 5, 43, 5, 30, 2, 36, 4, 8, 1, 5, 42, 158, 18, 36, 4, 36, 4, 40, 8, 52, 12, 11, 1, 15, 1, 7, 1, 2, 1, 11, 1, 15, 1, 7, 1, 2, 67, 0, 9, 22, 10, 8, 24, 6, 1, 42, 1, 9, 69, 6, 
2, 1, 1, 44, 1, 2, 3, 1, 2, 23, 10, 23, 9, 31, 65, 19, 1, 2, 10, 22, 10, 26, 70, 56, 6, 2, 64, 4, 1, 2, 5, 8, 1, 3, 1, 29, 42, 29, 3, 29, 35, 8, 1, 28, 27, 54, 10, 22, 10, 19, 13, 18, 110, 73, 55, 51, 13, 51, 13, 40, 0, 42, 1, 2, 3, 2, 78, 29, 10, 1, 8, 22, 42, - 18, 46, 21, 27, 23, 9, 70, 43, 5, 12, 55, 9, 1, 13, 25, 23, 51, 17, 4, 8, 35, 3, 1, 9, 64, - 1, 4, 9, 2, 10, 1, 1, 1, 35, 18, 1, 34, 2, 1, 6, 1, 65, 7, 1, 1, 1, 4, 1, 15, 1, 10, 7, 57, + 18, 46, 21, 27, 23, 9, 70, 43, 5, 10, 57, 9, 1, 13, 25, 23, 51, 17, 4, 8, 35, 3, 1, 9, 64, + 1, 4, 9, 2, 10, 1, 1, 1, 35, 18, 1, 34, 2, 1, 6, 4, 62, 7, 1, 1, 1, 4, 1, 15, 1, 10, 7, 57, 23, 4, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2, 2, 2, 2, 3, 1, 6, 1, 5, 7, 156, 66, 1, 3, 1, 4, 20, 3, 30, 66, 2, 2, 1, 1, 184, 54, 2, 7, 25, 6, 34, 63, 1, 1, 3, 1, 59, 54, 2, 1, 71, 27, 2, 14, 21, 7, 185, 57, 103, 64, 31, 8, 2, 1, 2, 8, 1, 2, 1, 30, 1, 2, 2, 2, 2, 4, 93, 8, 2, 46, 2, 6, 1, 1, 1, 2, 27, 51, 2, 10, 17, 72, 5, 1, 18, 73, 0, 9, 1, 45, 1, 7, 1, 1, 49, 30, 2, 22, 1, 14, 73, 7, 1, 2, 1, 44, 3, 1, 1, 2, 1, 3, 1, 1, 2, 2, 24, 6, 1, 2, 1, - 37, 1, 2, 1, 4, 1, 1, 0, 23, 185, 1, 79, 0, 102, 111, 17, 196, 0, 97, 15, 0, 0, 0, 0, 0, 7, - 31, 17, 79, 17, 30, 18, 48, 16, 4, 31, 21, 5, 19, 0, 64, 128, 75, 4, 57, 7, 17, 64, 2, 1, 1, - 12, 2, 14, 0, 8, 0, 42, 9, 0, 4, 1, 7, 1, 2, 1, 0, 45, 3, 17, 4, 8, 0, 0, 107, 5, 13, 3, 9, - 7, 10, 4, 1, 0, 85, 1, 71, 1, 2, 2, 1, 2, 2, 2, 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, - 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2, 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, - 25, 1, 31, 1, 25, 1, 8, 0, 31, 225, 7, 1, 17, 2, 7, 1, 2, 1, 5, 213, 45, 10, 7, 16, 1, 0, - 30, 18, 44, 0, 7, 1, 4, 1, 2, 1, 15, 1, 197, 59, 68, 3, 1, 3, 1, 0, 4, 1, 27, 1, 2, 1, 1, 2, - 1, 1, 10, 1, 4, 1, 1, 1, 1, 6, 1, 4, 1, 1, 1, 1, 1, 1, 3, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 1, 1, 2, 4, 1, 7, 1, 4, 1, 4, 1, 1, 1, 10, 1, 17, 5, 3, 1, 5, 1, 17, 0, 26, - 6, 26, 6, 26, 0, 0, 32, 0, 7, 222, 2, 0, 14, 0, 0, 0, 0, 0, 0, + 37, 1, 2, 1, 4, 1, 1, 0, 23, 9, 17, 1, 41, 3, 3, 111, 1, 79, 0, 102, 111, 17, 196, 0, 97, + 15, 0, 17, 6, 0, 0, 0, 0, 7, 31, 17, 79, 17, 30, 18, 48, 16, 4, 31, 21, 5, 19, 0, 64, 128, + 75, 4, 57, 7, 17, 64, 2, 1, 1, 12, 2, 14, 0, 8, 0, 42, 9, 0, 4, 1, 7, 1, 2, 1, 0, 15, 1, 29, + 3, 2, 1, 14, 4, 8, 0, 0, 107, 5, 13, 3, 9, 7, 10, 4, 1, 0, 85, 1, 71, 1, 2, 2, 1, 2, 2, 2, + 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2, 25, + 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 31, 6, 6, 213, 7, 1, + 17, 2, 7, 1, 2, 1, 5, 5, 62, 33, 1, 112, 45, 10, 7, 16, 1, 0, 30, 18, 44, 0, 28, 0, 7, 1, 4, + 1, 2, 1, 15, 1, 197, 59, 68, 3, 1, 3, 1, 0, 4, 1, 27, 1, 2, 1, 1, 2, 1, 1, 10, 1, 4, 1, 1, + 1, 1, 6, 1, 4, 1, 1, 1, 1, 1, 1, 3, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, + 4, 1, 7, 1, 4, 1, 4, 1, 1, 1, 10, 1, 17, 5, 3, 1, 5, 1, 17, 0, 26, 6, 26, 6, 26, 0, 0, 32, + 0, 6, 222, 2, 0, 14, 0, 0, 0, 0, 0, 5, 0, 0, ]; pub fn lookup(c: char) -> bool { super::skip_search( @@ -182,11 +183,11 @@ pub mod case_ignorable { static SHORT_OFFSET_RUNS: [u32; 35] = [ 688, 44045149, 572528402, 576724925, 807414908, 878718981, 903913493, 929080568, 933275148, 937491230, 1138818560, 1147208189, 1210124160, 1222707713, 1235291428, 1260457643, - 1264654383, 1491147067, 1499536432, 1558257395, 1621177392, 1625385712, 1629581135, - 1642180592, 1658961053, 1671548672, 1679937895, 1688328704, 1709301760, 1734467888, - 1755439790, 1759635664, 1768027131, 1777205249, 1782514160, + 1264654383, 
1499535675, 1507925040, 1566646003, 1629566000, 1650551536, 1658941263, + 1671540720, 1688321181, 1700908800, 1709298023, 1717688832, 1738661888, 1763828398, + 1797383403, 1805773008, 1809970171, 1819148289, 1824457200, ]; - static OFFSETS: [u8; 855] = [ + static OFFSETS: [u8; 875] = [ 39, 1, 6, 1, 11, 1, 35, 1, 1, 1, 71, 1, 4, 1, 1, 1, 4, 1, 2, 2, 0, 192, 4, 2, 4, 1, 9, 2, 1, 1, 251, 7, 207, 1, 5, 1, 49, 45, 1, 1, 1, 2, 1, 2, 1, 1, 44, 1, 11, 6, 10, 11, 1, 1, 35, 1, 10, 21, 16, 1, 101, 8, 1, 10, 1, 4, 33, 1, 1, 1, 30, 27, 91, 11, 58, 11, 4, 1, 2, 1, 24, @@ -195,7 +196,7 @@ pub mod case_ignorable { 57, 1, 4, 5, 1, 2, 4, 1, 20, 2, 22, 6, 1, 1, 58, 1, 2, 1, 1, 4, 8, 1, 7, 2, 11, 2, 30, 1, 61, 1, 12, 1, 50, 1, 3, 1, 55, 1, 1, 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 6, 1, 5, 2, 20, 2, 28, 2, 57, 2, 4, 4, 8, 1, 20, 2, 29, 1, 72, 1, 7, 3, 1, 1, 90, 1, 2, 7, 11, 9, - 98, 1, 2, 9, 9, 1, 1, 6, 74, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1, + 98, 1, 2, 9, 9, 1, 1, 7, 73, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1, 102, 4, 1, 6, 1, 2, 2, 2, 25, 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 94, 1, 0, 3, 0, 3, 29, 2, 30, 2, 30, 2, 64, 2, 1, 7, 8, 1, 2, 11, 3, 1, 5, 1, 45, 5, 51, 1, 65, 2, 34, 1, 118, 3, 4, 2, 9, 1, 6, 3, 219, 2, 2, 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 39, 1, 8, 31, @@ -209,15 +210,16 @@ pub mod case_ignorable { 1, 1, 27, 1, 14, 2, 5, 2, 1, 1, 100, 5, 9, 3, 121, 1, 2, 1, 4, 1, 0, 1, 147, 17, 0, 16, 3, 1, 12, 16, 34, 1, 2, 1, 169, 1, 7, 1, 6, 1, 11, 1, 35, 1, 1, 1, 47, 1, 45, 2, 67, 1, 21, 3, 0, 1, 226, 1, 149, 5, 0, 6, 1, 42, 1, 9, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, - 153, 11, 49, 4, 123, 1, 54, 15, 41, 1, 2, 2, 10, 3, 49, 4, 2, 2, 2, 1, 4, 1, 10, 1, 50, 3, - 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, 2, 1, 1, 2, 6, 1, 160, 1, 3, 8, 21, 2, - 57, 2, 3, 1, 37, 7, 3, 5, 195, 8, 2, 3, 1, 1, 23, 1, 84, 6, 1, 1, 4, 2, 1, 2, 238, 4, 6, 2, - 1, 2, 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 0, - 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, 0, - 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, 1, - 0, 2, 0, 9, 0, 5, 59, 7, 9, 4, 0, 1, 63, 17, 64, 2, 1, 2, 0, 4, 1, 7, 1, 2, 0, 2, 1, 4, 0, - 46, 2, 23, 0, 3, 9, 16, 2, 7, 30, 4, 148, 3, 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7, - 1, 17, 2, 7, 1, 2, 1, 5, 0, 14, 0, 1, 61, 4, 0, 7, 109, 8, 0, 5, 0, 1, 30, 96, 128, 240, 0, + 80, 3, 70, 11, 49, 4, 123, 1, 54, 15, 41, 1, 2, 2, 10, 3, 49, 4, 2, 2, 2, 1, 4, 1, 10, 1, + 50, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, 2, 1, 1, 2, 6, 1, 2, 1, 157, 1, + 3, 8, 21, 2, 57, 2, 3, 1, 37, 7, 3, 5, 195, 8, 2, 3, 1, 1, 23, 1, 84, 6, 1, 1, 4, 2, 1, 2, + 238, 4, 6, 2, 1, 2, 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, + 0, 9, 1, 2, 0, 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, + 13, 1, 2, 0, 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, + 3, 1, 1, 1, 0, 2, 11, 2, 52, 5, 5, 1, 1, 1, 0, 17, 6, 15, 0, 5, 59, 7, 9, 4, 0, 1, 63, 17, + 64, 2, 1, 2, 0, 4, 1, 7, 1, 2, 0, 2, 1, 4, 0, 46, 2, 23, 0, 3, 9, 16, 2, 7, 30, 4, 148, 3, + 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7, 1, 17, 2, 7, 1, 2, 1, 5, 5, 62, 33, 1, 160, + 14, 0, 1, 61, 4, 0, 5, 0, 7, 109, 8, 0, 5, 0, 1, 30, 96, 128, 240, 0, ]; pub fn lookup(c: char) -> bool { super::skip_search( @@ -230,24 +232,24 @@ pub mod case_ignorable { #[rustfmt::skip] pub mod 
cased { - static SHORT_OFFSET_RUNS: [u32; 21] = [ + static SHORT_OFFSET_RUNS: [u32; 22] = [ 4256, 115348384, 136322176, 144711446, 163587254, 320875520, 325101120, 350268208, 392231680, 404815649, 413205504, 421595008, 467733632, 484513952, 492924480, 497144832, - 501339814, 578936576, 627173632, 635564336, 640872842, + 501339814, 578936576, 627171376, 639756544, 643952944, 649261450, ]; - static OFFSETS: [u8; 311] = [ + static OFFSETS: [u8; 315] = [ 65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 195, 1, 4, 4, 208, 1, 36, 7, 2, 30, 5, 96, 1, 42, 4, 2, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 9, - 41, 0, 38, 1, 1, 5, 1, 2, 43, 2, 3, 0, 86, 2, 6, 0, 9, 7, 43, 2, 3, 64, 192, 64, 0, 2, 6, 2, + 41, 0, 38, 1, 1, 5, 1, 2, 43, 1, 4, 0, 86, 2, 6, 0, 9, 7, 43, 2, 3, 64, 192, 64, 0, 2, 6, 2, 38, 2, 6, 2, 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13, 5, 3, 1, 7, 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4, 1, 6, 4, 1, 2, 4, 5, 5, 4, 1, 17, 32, 3, 2, 0, 52, 0, 229, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1, - 0, 46, 18, 30, 132, 102, 3, 4, 1, 59, 5, 2, 1, 1, 1, 5, 27, 2, 1, 3, 0, 43, 1, 13, 7, 80, 0, + 0, 46, 18, 30, 132, 102, 3, 4, 1, 59, 5, 2, 1, 1, 1, 5, 24, 5, 1, 3, 0, 43, 1, 14, 6, 80, 0, 7, 12, 5, 0, 26, 6, 26, 0, 80, 96, 36, 4, 36, 116, 11, 1, 15, 1, 7, 1, 2, 1, 11, 1, 15, 1, 7, 1, 2, 0, 1, 2, 3, 1, 42, 1, 9, 0, 51, 13, 51, 0, 64, 0, 64, 0, 85, 1, 71, 1, 2, 2, 1, 2, 2, 2, 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2, - 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 10, 1, 20, 0, - 68, 0, 26, 6, 26, 6, 26, 0, + 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 10, 1, 20, 6, 6, + 0, 62, 0, 68, 0, 26, 6, 26, 6, 26, 0, ]; pub fn lookup(c: char) -> bool { super::skip_search( @@ -277,14 +279,14 @@ pub mod cc { #[rustfmt::skip] pub mod grapheme_extend { - static SHORT_OFFSET_RUNS: [u32; 32] = [ + static SHORT_OFFSET_RUNS: [u32; 33] = [ 768, 2098307, 6292881, 10490717, 522196754, 526393356, 731917551, 740306986, 752920175, 761309186, 778107678, 908131840, 912326558, 920715773, 924912129, 937495844, 962662059, - 966858799, 1205935152, 1277239027, 1340173040, 1344368463, 1352776861, 1365364480, - 1369559397, 1377950208, 1407311872, 1432478000, 1453449902, 1457645776, 1466826784, - 1476329968, + 966858799, 1214323760, 1285627635, 1348547648, 1369533168, 1377922895, 1386331293, + 1398918912, 1403113829, 1411504640, 1440866304, 1466032814, 1495393516, 1503783120, + 1508769824, 1518273008, ]; - static OFFSETS: [u8; 707] = [ + static OFFSETS: [u8; 727] = [ 0, 112, 0, 7, 0, 45, 1, 1, 1, 2, 1, 2, 1, 1, 72, 11, 48, 21, 16, 1, 101, 7, 2, 6, 2, 2, 1, 4, 35, 1, 30, 27, 91, 11, 58, 9, 9, 1, 24, 4, 1, 9, 1, 3, 1, 5, 43, 3, 60, 8, 42, 24, 1, 32, 55, 1, 1, 1, 4, 8, 4, 1, 3, 7, 10, 2, 29, 1, 58, 1, 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 26, 1, 2, @@ -292,7 +294,7 @@ pub mod grapheme_extend { 1, 1, 58, 1, 1, 2, 1, 4, 8, 1, 7, 3, 10, 2, 30, 1, 59, 1, 1, 1, 12, 1, 9, 1, 40, 1, 3, 1, 55, 1, 1, 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 2, 1, 3, 1, 5, 2, 7, 2, 11, 2, 28, 2, 57, 2, 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 29, 1, 72, 1, 4, 1, 2, 3, 1, 1, 8, 1, 81, 1, 2, 7, - 12, 8, 98, 1, 2, 9, 11, 6, 74, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1, + 12, 8, 98, 1, 2, 9, 11, 7, 73, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1, 102, 4, 1, 6, 1, 2, 2, 2, 25, 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 0, 3, 0, 3, 29, 2, 30, 2, 
30, 2, 64, 2, 1, 7, 8, 1, 2, 11, 9, 1, 45, 3, 1, 1, 117, 2, 34, 1, 118, 3, 4, 2, 9, 1, 6, 3, 219, 2, 2, 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 48, 31, 49, 4, 48, 7, 1, @@ -301,16 +303,17 @@ pub mod grapheme_extend { 4, 1, 10, 32, 2, 80, 2, 0, 1, 3, 1, 4, 1, 25, 2, 5, 1, 151, 2, 26, 18, 13, 1, 38, 8, 25, 11, 46, 3, 48, 1, 2, 4, 2, 2, 39, 1, 67, 6, 2, 2, 2, 2, 12, 1, 8, 1, 47, 1, 51, 1, 1, 3, 2, 2, 5, 2, 1, 1, 42, 2, 8, 1, 238, 1, 2, 1, 4, 1, 0, 1, 0, 16, 16, 16, 0, 2, 0, 1, 226, 1, 149, - 5, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, 153, 11, 49, 4, 123, 1, 54, 15, 41, 1, - 2, 2, 10, 3, 49, 4, 2, 2, 7, 1, 61, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, - 2, 1, 1, 2, 6, 1, 160, 1, 3, 8, 21, 2, 57, 2, 1, 1, 1, 1, 22, 1, 14, 7, 3, 5, 195, 8, 2, 3, - 1, 1, 23, 1, 81, 1, 2, 6, 1, 1, 2, 1, 1, 2, 1, 2, 235, 1, 2, 4, 6, 2, 1, 2, 27, 2, 85, 8, 2, - 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 245, 1, 10, 2, 1, 1, 4, - 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, 0, 7, 1, 6, 1, - 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, 1, 0, 2, 0, 5, - 59, 7, 0, 1, 63, 4, 81, 1, 0, 2, 0, 46, 2, 23, 0, 1, 1, 3, 4, 5, 8, 8, 2, 7, 30, 4, 148, 3, - 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7, 1, 17, 2, 7, 1, 2, 1, 5, 0, 7, 0, 1, 61, 4, - 0, 7, 109, 7, 0, 96, 128, 240, 0, + 5, 0, 3, 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, 80, 3, 70, 11, 49, 4, 123, 1, 54, 15, + 41, 1, 2, 2, 10, 3, 49, 4, 2, 2, 7, 1, 61, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, + 95, 3, 2, 1, 1, 2, 6, 1, 2, 1, 157, 1, 3, 8, 21, 2, 57, 2, 1, 1, 1, 1, 22, 1, 14, 7, 3, 5, + 195, 8, 2, 3, 1, 1, 23, 1, 81, 1, 2, 6, 1, 1, 2, 1, 1, 2, 1, 2, 235, 1, 2, 4, 6, 2, 1, 2, + 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 245, 1, + 10, 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, + 0, 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, + 1, 0, 2, 11, 2, 52, 5, 5, 1, 1, 1, 0, 1, 6, 15, 0, 5, 59, 7, 0, 1, 63, 4, 81, 1, 0, 2, 0, + 46, 2, 23, 0, 1, 1, 3, 4, 5, 8, 8, 2, 7, 30, 4, 148, 3, 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, + 15, 0, 7, 1, 17, 2, 7, 1, 2, 1, 5, 100, 1, 160, 7, 0, 1, 61, 4, 0, 4, 0, 7, 109, 7, 0, 96, + 128, 240, 0, ]; pub fn lookup(c: char) -> bool { super::skip_search( @@ -327,50 +330,52 @@ pub mod lowercase { 14, 17, 0, 0, 9, 0, 0, 12, 13, 10, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1, 0, 15, 0, 8, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, - 3, 0, 0, 7, + 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, + 3, 18, 0, 7, ]; - const BITSET_INDEX_CHUNKS: &'static [[u8; 16]; 19] = &[ + const BITSET_INDEX_CHUNKS: &'static [[u8; 16]; 20] = &[ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 14, 55, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 44, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 65, 42, 0, 50, 46, 48, 32], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 65, 43, 0, 
51, 47, 49, 33], [0, 0, 0, 0, 10, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26], + [0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27], [0, 0, 0, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 57, 0, 55, 55, 55, 0, 21, 21, 67, 21, 35, 24, 23, 36], - [0, 5, 74, 0, 28, 15, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 64, 33, 17, 22, 51, 52, 47, 45, 8, 34, 40, 0, 27, 13, 30], - [11, 58, 0, 4, 0, 0, 29, 0, 0, 0, 0, 0, 0, 0, 31, 0], - [16, 25, 21, 37, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [16, 49, 2, 20, 66, 9, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [63, 39, 54, 12, 73, 61, 18, 1, 6, 62, 71, 19, 68, 69, 3, 44], + [0, 0, 0, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 57, 0, 55, 55, 55, 0, 22, 22, 67, 22, 36, 25, 24, 37], + [0, 5, 68, 0, 29, 15, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 64, 34, 17, 23, 52, 53, 48, 46, 8, 35, 42, 0, 28, 13, 31], + [11, 58, 0, 6, 0, 0, 30, 0, 0, 0, 0, 0, 0, 0, 32, 0], + [16, 26, 22, 38, 39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [16, 50, 2, 21, 66, 9, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [16, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [63, 41, 54, 12, 75, 61, 18, 1, 7, 62, 74, 20, 71, 72, 4, 45], ]; const BITSET_CANONICAL: &'static [u64; 55] = &[ 0b0000000000000000000000000000000000000000000000000000000000000000, 0b1111111111111111110000000000000000000000000011111111111111111111, 0b1010101010101010101010101010101010101010101010101010100000000010, + 0b0000000000000111111111111111111111111111111111111111111111111111, 0b1111111111111111111111000000000000000000000000001111110111111111, - 0b0000111111111111111111111111111111111111000000000000000000000000, 0b1000000000000010000000000000000000000000000000000000000000000000, + 0b0000111111111111111111111111111111111111000000000000000000000000, 0b0000111111111111111111111111110000000000000000000000000011111111, - 0b0000000000000111111111111111111111111111111111111111111111111111, 0b1111111111111111111111111111111111111111111111111010101010000101, 0b1111111111111111111111111111111100000000000000000000000000000000, 0b1111111111111111111111111111110000000000000000000000000000000000, 0b1111111111111111111111110000000000000000000000000000000000000000, 0b1111111111111111111111000000000000000000000000001111111111101111, 0b1111111111111111111100000000000000000000000000010000000000000000, - 0b1111111111111111000000011111111111110111111111111111111111111111, + 0b1111111111111111000000111111111111110111111111111111111111111111, 0b1111111111111111000000000000000000000000000000000100001111000000, 0b1111111111111111000000000000000000000000000000000000000000000000, 0b1111111101111111111111111111111110000000000000000000000000000000, 0b1111110000000000000000000000000011111111111111111111111111000000, + 0b1111011111111111111111111111111111111111111111110000000000000000, 0b1111000000000000000000000000001111110111111111111111111111111100, 0b1010101010101010101010101010101010101010101010101101010101010100, 0b1010101010101010101010101010101010101010101010101010101010101010, @@ -384,16 +389,16 @@ pub mod lowercase { 0b0001101111111011111111111111101111111111100000000000000000000000, 0b0001100100101111101010101010101010101010111000110111111111111111, 0b0000011111111101111111111111111111111111111111111111111110111001, - 0b0000011101000000000000000000000000000010101010100000010100001010, + 0b0000011101011100000000000000000000000010101010100000010100001010, 
0b0000010000100000000001000000000000000000000000000000000000000000, 0b0000000111111111111111111111111111111111111011111111111111111111, 0b0000000011111111000000001111111100000000001111110000000011111111, 0b0000000011011100000000001111111100000000110011110000000011011100, 0b0000000000001000010100000001101010101010101010101010101010101010, 0b0000000000000000001000001011111111111111111111111111111111111111, + 0b0000000000000000000001111110000001111111111111111111101111111111, 0b0000000000000000000000001111111111111111110111111100000000000000, 0b0000000000000000000000000001111100000000000000000000000000000011, - 0b0000000000000000000000000000000001111111111111111111101111111111, 0b0000000000000000000000000000000000111010101010101010101010101010, 0b0000000000000000000000000000000000000000111110000000000001111111, 0b0000000000000000000000000000000000000000000000000000101111110111, @@ -405,13 +410,12 @@ pub mod lowercase { 0b1010101010101011101010101010100000000000000000000000000000000000, 0b1101010010101010101010101010101010101010101010101010101101010101, 0b1110011001010001001011010010101001001110001001000011000100101001, - 0b1110011111111111111111111111111111111111111111110000000000000000, 0b1110101111000000000000000000000000001111111111111111111111111100, ]; - const BITSET_MAPPING: &'static [(u8, u8); 20] = &[ + const BITSET_MAPPING: &'static [(u8, u8); 21] = &[ (0, 64), (1, 188), (1, 183), (1, 176), (1, 109), (1, 124), (1, 126), (1, 66), (1, 70), - (1, 77), (2, 146), (2, 144), (2, 83), (3, 12), (3, 6), (4, 156), (4, 78), (5, 187), - (6, 132), (7, 93), + (1, 77), (2, 146), (2, 144), (2, 83), (3, 93), (3, 147), (3, 133), (4, 12), (4, 6), + (5, 187), (6, 78), (7, 132), ]; #[rustc_const_unstable(feature = "const_unicode_case_lookup", issue = "101400")] @@ -428,14 +432,14 @@ pub mod lowercase { #[rustfmt::skip] pub mod n { - static SHORT_OFFSET_RUNS: [u32; 38] = [ + static SHORT_OFFSET_RUNS: [u32; 39] = [ 1632, 18876774, 31461440, 102765417, 111154926, 115349830, 132128880, 165684320, 186656630, 195046653, 199241735, 203436434, 216049184, 241215536, 249605104, 274792208, 278987015, 283181793, 295766104, 320933114, 383848032, 392238160, 434181712, 442570976, 455154768, - 463544256, 476128256, 480340576, 484535936, 501338848, 505534414, 513925440, 518120176, - 522315975, 526511217, 534900992, 555875312, 561183738, + 463544144, 476128256, 484534880, 488730240, 505533120, 509728718, 522314048, 526508784, + 530703600, 534898887, 539094129, 547483904, 568458224, 573766650, ]; - static OFFSETS: [u8; 269] = [ + static OFFSETS: [u8; 275] = [ 48, 10, 120, 2, 5, 1, 2, 3, 0, 10, 134, 10, 198, 10, 0, 10, 118, 10, 4, 6, 108, 10, 118, 10, 118, 10, 2, 6, 110, 13, 115, 10, 8, 7, 103, 10, 104, 7, 7, 19, 109, 10, 96, 10, 118, 10, 70, 20, 0, 10, 70, 10, 0, 20, 0, 3, 239, 10, 6, 10, 22, 10, 0, 10, 128, 11, 165, 10, 6, 10, @@ -445,9 +449,9 @@ pub mod n { 29, 1, 8, 1, 134, 5, 202, 10, 0, 8, 25, 7, 39, 9, 75, 5, 22, 6, 160, 2, 2, 16, 2, 46, 64, 9, 52, 2, 30, 3, 75, 5, 104, 8, 24, 8, 41, 7, 0, 6, 48, 10, 0, 31, 158, 10, 42, 4, 112, 7, 134, 30, 128, 10, 60, 10, 144, 10, 7, 20, 251, 10, 0, 10, 118, 10, 0, 10, 102, 10, 102, 12, 0, - 19, 93, 10, 0, 29, 227, 10, 70, 10, 0, 21, 0, 111, 0, 10, 86, 10, 134, 10, 1, 7, 0, 23, 0, - 20, 108, 25, 0, 50, 0, 10, 0, 10, 0, 9, 128, 10, 0, 59, 1, 3, 1, 4, 76, 45, 1, 15, 0, 13, 0, - 10, 0, + 19, 93, 10, 0, 29, 227, 10, 70, 10, 0, 10, 102, 21, 0, 111, 0, 10, 86, 10, 134, 10, 1, 7, 0, + 23, 0, 20, 12, 20, 108, 25, 0, 50, 0, 10, 0, 10, 0, 10, 0, 9, 128, 10, 0, 59, 1, 3, 1, 4, + 76, 45, 1, 15, 
0, 13, 0, 10, 0, ]; pub fn lookup(c: char) -> bool { super::skip_search( diff --git a/library/core/tests/ascii.rs b/library/core/tests/ascii.rs index 6d2cf3e83..f5f2dd047 100644 --- a/library/core/tests/ascii.rs +++ b/library/core/tests/ascii.rs @@ -252,6 +252,23 @@ fn test_is_ascii_digit() { } #[test] +fn test_is_ascii_octdigit() { + assert_all!(is_ascii_octdigit, "", "01234567"); + assert_none!( + is_ascii_octdigit, + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] fn test_is_ascii_hexdigit() { assert_all!(is_ascii_hexdigit, "", "0123456789", "abcdefABCDEF",); assert_none!( @@ -454,6 +471,7 @@ fn ascii_ctype_const() { is_ascii_lowercase => [true, false, false, false, false]; is_ascii_alphanumeric => [true, true, true, false, false]; is_ascii_digit => [false, false, true, false, false]; + is_ascii_octdigit => [false, false, false, false, false]; is_ascii_hexdigit => [true, true, true, false, false]; is_ascii_punctuation => [false, false, false, true, false]; is_ascii_graphic => [true, true, true, true, false]; diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs index 4a0e162bc..51f858ade 100644 --- a/library/core/tests/lib.rs +++ b/library/core/tests/lib.rs @@ -2,12 +2,12 @@ #![feature(array_chunks)] #![feature(array_methods)] #![feature(array_windows)] -#![feature(bench_black_box)] #![feature(bigint_helper_methods)] #![feature(cell_update)] #![feature(const_assume)] #![feature(const_black_box)] #![feature(const_bool_to_option)] +#![feature(const_caller_location)] #![feature(const_cell_into_inner)] #![feature(const_convert)] #![feature(const_heap)] @@ -21,6 +21,7 @@ #![feature(const_ptr_write)] #![feature(const_trait_impl)] #![feature(const_likely)] +#![feature(const_location_fields)] #![feature(core_intrinsics)] #![feature(core_private_bignum)] #![feature(core_private_diy_float)] @@ -48,8 +49,8 @@ #![feature(slice_from_ptr_range)] #![feature(split_as_slice)] #![feature(maybe_uninit_uninit_array)] -#![feature(maybe_uninit_array_assume_init)] #![feature(maybe_uninit_write_slice)] +#![feature(maybe_uninit_uninit_array_transpose)] #![feature(min_specialization)] #![feature(numfmt)] #![feature(step_trait)] @@ -74,6 +75,7 @@ #![feature(iterator_try_reduce)] #![feature(const_mut_refs)] #![feature(const_pin)] +#![feature(const_waker)] #![feature(never_type)] #![feature(unwrap_infallible)] #![feature(pointer_byte_offsets)] @@ -93,13 +95,13 @@ #![feature(strict_provenance_atomic_ptr)] #![feature(trusted_random_access)] #![feature(unsize)] -#![feature(unzip_option)] #![feature(const_array_from_ref)] #![feature(const_slice_from_ref)] #![feature(waker_getters)] #![feature(slice_flatten)] #![feature(provide_any)] #![feature(utf8_chunks)] +#![feature(is_ascii_octdigit)] #![deny(unsafe_op_in_unsafe_fn)] extern crate test; @@ -130,6 +132,7 @@ mod nonzero; mod num; mod ops; mod option; +mod panic; mod pattern; mod pin; mod pin_macro; diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs index 6856d1a1f..0362e1c8a 100644 --- a/library/core/tests/mem.rs +++ b/library/core/tests/mem.rs @@ -130,7 +130,11 @@ fn test_transmute_copy_grow_panics() { payload .downcast::<&'static str>() .and_then(|s| { - if *s == "cannot transmute_copy if U is larger than T" { Ok(s) } else { Err(s) } + if *s == "cannot transmute_copy if Dst is larger than 
Src" { + Ok(s) + } else { + Err(s) + } }) .unwrap_or_else(|p| panic::resume_unwind(p)); } @@ -163,18 +167,18 @@ fn assume_init_good() { #[test] fn uninit_array_assume_init() { - let mut array: [MaybeUninit<i16>; 5] = MaybeUninit::uninit_array(); + let mut array = [MaybeUninit::<i16>::uninit(); 5]; array[0].write(3); array[1].write(1); array[2].write(4); array[3].write(1); array[4].write(5); - let array = unsafe { MaybeUninit::array_assume_init(array) }; + let array = unsafe { array.transpose().assume_init() }; assert_eq!(array, [3, 1, 4, 1, 5]); - let [] = unsafe { MaybeUninit::<!>::array_assume_init([]) }; + let [] = unsafe { [MaybeUninit::<!>::uninit(); 0].transpose().assume_init() }; } #[test] diff --git a/library/core/tests/num/int_log.rs b/library/core/tests/num/int_log.rs index be203fb5c..a1edb1a51 100644 --- a/library/core/tests/num/int_log.rs +++ b/library/core/tests/num/int_log.rs @@ -164,3 +164,33 @@ fn ilog10_u64() { fn ilog10_u128() { ilog10_loop! { u128, 38 } } + +#[test] +#[should_panic(expected = "argument of integer logarithm must be positive")] +fn ilog2_of_0_panic() { + let _ = 0u32.ilog2(); +} + +#[test] +#[should_panic(expected = "argument of integer logarithm must be positive")] +fn ilog10_of_0_panic() { + let _ = 0u32.ilog10(); +} + +#[test] +#[should_panic(expected = "argument of integer logarithm must be positive")] +fn ilog3_of_0_panic() { + let _ = 0u32.ilog(3); +} + +#[test] +#[should_panic(expected = "base of integer logarithm must be at least 2")] +fn ilog0_of_1_panic() { + let _ = 1u32.ilog(0); +} + +#[test] +#[should_panic(expected = "base of integer logarithm must be at least 2")] +fn ilog1_of_1_panic() { + let _ = 1u32.ilog(1); +} diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs index 49580cdcc..c79e909e4 100644 --- a/library/core/tests/num/mod.rs +++ b/library/core/tests/num/mod.rs @@ -172,7 +172,7 @@ fn test_can_not_overflow() { // Calcutate the string length for the smallest overflowing number: let max_len_string = format_radix(num, base as u128); - // Ensure that that string length is deemed to potentially overflow: + // Ensure that string length is deemed to potentially overflow: assert!(can_overflow::<$t>(base, &max_len_string)); } )*) diff --git a/library/core/tests/num/wrapping.rs b/library/core/tests/num/wrapping.rs index 8ded139a1..c5a719883 100644 --- a/library/core/tests/num/wrapping.rs +++ b/library/core/tests/num/wrapping.rs @@ -75,8 +75,6 @@ wrapping_test!(test_wrapping_u64, u64, u64::MIN, u64::MAX); wrapping_test!(test_wrapping_u128, u128, u128::MIN, u128::MAX); wrapping_test!(test_wrapping_usize, usize, usize::MIN, usize::MAX); -// Don't warn about overflowing ops on 32-bit platforms -#[cfg_attr(target_pointer_width = "32", allow(const_err))] #[test] fn wrapping_int_api() { assert_eq!(i8::MAX.wrapping_add(1), i8::MIN); diff --git a/library/core/tests/option.rs b/library/core/tests/option.rs index 9f5e537dc..f36f7c268 100644 --- a/library/core/tests/option.rs +++ b/library/core/tests/option.rs @@ -57,6 +57,7 @@ fn test_get_resource() { } #[test] +#[cfg_attr(not(bootstrap), allow(for_loops_over_fallibles))] fn test_option_dance() { let x = Some(()); let mut y = Some(5); diff --git a/library/core/tests/panic.rs b/library/core/tests/panic.rs new file mode 100644 index 000000000..24b6c56b3 --- /dev/null +++ b/library/core/tests/panic.rs @@ -0,0 +1 @@ +mod location; diff --git a/library/core/tests/panic/location.rs b/library/core/tests/panic/location.rs new file mode 100644 index 000000000..d20241d83 --- /dev/null +++ 
b/library/core/tests/panic/location.rs @@ -0,0 +1,31 @@ +use core::panic::Location; + +// Note: Some of the following tests depend on the source location, +// so please be careful when editing this file. + +#[test] +fn location_const_caller() { + const _CALLER_REFERENCE: &Location<'static> = Location::caller(); + const _CALLER: Location<'static> = *Location::caller(); +} + +#[test] +fn location_const_file() { + const CALLER: &Location<'static> = Location::caller(); + const FILE: &str = CALLER.file(); + assert_eq!(FILE, file!()); +} + +#[test] +fn location_const_line() { + const CALLER: &Location<'static> = Location::caller(); + const LINE: u32 = CALLER.line(); + assert_eq!(LINE, 21); +} + +#[test] +fn location_const_column() { + const CALLER: &Location<'static> = Location::caller(); + const COLUMN: u32 = CALLER.column(); + assert_eq!(COLUMN, 40); +} diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs index b8f6fe696..9e1fbea79 100644 --- a/library/core/tests/slice.rs +++ b/library/core/tests/slice.rs @@ -1284,7 +1284,6 @@ fn test_windows_zip() { } #[test] -#[allow(const_err)] fn test_iter_ref_consistency() { use std::fmt::Debug; diff --git a/library/core/tests/task.rs b/library/core/tests/task.rs index d71fef9e5..56be30e92 100644 --- a/library/core/tests/task.rs +++ b/library/core/tests/task.rs @@ -1,4 +1,4 @@ -use core::task::Poll; +use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; #[test] fn poll_const() { @@ -12,3 +12,18 @@ fn poll_const() { const IS_PENDING: bool = POLL.is_pending(); assert!(IS_PENDING); } + +#[test] +fn waker_const() { + const VOID_TABLE: RawWakerVTable = RawWakerVTable::new(|_| VOID_WAKER, |_| {}, |_| {}, |_| {}); + + const VOID_WAKER: RawWaker = RawWaker::new(&(), &VOID_TABLE); + + static WAKER: Waker = unsafe { Waker::from_raw(VOID_WAKER) }; + + static CONTEXT: Context<'static> = Context::from_waker(&WAKER); + + static WAKER_REF: &'static Waker = CONTEXT.waker(); + + WAKER_REF.wake_by_ref(); +} diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs index fe2d2f241..a05128de4 100644 --- a/library/core/tests/time.rs +++ b/library/core/tests/time.rs @@ -197,9 +197,31 @@ fn correct_sum() { #[test] fn debug_formatting_extreme_values() { assert_eq!( - format!("{:?}", Duration::new(18_446_744_073_709_551_615, 123_456_789)), + format!("{:?}", Duration::new(u64::MAX, 123_456_789)), "18446744073709551615.123456789s" ); + assert_eq!(format!("{:.0?}", Duration::MAX), "18446744073709551616s"); + assert_eq!(format!("{:.0?}", Duration::new(u64::MAX, 500_000_000)), "18446744073709551616s"); + assert_eq!(format!("{:.0?}", Duration::new(u64::MAX, 499_999_999)), "18446744073709551615s"); + assert_eq!( + format!("{:.3?}", Duration::new(u64::MAX, 999_500_000)), + "18446744073709551616.000s" + ); + assert_eq!( + format!("{:.3?}", Duration::new(u64::MAX, 999_499_999)), + "18446744073709551615.999s" + ); + assert_eq!( + format!("{:.8?}", Duration::new(u64::MAX, 999_999_995)), + "18446744073709551616.00000000s" + ); + assert_eq!( + format!("{:.8?}", Duration::new(u64::MAX, 999_999_994)), + "18446744073709551615.99999999s" + ); + assert_eq!(format!("{:21.0?}", Duration::MAX), "18446744073709551616s"); + assert_eq!(format!("{:22.0?}", Duration::MAX), "18446744073709551616s "); + assert_eq!(format!("{:24.0?}", Duration::MAX), "18446744073709551616s "); } #[test] @@ -445,3 +467,11 @@ fn duration_const() { const SATURATING_MUL: Duration = MAX.saturating_mul(2); assert_eq!(SATURATING_MUL, MAX); } + +#[test] +fn from_neg_zero() { + 
assert_eq!(Duration::try_from_secs_f32(-0.0), Ok(Duration::ZERO)); + assert_eq!(Duration::try_from_secs_f64(-0.0), Ok(Duration::ZERO)); + assert_eq!(Duration::from_secs_f32(-0.0), Duration::ZERO); + assert_eq!(Duration::from_secs_f64(-0.0), Duration::ZERO); +} diff --git a/library/panic_unwind/src/emcc.rs b/library/panic_unwind/src/emcc.rs index 7c233c7c3..c6d423085 100644 --- a/library/panic_unwind/src/emcc.rs +++ b/library/panic_unwind/src/emcc.rs @@ -47,7 +47,12 @@ static EXCEPTION_TYPE_INFO: TypeInfo = TypeInfo { name: b"rust_panic\0".as_ptr(), }; +// NOTE(nbdd0121): The `canary` field will be part of stable ABI after `c_unwind` stabilization. +#[repr(C)] struct Exception { + // See `gcc.rs` on why this is present. We already have a static here so just use it. + canary: *const TypeInfo, + // This is necessary because C++ code can capture our exception with // std::exception_ptr and rethrow it multiple times, possibly even in // another thread. @@ -70,27 +75,38 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> { let catch_data = &*(ptr as *mut CatchData); let adjusted_ptr = __cxa_begin_catch(catch_data.ptr as *mut libc::c_void) as *mut Exception; - let out = if catch_data.is_rust_panic { - let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::SeqCst); - if was_caught { - // Since cleanup() isn't allowed to panic, we just abort instead. - intrinsics::abort(); - } - (*adjusted_ptr).data.take().unwrap() - } else { + if !catch_data.is_rust_panic { super::__rust_foreign_exception(); - }; + } + + let canary = ptr::addr_of!((*adjusted_ptr).canary).read(); + if !ptr::eq(canary, &EXCEPTION_TYPE_INFO) { + super::__rust_foreign_exception(); + } + + let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::SeqCst); + if was_caught { + // Since cleanup() isn't allowed to panic, we just abort instead. + intrinsics::abort(); + } + let out = (*adjusted_ptr).data.take().unwrap(); __cxa_end_catch(); out } pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 { - let sz = mem::size_of_val(&data); - let exception = __cxa_allocate_exception(sz) as *mut Exception; + let exception = __cxa_allocate_exception(mem::size_of::<Exception>()) as *mut Exception; if exception.is_null() { return uw::_URC_FATAL_PHASE1_ERROR as u32; } - ptr::write(exception, Exception { caught: AtomicBool::new(false), data: Some(data) }); + ptr::write( + exception, + Exception { + canary: &EXCEPTION_TYPE_INFO, + caught: AtomicBool::new(false), + data: Some(data), + }, + ); __cxa_throw(exception as *mut _, &EXCEPTION_TYPE_INFO, exception_cleanup); } diff --git a/library/panic_unwind/src/gcc.rs b/library/panic_unwind/src/gcc.rs index 261404e87..0b7a873a6 100644 --- a/library/panic_unwind/src/gcc.rs +++ b/library/panic_unwind/src/gcc.rs @@ -38,12 +38,23 @@ use alloc::boxed::Box; use core::any::Any; +use core::ptr; use unwind as uw; +// In case where multiple copies of std exist in a single process, +// we use address of this static variable to distinguish an exception raised by +// this copy and some other copy (which needs to be treated as foreign exception). +static CANARY: u8 = 0; + +// NOTE(nbdd0121) +// Once `c_unwind` feature is stabilized, there will be ABI stability requirement +// on this struct. The first two field must be `_Unwind_Exception` and `canary`, +// as it may be accessed by a different version of the std with a different compiler. 
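The gcc.rs comments above describe the new canary scheme: each compiled copy of std embeds its own CANARY static, and an unwind payload is treated as coming from this copy only if it carries that static's address. The following is a minimal, self-contained sketch of that address-as-identity pattern, not the actual panic_unwind code; the Payload, make_payload, and is_ours names are purely illustrative.

    use std::ptr;

    // Each compiled copy of this code gets a distinct address for CANARY.
    static CANARY: u8 = 0;

    struct Payload {
        canary: *const u8,
        data: u32,
    }

    fn make_payload(data: u32) -> Payload {
        // Tag the payload with the address of *our* canary.
        Payload { canary: &CANARY, data }
    }

    fn is_ours(p: &Payload) -> bool {
        // Compare addresses, not values: a payload produced by a different
        // copy of the code would carry a different static's address.
        ptr::eq(p.canary, &CANARY)
    }

    fn main() {
        let p = make_payload(7);
        assert!(is_ours(&p));
        println!("payload data: {}", p.data);
    }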
#[repr(C)] struct Exception { _uwe: uw::_Unwind_Exception, + canary: *const u8, cause: Box<dyn Any + Send>, } @@ -54,6 +65,7 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 { exception_cleanup, private: [0; uw::unwinder_private_data_size], }, + canary: &CANARY, cause: data, }); let exception_param = Box::into_raw(exception) as *mut uw::_Unwind_Exception; @@ -75,10 +87,22 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> { if (*exception).exception_class != rust_exception_class() { uw::_Unwind_DeleteException(exception); super::__rust_foreign_exception(); - } else { - let exception = Box::from_raw(exception as *mut Exception); - exception.cause } + + let exception = exception.cast::<Exception>(); + // Just access the canary field, avoid accessing the entire `Exception` as + // it can be a foreign Rust exception. + let canary = ptr::addr_of!((*exception).canary).read(); + if !ptr::eq(canary, &CANARY) { + // A foreign Rust exception, treat it slightly differently from other + // foreign exceptions, because call into `_Unwind_DeleteException` will + // call into `__rust_drop_panic` which produces a confusing + // "Rust panic must be rethrown" message. + super::__rust_foreign_exception(); + } + + let exception = Box::from_raw(exception as *mut Exception); + exception.cause } // Rust's exception class identifier. This is used by personality routines to diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs index 1eb4f3789..7e7180a38 100644 --- a/library/panic_unwind/src/lib.rs +++ b/library/panic_unwind/src/lib.rs @@ -42,7 +42,8 @@ cfg_if::cfg_if! { // L4Re is unix family but does not yet support unwinding. #[path = "dummy.rs"] mod real_imp; - } else if #[cfg(target_env = "msvc")] { + } else if #[cfg(all(target_env = "msvc", not(target_arch = "arm")))] { + // LLVM does not support unwinding on 32 bit ARM msvc (thumbv7a-pc-windows-msvc) #[path = "seh.rs"] mod real_imp; } else if #[cfg(any( diff --git a/library/panic_unwind/src/seh.rs b/library/panic_unwind/src/seh.rs index 6b8d06568..651115a82 100644 --- a/library/panic_unwind/src/seh.rs +++ b/library/panic_unwind/src/seh.rs @@ -49,9 +49,15 @@ use alloc::boxed::Box; use core::any::Any; use core::mem::{self, ManuallyDrop}; +use core::ptr; use libc::{c_int, c_uint, c_void}; +// NOTE(nbdd0121): The `canary` field will be part of stable ABI after `c_unwind` stabilization. +#[repr(C)] struct Exception { + // See `gcc.rs` on why this is present. We already have a static here so just use it. + canary: *const _TypeDescriptor, + // This needs to be an Option because we catch the exception by reference // and its destructor is executed by the C++ runtime. When we take the Box // out of the exception, we need to leave the exception in a valid state @@ -235,7 +241,7 @@ static mut TYPE_DESCRIPTOR: _TypeDescriptor = _TypeDescriptor { macro_rules! define_cleanup { ($abi:tt $abi2:tt) => { unsafe extern $abi fn exception_cleanup(e: *mut Exception) { - if let Exception { data: Some(b) } = e.read() { + if let Exception { data: Some(b), .. } = e.read() { drop(b); super::__rust_drop_panic(); } @@ -265,7 +271,7 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 { // The ManuallyDrop is needed here since we don't want Exception to be // dropped when unwinding. Instead it will be dropped by exception_cleanup // which is invoked by the C++ runtime. 
- let mut exception = ManuallyDrop::new(Exception { data: Some(data) }); + let mut exception = ManuallyDrop::new(Exception { canary: &TYPE_DESCRIPTOR, data: Some(data) }); let throw_ptr = &mut exception as *mut _ as *mut _; // This... may seems surprising, and justifiably so. On 32-bit MSVC the @@ -321,8 +327,12 @@ pub unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send> { // __rust_try. This happens when a non-Rust foreign exception is caught. if payload.is_null() { super::__rust_foreign_exception(); - } else { - let exception = &mut *(payload as *mut Exception); - exception.data.take().unwrap() } + let exception = payload as *mut Exception; + let canary = ptr::addr_of!((*exception).canary).read(); + if !ptr::eq(canary, &TYPE_DESCRIPTOR) { + // A foreign Rust exception. + super::__rust_foreign_exception(); + } + (*exception).data.take().unwrap() } diff --git a/library/proc_macro/src/bridge/client.rs b/library/proc_macro/src/bridge/client.rs index 4461b2180..506b2a773 100644 --- a/library/proc_macro/src/bridge/client.rs +++ b/library/proc_macro/src/bridge/client.rs @@ -223,10 +223,10 @@ pub(crate) use super::symbol::Symbol; macro_rules! define_client_side { ($($name:ident { - $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)*;)* + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)?;)* }),* $(,)?) => { $(impl $name { - $(pub(crate) fn $method($($arg: $arg_ty),*) $(-> $ret_ty)* { + $(pub(crate) fn $method($($arg: $arg_ty),*) $(-> $ret_ty)? { Bridge::with(|bridge| { let mut buf = bridge.cached_buffer.take(); diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs index 495c1c5ae..0d3fc2c52 100644 --- a/library/proc_macro/src/lib.rs +++ b/library/proc_macro/src/lib.rs @@ -533,7 +533,7 @@ impl Span { other.resolved_at(*self) } - /// Compares to spans to see if they're equal. + /// Compares two spans to see if they're equal. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn eq(&self, other: &Span) -> bool { self.0 == other.0 @@ -546,7 +546,7 @@ impl Span { /// Note: The observable result of a macro should only rely on the tokens and /// not on this source text. The result of this function is a best effort to /// be used for diagnostics only. - #[unstable(feature = "proc_macro_span", issue = "54725")] + #[stable(feature = "proc_macro_source_text", since = "1.66.0")] pub fn source_text(&self) -> Option<String> { self.0.source_text() } diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 324ecc804..bc10b12ec 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -11,11 +11,11 @@ crate-type = ["dylib", "rlib"] [dependencies] alloc = { path = "../alloc" } -cfg-if = { version = "0.1.8", features = ['rustc-dep-of-std'] } +cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] } panic_unwind = { path = "../panic_unwind", optional = true } panic_abort = { path = "../panic_abort" } core = { path = "../core" } -libc = { version = "0.2.126", default-features = false, features = ['rustc-dep-of-std'] } +libc = { version = "0.2.135", default-features = false, features = ['rustc-dep-of-std'] } compiler_builtins = { version = "0.1.73" } profiler_builtins = { path = "../profiler_builtins", optional = true } unwind = { path = "../unwind" } diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs index 5cf6ec817..9cb74f951 100644 --- a/library/std/src/backtrace.rs +++ b/library/std/src/backtrace.rs @@ -14,8 +14,8 @@ //! 
Backtraces are attempted to be as accurate as possible, but no guarantees //! are provided about the exact accuracy of a backtrace. Instruction pointers, //! symbol names, filenames, line numbers, etc, may all be incorrect when -//! reported. Accuracy is attempted on a best-effort basis, however, and bugs -//! are always welcome to indicate areas of improvement! +//! reported. Accuracy is attempted on a best-effort basis, however, any bug +//! reports are always welcome to indicate areas of improvement! //! //! For most platforms a backtrace with a filename/line number requires that //! programs be compiled with debug information. Without debug information @@ -39,7 +39,7 @@ //! default. Its behavior is governed by two environment variables: //! //! * `RUST_LIB_BACKTRACE` - if this is set to `0` then `Backtrace::capture` -//! will never capture a backtrace. Any other value this is set to will enable +//! will never capture a backtrace. Any other value set will enable //! `Backtrace::capture`. //! //! * `RUST_BACKTRACE` - if `RUST_LIB_BACKTRACE` is not set, then this variable @@ -325,8 +325,7 @@ impl Backtrace { // Capture a backtrace which start just before the function addressed by // `ip` fn create(ip: usize) -> Backtrace { - // SAFETY: We don't attempt to lock this reentrantly. - let _lock = unsafe { lock() }; + let _lock = lock(); let mut frames = Vec::new(); let mut actual_start = None; unsafe { @@ -469,8 +468,7 @@ impl Capture { // Use the global backtrace lock to synchronize this as it's a // requirement of the `backtrace` crate, and then actually resolve // everything. - // SAFETY: We don't attempt to lock this reentrantly. - let _lock = unsafe { lock() }; + let _lock = lock(); for frame in self.frames.iter_mut() { let symbols = &mut frame.symbols; let frame = match &frame.frame { diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs index 9845d1faf..708edc5de 100644 --- a/library/std/src/collections/hash/map.rs +++ b/library/std/src/collections/hash/map.rs @@ -9,7 +9,6 @@ use crate::borrow::Borrow; use crate::cell::Cell; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; -#[cfg(not(bootstrap))] use crate::error::Error; use crate::fmt::{self, Debug}; #[allow(deprecated)] @@ -281,7 +280,8 @@ impl<K, V, S> HashMap<K, V, S> { /// ``` #[inline] #[stable(feature = "hashmap_build_hasher", since = "1.7.0")] - pub fn with_hasher(hash_builder: S) -> HashMap<K, V, S> { + #[rustc_const_unstable(feature = "const_collections_with_hasher", issue = "102575")] + pub const fn with_hasher(hash_builder: S) -> HashMap<K, V, S> { HashMap { base: base::HashMap::with_hasher(hash_builder) } } @@ -759,7 +759,7 @@ where /// Tries to reserve capacity for at least `additional` more elements to be inserted /// in the `HashMap`. The collection may reserve more space to speculatively - /// avoid frequent reallocations. After calling `reserve`, + /// avoid frequent reallocations. After calling `try_reserve`, /// capacity will be greater than or equal to `self.len() + additional` if /// it returns `Ok(())`. /// Does nothing if capacity is already sufficient. 
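The doc fix above clarifies that the capacity guarantee belongs to try_reserve rather than reserve. As a brief usage sketch of that stable fallible API, assuming that printing the error is an acceptable fallback:

    use std::collections::HashMap;

    fn main() {
        let mut map: HashMap<u32, String> = HashMap::new();
        // Unlike `reserve`, `try_reserve` reports allocation failure instead
        // of aborting; on Ok(()), capacity() >= len() + additional.
        match map.try_reserve(10) {
            Ok(()) => assert!(map.capacity() >= map.len() + 10),
            Err(e) => eprintln!("failed to reserve space: {e}"),
        }
    }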
@@ -2160,7 +2160,6 @@ impl<'a, K: Debug, V: Debug> fmt::Display for OccupiedError<'a, K, V> { } } -#[cfg(not(bootstrap))] #[unstable(feature = "map_try_insert", issue = "82766")] impl<'a, K: fmt::Debug, V: fmt::Debug> Error for OccupiedError<'a, K, V> { #[allow(deprecated)] diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs index cb3032719..65634f206 100644 --- a/library/std/src/collections/hash/map/tests.rs +++ b/library/std/src/collections/hash/map/tests.rs @@ -1115,3 +1115,9 @@ fn from_array() { // that's a problem! let _must_not_require_type_annotation = HashMap::from([(1, 2)]); } + +#[test] +fn const_with_hasher() { + const X: HashMap<(), (), ()> = HashMap::with_hasher(()); + assert_eq!(X.len(), 0); +} diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs index 5b6a415fa..cee884145 100644 --- a/library/std/src/collections/hash/set.rs +++ b/library/std/src/collections/hash/set.rs @@ -376,7 +376,8 @@ impl<T, S> HashSet<T, S> { /// ``` #[inline] #[stable(feature = "hashmap_build_hasher", since = "1.7.0")] - pub fn with_hasher(hasher: S) -> HashSet<T, S> { + #[rustc_const_unstable(feature = "const_collections_with_hasher", issue = "102575")] + pub const fn with_hasher(hasher: S) -> HashSet<T, S> { HashSet { base: base::HashSet::with_hasher(hasher) } } @@ -461,7 +462,7 @@ where /// Tries to reserve capacity for at least `additional` more elements to be inserted /// in the `HashSet`. The collection may reserve more space to speculatively - /// avoid frequent reallocations. After calling `reserve`, + /// avoid frequent reallocations. After calling `try_reserve`, /// capacity will be greater than or equal to `self.len() + additional` if /// it returns `Ok(())`. /// Does nothing if capacity is already sufficient. diff --git a/library/std/src/collections/hash/set/tests.rs b/library/std/src/collections/hash/set/tests.rs index 233db276b..941a0450c 100644 --- a/library/std/src/collections/hash/set/tests.rs +++ b/library/std/src/collections/hash/set/tests.rs @@ -496,3 +496,9 @@ fn from_array() { // that's a problem! let _must_not_require_type_annotation = HashSet::from([1, 2]); } + +#[test] +fn const_with_hasher() { + const X: HashSet<(), ()> = HashSet::with_hasher(()); + assert_eq!(X.len(), 0); +} diff --git a/library/std/src/env.rs b/library/std/src/env.rs index 463f71406..6eb7cbea6 100644 --- a/library/std/src/env.rs +++ b/library/std/src/env.rs @@ -603,7 +603,7 @@ pub fn home_dir() -> Option<PathBuf> { /// # Platform-specific behavior /// /// On Unix, returns the value of the `TMPDIR` environment variable if it is -/// set, otherwise for non-Android it returns `/tmp`. If Android, since there +/// set, otherwise for non-Android it returns `/tmp`. On Android, since there /// is no global temporary folder (it is usually allocated per-app), it returns /// `/data/local/tmp`. 
/// On Windows, the behavior is equivalent to that of [`GetTempPath2`][GetTempPath2] / diff --git a/library/std/src/error.rs b/library/std/src/error.rs index e45059595..05f8fd8de 100644 --- a/library/std/src/error.rs +++ b/library/std/src/error.rs @@ -4,242 +4,12 @@ #[cfg(test)] mod tests; -#[cfg(bootstrap)] -use core::array; -#[cfg(bootstrap)] -use core::convert::Infallible; - -#[cfg(bootstrap)] -use crate::alloc::{AllocError, LayoutError}; -#[cfg(bootstrap)] -use crate::any::Demand; -#[cfg(bootstrap)] -use crate::any::{Provider, TypeId}; use crate::backtrace::Backtrace; -#[cfg(bootstrap)] -use crate::borrow::Cow; -#[cfg(bootstrap)] -use crate::cell; -#[cfg(bootstrap)] -use crate::char; -#[cfg(bootstrap)] -use crate::fmt::Debug; -#[cfg(bootstrap)] -use crate::fmt::Display; use crate::fmt::{self, Write}; -#[cfg(bootstrap)] -use crate::io; -#[cfg(bootstrap)] -use crate::mem::transmute; -#[cfg(bootstrap)] -use crate::num; -#[cfg(bootstrap)] -use crate::str; -#[cfg(bootstrap)] -use crate::string; -#[cfg(bootstrap)] -use crate::sync::Arc; -#[cfg(bootstrap)] -use crate::time; -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] pub use core::error::Error; -/// `Error` is a trait representing the basic expectations for error values, -/// i.e., values of type `E` in [`Result<T, E>`]. -/// -/// Errors must describe themselves through the [`Display`] and [`Debug`] -/// traits. Error messages are typically concise lowercase sentences without -/// trailing punctuation: -/// -/// ``` -/// let err = "NaN".parse::<u32>().unwrap_err(); -/// assert_eq!(err.to_string(), "invalid digit found in string"); -/// ``` -/// -/// Errors may provide cause information. [`Error::source()`] is generally -/// used when errors cross "abstraction boundaries". If one module must report -/// an error that is caused by an error from a lower-level module, it can allow -/// accessing that error via [`Error::source()`]. This makes it possible for the -/// high-level module to provide its own errors while also revealing some of the -/// implementation for debugging. -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(test), rustc_diagnostic_item = "Error")] -#[cfg(bootstrap)] -pub trait Error: Debug + Display { - /// The lower-level source of this error, if any. 
- /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::fmt; - /// - /// #[derive(Debug)] - /// struct SuperError { - /// source: SuperErrorSideKick, - /// } - /// - /// impl fmt::Display for SuperError { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "SuperError is here!") - /// } - /// } - /// - /// impl Error for SuperError { - /// fn source(&self) -> Option<&(dyn Error + 'static)> { - /// Some(&self.source) - /// } - /// } - /// - /// #[derive(Debug)] - /// struct SuperErrorSideKick; - /// - /// impl fmt::Display for SuperErrorSideKick { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "SuperErrorSideKick is here!") - /// } - /// } - /// - /// impl Error for SuperErrorSideKick {} - /// - /// fn get_super_error() -> Result<(), SuperError> { - /// Err(SuperError { source: SuperErrorSideKick }) - /// } - /// - /// fn main() { - /// match get_super_error() { - /// Err(e) => { - /// println!("Error: {e}"); - /// println!("Caused by: {}", e.source().unwrap()); - /// } - /// _ => println!("No error"), - /// } - /// } - /// ``` - #[stable(feature = "error_source", since = "1.30.0")] - fn source(&self) -> Option<&(dyn Error + 'static)> { - None - } - - /// Gets the `TypeId` of `self`. - #[doc(hidden)] - #[unstable( - feature = "error_type_id", - reason = "this is memory-unsafe to override in user code", - issue = "60784" - )] - fn type_id(&self, _: private::Internal) -> TypeId - where - Self: 'static, - { - TypeId::of::<Self>() - } - - /// ``` - /// if let Err(e) = "xc".parse::<u32>() { - /// // Print `e` itself, no need for description(). - /// eprintln!("Error: {e}"); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[deprecated(since = "1.42.0", note = "use the Display impl or to_string()")] - fn description(&self) -> &str { - "description() is deprecated; use Display" - } - - #[stable(feature = "rust1", since = "1.0.0")] - #[deprecated( - since = "1.33.0", - note = "replaced by Error::source, which can support downcasting" - )] - #[allow(missing_docs)] - fn cause(&self) -> Option<&dyn Error> { - self.source() - } - - /// Provides type based access to context intended for error reports. - /// - /// Used in conjunction with [`Demand::provide_value`] and [`Demand::provide_ref`] to extract - /// references to member variables from `dyn Error` trait objects. - /// - /// # Example - /// - /// ```rust - /// #![feature(provide_any)] - /// #![feature(error_generic_member_access)] - /// use core::fmt; - /// use core::any::Demand; - /// - /// #[derive(Debug)] - /// struct MyBacktrace { - /// // ... - /// } - /// - /// impl MyBacktrace { - /// fn new() -> MyBacktrace { - /// // ... - /// # MyBacktrace {} - /// } - /// } - /// - /// #[derive(Debug)] - /// struct SourceError { - /// // ... 
- /// } - /// - /// impl fmt::Display for SourceError { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "Example Source Error") - /// } - /// } - /// - /// impl std::error::Error for SourceError {} - /// - /// #[derive(Debug)] - /// struct Error { - /// source: SourceError, - /// backtrace: MyBacktrace, - /// } - /// - /// impl fmt::Display for Error { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "Example Error") - /// } - /// } - /// - /// impl std::error::Error for Error { - /// fn provide<'a>(&'a self, demand: &mut Demand<'a>) { - /// demand - /// .provide_ref::<MyBacktrace>(&self.backtrace) - /// .provide_ref::<dyn std::error::Error + 'static>(&self.source); - /// } - /// } - /// - /// fn main() { - /// let backtrace = MyBacktrace::new(); - /// let source = SourceError {}; - /// let error = Error { source, backtrace }; - /// let dyn_error = &error as &dyn std::error::Error; - /// let backtrace_ref = dyn_error.request_ref::<MyBacktrace>().unwrap(); - /// - /// assert!(core::ptr::eq(&error.backtrace, backtrace_ref)); - /// } - /// ``` - #[unstable(feature = "error_generic_member_access", issue = "99301")] - #[allow(unused_variables)] - fn provide<'a>(&'a self, demand: &mut Demand<'a>) {} -} - -#[cfg(bootstrap)] -#[unstable(feature = "error_generic_member_access", issue = "99301")] -impl<'b> Provider for dyn Error + 'b { - fn provide<'a>(&'a self, demand: &mut Demand<'a>) { - self.provide(demand) - } -} - mod private { // This is a hack to prevent `type_id` from being overridden by `Error` // implementations, since that can enable unsound downcasting. @@ -248,799 +18,6 @@ mod private { pub struct Internal; } -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> { - /// Converts a type of [`Error`] into a box of dyn [`Error`]. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::fmt; - /// use std::mem; - /// - /// #[derive(Debug)] - /// struct AnError; - /// - /// impl fmt::Display for AnError { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "An error") - /// } - /// } - /// - /// impl Error for AnError {} - /// - /// let an_error = AnError; - /// assert!(0 == mem::size_of_val(&an_error)); - /// let a_boxed_error = Box::<dyn Error>::from(an_error); - /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - fn from(err: E) -> Box<dyn Error + 'a> { - Box::new(err) - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> { - /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of - /// dyn [`Error`] + [`Send`] + [`Sync`]. 
- /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::fmt; - /// use std::mem; - /// - /// #[derive(Debug)] - /// struct AnError; - /// - /// impl fmt::Display for AnError { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "An error") - /// } - /// } - /// - /// impl Error for AnError {} - /// - /// unsafe impl Send for AnError {} - /// - /// unsafe impl Sync for AnError {} - /// - /// let an_error = AnError; - /// assert!(0 == mem::size_of_val(&an_error)); - /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error); - /// assert!( - /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> { - Box::new(err) - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl From<String> for Box<dyn Error + Send + Sync> { - /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`]. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::mem; - /// - /// let a_string_error = "a string error".to_string(); - /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error); - /// assert!( - /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - #[inline] - fn from(err: String) -> Box<dyn Error + Send + Sync> { - struct StringError(String); - - impl Error for StringError { - #[allow(deprecated)] - fn description(&self) -> &str { - &self.0 - } - } - - impl Display for StringError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Display::fmt(&self.0, f) - } - } - - // Purposefully skip printing "StringError(..)" - impl Debug for StringError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Debug::fmt(&self.0, f) - } - } - - Box::new(StringError(err)) - } -} - -#[cfg(bootstrap)] -#[stable(feature = "string_box_error", since = "1.6.0")] -impl From<String> for Box<dyn Error> { - /// Converts a [`String`] into a box of dyn [`Error`]. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::mem; - /// - /// let a_string_error = "a string error".to_string(); - /// let a_boxed_error = Box::<dyn Error>::from(a_string_error); - /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - fn from(str_err: String) -> Box<dyn Error> { - let err1: Box<dyn Error + Send + Sync> = From::from(str_err); - let err2: Box<dyn Error> = err1; - err2 - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> { - /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`]. - /// - /// [`str`]: prim@str - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::mem; - /// - /// let a_str_error = "a str error"; - /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error); - /// assert!( - /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - #[inline] - fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> { - From::from(String::from(err)) - } -} - -#[cfg(bootstrap)] -#[stable(feature = "string_box_error", since = "1.6.0")] -impl From<&str> for Box<dyn Error> { - /// Converts a [`str`] into a box of dyn [`Error`]. 
- /// - /// [`str`]: prim@str - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::mem; - /// - /// let a_str_error = "a str error"; - /// let a_boxed_error = Box::<dyn Error>::from(a_str_error); - /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - fn from(err: &str) -> Box<dyn Error> { - From::from(String::from(err)) - } -} - -#[cfg(bootstrap)] -#[stable(feature = "cow_box_error", since = "1.22.0")] -impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> { - /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`]. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::mem; - /// use std::borrow::Cow; - /// - /// let a_cow_str_error = Cow::from("a str error"); - /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error); - /// assert!( - /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> { - From::from(String::from(err)) - } -} - -#[cfg(bootstrap)] -#[stable(feature = "cow_box_error", since = "1.22.0")] -impl<'a> From<Cow<'a, str>> for Box<dyn Error> { - /// Converts a [`Cow`] into a box of dyn [`Error`]. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::mem; - /// use std::borrow::Cow; - /// - /// let a_cow_str_error = Cow::from("a str error"); - /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error); - /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error)) - /// ``` - fn from(err: Cow<'a, str>) -> Box<dyn Error> { - From::from(String::from(err)) - } -} - -#[cfg(bootstrap)] -#[unstable(feature = "never_type", issue = "35121")] -impl Error for ! 
{} - -#[cfg(bootstrap)] -#[unstable( - feature = "allocator_api", - reason = "the precise API and guarantees it provides may be tweaked.", - issue = "32838" -)] -impl Error for AllocError {} - -#[cfg(bootstrap)] -#[stable(feature = "alloc_layout", since = "1.28.0")] -impl Error for LayoutError {} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for str::ParseBoolError { - #[allow(deprecated)] - fn description(&self) -> &str { - "failed to parse bool" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for str::Utf8Error { - #[allow(deprecated)] - fn description(&self) -> &str { - "invalid utf-8: corrupt contents" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for num::ParseIntError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[cfg(bootstrap)] -#[stable(feature = "try_from", since = "1.34.0")] -impl Error for num::TryFromIntError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[cfg(bootstrap)] -#[stable(feature = "try_from", since = "1.34.0")] -impl Error for array::TryFromSliceError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for num::ParseFloatError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for string::FromUtf8Error { - #[allow(deprecated)] - fn description(&self) -> &str { - "invalid utf-8" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for string::FromUtf16Error { - #[allow(deprecated)] - fn description(&self) -> &str { - "invalid utf-16" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "str_parse_error2", since = "1.8.0")] -impl Error for Infallible { - fn description(&self) -> &str { - match *self {} - } -} - -#[cfg(bootstrap)] -#[stable(feature = "decode_utf16", since = "1.9.0")] -impl Error for char::DecodeUtf16Error { - #[allow(deprecated)] - fn description(&self) -> &str { - "unpaired surrogate found" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "u8_from_char", since = "1.59.0")] -impl Error for char::TryFromCharError {} - -#[cfg(bootstrap)] -#[unstable(feature = "map_try_insert", issue = "82766")] -impl<'a, K: Debug + Ord, V: Debug> Error - for crate::collections::btree_map::OccupiedError<'a, K, V> -{ - #[allow(deprecated)] - fn description(&self) -> &str { - "key already exists" - } -} - -#[cfg(bootstrap)] -#[unstable(feature = "map_try_insert", issue = "82766")] -impl<'a, K: Debug, V: Debug> Error for crate::collections::hash_map::OccupiedError<'a, K, V> { - #[allow(deprecated)] - fn description(&self) -> &str { - "key already exists" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "box_error", since = "1.8.0")] -impl<T: Error> Error for Box<T> { - #[allow(deprecated, deprecated_in_future)] - fn description(&self) -> &str { - Error::description(&**self) - } - - #[allow(deprecated)] - fn cause(&self) -> Option<&dyn Error> { - Error::cause(&**self) - } - - fn source(&self) -> Option<&(dyn Error + 'static)> { - Error::source(&**self) - } -} - -#[cfg(bootstrap)] -#[unstable(feature = "thin_box", issue = "92791")] -impl<T: ?Sized + crate::error::Error> crate::error::Error for crate::boxed::ThinBox<T> { - fn source(&self) -> Option<&(dyn crate::error::Error + 'static)> { - use core::ops::Deref; - 
self.deref().source() - } -} - -#[cfg(bootstrap)] -#[stable(feature = "error_by_ref", since = "1.51.0")] -impl<'a, T: Error + ?Sized> Error for &'a T { - #[allow(deprecated, deprecated_in_future)] - fn description(&self) -> &str { - Error::description(&**self) - } - - #[allow(deprecated)] - fn cause(&self) -> Option<&dyn Error> { - Error::cause(&**self) - } - - fn source(&self) -> Option<&(dyn Error + 'static)> { - Error::source(&**self) - } - - fn provide<'b>(&'b self, demand: &mut Demand<'b>) { - Error::provide(&**self, demand); - } -} - -#[cfg(bootstrap)] -#[stable(feature = "arc_error", since = "1.52.0")] -impl<T: Error + ?Sized> Error for Arc<T> { - #[allow(deprecated, deprecated_in_future)] - fn description(&self) -> &str { - Error::description(&**self) - } - - #[allow(deprecated)] - fn cause(&self) -> Option<&dyn Error> { - Error::cause(&**self) - } - - fn source(&self) -> Option<&(dyn Error + 'static)> { - Error::source(&**self) - } - - fn provide<'a>(&'a self, demand: &mut Demand<'a>) { - Error::provide(&**self, demand); - } -} - -#[cfg(bootstrap)] -#[stable(feature = "fmt_error", since = "1.11.0")] -impl Error for fmt::Error { - #[allow(deprecated)] - fn description(&self) -> &str { - "an error occurred when formatting an argument" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "try_borrow", since = "1.13.0")] -impl Error for cell::BorrowError { - #[allow(deprecated)] - fn description(&self) -> &str { - "already mutably borrowed" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "try_borrow", since = "1.13.0")] -impl Error for cell::BorrowMutError { - #[allow(deprecated)] - fn description(&self) -> &str { - "already borrowed" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "try_from", since = "1.34.0")] -impl Error for char::CharTryFromError { - #[allow(deprecated)] - fn description(&self) -> &str { - "converted integer out of range for `char`" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "char_from_str", since = "1.20.0")] -impl Error for char::ParseCharError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[cfg(bootstrap)] -#[stable(feature = "try_reserve", since = "1.57.0")] -impl Error for alloc::collections::TryReserveError {} - -#[cfg(bootstrap)] -#[unstable(feature = "duration_checked_float", issue = "83400")] -impl Error for time::FromFloatSecsError {} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for alloc::ffi::NulError { - #[allow(deprecated)] - fn description(&self) -> &str { - "nul byte found in data" - } -} - -#[cfg(bootstrap)] -#[stable(feature = "rust1", since = "1.0.0")] -impl From<alloc::ffi::NulError> for io::Error { - /// Converts a [`alloc::ffi::NulError`] into a [`io::Error`]. 
- fn from(_: alloc::ffi::NulError) -> io::Error { - io::const_io_error!(io::ErrorKind::InvalidInput, "data provided contains a nul byte") - } -} - -#[cfg(bootstrap)] -#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] -impl Error for core::ffi::FromBytesWithNulError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[cfg(bootstrap)] -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] -impl Error for core::ffi::FromBytesUntilNulError {} - -#[cfg(bootstrap)] -#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")] -impl Error for alloc::ffi::FromVecWithNulError {} - -#[cfg(bootstrap)] -#[stable(feature = "cstring_into", since = "1.7.0")] -impl Error for alloc::ffi::IntoStringError { - #[allow(deprecated)] - fn description(&self) -> &str { - "C string contained non-utf8 bytes" - } - - fn source(&self) -> Option<&(dyn Error + 'static)> { - Some(self.__source()) - } -} - -#[cfg(bootstrap)] -impl<'a> dyn Error + 'a { - /// Request a reference of type `T` as context about this error. - #[unstable(feature = "error_generic_member_access", issue = "99301")] - pub fn request_ref<T: ?Sized + 'static>(&'a self) -> Option<&'a T> { - core::any::request_ref(self) - } - - /// Request a value of type `T` as context about this error. - #[unstable(feature = "error_generic_member_access", issue = "99301")] - pub fn request_value<T: 'static>(&'a self) -> Option<T> { - core::any::request_value(self) - } -} - -// Copied from `any.rs`. -#[cfg(bootstrap)] -impl dyn Error + 'static { - /// Returns `true` if the inner type is the same as `T`. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn is<T: Error + 'static>(&self) -> bool { - // Get `TypeId` of the type this function is instantiated with. - let t = TypeId::of::<T>(); - - // Get `TypeId` of the type in the trait object (`self`). - let concrete = self.type_id(private::Internal); - - // Compare both `TypeId`s on equality. - t == concrete - } - - /// Returns some reference to the inner value if it is of type `T`, or - /// `None` if it isn't. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> { - if self.is::<T>() { - unsafe { Some(&*(self as *const dyn Error as *const T)) } - } else { - None - } - } - - /// Returns some mutable reference to the inner value if it is of type `T`, or - /// `None` if it isn't. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> { - if self.is::<T>() { - unsafe { Some(&mut *(self as *mut dyn Error as *mut T)) } - } else { - None - } - } -} - -#[cfg(bootstrap)] -impl dyn Error + 'static + Send { - /// Forwards to the method defined on the type `dyn Error`. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn is<T: Error + 'static>(&self) -> bool { - <dyn Error + 'static>::is::<T>(self) - } - - /// Forwards to the method defined on the type `dyn Error`. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> { - <dyn Error + 'static>::downcast_ref::<T>(self) - } - - /// Forwards to the method defined on the type `dyn Error`. 
- #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> { - <dyn Error + 'static>::downcast_mut::<T>(self) - } - - /// Request a reference of type `T` as context about this error. - #[unstable(feature = "error_generic_member_access", issue = "99301")] - pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> { - <dyn Error>::request_ref(self) - } - - /// Request a value of type `T` as context about this error. - #[unstable(feature = "error_generic_member_access", issue = "99301")] - pub fn request_value<T: 'static>(&self) -> Option<T> { - <dyn Error>::request_value(self) - } -} - -#[cfg(bootstrap)] -impl dyn Error + 'static + Send + Sync { - /// Forwards to the method defined on the type `dyn Error`. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn is<T: Error + 'static>(&self) -> bool { - <dyn Error + 'static>::is::<T>(self) - } - - /// Forwards to the method defined on the type `dyn Error`. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> { - <dyn Error + 'static>::downcast_ref::<T>(self) - } - - /// Forwards to the method defined on the type `dyn Error`. - #[stable(feature = "error_downcast", since = "1.3.0")] - #[inline] - pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> { - <dyn Error + 'static>::downcast_mut::<T>(self) - } - - /// Request a reference of type `T` as context about this error. - #[unstable(feature = "error_generic_member_access", issue = "99301")] - pub fn request_ref<T: ?Sized + 'static>(&self) -> Option<&T> { - <dyn Error>::request_ref(self) - } - - /// Request a value of type `T` as context about this error. - #[unstable(feature = "error_generic_member_access", issue = "99301")] - pub fn request_value<T: 'static>(&self) -> Option<T> { - <dyn Error>::request_value(self) - } -} - -#[cfg(bootstrap)] -impl dyn Error { - #[inline] - #[stable(feature = "error_downcast", since = "1.3.0")] - /// Attempts to downcast the box to a concrete type. - pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> { - if self.is::<T>() { - unsafe { - let raw: *mut dyn Error = Box::into_raw(self); - Ok(Box::from_raw(raw as *mut T)) - } - } else { - Err(self) - } - } - - /// Returns an iterator starting with the current error and continuing with - /// recursively calling [`Error::source`]. - /// - /// If you want to omit the current error and only use its sources, - /// use `skip(1)`. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(error_iter)] - /// use std::error::Error; - /// use std::fmt; - /// - /// #[derive(Debug)] - /// struct A; - /// - /// #[derive(Debug)] - /// struct B(Option<Box<dyn Error + 'static>>); - /// - /// impl fmt::Display for A { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "A") - /// } - /// } - /// - /// impl fmt::Display for B { - /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// write!(f, "B") - /// } - /// } - /// - /// impl Error for A {} - /// - /// impl Error for B { - /// fn source(&self) -> Option<&(dyn Error + 'static)> { - /// self.0.as_ref().map(|e| e.as_ref()) - /// } - /// } - /// - /// let b = B(Some(Box::new(A))); - /// - /// // let err : Box<Error> = b.into(); // or - /// let err = &b as &(dyn Error); - /// - /// let mut iter = err.sources(); - /// - /// assert_eq!("B".to_string(), iter.next().unwrap().to_string()); - /// assert_eq!("A".to_string(), iter.next().unwrap().to_string()); - /// assert!(iter.next().is_none()); - /// assert!(iter.next().is_none()); - /// ``` - #[unstable(feature = "error_iter", issue = "58520")] - #[inline] - pub fn sources(&self) -> Sources<'_> { - // You may think this method would be better in the Error trait, and you'd be right. - // Unfortunately that doesn't work, not because of the object safety rules but because we - // save a reference to self in Sources below as a trait object. If this method was - // declared in Error, then self would have the type &T where T is some concrete type which - // implements Error. We would need to coerce self to have type &dyn Error, but that requires - // that Self has a known size (i.e., Self: Sized). We can't put that bound on Error - // since that would forbid Error trait objects, and we can't put that bound on the method - // because that means the method can't be called on trait objects (we'd also need the - // 'static bound, but that isn't allowed because methods with bounds on Self other than - // Sized are not object-safe). Requiring an Unsize bound is not backwards compatible. - - Sources { current: Some(self) } - } -} - -/// An iterator over an [`Error`] and its sources. -/// -/// If you want to omit the initial error and only process -/// its sources, use `skip(1)`. -#[unstable(feature = "error_iter", issue = "58520")] -#[derive(Clone, Debug)] -#[cfg(bootstrap)] -pub struct Sources<'a> { - current: Option<&'a (dyn Error + 'static)>, -} - -#[cfg(bootstrap)] -#[unstable(feature = "error_iter", issue = "58520")] -impl<'a> Iterator for Sources<'a> { - type Item = &'a (dyn Error + 'static); - - fn next(&mut self) -> Option<Self::Item> { - let current = self.current; - self.current = self.current.and_then(Error::source); - current - } -} - -#[cfg(bootstrap)] -impl dyn Error + Send { - #[inline] - #[stable(feature = "error_downcast", since = "1.3.0")] - /// Attempts to downcast the box to a concrete type. - pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> { - let err: Box<dyn Error> = self; - <dyn Error>::downcast(err).map_err(|s| unsafe { - // Reapply the `Send` marker. - transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s) - }) - } -} - -#[cfg(bootstrap)] -impl dyn Error + Send + Sync { - #[inline] - #[stable(feature = "error_downcast", since = "1.3.0")] - /// Attempts to downcast the box to a concrete type. 
- pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> { - let err: Box<dyn Error> = self; - <dyn Error>::downcast(err).map_err(|s| unsafe { - // Reapply the `Send + Sync` marker. - transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s) - }) - } -} - /// An error reporter that prints an error and its sources. /// /// Report also exposes configuration options for formatting the error sources, either entirely on a diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index c6c78dc39..188ff00e1 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -1365,6 +1365,34 @@ impl FileTimes { impl Permissions { /// Returns `true` if these permissions describe a readonly (unwritable) file. /// + /// # Note + /// + /// This function does not take Access Control Lists (ACLs) or Unix group + /// membership into account. + /// + /// # Windows + /// + /// On Windows this returns [`FILE_ATTRIBUTE_READONLY`](https://docs.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants). + /// If `FILE_ATTRIBUTE_READONLY` is set then writes to the file will fail + /// but the user may still have permission to change this flag. If + /// `FILE_ATTRIBUTE_READONLY` is *not* set then writes may still fail due + /// to lack of write permission. + /// The behavior of this attribute for directories depends on the Windows + /// version. + /// + /// # Unix (including macOS) + /// + /// On Unix-based platforms this checks if *any* of the owner, group or others + /// write permission bits are set. It does not check if the current + /// user is in the file's assigned group. It also does not check ACLs. + /// Therefore even if this returns true you may not be able to write to the + /// file, and vice versa. The [`PermissionsExt`] trait gives direct access + /// to the permission bits but also does not read ACLs. If you need to + /// accurately know whether or not a file is writable use the `access()` + /// function from libc. + /// + /// [`PermissionsExt`]: crate::os::unix::fs::PermissionsExt + /// /// # Examples /// /// ```no_run @@ -1390,8 +1418,40 @@ impl Permissions { /// using the resulting `Permission` will update file permissions to allow /// writing. /// - /// This operation does **not** modify the filesystem. To modify the - /// filesystem use the [`set_permissions`] function. + /// This operation does **not** modify the files attributes. This only + /// changes the in-memory value of these attributes for this `Permissions` + /// instance. To modify the files attributes use the [`set_permissions`] + /// function which commits these attribute changes to the file. + /// + /// # Note + /// + /// `set_readonly(false)` makes the file *world-writable* on Unix. + /// You can use the [`PermissionsExt`] trait on Unix to avoid this issue. + /// + /// It also does not take Access Control Lists (ACLs) or Unix group + /// membership into account. + /// + /// # Windows + /// + /// On Windows this sets or clears [`FILE_ATTRIBUTE_READONLY`](https://docs.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants). + /// If `FILE_ATTRIBUTE_READONLY` is set then writes to the file will fail + /// but the user may still have permission to change this flag. If + /// `FILE_ATTRIBUTE_READONLY` is *not* set then the write may still fail if + /// the user does not have permission to write to the file. + /// + /// In Windows 7 and earlier this attribute prevents deleting empty + /// directories. It does not prevent modifying the directory contents. 
+ /// On later versions of Windows this attribute is ignored for directories. + /// + /// # Unix (including macOS) + /// + /// On Unix-based platforms this sets or clears the write access bit for + /// the owner, group *and* others, equivalent to `chmod a+w <file>` + /// or `chmod a-w <file>` respectively. The latter will grant write access + /// to all users! You can use the [`PermissionsExt`] trait on Unix + /// to avoid this issue. + /// + /// [`PermissionsExt`]: crate::os::unix::fs::PermissionsExt /// /// # Examples /// @@ -1405,7 +1465,8 @@ impl Permissions { /// /// permissions.set_readonly(true); /// - /// // filesystem doesn't change + /// // filesystem doesn't change, only the in memory state of the + /// // readonly permission /// assert_eq!(false, metadata.permissions().readonly()); /// /// // just this particular `permissions`. diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs index 29b09fcc5..3cabf2449 100644 --- a/library/std/src/io/error.rs +++ b/library/std/src/io/error.rs @@ -76,7 +76,6 @@ impl fmt::Debug for Error { } } -#[cfg(not(bootstrap))] #[stable(feature = "rust1", since = "1.0.0")] impl From<alloc::ffi::NulError> for Error { /// Converts a [`alloc::ffi::NulError`] into a [`Error`]. @@ -388,7 +387,7 @@ pub enum ErrorKind { impl ErrorKind { pub(crate) fn as_str(&self) -> &'static str { use ErrorKind::*; - // Strictly alphabetical, please. (Sadly rustfmt cannot do this yet.) + // tidy-alphabetical-start match *self { AddrInUse => "address in use", AddrNotAvailable => "address not available", @@ -432,6 +431,7 @@ impl ErrorKind { WouldBlock => "operation would block", WriteZero => "write zero", } + // tidy-alphabetical-end } } @@ -482,6 +482,7 @@ impl Error { /// originate from the OS itself. The `error` argument is an arbitrary /// payload which will be contained in this [`Error`]. /// + /// Note that this function allocates memory on the heap. /// If no extra payload is required, use the `From` conversion from /// `ErrorKind`. /// @@ -496,7 +497,7 @@ impl Error { /// // errors can also be created from other errors /// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error); /// - /// // creating an error without payload + /// // creating an error without payload (and without memory allocation) /// let eof_error = Error::from(ErrorKind::UnexpectedEof); /// ``` #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs index c897a5e87..16c634e9a 100644 --- a/library/std/src/io/error/tests.rs +++ b/library/std/src/io/error/tests.rs @@ -86,7 +86,7 @@ fn test_errorkind_packing() { assert_eq!(Error::from(ErrorKind::NotFound).kind(), ErrorKind::NotFound); assert_eq!(Error::from(ErrorKind::PermissionDenied).kind(), ErrorKind::PermissionDenied); assert_eq!(Error::from(ErrorKind::Uncategorized).kind(), ErrorKind::Uncategorized); - // Check that the innards look like like what we want. + // Check that the innards look like what we want. 
assert_matches!( Error::from(ErrorKind::OutOfMemory).repr.data(), ErrorData::Simple(ErrorKind::OutOfMemory), diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs index eeace2c43..23a13523f 100644 --- a/library/std/src/io/mod.rs +++ b/library/std/src/io/mod.rs @@ -262,9 +262,12 @@ use crate::sys_common::memchr; #[stable(feature = "bufwriter_into_parts", since = "1.56.0")] pub use self::buffered::WriterPanicked; +pub(crate) use self::stdio::attempt_print_to_stderr; #[unstable(feature = "internal_output_capture", issue = "none")] #[doc(no_inline, hidden)] pub use self::stdio::set_output_capture; +#[unstable(feature = "is_terminal", issue = "98070")] +pub use self::stdio::IsTerminal; #[unstable(feature = "print_internals", issue = "none")] pub use self::stdio::{_eprint, _print}; #[stable(feature = "rust1", since = "1.0.0")] @@ -580,7 +583,7 @@ pub trait Read { /// `n > buf.len()`. /// /// No guarantees are provided about the contents of `buf` when this - /// function is called, implementations cannot rely on any property of the + /// function is called, so implementations cannot rely on any property of the /// contents of `buf` being true. It is recommended that *implementations* /// only write data to `buf` instead of reading its contents. /// @@ -756,7 +759,7 @@ pub trait Read { /// specified buffer `buf`. /// /// No guarantees are provided about the contents of `buf` when this - /// function is called, implementations cannot rely on any property of the + /// function is called, so implementations cannot rely on any property of the /// contents of `buf` being true. It is recommended that implementations /// only write data to `buf` instead of reading its contents. The /// documentation on [`read`] has a more detailed explanation on this diff --git a/library/std/src/io/readbuf.rs b/library/std/src/io/readbuf.rs index b1a84095f..4800eeda0 100644 --- a/library/std/src/io/readbuf.rs +++ b/library/std/src/io/readbuf.rs @@ -3,10 +3,10 @@ #[cfg(test)] mod tests; -use crate::cmp; use crate::fmt::{self, Debug, Formatter}; use crate::io::{Result, Write}; use crate::mem::{self, MaybeUninit}; +use crate::{cmp, ptr}; /// A borrowed byte buffer which is incrementally filled and initialized. /// @@ -250,8 +250,11 @@ impl<'a> BorrowedCursor<'a> { /// Initializes all bytes in the cursor. #[inline] pub fn ensure_init(&mut self) -> &mut Self { - for byte in self.uninit_mut() { - byte.write(0); + let uninit = self.uninit_mut(); + // SAFETY: 0 is a valid value for MaybeUninit<u8> and the length matches the allocation + // since it is comes from a slice reference. + unsafe { + ptr::write_bytes(uninit.as_mut_ptr(), 0, uninit.len()); } self.buf.init = self.buf.capacity(); diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs index 2dc12a18a..1141a957d 100644 --- a/library/std/src/io/stdio.rs +++ b/library/std/src/io/stdio.rs @@ -7,6 +7,7 @@ use crate::io::prelude::*; use crate::cell::{Cell, RefCell}; use crate::fmt; +use crate::fs::File; use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines}; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::{Arc, Mutex, MutexGuard, OnceLock}; @@ -999,7 +1000,18 @@ fn print_to<T>(args: fmt::Arguments<'_>, global_s: fn() -> T, label: &str) where T: Write, { - if OUTPUT_CAPTURE_USED.load(Ordering::Relaxed) + if print_to_buffer_if_capture_used(args) { + // Successfully wrote to capture buffer. 
+ return; + } + + if let Err(e) = global_s().write_fmt(args) { + panic!("failed printing to {label}: {e}"); + } +} + +fn print_to_buffer_if_capture_used(args: fmt::Arguments<'_>) -> bool { + OUTPUT_CAPTURE_USED.load(Ordering::Relaxed) && OUTPUT_CAPTURE.try_with(|s| { // Note that we completely remove a local sink to write to in case // our printing recursively panics/prints, so the recursive @@ -1009,16 +1021,49 @@ where s.set(Some(w)); }) }) == Ok(Some(())) - { - // Successfully wrote to capture buffer. +} + +/// Used by impl Termination for Result to print error after `main` or a test +/// has returned. Should avoid panicking, although we can't help it if one of +/// the Display impls inside args decides to. +pub(crate) fn attempt_print_to_stderr(args: fmt::Arguments<'_>) { + if print_to_buffer_if_capture_used(args) { return; } - if let Err(e) = global_s().write_fmt(args) { - panic!("failed printing to {label}: {e}"); - } + // Ignore error if the write fails, for example because stderr is already + // closed. There is not much point panicking at this point. + let _ = stderr().write_fmt(args); } +/// Trait to determine if a descriptor/handle refers to a terminal/tty. +#[unstable(feature = "is_terminal", issue = "98070")] +pub trait IsTerminal: crate::sealed::Sealed { + /// Returns `true` if the descriptor/handle refers to a terminal/tty. + /// + /// On platforms where Rust does not know how to detect a terminal yet, this will return + /// `false`. This will also return `false` if an unexpected error occurred, such as from + /// passing an invalid file descriptor. + fn is_terminal(&self) -> bool; +} + +macro_rules! impl_is_terminal { + ($($t:ty),*$(,)?) => {$( + #[unstable(feature = "sealed", issue = "none")] + impl crate::sealed::Sealed for $t {} + + #[unstable(feature = "is_terminal", issue = "98070")] + impl IsTerminal for $t { + #[inline] + fn is_terminal(&self) -> bool { + crate::sys::io::is_terminal(self) + } + } + )*} +} + +impl_is_terminal!(File, Stdin, StdinLock<'_>, Stdout, StdoutLock<'_>, Stderr, StderrLock<'_>); + #[unstable( feature = "print_internals", reason = "implementation detail which may disappear or be replaced at any time", diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs index a4b0522b0..e35145c4a 100644 --- a/library/std/src/keyword_docs.rs +++ b/library/std/src/keyword_docs.rs @@ -1867,11 +1867,15 @@ mod type_keyword {} /// Code or interfaces whose [memory safety] cannot be verified by the type /// system. /// -/// The `unsafe` keyword has two uses: to declare the existence of contracts the -/// compiler can't check (`unsafe fn` and `unsafe trait`), and to declare that a -/// programmer has checked that these contracts have been upheld (`unsafe {}` -/// and `unsafe impl`, but also `unsafe fn` -- see below). They are not mutually -/// exclusive, as can be seen in `unsafe fn`. +/// The `unsafe` keyword has two uses: +/// - to declare the existence of contracts the compiler can't check (`unsafe fn` and `unsafe +/// trait`), +/// - and to declare that a programmer has checked that these contracts have been upheld (`unsafe +/// {}` and `unsafe impl`, but also `unsafe fn` -- see below). +/// +/// They are not mutually exclusive, as can be seen in `unsafe fn`: the body of an `unsafe fn` is, +/// by default, treated like an unsafe block. The `unsafe_op_in_unsafe_fn` lint can be enabled to +/// change that. 
/// /// # Unsafe abilities /// @@ -1914,12 +1918,12 @@ mod type_keyword {} /// - `unsafe impl`: the contract necessary to implement the trait has been /// checked by the programmer and is guaranteed to be respected. /// -/// `unsafe fn` also acts like an `unsafe {}` block +/// By default, `unsafe fn` also acts like an `unsafe {}` block /// around the code inside the function. This means it is not just a signal to /// the caller, but also promises that the preconditions for the operations -/// inside the function are upheld. Mixing these two meanings can be confusing -/// and [proposal]s exist to use `unsafe {}` blocks inside such functions when -/// making `unsafe` operations. +/// inside the function are upheld. Mixing these two meanings can be confusing, so the +/// `unsafe_op_in_unsafe_fn` lint can be enabled to warn against that and require explicit unsafe +/// blocks even inside `unsafe fn`. /// /// See the [Rustnomicon] and the [Reference] for more information. /// @@ -1987,13 +1991,16 @@ mod type_keyword {} /// /// ```rust /// # #![allow(dead_code)] +/// #![deny(unsafe_op_in_unsafe_fn)] +/// /// /// Dereference the given pointer. /// /// /// /// # Safety /// /// /// /// `ptr` must be aligned and must not be dangling. /// unsafe fn deref_unchecked(ptr: *const i32) -> i32 { -/// *ptr +/// // SAFETY: the caller is required to ensure that `ptr` is aligned and dereferenceable. +/// unsafe { *ptr } /// } /// /// let a = 3; @@ -2003,35 +2010,118 @@ mod type_keyword {} /// unsafe { assert_eq!(*b, deref_unchecked(b)); }; /// ``` /// -/// Traits marked as `unsafe` must be [`impl`]emented using `unsafe impl`. This -/// makes a guarantee to other `unsafe` code that the implementation satisfies -/// the trait's safety contract. The [Send] and [Sync] traits are examples of -/// this behaviour in the standard library. +/// ## `unsafe` and traits +/// +/// The interactions of `unsafe` and traits can be surprising, so let us contrast the +/// two combinations of safe `fn` in `unsafe trait` and `unsafe fn` in safe trait using two +/// examples: +/// +/// ```rust +/// /// # Safety +/// /// +/// /// `make_even` must return an even number. +/// unsafe trait MakeEven { +/// fn make_even(&self) -> i32; +/// } +/// +/// // SAFETY: Our `make_even` always returns something even. +/// unsafe impl MakeEven for i32 { +/// fn make_even(&self) -> i32 { +/// self << 1 +/// } +/// } +/// +/// fn use_make_even(x: impl MakeEven) { +/// if x.make_even() % 2 == 1 { +/// // SAFETY: this can never happen, because all `MakeEven` implementations +/// // ensure that `make_even` returns something even. +/// unsafe { std::hint::unreachable_unchecked() }; +/// } +/// } +/// ``` +/// +/// Note how the safety contract of the trait is upheld by the implementation, and is itself used to +/// uphold the safety contract of the unsafe function `unreachable_unchecked` called by +/// `use_make_even`. `make_even` itself is a safe function because its *callers* do not have to +/// worry about any contract, only the *implementation* of `MakeEven` is required to uphold a +/// certain contract. `use_make_even` is safe because it can use the promise made by `MakeEven` +/// implementations to uphold the safety contract of the `unsafe fn unreachable_unchecked` it calls. +/// +/// It is also possible to have `unsafe fn` in a regular safe `trait`: /// /// ```rust -/// /// Implementors of this trait must guarantee an element is always -/// /// accessible with index 3. 
-/// unsafe trait ThreeIndexable<T> { -/// /// Returns a reference to the element with index 3 in `&self`. -/// fn three(&self) -> &T; +/// # #![feature(never_type)] +/// #![deny(unsafe_op_in_unsafe_fn)] +/// +/// trait Indexable { +/// const LEN: usize; +/// +/// /// # Safety +/// /// +/// /// The caller must ensure that `idx < LEN`. +/// unsafe fn idx_unchecked(&self, idx: usize) -> i32; /// } /// -/// // The implementation of `ThreeIndexable` for `[T; 4]` is `unsafe` -/// // because the implementor must abide by a contract the compiler cannot -/// // check but as a programmer we know there will always be a valid element -/// // at index 3 to access. -/// unsafe impl<T> ThreeIndexable<T> for [T; 4] { -/// fn three(&self) -> &T { -/// // SAFETY: implementing the trait means there always is an element -/// // with index 3 accessible. -/// unsafe { self.get_unchecked(3) } +/// // The implementation for `i32` doesn't need to do any contract reasoning. +/// impl Indexable for i32 { +/// const LEN: usize = 1; +/// +/// unsafe fn idx_unchecked(&self, idx: usize) -> i32 { +/// debug_assert_eq!(idx, 0); +/// *self /// } /// } /// -/// let a = [1, 2, 4, 8]; -/// assert_eq!(a.three(), &8); +/// // The implementation for arrays exploits the function contract to +/// // make use of `get_unchecked` on slices and avoid a run-time check. +/// impl Indexable for [i32; 42] { +/// const LEN: usize = 42; +/// +/// unsafe fn idx_unchecked(&self, idx: usize) -> i32 { +/// // SAFETY: As per this trait's documentation, the caller ensures +/// // that `idx < 42`. +/// unsafe { *self.get_unchecked(idx) } +/// } +/// } +/// +/// // The implementation for the never type declares a length of 0, +/// // which means `idx_unchecked` can never be called. +/// impl Indexable for ! { +/// const LEN: usize = 0; +/// +/// unsafe fn idx_unchecked(&self, idx: usize) -> i32 { +/// // SAFETY: As per this trait's documentation, the caller ensures +/// // that `idx < 0`, which is impossible, so this is dead code. +/// unsafe { std::hint::unreachable_unchecked() } +/// } +/// } +/// +/// fn use_indexable<I: Indexable>(x: I, idx: usize) -> i32 { +/// if idx < I::LEN { +/// // SAFETY: We have checked that `idx < I::LEN`. +/// unsafe { x.idx_unchecked(idx) } +/// } else { +/// panic!("index out-of-bounds") +/// } +/// } /// ``` /// +/// This time, `use_indexable` is safe because it uses a run-time check to discharge the safety +/// contract of `idx_unchecked`. Implementing `Indexable` is safe because when writing +/// `idx_unchecked`, we don't have to worry: our *callers* need to discharge a proof obligation +/// (like `use_indexable` does), but the *implementation* of `get_unchecked` has no proof obligation +/// to contend with. Of course, the implementation of `Indexable` may choose to call other unsafe +/// operations, and then it needs an `unsafe` *block* to indicate it discharged the proof +/// obligations of its callees. (We enabled `unsafe_op_in_unsafe_fn`, so the body of `idx_unchecked` +/// is not implicitly an unsafe block.) For that purpose it can make use of the contract that all +/// its callers must uphold -- the fact that `idx < LEN`. +/// +/// Formally speaking, an `unsafe fn` in a trait is a function with *preconditions* that go beyond +/// those encoded by the argument types (such as `idx < LEN`), whereas an `unsafe trait` can declare +/// that some of its functions have *postconditions* that go beyond those encoded in the return type +/// (such as returning an even integer). 
If a trait needs a function with both extra precondition +/// and extra postcondition, then it needs an `unsafe fn` in an `unsafe trait`. +/// /// [`extern`]: keyword.extern.html /// [`trait`]: keyword.trait.html /// [`static`]: keyword.static.html @@ -2043,7 +2133,6 @@ mod type_keyword {} /// [nomicon-soundness]: ../nomicon/safe-unsafe-meaning.html /// [soundness]: https://rust-lang.github.io/unsafe-code-guidelines/glossary.html#soundness-of-code--of-a-library /// [Reference]: ../reference/unsafety.html -/// [proposal]: https://github.com/rust-lang/rfcs/pull/2585 /// [discussion on Rust Internals]: https://internals.rust-lang.org/t/what-does-unsafe-mean/6696 mod unsafe_keyword {} diff --git a/library/std/src/lazy.rs b/library/std/src/lazy.rs deleted file mode 100644 index f8c06c3f9..000000000 --- a/library/std/src/lazy.rs +++ /dev/null @@ -1 +0,0 @@ -//! Lazy values and one-time initialization of static data. diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index bc4f1b27c..385585dad 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -145,8 +145,8 @@ //! abstracting over differences in common platforms, most notably Windows and //! Unix derivatives. //! -//! Common types of I/O, including [files], [TCP], [UDP], are defined in the -//! [`io`], [`fs`], and [`net`] modules. +//! Common types of I/O, including [files], [TCP], and [UDP], are defined in +//! the [`io`], [`fs`], and [`net`] modules. //! //! The [`thread`] module contains Rust's threading abstractions. [`sync`] //! contains further primitive shared memory types, including [`atomic`] and @@ -251,11 +251,11 @@ #![feature(doc_notable_trait)] #![feature(dropck_eyepatch)] #![feature(exhaustive_patterns)] +#![feature(if_let_guard)] #![feature(intra_doc_pointers)] -#![cfg_attr(bootstrap, feature(label_break_value))] +#![feature(is_terminal)] #![feature(lang_items)] #![feature(let_chains)] -#![cfg_attr(bootstrap, feature(let_else))] #![feature(linkage)] #![feature(link_cfg)] #![feature(min_specialization)] @@ -280,11 +280,10 @@ #![feature(core_intrinsics)] #![feature(cstr_from_bytes_until_nul)] #![feature(cstr_internals)] -#![feature(duration_checked_float)] #![feature(duration_constants)] -#![cfg_attr(not(bootstrap), feature(error_generic_member_access))] -#![cfg_attr(not(bootstrap), feature(error_in_core))] -#![cfg_attr(not(bootstrap), feature(error_iter))] +#![feature(error_generic_member_access)] +#![feature(error_in_core)] +#![feature(error_iter)] #![feature(exact_size_is_empty)] #![feature(exclusive_wrapper)] #![feature(extend_one)] @@ -293,10 +292,9 @@ #![feature(hasher_prefixfree_extras)] #![feature(hashmap_internals)] #![feature(int_error_internals)] -#![feature(is_some_with)] +#![feature(is_some_and)] #![feature(maybe_uninit_slice)] #![feature(maybe_uninit_write_slice)] -#![feature(mixed_integer_ops)] #![feature(nonnull_slice_from_raw_parts)] #![feature(panic_can_unwind)] #![feature(panic_info_message)] @@ -315,6 +313,7 @@ #![feature(strict_provenance)] #![feature(maybe_uninit_uninit_array)] #![feature(const_maybe_uninit_uninit_array)] +#![feature(const_waker)] // // Library features (alloc): #![feature(alloc_layout_extra)] @@ -350,9 +349,9 @@ #![feature(trace_macros)] // // Only used in tests/benchmarks: -#![feature(bench_black_box)] // // Only for const-ness: +#![feature(const_collections_with_hasher)] #![feature(const_io_structs)] #![feature(const_ip)] #![feature(const_ipv4)] @@ -530,9 +529,6 @@ pub mod process; pub mod sync; pub mod time; -#[unstable(feature = "once_cell", issue = "74465")] -pub 
mod lazy; - // Pull in `std_float` crate into libstd. The contents of // `std_float` are in a different repository: rust-lang/portable-simd. #[path = "../../portable-simd/crates/std_float/src/lib.rs"] diff --git a/library/std/src/os/fd/mod.rs b/library/std/src/os/fd/mod.rs index a45694753..c6aa7c77d 100644 --- a/library/std/src/os/fd/mod.rs +++ b/library/std/src/os/fd/mod.rs @@ -1,16 +1,25 @@ //! Owned and borrowed Unix-like file descriptors. +//! +//! This module is supported on Unix platforms and WASI, which both use a +//! similar file descriptor system for referencing OS resources. #![stable(feature = "io_safety", since = "1.63.0")] #![deny(unsafe_op_in_unsafe_fn)] // `RawFd`, `AsRawFd`, etc. -pub mod raw; +mod raw; // `OwnedFd`, `AsFd`, etc. -pub mod owned; +mod owned; // Implementations for `AsRawFd` etc. for network types. mod net; #[cfg(test)] mod tests; + +// Export the types and traits for the public API. +#[unstable(feature = "os_fd", issue = "98699")] +pub use owned::*; +#[unstable(feature = "os_fd", issue = "98699")] +pub use raw::*; diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs index 71e33fb9e..c16518577 100644 --- a/library/std/src/os/fd/owned.rs +++ b/library/std/src/os/fd/owned.rs @@ -6,6 +6,7 @@ use super::raw::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; use crate::fmt; use crate::fs; +use crate::io; use crate::marker::PhantomData; use crate::mem::forget; #[cfg(not(any(target_arch = "wasm32", target_env = "sgx")))] @@ -192,6 +193,23 @@ impl fmt::Debug for OwnedFd { } } +macro_rules! impl_is_terminal { + ($($t:ty),*$(,)?) => {$( + #[unstable(feature = "sealed", issue = "none")] + impl crate::sealed::Sealed for $t {} + + #[unstable(feature = "is_terminal", issue = "98070")] + impl crate::io::IsTerminal for $t { + #[inline] + fn is_terminal(&self) -> bool { + crate::sys::io::is_terminal(self) + } + } + )*} +} + +impl_is_terminal!(BorrowedFd<'_>, OwnedFd); + /// A trait to borrow the file descriptor from an underlying object. 
/// /// This is only available on unix platforms and must be imported in order to @@ -206,10 +224,8 @@ pub trait AsFd { /// ```rust,no_run /// use std::fs::File; /// # use std::io; - /// # #[cfg(target_os = "wasi")] - /// # use std::os::wasi::io::{AsFd, BorrowedFd}; - /// # #[cfg(unix)] - /// # use std::os::unix::io::{AsFd, BorrowedFd}; + /// # #[cfg(any(unix, target_os = "wasi"))] + /// # use std::os::fd::{AsFd, BorrowedFd}; /// /// let mut f = File::open("foo.txt")?; /// # #[cfg(any(unix, target_os = "wasi"))] @@ -387,3 +403,54 @@ impl<T: AsFd> AsFd for Box<T> { (**self).as_fd() } } + +#[stable(feature = "io_safety", since = "1.63.0")] +impl AsFd for io::Stdin { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + unsafe { BorrowedFd::borrow_raw(0) } + } +} + +#[stable(feature = "io_safety", since = "1.63.0")] +impl<'a> AsFd for io::StdinLock<'a> { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + // SAFETY: user code should not close stdin out from under the standard library + unsafe { BorrowedFd::borrow_raw(0) } + } +} + +#[stable(feature = "io_safety", since = "1.63.0")] +impl AsFd for io::Stdout { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + unsafe { BorrowedFd::borrow_raw(1) } + } +} + +#[stable(feature = "io_safety", since = "1.63.0")] +impl<'a> AsFd for io::StdoutLock<'a> { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + // SAFETY: user code should not close stdout out from under the standard library + unsafe { BorrowedFd::borrow_raw(1) } + } +} + +#[stable(feature = "io_safety", since = "1.63.0")] +impl AsFd for io::Stderr { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + unsafe { BorrowedFd::borrow_raw(2) } + } +} + +#[stable(feature = "io_safety", since = "1.63.0")] +impl<'a> AsFd for io::StderrLock<'a> { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + // SAFETY: user code should not close stderr out from under the standard library + unsafe { BorrowedFd::borrow_raw(2) } + } +} diff --git a/library/std/src/os/fd/raw.rs b/library/std/src/os/fd/raw.rs index 1b3d11042..f92a05066 100644 --- a/library/std/src/os/fd/raw.rs +++ b/library/std/src/os/fd/raw.rs @@ -42,10 +42,8 @@ pub trait AsRawFd { /// ```no_run /// use std::fs::File; /// # use std::io; - /// #[cfg(unix)] - /// use std::os::unix::io::{AsRawFd, RawFd}; - /// #[cfg(target_os = "wasi")] - /// use std::os::wasi::io::{AsRawFd, RawFd}; + /// #[cfg(any(unix, target_os = "wasi"))] + /// use std::os::fd::{AsRawFd, RawFd}; /// /// let mut f = File::open("foo.txt")?; /// // Note that `raw_fd` is only valid as long as `f` exists. 
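
Editor's note: the `AsFd` impls added above hand out borrowed descriptors 0, 1 and 2 for the standard streams, and the `impl_is_terminal!` block earlier wires `IsTerminal` up for `BorrowedFd`/`OwnedFd`. A hedged sketch of how the two pieces combine; `is_terminal` is still unstable in this snapshot (tracking issue 98070), and the `std::os::fd` path may need the `os_fd` feature here:

```rust
#![feature(is_terminal)] // unstable in this snapshot
use std::io::{self, IsTerminal};
use std::os::fd::AsFd; // may additionally require `os_fd` on this snapshot

fn main() {
    // Borrow fd 1 via the new `AsFd for io::Stdout` impl, then ask the
    // `IsTerminal` impl on `BorrowedFd` whether it refers to a tty.
    let tty = io::stdout().as_fd().is_terminal();
    println!("stdout is a terminal: {tty}");
}
```
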
@@ -83,10 +81,8 @@ pub trait FromRawFd { /// ```no_run /// use std::fs::File; /// # use std::io; - /// #[cfg(unix)] - /// use std::os::unix::io::{FromRawFd, IntoRawFd, RawFd}; - /// #[cfg(target_os = "wasi")] - /// use std::os::wasi::io::{FromRawFd, IntoRawFd, RawFd}; + /// #[cfg(any(unix, target_os = "wasi"))] + /// use std::os::fd::{FromRawFd, IntoRawFd, RawFd}; /// /// let f = File::open("foo.txt")?; /// # #[cfg(any(unix, target_os = "wasi"))] @@ -121,10 +117,8 @@ pub trait IntoRawFd { /// ```no_run /// use std::fs::File; /// # use std::io; - /// #[cfg(unix)] - /// use std::os::unix::io::{IntoRawFd, RawFd}; - /// #[cfg(target_os = "wasi")] - /// use std::os::wasi::io::{IntoRawFd, RawFd}; + /// #[cfg(any(unix, target_os = "wasi"))] + /// use std::os::fd::{IntoRawFd, RawFd}; /// /// let f = File::open("foo.txt")?; /// #[cfg(any(unix, target_os = "wasi"))] diff --git a/library/std/src/os/mod.rs b/library/std/src/os/mod.rs index 18c64b510..42773805c 100644 --- a/library/std/src/os/mod.rs +++ b/library/std/src/os/mod.rs @@ -145,9 +145,11 @@ pub mod solaris; pub mod solid; #[cfg(target_os = "vxworks")] pub mod vxworks; +#[cfg(target_os = "watchos")] +pub(crate) mod watchos; #[cfg(any(unix, target_os = "wasi", doc))] -mod fd; +pub mod fd; #[cfg(any(target_os = "linux", target_os = "android", doc))] mod net; diff --git a/library/std/src/os/unix/io/fd.rs b/library/std/src/os/unix/io/fd.rs deleted file mode 100644 index d4cb69645..000000000 --- a/library/std/src/os/unix/io/fd.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Owned and borrowed file descriptors. - -// Tests for this module -#[cfg(test)] -mod tests; - -#[stable(feature = "io_safety", since = "1.63.0")] -pub use crate::os::fd::owned::*; diff --git a/library/std/src/os/unix/io/mod.rs b/library/std/src/os/unix/io/mod.rs index 3ab5606f8..25b5dbff1 100644 --- a/library/std/src/os/unix/io/mod.rs +++ b/library/std/src/os/unix/io/mod.rs @@ -77,10 +77,9 @@ #![stable(feature = "rust1", since = "1.0.0")] -mod fd; -mod raw; - -#[stable(feature = "io_safety", since = "1.63.0")] -pub use fd::*; #[stable(feature = "rust1", since = "1.0.0")] -pub use raw::*; +pub use crate::os::fd::*; + +// Tests for this module +#[cfg(test)] +mod tests; diff --git a/library/std/src/os/unix/io/raw.rs b/library/std/src/os/unix/io/raw.rs deleted file mode 100644 index a4d2ba797..000000000 --- a/library/std/src/os/unix/io/raw.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! Unix-specific extensions to general I/O primitives. - -#![stable(feature = "rust1", since = "1.0.0")] - -#[stable(feature = "rust1", since = "1.0.0")] -pub use crate::os::fd::raw::*; diff --git a/library/std/src/os/unix/io/fd/tests.rs b/library/std/src/os/unix/io/tests.rs index 84d2a7a1a..84d2a7a1a 100644 --- a/library/std/src/os/unix/io/fd/tests.rs +++ b/library/std/src/os/unix/io/tests.rs diff --git a/library/std/src/os/unix/mod.rs b/library/std/src/os/unix/mod.rs index 411cc0925..f97fa0fb0 100644 --- a/library/std/src/os/unix/mod.rs +++ b/library/std/src/os/unix/mod.rs @@ -73,6 +73,8 @@ mod platform { pub use crate::os::solaris::*; #[cfg(target_os = "vxworks")] pub use crate::os::vxworks::*; + #[cfg(target_os = "watchos")] + pub use crate::os::watchos::*; } pub mod ffi; diff --git a/library/std/src/os/wasi/io/fd.rs b/library/std/src/os/wasi/io/fd.rs index 75703af6a..930aca887 100644 --- a/library/std/src/os/wasi/io/fd.rs +++ b/library/std/src/os/wasi/io/fd.rs @@ -1,10 +1,9 @@ //! Owned and borrowed file descriptors. 
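
Editor's note: the `FromRawFd`/`IntoRawFd` doc examples above were consolidated to the single `std::os::fd` import path. The round trip they describe looks like the following sketch (not the doc test itself; `roundtrip` is an illustrative name):

```rust
// Round-trip a File through its raw descriptor, as the updated examples describe.
#[cfg(any(unix, target_os = "wasi"))]
fn roundtrip(f: std::fs::File) -> std::fs::File {
    use std::os::fd::{FromRawFd, IntoRawFd};
    let raw = f.into_raw_fd(); // gives up ownership of the descriptor
    // SAFETY: `raw` was just returned by `into_raw_fd`, so it is open and owned here.
    unsafe { std::fs::File::from_raw_fd(raw) }
}
```
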
-#![stable(feature = "io_safety_wasi", since = "1.65.0")] +#![unstable(feature = "wasi_ext", issue = "71213")] // Tests for this module #[cfg(test)] mod tests; -#[stable(feature = "io_safety_wasi", since = "1.65.0")] pub use crate::os::fd::owned::*; diff --git a/library/std/src/os/wasi/io/mod.rs b/library/std/src/os/wasi/io/mod.rs index 4f5cfbf9a..57bd842a5 100644 --- a/library/std/src/os/wasi/io/mod.rs +++ b/library/std/src/os/wasi/io/mod.rs @@ -1,12 +1,6 @@ //! WASI-specific extensions to general I/O primitives. -#![deny(unsafe_op_in_unsafe_fn)] -#![stable(feature = "io_safety_wasi", since = "1.65.0")] +#![stable(feature = "io_safety", since = "1.63.0")] -mod fd; -mod raw; - -#[stable(feature = "io_safety_wasi", since = "1.65.0")] -pub use fd::*; -#[stable(feature = "io_safety_wasi", since = "1.65.0")] -pub use raw::*; +#[stable(feature = "io_safety", since = "1.63.0")] +pub use crate::os::fd::*; diff --git a/library/std/src/os/wasi/io/raw.rs b/library/std/src/os/wasi/io/raw.rs index 4ac792ee8..da3b36ada 100644 --- a/library/std/src/os/wasi/io/raw.rs +++ b/library/std/src/os/wasi/io/raw.rs @@ -1,6 +1,20 @@ //! WASI-specific extensions to general I/O primitives. -#![stable(feature = "io_safety_wasi", since = "1.65.0")] +#![unstable(feature = "wasi_ext", issue = "71213")] -#[stable(feature = "io_safety_wasi", since = "1.65.0")] +// NOTE: despite the fact that this module is unstable, +// stable Rust had the capability to access the stable +// re-exported items from os::fd::raw through this +// unstable module. +// In PR #95956 the stability checker was changed to check +// all path segments of an item rather than just the last, +// which caused the aforementioned stable usage to regress +// (see issue #99502). +// As a result, the items in os::fd::raw were given the +// rustc_allowed_through_unstable_modules attribute. +// No regression tests were added to ensure this property, +// as CI is not configured to test wasm32-wasi. +// If this module is stabilized, +// you may want to remove those attributes +// (assuming no other unstable modules need them). pub use crate::os::fd::raw::*; diff --git a/library/std/src/os/watchos/fs.rs b/library/std/src/os/watchos/fs.rs new file mode 100644 index 000000000..a14fe35a7 --- /dev/null +++ b/library/std/src/os/watchos/fs.rs @@ -0,0 +1,142 @@ +#![stable(feature = "metadata_ext", since = "1.1.0")] + +use crate::fs::Metadata; +use crate::sys_common::AsInner; + +#[allow(deprecated)] +use crate::os::watchos::raw; + +/// OS-specific extensions to [`fs::Metadata`]. +/// +/// [`fs::Metadata`]: crate::fs::Metadata +#[stable(feature = "metadata_ext", since = "1.1.0")] +pub trait MetadataExt { + /// Gain a reference to the underlying `stat` structure which contains + /// the raw information returned by the OS. + /// + /// The contents of the returned `stat` are **not** consistent across + /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the + /// cross-Unix abstractions contained within the raw stat. 
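
Editor's note: the new watchOS `MetadataExt` above mirrors the other Apple targets; for portable code its doc comment points at the cross-Unix `std::os::unix::fs::MetadataExt` accessors instead. A small Unix-only sketch using those portable accessors; `inode_and_mode` is an illustrative helper, not part of the patch:

```rust
#[cfg(unix)]
fn inode_and_mode(path: &std::path::Path) -> std::io::Result<(u64, u32)> {
    use std::os::unix::fs::MetadataExt; // the cross-Unix accessors referenced above
    let meta = std::fs::metadata(path)?;
    Ok((meta.ino(), meta.mode()))
}
```
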
+ #[stable(feature = "metadata_ext", since = "1.1.0")] + #[deprecated( + since = "1.8.0", + note = "deprecated in favor of the accessor \ + methods of this trait" + )] + #[allow(deprecated)] + fn as_raw_stat(&self) -> &raw::stat; + + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_dev(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_ino(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_mode(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_nlink(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_uid(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_gid(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_rdev(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_size(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_atime(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_atime_nsec(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_mtime(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_mtime_nsec(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_ctime(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_ctime_nsec(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_birthtime(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_birthtime_nsec(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_blksize(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_blocks(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_flags(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_gen(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_lspare(&self) -> u32; +} + +#[stable(feature = "metadata_ext", since = "1.1.0")] +impl MetadataExt for Metadata { + #[allow(deprecated)] + fn as_raw_stat(&self) -> &raw::stat { + unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) } + } + fn st_dev(&self) -> u64 { + self.as_inner().as_inner().st_dev as u64 + } + fn st_ino(&self) -> u64 { + self.as_inner().as_inner().st_ino as u64 + } + fn st_mode(&self) -> u32 { + self.as_inner().as_inner().st_mode as u32 + } + fn st_nlink(&self) -> u64 { + self.as_inner().as_inner().st_nlink as u64 + } + fn st_uid(&self) -> u32 { + self.as_inner().as_inner().st_uid as u32 + } + fn st_gid(&self) -> u32 { + self.as_inner().as_inner().st_gid as u32 + } + fn st_rdev(&self) -> u64 { + self.as_inner().as_inner().st_rdev as u64 + } + fn st_size(&self) -> u64 { + self.as_inner().as_inner().st_size as u64 + } + fn st_atime(&self) -> i64 { + self.as_inner().as_inner().st_atime as i64 + } + fn st_atime_nsec(&self) -> i64 { + self.as_inner().as_inner().st_atime_nsec as i64 + } + fn st_mtime(&self) -> i64 { + self.as_inner().as_inner().st_mtime as i64 + } + fn st_mtime_nsec(&self) -> i64 { + self.as_inner().as_inner().st_mtime_nsec as i64 + } + fn st_ctime(&self) -> i64 { + self.as_inner().as_inner().st_ctime as i64 + } + fn st_ctime_nsec(&self) -> i64 { + self.as_inner().as_inner().st_ctime_nsec as i64 + } + fn st_birthtime(&self) -> i64 { + self.as_inner().as_inner().st_birthtime as i64 + } + fn st_birthtime_nsec(&self) -> i64 { + 
self.as_inner().as_inner().st_birthtime_nsec as i64 + } + fn st_blksize(&self) -> u64 { + self.as_inner().as_inner().st_blksize as u64 + } + fn st_blocks(&self) -> u64 { + self.as_inner().as_inner().st_blocks as u64 + } + fn st_gen(&self) -> u32 { + self.as_inner().as_inner().st_gen as u32 + } + fn st_flags(&self) -> u32 { + self.as_inner().as_inner().st_flags as u32 + } + fn st_lspare(&self) -> u32 { + self.as_inner().as_inner().st_lspare as u32 + } +} diff --git a/library/std/src/os/watchos/mod.rs b/library/std/src/os/watchos/mod.rs new file mode 100644 index 000000000..cd6454ebb --- /dev/null +++ b/library/std/src/os/watchos/mod.rs @@ -0,0 +1,6 @@ +//! watchOS-specific definitions + +#![stable(feature = "raw_ext", since = "1.1.0")] + +pub mod fs; +pub mod raw; diff --git a/library/std/src/os/watchos/raw.rs b/library/std/src/os/watchos/raw.rs new file mode 100644 index 000000000..630a533d9 --- /dev/null +++ b/library/std/src/os/watchos/raw.rs @@ -0,0 +1,83 @@ +//! watchOS-specific raw type definitions + +#![stable(feature = "raw_ext", since = "1.1.0")] +#![deprecated( + since = "1.8.0", + note = "these type aliases are no longer supported by \ + the standard library, the `libc` crate on \ + crates.io should be used instead for the correct \ + definitions" +)] +#![allow(deprecated)] + +use crate::os::raw::c_long; + +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type blkcnt_t = u64; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type blksize_t = u64; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type dev_t = u64; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type ino_t = u64; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type mode_t = u32; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type nlink_t = u64; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type off_t = u64; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type time_t = i64; + +#[stable(feature = "pthread_t", since = "1.8.0")] +pub type pthread_t = usize; + +#[repr(C)] +#[derive(Clone)] +#[stable(feature = "raw_ext", since = "1.1.0")] +pub struct stat { + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_dev: i32, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_mode: u16, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_nlink: u16, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_ino: u64, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_uid: u32, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_gid: u32, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_rdev: i32, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_atime: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_atime_nsec: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_mtime: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_mtime_nsec: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_ctime: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_ctime_nsec: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_birthtime: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_birthtime_nsec: c_long, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_size: i64, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_blocks: i64, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_blksize: i32, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_flags: u32, + #[stable(feature = "raw_ext", since = "1.1.0")] + 
pub st_gen: u32, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_lspare: i32, + #[stable(feature = "raw_ext", since = "1.1.0")] + pub st_qspare: [i64; 2], +} diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs index 16cc8fa27..1dfecc573 100644 --- a/library/std/src/os/windows/io/handle.rs +++ b/library/std/src/os/windows/io/handle.rs @@ -384,6 +384,23 @@ impl fmt::Debug for OwnedHandle { } } +macro_rules! impl_is_terminal { + ($($t:ty),*$(,)?) => {$( + #[unstable(feature = "sealed", issue = "none")] + impl crate::sealed::Sealed for $t {} + + #[unstable(feature = "is_terminal", issue = "98070")] + impl crate::io::IsTerminal for $t { + #[inline] + fn is_terminal(&self) -> bool { + crate::sys::io::is_terminal(self) + } + } + )*} +} + +impl_is_terminal!(BorrowedHandle<'_>, OwnedHandle); + /// A trait to borrow the handle from an underlying object. #[stable(feature = "io_safety", since = "1.63.0")] pub trait AsHandle { diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index 25c9201f2..d4976a469 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -18,9 +18,9 @@ use crate::intrinsics; use crate::mem::{self, ManuallyDrop}; use crate::process; use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::{PoisonError, RwLock}; use crate::sys::stdio::panic_output; use crate::sys_common::backtrace; -use crate::sys_common::rwlock::StaticRwLock; use crate::sys_common::thread_info; use crate::thread; @@ -71,20 +71,29 @@ extern "C" fn __rust_foreign_exception() -> ! { rtabort!("Rust cannot catch foreign exceptions"); } -#[derive(Copy, Clone)] enum Hook { Default, - Custom(*mut (dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send)), + Custom(Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>), } impl Hook { - fn custom(f: impl Fn(&PanicInfo<'_>) + 'static + Sync + Send) -> Self { - Self::Custom(Box::into_raw(Box::new(f))) + #[inline] + fn into_box(self) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> { + match self { + Hook::Default => Box::new(default_hook), + Hook::Custom(hook) => hook, + } + } +} + +impl Default for Hook { + #[inline] + fn default() -> Hook { + Hook::Default } } -static HOOK_LOCK: StaticRwLock = StaticRwLock::new(); -static mut HOOK: Hook = Hook::Default; +static HOOK: RwLock<Hook> = RwLock::new(Hook::Default); /// Registers a custom panic hook, replacing any that was previously registered. /// @@ -125,24 +134,13 @@ pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) { panic!("cannot modify the panic hook from a panicking thread"); } - // SAFETY: - // - // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`. - // - The argument of `Box::from_raw` is always a valid pointer that was created using - // `Box::into_raw`. - unsafe { - let guard = HOOK_LOCK.write(); - let old_hook = HOOK; - HOOK = Hook::Custom(Box::into_raw(hook)); - drop(guard); - - if let Hook::Custom(ptr) = old_hook { - #[allow(unused_must_use)] - { - Box::from_raw(ptr); - } - } - } + let new = Hook::Custom(hook); + let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner); + let old = mem::replace(&mut *hook, new); + drop(hook); + // Only drop the old hook after releasing the lock to avoid deadlocking + // if its destructor panics. + drop(old); } /// Unregisters the current panic hook, returning it. 
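
Editor's note: the `panicking.rs` hunk above replaces the `static mut HOOK` plus `StaticRwLock` pair with a single `RwLock<Hook>` holding a boxed closure, removing the raw-pointer juggling; the public API is unchanged. A quick sketch of that unchanged surface (illustrative, not taken from the patch):

```rust
use std::panic;

fn main() {
    // Install a custom hook; internally this now goes through `HOOK.write()`.
    panic::set_hook(Box::new(|info| eprintln!("custom panic hook: {info}")));

    let _ = panic::catch_unwind(|| panic!("boom"));

    // Remove it again; `take_hook` hands back the boxed closure
    // (or a box around the default hook).
    let _previous = panic::take_hook();
}
```
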
@@ -179,22 +177,11 @@ pub fn take_hook() -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> { panic!("cannot modify the panic hook from a panicking thread"); } - // SAFETY: - // - // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`. - // - The argument of `Box::from_raw` is always a valid pointer that was created using - // `Box::into_raw`. - unsafe { - let guard = HOOK_LOCK.write(); - let hook = HOOK; - HOOK = Hook::Default; - drop(guard); + let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner); + let old_hook = mem::take(&mut *hook); + drop(hook); - match hook { - Hook::Default => Box::new(default_hook), - Hook::Custom(ptr) => Box::from_raw(ptr), - } - } + old_hook.into_box() } /// Atomic combination of [`take_hook`] and [`set_hook`]. Use this to replace the panic handler with @@ -240,24 +227,9 @@ where panic!("cannot modify the panic hook from a panicking thread"); } - // SAFETY: - // - // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`. - // - The argument of `Box::from_raw` is always a valid pointer that was created using - // `Box::into_raw`. - unsafe { - let guard = HOOK_LOCK.write(); - let old_hook = HOOK; - HOOK = Hook::Default; - - let prev = match old_hook { - Hook::Default => Box::new(default_hook), - Hook::Custom(ptr) => Box::from_raw(ptr), - }; - - HOOK = Hook::custom(move |info| hook_fn(&prev, info)); - drop(guard); - } + let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner); + let prev = mem::take(&mut *hook).into_box(); + *hook = Hook::Custom(Box::new(move |info| hook_fn(&prev, info))); } fn default_hook(info: &PanicInfo<'_>) { @@ -328,7 +300,7 @@ pub mod panic_count { thread_local! { static LOCAL_PANIC_COUNT: Cell<usize> = const { Cell::new(0) } } // Sum of panic counts from all threads. The purpose of this is to have - // a fast path in `is_zero` (which is used by `panicking`). In any particular + // a fast path in `count_is_zero` (which is used by `panicking`). In any particular // thread, if that thread currently views `GLOBAL_PANIC_COUNT` as being zero, // then `LOCAL_PANIC_COUNT` in that thread is zero. This invariant holds before // and after increase and decrease, but not necessarily during their execution. @@ -336,6 +308,14 @@ pub mod panic_count { // Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG) // records whether panic::always_abort() has been called. This can only be // set, never cleared. + // panic::always_abort() is usually called to prevent memory allocations done by + // the panic handling in the child created by `libc::fork`. + // Memory allocations performed in a child created with `libc::fork` are undefined + // behavior in most operating systems. + // Accessing LOCAL_PANIC_COUNT in a child created by `libc::fork` would lead to a memory + // allocation. Only GLOBAL_PANIC_COUNT can be accessed in this situation. This is + // sufficient because a child process will always have exactly one thread only. + // See also #85261 for details. // // This could be viewed as a struct containing a single bit and an n-1-bit // value, but if we wrote it like that it would be more than a single word, @@ -346,15 +326,26 @@ pub mod panic_count { // panicking thread consumes at least 2 bytes of address space. static GLOBAL_PANIC_COUNT: AtomicUsize = AtomicUsize::new(0); + // Return the state of the ALWAYS_ABORT_FLAG and number of panics. + // + // If ALWAYS_ABORT_FLAG is not set, the number is determined on a per-thread + // base (stored in LOCAL_PANIC_COUNT), i.e. 
it is the amount of recursive calls + // of the calling thread. + // If ALWAYS_ABORT_FLAG is set, the number equals the *global* number of panic + // calls. See above why LOCAL_PANIC_COUNT is not used. pub fn increase() -> (bool, usize) { - ( - GLOBAL_PANIC_COUNT.fetch_add(1, Ordering::Relaxed) & ALWAYS_ABORT_FLAG != 0, + let global_count = GLOBAL_PANIC_COUNT.fetch_add(1, Ordering::Relaxed); + let must_abort = global_count & ALWAYS_ABORT_FLAG != 0; + let panics = if must_abort { + global_count & !ALWAYS_ABORT_FLAG + } else { LOCAL_PANIC_COUNT.with(|c| { let next = c.get() + 1; c.set(next); next - }), - ) + }) + }; + (must_abort, panics) } pub fn decrease() { @@ -397,7 +388,7 @@ pub mod panic_count { } // Slow path is in a separate function to reduce the amount of code - // inlined from `is_zero`. + // inlined from `count_is_zero`. #[inline(never)] #[cold] fn is_zero_slow_path() -> bool { @@ -682,27 +673,26 @@ fn rust_panic_with_hook( crate::sys::abort_internal(); } - unsafe { - let mut info = PanicInfo::internal_constructor(message, location, can_unwind); - let _guard = HOOK_LOCK.read(); - match HOOK { - // Some platforms (like wasm) know that printing to stderr won't ever actually - // print anything, and if that's the case we can skip the default - // hook. Since string formatting happens lazily when calling `payload` - // methods, this means we avoid formatting the string at all! - // (The panic runtime might still call `payload.take_box()` though and trigger - // formatting.) - Hook::Default if panic_output().is_none() => {} - Hook::Default => { - info.set_payload(payload.get()); - default_hook(&info); - } - Hook::Custom(ptr) => { - info.set_payload(payload.get()); - (*ptr)(&info); - } - }; - } + let mut info = PanicInfo::internal_constructor(message, location, can_unwind); + let hook = HOOK.read().unwrap_or_else(PoisonError::into_inner); + match *hook { + // Some platforms (like wasm) know that printing to stderr won't ever actually + // print anything, and if that's the case we can skip the default + // hook. Since string formatting happens lazily when calling `payload` + // methods, this means we avoid formatting the string at all! + // (The panic runtime might still call `payload.take_box()` though and trigger + // formatting.) + Hook::Default if panic_output().is_none() => {} + Hook::Default => { + info.set_payload(payload.get()); + default_hook(&info); + } + Hook::Custom(ref hook) => { + info.set_payload(payload.get()); + hook(&info); + } + }; + drop(hook); if panics > 1 || !can_unwind { // If a thread panics while it's already unwinding then we diff --git a/library/std/src/path.rs b/library/std/src/path.rs index 5dfeb517a..9d6328162 100644 --- a/library/std/src/path.rs +++ b/library/std/src/path.rs @@ -2158,6 +2158,7 @@ impl Path { /// assert_eq!(grand_parent.parent(), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[doc(alias = "dirname")] #[must_use] pub fn parent(&self) -> Option<&Path> { let mut comps = self.components(); @@ -2225,6 +2226,7 @@ impl Path { /// assert_eq!(None, Path::new("/").file_name()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[doc(alias = "basename")] #[must_use] pub fn file_name(&self) -> Option<&OsStr> { self.components().next_back().and_then(|p| match p { @@ -2401,7 +2403,7 @@ impl Path { self.file_name().map(split_file_at_dot).and_then(|(before, _after)| Some(before)) } - /// Extracts the extension of [`self.file_name`], if possible. 
+ /// Extracts the extension (without the leading dot) of [`self.file_name`], if possible. /// /// The extension is: /// diff --git a/library/std/src/personality/dwarf/eh.rs b/library/std/src/personality/dwarf/eh.rs index 8799137b7..27b50c13b 100644 --- a/library/std/src/personality/dwarf/eh.rs +++ b/library/std/src/personality/dwarf/eh.rs @@ -98,9 +98,8 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result } } } - // Ip is not present in the table. This should not happen... but it does: issue #35011. - // So rather than returning EHAction::Terminate, we do this. - Ok(EHAction::None) + // Ip is not present in the table. This indicates a nounwind call. + Ok(EHAction::Terminate) } else { // SjLj version: // The "IP" is an index into the call-site table, with two exceptions: diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs index 242f44ade..331714a99 100644 --- a/library/std/src/primitive_docs.rs +++ b/library/std/src/primitive_docs.rs @@ -611,7 +611,19 @@ mod prim_pointer {} /// /// Arrays coerce to [slices (`[T]`)][slice], so a slice method may be called on /// an array. Indeed, this provides most of the API for working with arrays. -/// Slices have a dynamic size and do not coerce to arrays. +/// +/// Slices have a dynamic size and do not coerce to arrays. Instead, use +/// `slice.try_into().unwrap()` or `<ArrayType>::try_from(slice).unwrap()`. +/// +/// Array's `try_from(slice)` implementations (and the corresponding `slice.try_into()` +/// array implementations) succeed if the input slice length is the same as the result +/// array length. They optimize especially well when the optimizer can easily determine +/// the slice length, e.g. `<[u8; 4]>::try_from(&slice[4..8]).unwrap()`. Array implements +/// [TryFrom](crate::convert::TryFrom) returning: +/// +/// - `[T; N]` copies from the slice's elements +/// - `&[T; N]` references the original slice's elements +/// - `&mut [T; N]` references the original slice's elements /// /// You can move elements out of an array with a [slice pattern]. If you want /// one element, see [`mem::replace`]. @@ -640,6 +652,15 @@ mod prim_pointer {} /// for x in &array { } /// ``` /// +/// You can use `<ArrayType>::try_from(slice)` or `slice.try_into()` to get an array from +/// a slice: +/// +/// ``` +/// let bytes: [u8; 3] = [1, 0, 2]; +/// assert_eq!(1, u16::from_le_bytes(<[u8; 2]>::try_from(&bytes[0..2]).unwrap())); +/// assert_eq!(512, u16::from_le_bytes(bytes[1..3].try_into().unwrap())); +/// ``` +/// /// You can use a [slice pattern] to move elements out of an array: /// /// ``` diff --git a/library/std/src/process.rs b/library/std/src/process.rs index d91d4fa64..400d25beb 100644 --- a/library/std/src/process.rs +++ b/library/std/src/process.rs @@ -1629,7 +1629,7 @@ impl ExitStatusError { /// /// This is exactly like [`code()`](Self::code), except that it returns a `NonZeroI32`. /// - /// Plain `code`, returning a plain integer, is provided because is is often more convenient. + /// Plain `code`, returning a plain integer, is provided because it is often more convenient. /// The returned value from `code()` is indeed also nonzero; use `code_nonzero()` when you want /// a type-level guarantee of nonzeroness. 
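
Editor's note: referring back to the expanded array documentation in the `primitive_docs.rs` hunk earlier in this patch, the three `TryFrom` forms it lists can be exercised as in the standalone sketch below (stable APIs; the variable names are illustrative):

```rust
use std::convert::TryInto;

fn main() {
    let mut data = [1u8, 2, 3, 4, 5];

    // `[T; N]`: copies the slice's elements.
    let pair: [u8; 2] = data[1..3].try_into().unwrap();
    assert_eq!(pair, [2, 3]);

    // `&[T; N]`: borrows the original elements.
    let head: &[u8; 3] = (&data[..3]).try_into().unwrap();
    assert_eq!(head, &[1, 2, 3]);

    // `&mut [T; N]`: mutably borrows the original elements.
    let tail: &mut [u8; 2] = (&mut data[3..]).try_into().unwrap();
    tail[0] = 40;
    assert_eq!(data, [1, 2, 3, 40, 5]);
}
```
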
/// @@ -2154,8 +2154,16 @@ pub fn id() -> u32 { #[cfg_attr(not(test), lang = "termination")] #[stable(feature = "termination_trait_lib", since = "1.61.0")] #[rustc_on_unimplemented( - message = "`main` has invalid return type `{Self}`", - label = "`main` can only return types that implement `{Termination}`" + on( + all(not(bootstrap), cause = "MainFunctionType"), + message = "`main` has invalid return type `{Self}`", + label = "`main` can only return types that implement `{Termination}`" + ), + on( + bootstrap, + message = "`main` has invalid return type `{Self}`", + label = "`main` can only return types that implement `{Termination}`" + ) )] pub trait Termination { /// Is called to get the representation of the value as status code. @@ -2200,9 +2208,7 @@ impl<T: Termination, E: fmt::Debug> Termination for Result<T, E> { match self { Ok(val) => val.report(), Err(err) => { - // Ignore error if the write fails, for example because stderr is - // already closed. There is not much point panicking at this point. - let _ = writeln!(io::stderr(), "Error: {err:?}"); + io::attempt_print_to_stderr(format_args_nl!("Error: {err:?}")); ExitCode::FAILURE } } diff --git a/library/std/src/rt.rs b/library/std/src/rt.rs index 98f6cc7aa..9c2f0c1dd 100644 --- a/library/std/src/rt.rs +++ b/library/std/src/rt.rs @@ -89,7 +89,7 @@ macro_rules! rtunwrap { // `src/tools/tidy/src/pal.rs` for more info. On all other platforms, `sigpipe` // has a value, but its value is ignored. // -// Even though it is an `u8`, it only ever has 3 values. These are documented in +// Even though it is an `u8`, it only ever has 4 values. These are documented in // `compiler/rustc_session/src/config/sigpipe.rs`. #[cfg_attr(test, allow(dead_code))] unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { @@ -160,15 +160,12 @@ fn lang_start<T: crate::process::Termination + 'static>( main: fn() -> T, argc: isize, argv: *const *const u8, - #[cfg(not(bootstrap))] sigpipe: u8, + sigpipe: u8, ) -> isize { let Ok(v) = lang_start_internal( &move || crate::sys_common::backtrace::__rust_begin_short_backtrace(main).report().to_i32(), argc, argv, - #[cfg(bootstrap)] - 2, // Temporary inlining of sigpipe::DEFAULT until bootstrap stops being special - #[cfg(not(bootstrap))] sigpipe, ); v diff --git a/library/std/src/sync/mpsc/stream.rs b/library/std/src/sync/mpsc/stream.rs index 4c3812c79..4592e9141 100644 --- a/library/std/src/sync/mpsc/stream.rs +++ b/library/std/src/sync/mpsc/stream.rs @@ -114,7 +114,7 @@ impl<T> Packet<T> { match self.queue.producer_addition().cnt.fetch_add(1, Ordering::SeqCst) { // As described in the mod's doc comment, -1 == wakeup -1 => UpWoke(self.take_to_wake()), - // As as described before, SPSC queues must be >= -2 + // As described before, SPSC queues must be >= -2 -2 => UpSuccess, // Be sure to preserve the disconnected state, and the return value diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs index a7feea588..0f25417d6 100644 --- a/library/std/src/sync/once.rs +++ b/library/std/src/sync/once.rs @@ -3,99 +3,12 @@ //! This primitive is meant to be used to run one-time initialization. An //! example use case would be for initializing an FFI library. -// A "once" is a relatively simple primitive, and it's also typically provided -// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS -// primitives, however, tend to have surprising restrictions, such as the Unix -// one doesn't allow an argument to be passed to the function. 
-// -// As a result, we end up implementing it ourselves in the standard library. -// This also gives us the opportunity to optimize the implementation a bit which -// should help the fast path on call sites. Consequently, let's explain how this -// primitive works now! -// -// So to recap, the guarantees of a Once are that it will call the -// initialization closure at most once, and it will never return until the one -// that's running has finished running. This means that we need some form of -// blocking here while the custom callback is running at the very least. -// Additionally, we add on the restriction of **poisoning**. Whenever an -// initialization closure panics, the Once enters a "poisoned" state which means -// that all future calls will immediately panic as well. -// -// So to implement this, one might first reach for a `Mutex`, but those cannot -// be put into a `static`. It also gets a lot harder with poisoning to figure -// out when the mutex needs to be deallocated because it's not after the closure -// finishes, but after the first successful closure finishes. -// -// All in all, this is instead implemented with atomics and lock-free -// operations! Whee! Each `Once` has one word of atomic state, and this state is -// CAS'd on to determine what to do. There are four possible state of a `Once`: -// -// * Incomplete - no initialization has run yet, and no thread is currently -// using the Once. -// * Poisoned - some thread has previously attempted to initialize the Once, but -// it panicked, so the Once is now poisoned. There are no other -// threads currently accessing this Once. -// * Running - some thread is currently attempting to run initialization. It may -// succeed, so all future threads need to wait for it to finish. -// Note that this state is accompanied with a payload, described -// below. -// * Complete - initialization has completed and all future calls should finish -// immediately. -// -// With 4 states we need 2 bits to encode this, and we use the remaining bits -// in the word we have allocated as a queue of threads waiting for the thread -// responsible for entering the RUNNING state. This queue is just a linked list -// of Waiter nodes which is monotonically increasing in size. Each node is -// allocated on the stack, and whenever the running closure finishes it will -// consume the entire queue and notify all waiters they should try again. -// -// You'll find a few more details in the implementation, but that's the gist of -// it! -// -// Atomic orderings: -// When running `Once` we deal with multiple atomics: -// `Once.state_and_queue` and an unknown number of `Waiter.signaled`. -// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the -// result of the `Once`, and (3) for synchronizing `Waiter` nodes. -// - At the end of the `call_inner` function we have to make sure the result -// of the `Once` is acquired. So every load which can be the only one to -// load COMPLETED must have at least Acquire ordering, which means all -// three of them. -// - `WaiterQueue::Drop` is the only place that may store COMPLETED, and -// must do so with Release ordering to make the result available. -// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and -// needs to make the nodes available with Release ordering. The load in -// its `compare_exchange` can be Relaxed because it only has to compare -// the atomic, not to read other data. 
-// - `WaiterQueue::Drop` must see the `Waiter` nodes, so it must load -// `state_and_queue` with Acquire ordering. -// - There is just one store where `state_and_queue` is used only as a -// state flag, without having to synchronize data: switching the state -// from INCOMPLETE to RUNNING in `call_inner`. This store can be Relaxed, -// but the read has to be Acquire because of the requirements mentioned -// above. -// * `Waiter.signaled` is both used as a flag, and to protect a field with -// interior mutability in `Waiter`. `Waiter.thread` is changed in -// `WaiterQueue::Drop` which then sets `signaled` with Release ordering. -// After `wait` loads `signaled` with Acquire and sees it is true, it needs to -// see the changes to drop the `Waiter` struct correctly. -// * There is one place where the two atomics `Once.state_and_queue` and -// `Waiter.signaled` come together, and might be reordered by the compiler or -// processor. Because both use Acquire ordering such a reordering is not -// allowed, so no need for SeqCst. - #[cfg(all(test, not(target_os = "emscripten")))] mod tests; -use crate::cell::Cell; use crate::fmt; -use crate::marker; use crate::panic::{RefUnwindSafe, UnwindSafe}; -use crate::ptr; -use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; -use crate::thread::{self, Thread}; - -type Masked = (); +use crate::sys_common::once as sys; /// A synchronization primitive which can be used to run a one-time global /// initialization. Useful for one-time initialization for FFI or related @@ -114,19 +27,9 @@ type Masked = (); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct Once { - // `state_and_queue` is actually a pointer to a `Waiter` with extra state - // bits, so we add the `PhantomData` appropriately. - state_and_queue: AtomicPtr<Masked>, - _marker: marker::PhantomData<*const Waiter>, + inner: sys::Once, } -// The `PhantomData` of a raw pointer removes these two auto traits, but we -// enforce both below in the implementation so this should be safe to add. -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Sync for Once {} -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Send for Once {} - #[stable(feature = "sync_once_unwind_safe", since = "1.59.0")] impl UnwindSafe for Once {} @@ -136,10 +39,8 @@ impl RefUnwindSafe for Once {} /// State yielded to [`Once::call_once_force()`]’s closure parameter. The state /// can be used to query the poison status of the [`Once`]. #[stable(feature = "once_poison", since = "1.51.0")] -#[derive(Debug)] pub struct OnceState { - poisoned: bool, - set_state_on_drop_to: Cell<*mut Masked>, + pub(crate) inner: sys::OnceState, } /// Initialization value for static [`Once`] values. @@ -159,38 +60,6 @@ pub struct OnceState { )] pub const ONCE_INIT: Once = Once::new(); -// Four states that a Once can be in, encoded into the lower bits of -// `state_and_queue` in the Once structure. -const INCOMPLETE: usize = 0x0; -const POISONED: usize = 0x1; -const RUNNING: usize = 0x2; -const COMPLETE: usize = 0x3; - -// Mask to learn about the state. All other bits are the queue of waiters if -// this is in the RUNNING state. -const STATE_MASK: usize = 0x3; - -// Representation of a node in the linked list of waiters, used while in the -// RUNNING state. -// Note: `Waiter` can't hold a mutable pointer to the next thread, because then -// `wait` would both hand out a mutable reference to its `Waiter` node, and keep -// a shared reference to check `signaled`. Instead we hold shared references and -// use interior mutability. 
-#[repr(align(4))] // Ensure the two lower bits are free to use as state bits. -struct Waiter { - thread: Cell<Option<Thread>>, - signaled: AtomicBool, - next: *const Waiter, -} - -// Head of a linked list of waiters. -// Every node is a struct on the stack of a waiting thread. -// Will wake up the waiters when it gets dropped, i.e. also on panic. -struct WaiterQueue<'a> { - state_and_queue: &'a AtomicPtr<Masked>, - set_state_on_drop_to: *mut Masked, -} - impl Once { /// Creates a new `Once` value. #[inline] @@ -198,10 +67,7 @@ impl Once { #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] #[must_use] pub const fn new() -> Once { - Once { - state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)), - _marker: marker::PhantomData, - } + Once { inner: sys::Once::new() } } /// Performs an initialization routine once and only once. The given closure @@ -261,6 +127,7 @@ impl Once { /// This is similar to [poisoning with mutexes][poison]. /// /// [poison]: struct.Mutex.html#poisoning + #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[track_caller] pub fn call_once<F>(&self, f: F) @@ -268,12 +135,12 @@ impl Once { F: FnOnce(), { // Fast path check - if self.is_completed() { + if self.inner.is_completed() { return; } let mut f = Some(f); - self.call_inner(false, &mut |_| f.take().unwrap()()); + self.inner.call(false, &mut |_| f.take().unwrap()()); } /// Performs the same function as [`call_once()`] except ignores poisoning. @@ -320,18 +187,19 @@ impl Once { /// // once any success happens, we stop propagating the poison /// INIT.call_once(|| {}); /// ``` + #[inline] #[stable(feature = "once_poison", since = "1.51.0")] pub fn call_once_force<F>(&self, f: F) where F: FnOnce(&OnceState), { // Fast path check - if self.is_completed() { + if self.inner.is_completed() { return; } let mut f = Some(f); - self.call_inner(true, &mut |p| f.take().unwrap()(p)); + self.inner.call(true, &mut |p| f.take().unwrap()(p)); } /// Returns `true` if some [`call_once()`] call has completed @@ -378,119 +246,7 @@ impl Once { #[stable(feature = "once_is_completed", since = "1.43.0")] #[inline] pub fn is_completed(&self) -> bool { - // An `Acquire` load is enough because that makes all the initialization - // operations visible to us, and, this being a fast path, weaker - // ordering helps with performance. This `Acquire` synchronizes with - // `Release` operations on the slow path. - self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE - } - - // This is a non-generic function to reduce the monomorphization cost of - // using `call_once` (this isn't exactly a trivial or small implementation). - // - // Additionally, this is tagged with `#[cold]` as it should indeed be cold - // and it helps let LLVM know that calls to this function should be off the - // fast path. Essentially, this should help generate more straight line code - // in LLVM. - // - // Finally, this takes an `FnMut` instead of a `FnOnce` because there's - // currently no way to take an `FnOnce` and call it via virtual dispatch - // without some allocation overhead. - #[cold] - #[track_caller] - fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) { - let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); - loop { - match state_and_queue.addr() { - COMPLETE => break, - POISONED if !ignore_poisoning => { - // Panic to propagate the poison. 
- panic!("Once instance has previously been poisoned"); - } - POISONED | INCOMPLETE => { - // Try to register this thread as the one RUNNING. - let exchange_result = self.state_and_queue.compare_exchange( - state_and_queue, - ptr::invalid_mut(RUNNING), - Ordering::Acquire, - Ordering::Acquire, - ); - if let Err(old) = exchange_result { - state_and_queue = old; - continue; - } - // `waiter_queue` will manage other waiting threads, and - // wake them up on drop. - let mut waiter_queue = WaiterQueue { - state_and_queue: &self.state_and_queue, - set_state_on_drop_to: ptr::invalid_mut(POISONED), - }; - // Run the initialization function, letting it know if we're - // poisoned or not. - let init_state = OnceState { - poisoned: state_and_queue.addr() == POISONED, - set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)), - }; - init(&init_state); - waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get(); - break; - } - _ => { - // All other values must be RUNNING with possibly a - // pointer to the waiter queue in the more significant bits. - assert!(state_and_queue.addr() & STATE_MASK == RUNNING); - wait(&self.state_and_queue, state_and_queue); - state_and_queue = self.state_and_queue.load(Ordering::Acquire); - } - } - } - } -} - -fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) { - // Note: the following code was carefully written to avoid creating a - // mutable reference to `node` that gets aliased. - loop { - // Don't queue this thread if the status is no longer running, - // otherwise we will not be woken up. - if current_state.addr() & STATE_MASK != RUNNING { - return; - } - - // Create the node for our current thread. - let node = Waiter { - thread: Cell::new(Some(thread::current())), - signaled: AtomicBool::new(false), - next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter, - }; - let me = &node as *const Waiter as *const Masked as *mut Masked; - - // Try to slide in the node at the head of the linked list, making sure - // that another thread didn't just replace the head of the linked list. - let exchange_result = state_and_queue.compare_exchange( - current_state, - me.with_addr(me.addr() | RUNNING), - Ordering::Release, - Ordering::Relaxed, - ); - if let Err(old) = exchange_result { - current_state = old; - continue; - } - - // We have enqueued ourselves, now lets wait. - // It is important not to return before being signaled, otherwise we - // would drop our `Waiter` node and leave a hole in the linked list - // (and a dangling reference). Guard against spurious wakeups by - // reparking ourselves until we are signaled. - while !node.signaled.load(Ordering::Acquire) { - // If the managing thread happens to signal and unpark us before we - // can park ourselves, the result could be this thread never gets - // unparked. Luckily `park` comes with the guarantee that if it got - // an `unpark` just before on an unparked thread it does not park. - thread::park(); - } - break; + self.inner.is_completed() } } @@ -501,37 +257,6 @@ impl fmt::Debug for Once { } } -impl Drop for WaiterQueue<'_> { - fn drop(&mut self) { - // Swap out our state with however we finished. - let state_and_queue = - self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); - - // We should only ever see an old state which was RUNNING. - assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING); - - // Walk the entire linked list of waiters and wake them up (in lifo - // order, last to register is first to wake up). 
- unsafe { - // Right after setting `node.signaled = true` the other thread may - // free `node` if there happens to be has a spurious wakeup. - // So we have to take out the `thread` field and copy the pointer to - // `next` first. - let mut queue = - state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter; - while !queue.is_null() { - let next = (*queue).next; - let thread = (*queue).thread.take().unwrap(); - (*queue).signaled.store(true, Ordering::Release); - // ^- FIXME (maybe): This is another case of issue #55005 - // `store()` has a potentially dangling ref to `signaled`. - queue = next; - thread.unpark(); - } - } - } -} - impl OnceState { /// Returns `true` if the associated [`Once`] was poisoned prior to the /// invocation of the closure passed to [`Once::call_once_force()`]. @@ -568,13 +293,22 @@ impl OnceState { /// assert!(!state.is_poisoned()); /// }); #[stable(feature = "once_poison", since = "1.51.0")] + #[inline] pub fn is_poisoned(&self) -> bool { - self.poisoned + self.inner.is_poisoned() } /// Poison the associated [`Once`] without explicitly panicking. - // NOTE: This is currently only exposed for the `lazy` module + // NOTE: This is currently only exposed for `OnceLock`. + #[inline] pub(crate) fn poison(&self) { - self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED)); + self.inner.poison(); + } +} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl fmt::Debug for OnceState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OnceState").field("poisoned", &self.is_poisoned()).finish() } } diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs index 9ab781561..8b3877607 100644 --- a/library/std/src/sync/rwlock.rs +++ b/library/std/src/sync/rwlock.rs @@ -76,6 +76,7 @@ use crate::sys_common::rwlock as sys; /// /// [`Mutex`]: super::Mutex #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")] pub struct RwLock<T: ?Sized> { inner: sys::MovableRwLock, poison: poison::Flag, @@ -166,7 +167,7 @@ impl<T> RwLock<T> { } impl<T: ?Sized> RwLock<T> { - /// Locks this rwlock with shared read access, blocking the current thread + /// Locks this `RwLock` with shared read access, blocking the current thread /// until it can be acquired. /// /// The calling thread will be blocked until there are no more writers which @@ -180,9 +181,10 @@ impl<T: ?Sized> RwLock<T> { /// /// # Errors /// - /// This function will return an error if the RwLock is poisoned. An RwLock - /// is poisoned whenever a writer panics while holding an exclusive lock. - /// The failure will occur immediately after the lock has been acquired. + /// This function will return an error if the `RwLock` is poisoned. An + /// `RwLock` is poisoned whenever a writer panics while holding an exclusive + /// lock. The failure will occur immediately after the lock has been + /// acquired. /// /// # Panics /// @@ -214,7 +216,7 @@ impl<T: ?Sized> RwLock<T> { } } - /// Attempts to acquire this rwlock with shared read access. + /// Attempts to acquire this `RwLock` with shared read access. /// /// If the access could not be granted at this time, then `Err` is returned. /// Otherwise, an RAII guard is returned which will release the shared access @@ -227,13 +229,13 @@ impl<T: ?Sized> RwLock<T> { /// /// # Errors /// - /// This function will return the [`Poisoned`] error if the RwLock is poisoned. - /// An RwLock is poisoned whenever a writer panics while holding an exclusive - /// lock. 
`Poisoned` will only be returned if the lock would have otherwise been - /// acquired. + /// This function will return the [`Poisoned`] error if the `RwLock` is + /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding + /// an exclusive lock. `Poisoned` will only be returned if the lock would + /// have otherwise been acquired. /// - /// This function will return the [`WouldBlock`] error if the RwLock could not - /// be acquired because it was already locked exclusively. + /// This function will return the [`WouldBlock`] error if the `RwLock` could + /// not be acquired because it was already locked exclusively. /// /// [`Poisoned`]: TryLockError::Poisoned /// [`WouldBlock`]: TryLockError::WouldBlock @@ -262,20 +264,20 @@ impl<T: ?Sized> RwLock<T> { } } - /// Locks this rwlock with exclusive write access, blocking the current + /// Locks this `RwLock` with exclusive write access, blocking the current /// thread until it can be acquired. /// /// This function will not return while other writers or other readers /// currently have access to the lock. /// - /// Returns an RAII guard which will drop the write access of this rwlock + /// Returns an RAII guard which will drop the write access of this `RwLock` /// when dropped. /// /// # Errors /// - /// This function will return an error if the RwLock is poisoned. An RwLock - /// is poisoned whenever a writer panics while holding an exclusive lock. - /// An error will be returned when the lock is acquired. + /// This function will return an error if the `RwLock` is poisoned. An + /// `RwLock` is poisoned whenever a writer panics while holding an exclusive + /// lock. An error will be returned when the lock is acquired. /// /// # Panics /// @@ -302,7 +304,7 @@ impl<T: ?Sized> RwLock<T> { } } - /// Attempts to lock this rwlock with exclusive write access. + /// Attempts to lock this `RwLock` with exclusive write access. /// /// If the lock could not be acquired at this time, then `Err` is returned. /// Otherwise, an RAII guard is returned which will release the lock when @@ -315,13 +317,13 @@ impl<T: ?Sized> RwLock<T> { /// /// # Errors /// - /// This function will return the [`Poisoned`] error if the RwLock is - /// poisoned. An RwLock is poisoned whenever a writer panics while holding - /// an exclusive lock. `Poisoned` will only be returned if the lock would have - /// otherwise been acquired. + /// This function will return the [`Poisoned`] error if the `RwLock` is + /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding + /// an exclusive lock. `Poisoned` will only be returned if the lock would + /// have otherwise been acquired. /// - /// This function will return the [`WouldBlock`] error if the RwLock could not - /// be acquired because it was already locked exclusively. + /// This function will return the [`WouldBlock`] error if the `RwLock` could + /// not be acquired because it was already locked exclusively. /// /// [`Poisoned`]: TryLockError::Poisoned /// [`WouldBlock`]: TryLockError::WouldBlock @@ -421,10 +423,10 @@ impl<T: ?Sized> RwLock<T> { /// /// # Errors /// - /// This function will return an error if the RwLock is poisoned. An RwLock - /// is poisoned whenever a writer panics while holding an exclusive lock. An - /// error will only be returned if the lock would have otherwise been - /// acquired. + /// This function will return an error if the `RwLock` is poisoned. An + /// `RwLock` is poisoned whenever a writer panics while holding an exclusive + /// lock. 
An error will only be returned if the lock would have otherwise + /// been acquired. /// /// # Examples /// @@ -454,10 +456,10 @@ impl<T: ?Sized> RwLock<T> { /// /// # Errors /// - /// This function will return an error if the RwLock is poisoned. An RwLock - /// is poisoned whenever a writer panics while holding an exclusive lock. An - /// error will only be returned if the lock would have otherwise been - /// acquired. + /// This function will return an error if the `RwLock` is poisoned. An + /// `RwLock` is poisoned whenever a writer panics while holding an exclusive + /// lock. An error will only be returned if the lock would have otherwise + /// been acquired. /// /// # Examples /// diff --git a/library/std/src/sys/common/mod.rs b/library/std/src/sys/common/mod.rs index ff64d2aa8..29fc0835d 100644 --- a/library/std/src/sys/common/mod.rs +++ b/library/std/src/sys/common/mod.rs @@ -11,3 +11,7 @@ #![allow(dead_code)] pub mod alloc; +pub mod small_c_string; + +#[cfg(test)] +mod tests; diff --git a/library/std/src/sys/common/small_c_string.rs b/library/std/src/sys/common/small_c_string.rs new file mode 100644 index 000000000..01acd5191 --- /dev/null +++ b/library/std/src/sys/common/small_c_string.rs @@ -0,0 +1,58 @@ +use crate::ffi::{CStr, CString}; +use crate::mem::MaybeUninit; +use crate::path::Path; +use crate::slice; +use crate::{io, ptr}; + +// Make sure to stay under 4096 so the compiler doesn't insert a probe frame: +// https://docs.rs/compiler_builtins/latest/compiler_builtins/probestack/index.html +#[cfg(not(target_os = "espidf"))] +const MAX_STACK_ALLOCATION: usize = 384; +#[cfg(target_os = "espidf")] +const MAX_STACK_ALLOCATION: usize = 32; + +const NUL_ERR: io::Error = + io::const_io_error!(io::ErrorKind::InvalidInput, "file name contained an unexpected NUL byte"); + +#[inline] +pub fn run_path_with_cstr<T, F>(path: &Path, f: F) -> io::Result<T> +where + F: FnOnce(&CStr) -> io::Result<T>, +{ + run_with_cstr(path.as_os_str().bytes(), f) +} + +#[inline] +pub fn run_with_cstr<T, F>(bytes: &[u8], f: F) -> io::Result<T> +where + F: FnOnce(&CStr) -> io::Result<T>, +{ + if bytes.len() >= MAX_STACK_ALLOCATION { + return run_with_cstr_allocating(bytes, f); + } + + let mut buf = MaybeUninit::<[u8; MAX_STACK_ALLOCATION]>::uninit(); + let buf_ptr = buf.as_mut_ptr() as *mut u8; + + unsafe { + ptr::copy_nonoverlapping(bytes.as_ptr(), buf_ptr, bytes.len()); + buf_ptr.add(bytes.len()).write(0); + } + + match CStr::from_bytes_with_nul(unsafe { slice::from_raw_parts(buf_ptr, bytes.len() + 1) }) { + Ok(s) => f(s), + Err(_) => Err(NUL_ERR), + } +} + +#[cold] +#[inline(never)] +fn run_with_cstr_allocating<T, F>(bytes: &[u8], f: F) -> io::Result<T> +where + F: FnOnce(&CStr) -> io::Result<T>, +{ + match CString::new(bytes) { + Ok(s) => f(&s), + Err(_) => Err(NUL_ERR), + } +} diff --git a/library/std/src/sys/common/tests.rs b/library/std/src/sys/common/tests.rs new file mode 100644 index 000000000..fb6f5d6af --- /dev/null +++ b/library/std/src/sys/common/tests.rs @@ -0,0 +1,66 @@ +use crate::ffi::CString; +use crate::hint::black_box; +use crate::path::Path; +use crate::sys::common::small_c_string::run_path_with_cstr; +use core::iter::repeat; + +#[test] +fn stack_allocation_works() { + let path = Path::new("abc"); + let result = run_path_with_cstr(path, |p| { + assert_eq!(p, &*CString::new(path.as_os_str().bytes()).unwrap()); + Ok(42) + }); + assert_eq!(result.unwrap(), 42); +} + +#[test] +fn stack_allocation_fails() { + let path = Path::new("ab\0"); + assert!(run_path_with_cstr::<(), _>(path, |_| 
unreachable!()).is_err()); +} + +#[test] +fn heap_allocation_works() { + let path = repeat("a").take(384).collect::<String>(); + let path = Path::new(&path); + let result = run_path_with_cstr(path, |p| { + assert_eq!(p, &*CString::new(path.as_os_str().bytes()).unwrap()); + Ok(42) + }); + assert_eq!(result.unwrap(), 42); +} + +#[test] +fn heap_allocation_fails() { + let mut path = repeat("a").take(384).collect::<String>(); + path.push('\0'); + let path = Path::new(&path); + assert!(run_path_with_cstr::<(), _>(path, |_| unreachable!()).is_err()); +} + +#[bench] +fn bench_stack_path_alloc(b: &mut test::Bencher) { + let path = repeat("a").take(383).collect::<String>(); + let p = Path::new(&path); + b.iter(|| { + run_path_with_cstr(p, |cstr| { + black_box(cstr); + Ok(()) + }) + .unwrap(); + }); +} + +#[bench] +fn bench_heap_path_alloc(b: &mut test::Bencher) { + let path = repeat("a").take(384).collect::<String>(); + let p = Path::new(&path); + b.iter(|| { + run_path_with_cstr(p, |cstr| { + black_box(cstr); + Ok(()) + }) + .unwrap(); + }); +} diff --git a/library/std/src/sys/hermit/args.rs b/library/std/src/sys/hermit/args.rs index 1c7e1dd8d..afcae6c90 100644 --- a/library/std/src/sys/hermit/args.rs +++ b/library/std/src/sys/hermit/args.rs @@ -1,20 +1,37 @@ -use crate::ffi::OsString; +use crate::ffi::{c_char, CStr, OsString}; use crate::fmt; +use crate::os::unix::ffi::OsStringExt; +use crate::ptr; +use crate::sync::atomic::{ + AtomicIsize, AtomicPtr, + Ordering::{Acquire, Relaxed, Release}, +}; use crate::vec; +static ARGC: AtomicIsize = AtomicIsize::new(0); +static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut()); + /// One-time global initialization. pub unsafe fn init(argc: isize, argv: *const *const u8) { - imp::init(argc, argv) -} - -/// One-time global cleanup. -pub unsafe fn cleanup() { - imp::cleanup() + ARGC.store(argc, Relaxed); + // Use release ordering here to broadcast writes by the OS. + ARGV.store(argv as *mut *const u8, Release); } /// Returns the command line arguments pub fn args() -> Args { - imp::args() + // Synchronize with the store above. + let argv = ARGV.load(Acquire); + // If argv has not been initialized yet, do not return any arguments. 
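The small_c_string tests and benchmarks above exercise a stack-first conversion to a NUL-terminated string. A self-contained sketch of the same pattern using only public std APIs; the 384-byte cutoff mirrors MAX_STACK_ALLOCATION, while the helper name with_cstr, the zero-initialised buffer (instead of MaybeUninit), and the error construction are illustrative simplifications:

    use std::ffi::{CStr, CString};
    use std::io;

    const MAX_STACK: usize = 384; // mirrors MAX_STACK_ALLOCATION in the patch

    fn with_cstr<T>(bytes: &[u8], f: impl FnOnce(&CStr) -> io::Result<T>) -> io::Result<T> {
        if bytes.len() < MAX_STACK {
            // Fast path: copy into a zeroed stack buffer; the trailing zero is the NUL.
            let mut buf = [0u8; MAX_STACK];
            buf[..bytes.len()].copy_from_slice(bytes);
            match CStr::from_bytes_with_nul(&buf[..=bytes.len()]) {
                Ok(s) => f(s),
                // Only an interior NUL can fail here, since we appended the terminator.
                Err(_) => Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "file name contained an unexpected NUL byte",
                )),
            }
        } else {
            // Slow path: fall back to a heap allocation, as run_with_cstr_allocating does.
            match CString::new(bytes) {
                Ok(s) => f(&s),
                Err(_) => Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "file name contained an unexpected NUL byte",
                )),
            }
        }
    }

    fn main() -> io::Result<()> {
        with_cstr(b"abc", |c| {
            assert_eq!(c.to_bytes(), b"abc");
            Ok(())
        })
    }

run_path_with_cstr in the patch is the same idea applied to the raw bytes of a Path before handing them to a C API.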
+ let argc = if argv.is_null() { 0 } else { ARGC.load(Relaxed) }; + let args: Vec<OsString> = (0..argc) + .map(|i| unsafe { + let cstr = CStr::from_ptr(*argv.offset(i) as *const c_char); + OsStringExt::from_vec(cstr.to_bytes().to_vec()) + }) + .collect(); + + Args { iter: args.into_iter() } } pub struct Args { @@ -51,44 +68,3 @@ impl DoubleEndedIterator for Args { self.iter.next_back() } } - -mod imp { - use super::Args; - use crate::ffi::{CStr, OsString}; - use crate::os::unix::ffi::OsStringExt; - use crate::ptr; - - use crate::sys_common::mutex::StaticMutex; - - static mut ARGC: isize = 0; - static mut ARGV: *const *const u8 = ptr::null(); - static LOCK: StaticMutex = StaticMutex::new(); - - pub unsafe fn init(argc: isize, argv: *const *const u8) { - let _guard = LOCK.lock(); - ARGC = argc; - ARGV = argv; - } - - pub unsafe fn cleanup() { - let _guard = LOCK.lock(); - ARGC = 0; - ARGV = ptr::null(); - } - - pub fn args() -> Args { - Args { iter: clone().into_iter() } - } - - fn clone() -> Vec<OsString> { - unsafe { - let _guard = LOCK.lock(); - (0..ARGC) - .map(|i| { - let cstr = CStr::from_ptr(*ARGV.offset(i) as *const i8); - OsStringExt::from_vec(cstr.to_bytes().to_vec()) - }) - .collect() - } - } -} diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs index f921839cf..af297ff1e 100644 --- a/library/std/src/sys/hermit/fs.rs +++ b/library/std/src/sys/hermit/fs.rs @@ -1,3 +1,4 @@ +use crate::convert::TryFrom; use crate::ffi::{CStr, CString, OsString}; use crate::fmt; use crate::hash::{Hash, Hasher}; @@ -5,6 +6,7 @@ use crate::io::{self, Error, ErrorKind}; use crate::io::{BorrowedCursor, IoSlice, IoSliceMut, SeekFrom}; use crate::os::unix::ffi::OsStrExt; use crate::path::{Path, PathBuf}; +use crate::sys::common::small_c_string::run_path_with_cstr; use crate::sys::cvt; use crate::sys::hermit::abi; use crate::sys::hermit::abi::{O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY}; @@ -15,10 +17,6 @@ use crate::sys::unsupported; pub use crate::sys_common::fs::{copy, try_exists}; //pub use crate::sys_common::fs::remove_dir_all; -fn cstr(path: &Path) -> io::Result<CString> { - Ok(CString::new(path.as_os_str().as_bytes())?) -} - #[derive(Debug)] pub struct File(FileDesc); @@ -272,8 +270,7 @@ impl OpenOptions { impl File { pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { - let path = cstr(path)?; - File::open_c(&path, opts) + run_path_with_cstr(path, |path| File::open_c(&path, opts)) } pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> { @@ -373,9 +370,7 @@ pub fn readdir(_p: &Path) -> io::Result<ReadDir> { } pub fn unlink(path: &Path) -> io::Result<()> { - let name = cstr(path)?; - let _ = unsafe { cvt(abi::unlink(name.as_ptr()))? }; - Ok(()) + run_path_with_cstr(path, |path| cvt(unsafe { abi::unlink(path.as_ptr()) }).map(|_| ())) } pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> { diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs index 827d82900..e6534df89 100644 --- a/library/std/src/sys/hermit/mod.rs +++ b/library/std/src/sys/hermit/mod.rs @@ -106,9 +106,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) { // SAFETY: must be called only once during runtime cleanup. // NOTE: this is not guaranteed to run, for example when the program aborts. 
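The hermit argument rework above drops the StaticMutex-protected globals in favour of a Relaxed/Release store pair published once at init and read back with Acquire, which is also why cleanup() no longer has anything to do. A minimal standalone sketch of that publication pattern; the names publish and read and the byte payload are hypothetical, only the ordering discipline comes from the hunk:

    use std::ptr;
    use std::slice;
    use std::sync::atomic::{
        AtomicPtr, AtomicUsize,
        Ordering::{Acquire, Relaxed, Release},
    };

    static LEN: AtomicUsize = AtomicUsize::new(0);
    static DATA: AtomicPtr<u8> = AtomicPtr::new(ptr::null_mut());

    // Publisher: store the auxiliary value first, then release-store the pointer.
    fn publish(buf: &'static [u8]) {
        LEN.store(buf.len(), Relaxed);
        DATA.store(buf.as_ptr() as *mut u8, Release);
    }

    // Reader: acquire-load the pointer; null means "not initialized yet".
    fn read() -> Vec<u8> {
        let p = DATA.load(Acquire);
        if p.is_null() {
            return Vec::new();
        }
        let len = LEN.load(Relaxed);
        // SAFETY: the acquire load synchronizes with the release store above,
        // so `p` points to `len` initialized bytes that live for 'static.
        unsafe { slice::from_raw_parts(p, len) }.to_vec()
    }

    fn main() {
        assert!(read().is_empty());
        publish(b"abc");
        assert_eq!(read(), b"abc".to_vec());
    }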
-pub unsafe fn cleanup() { - args::cleanup(); -} +pub unsafe fn cleanup() {} #[cfg(not(test))] #[no_mangle] diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs index 167c918c9..c080c176a 100644 --- a/library/std/src/sys/mod.rs +++ b/library/std/src/sys/mod.rs @@ -22,7 +22,7 @@ #![allow(missing_debug_implementations)] -mod common; +pub mod common; cfg_if::cfg_if! { if #[cfg(unix)] { diff --git a/library/std/src/sys/sgx/abi/tls/mod.rs b/library/std/src/sys/sgx/abi/tls/mod.rs index 13d96e9a6..09c4ab3d3 100644 --- a/library/std/src/sys/sgx/abi/tls/mod.rs +++ b/library/std/src/sys/sgx/abi/tls/mod.rs @@ -111,6 +111,7 @@ impl Tls { rtabort!("TLS limit exceeded") }; TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed); + unsafe { Self::current() }.data[index].set(ptr::null_mut()); Key::from_index(index) } diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs index 5409bd177..0d934318c 100644 --- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs +++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs @@ -316,9 +316,9 @@ where // | small1 | Chunk smaller than 8 bytes // +--------+ fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) { - let small0_size = if ptr as usize % 8 == 0 { 0 } else { 8 - ptr as usize % 8 }; - let small1_size = (len - small0_size as usize) % 8; - let big_size = len - small0_size as usize - small1_size as usize; + let small0_size = if ptr.is_aligned_to(8) { 0 } else { 8 - ptr.addr() % 8 }; + let small1_size = (len - small0_size) % 8; + let big_size = len - small0_size - small1_size; (small0_size, big_size, small1_size) } @@ -364,8 +364,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) mfence lfence ", - val = in(reg_byte) *src.offset(off as isize), - dst = in(reg) dst.offset(off as isize), + val = in(reg_byte) *src.add(off), + dst = in(reg) dst.add(off), seg_sel = in(reg) &mut seg_sel, options(nostack, att_syntax) ); @@ -378,8 +378,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) assert!(is_enclave_range(src, len)); assert!(is_user_range(dst, len)); assert!(len < isize::MAX as usize); - assert!(!(src as usize).overflowing_add(len).1); - assert!(!(dst as usize).overflowing_add(len).1); + assert!(!src.addr().overflowing_add(len).1); + assert!(!dst.addr().overflowing_add(len).1); if len < 8 { // Can't align on 8 byte boundary: copy safely byte per byte @@ -404,17 +404,17 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) unsafe { // Copy small0 - copy_bytewise_to_userspace(src, dst, small0_size as _); + copy_bytewise_to_userspace(src, dst, small0_size); // Copy big - let big_src = src.offset(small0_size as _); - let big_dst = dst.offset(small0_size as _); - copy_quadwords(big_src as _, big_dst, big_size); + let big_src = src.add(small0_size); + let big_dst = dst.add(small0_size); + copy_quadwords(big_src, big_dst, big_size); // Copy small1 - let small1_src = src.offset(big_size as isize + small0_size as isize); - let small1_dst = dst.offset(big_size as isize + small0_size as isize); - copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _); + let small1_src = src.add(big_size + small0_size); + let small1_dst = dst.add(big_size + small0_size); + copy_bytewise_to_userspace(small1_src, small1_dst, small1_size); } } } diff --git a/library/std/src/sys/sgx/thread_local_key.rs b/library/std/src/sys/sgx/thread_local_key.rs index b21784475..c7a57d3a3 
100644 --- a/library/std/src/sys/sgx/thread_local_key.rs +++ b/library/std/src/sys/sgx/thread_local_key.rs @@ -21,8 +21,3 @@ pub unsafe fn get(key: Key) -> *mut u8 { pub unsafe fn destroy(key: Key) { Tls::destroy(AbiKey::from_usize(key)) } - -#[inline] -pub fn requires_synchronized_create() -> bool { - false -} diff --git a/library/std/src/sys/solid/fs.rs b/library/std/src/sys/solid/fs.rs index 969222253..6c66b93a3 100644 --- a/library/std/src/sys/solid/fs.rs +++ b/library/std/src/sys/solid/fs.rs @@ -175,15 +175,19 @@ impl Iterator for ReadDir { type Item = io::Result<DirEntry>; fn next(&mut self) -> Option<io::Result<DirEntry>> { - unsafe { - let mut out_dirent = MaybeUninit::uninit(); - error::SolidError::err_if_negative(abi::SOLID_FS_ReadDir( + let entry = unsafe { + let mut out_entry = MaybeUninit::uninit(); + match error::SolidError::err_if_negative(abi::SOLID_FS_ReadDir( self.inner.dirp, - out_dirent.as_mut_ptr(), - )) - .ok()?; - Some(Ok(DirEntry { entry: out_dirent.assume_init(), inner: Arc::clone(&self.inner) })) - } + out_entry.as_mut_ptr(), + )) { + Ok(_) => out_entry.assume_init(), + Err(e) if e.as_raw() == abi::SOLID_ERR_NOTFOUND => return None, + Err(e) => return Some(Err(e.as_io_error())), + } + }; + + (entry.d_name[0] != 0).then(|| Ok(DirEntry { entry, inner: Arc::clone(&self.inner) })) } } diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs index b5649d6e0..4906c6268 100644 --- a/library/std/src/sys/solid/os.rs +++ b/library/std/src/sys/solid/os.rs @@ -1,4 +1,5 @@ use super::unsupported; +use crate::convert::TryFrom; use crate::error::Error as StdError; use crate::ffi::{CStr, CString, OsStr, OsString}; use crate::fmt; @@ -8,7 +9,8 @@ use crate::os::{ solid::ffi::{OsStrExt, OsStringExt}, }; use crate::path::{self, PathBuf}; -use crate::sys_common::rwlock::StaticRwLock; +use crate::sync::RwLock; +use crate::sys::common::small_c_string::run_with_cstr; use crate::vec; use super::{error, itron, memchr}; @@ -78,7 +80,7 @@ pub fn current_exe() -> io::Result<PathBuf> { unsupported() } -static ENV_LOCK: StaticRwLock = StaticRwLock::new(); +static ENV_LOCK: RwLock<()> = RwLock::new(()); pub struct Env { iter: vec::IntoIter<(OsString, OsString)>, @@ -139,35 +141,33 @@ pub fn env() -> Env { pub fn getenv(k: &OsStr) -> Option<OsString> { // environment variables with a nul byte can't be set, so their value is // always None as well - let k = CString::new(k.as_bytes()).ok()?; - unsafe { + let s = run_with_cstr(k.as_bytes(), |k| { let _guard = ENV_LOCK.read(); - let s = libc::getenv(k.as_ptr()) as *const libc::c_char; - if s.is_null() { - None - } else { - Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec())) - } + Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char) + }) + .ok()?; + + if s.is_null() { + None + } else { + Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec())) } } pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { - let k = CString::new(k.as_bytes())?; - let v = CString::new(v.as_bytes())?; - - unsafe { - let _guard = ENV_LOCK.write(); - cvt_env(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) - } + run_with_cstr(k.as_bytes(), |k| { + run_with_cstr(v.as_bytes(), |v| { + let _guard = ENV_LOCK.write(); + cvt_env(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop) + }) + }) } pub fn unsetenv(n: &OsStr) -> io::Result<()> { - let nbuf = CString::new(n.as_bytes())?; - - unsafe { + run_with_cstr(n.as_bytes(), |nbuf| { let _guard = ENV_LOCK.write(); - 
cvt_env(libc::unsetenv(nbuf.as_ptr())).map(drop) - } + cvt_env(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop) + }) } /// In kmclib, `setenv` and `unsetenv` don't always set `errno`, so this diff --git a/library/std/src/sys/solid/thread_local_key.rs b/library/std/src/sys/solid/thread_local_key.rs index b17521f70..b37bf9996 100644 --- a/library/std/src/sys/solid/thread_local_key.rs +++ b/library/std/src/sys/solid/thread_local_key.rs @@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 { pub unsafe fn destroy(_key: Key) { panic!("should not be used on the solid target"); } - -#[inline] -pub fn requires_synchronized_create() -> bool { - panic!("should not be used on the solid target"); -} diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs index cc347e358..37a49f2d7 100644 --- a/library/std/src/sys/unix/fs.rs +++ b/library/std/src/sys/unix/fs.rs @@ -1,13 +1,26 @@ +// miri has some special hacks here that make things unused. +#![cfg_attr(miri, allow(unused))] + use crate::os::unix::prelude::*; -use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::ffi::{CStr, OsStr, OsString}; use crate::fmt; use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom}; use crate::mem; +#[cfg(any( + target_os = "android", + target_os = "linux", + target_os = "solaris", + target_os = "fuchsia", + target_os = "redox", + target_os = "illumos" +))] +use crate::mem::MaybeUninit; use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd}; use crate::path::{Path, PathBuf}; use crate::ptr; use crate::sync::Arc; +use crate::sys::common::small_c_string::run_path_with_cstr; use crate::sys::fd::FileDesc; use crate::sys::time::SystemTime; use crate::sys::{cvt, cvt_r}; @@ -260,7 +273,7 @@ pub struct DirEntry { // We need to store an owned copy of the entry name on platforms that use // readdir() (not readdir_r()), because a) struct dirent may use a flexible // array to store the name, b) it lives only until the next readdir() call. - name: CString, + name: crate::ffi::CString, } // Define a minimal subset of fields we need from `dirent64`, especially since @@ -313,8 +326,11 @@ pub struct FilePermissions { mode: mode_t, } -#[derive(Copy, Clone)] -pub struct FileTimes([libc::timespec; 2]); +#[derive(Copy, Clone, Debug, Default)] +pub struct FileTimes { + accessed: Option<SystemTime>, + modified: Option<SystemTime>, +} #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct FileType { @@ -512,45 +528,11 @@ impl FilePermissions { impl FileTimes { pub fn set_accessed(&mut self, t: SystemTime) { - self.0[0] = t.t.to_timespec().expect("Invalid system time"); + self.accessed = Some(t); } pub fn set_modified(&mut self, t: SystemTime) { - self.0[1] = t.t.to_timespec().expect("Invalid system time"); - } -} - -struct TimespecDebugAdapter<'a>(&'a libc::timespec); - -impl fmt::Debug for TimespecDebugAdapter<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("timespec") - .field("tv_sec", &self.0.tv_sec) - .field("tv_nsec", &self.0.tv_nsec) - .finish() - } -} - -impl fmt::Debug for FileTimes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FileTimes") - .field("accessed", &TimespecDebugAdapter(&self.0[0])) - .field("modified", &TimespecDebugAdapter(&self.0[1])) - .finish() - } -} - -impl Default for FileTimes { - fn default() -> Self { - // Redox doesn't appear to support `UTIME_OMIT`, so we stub it out here, and always return - // an error in `set_times`. 
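Both the solid and unix os.rs hunks in this patch swap sys_common's StaticRwLock for a plain std::sync::RwLock<()> used purely as a guard around the non-thread-safe C environment. A minimal sketch of that idiom; env_read_lock matches the unix helper later in the diff, while the write-side block and the comments are illustrative:

    use std::sync::{PoisonError, RwLock, RwLockReadGuard};

    // The unit payload carries no data; the lock only serializes access to a
    // process-global resource that is not thread-safe on its own.
    static ENV_LOCK: RwLock<()> = RwLock::new(());

    // As in the patch: ignore poisoning, since the () payload has no invariants
    // that a panicking holder could have broken.
    fn env_read_lock() -> RwLockReadGuard<'static, ()> {
        ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
    }

    fn main() {
        {
            let _guard = ENV_LOCK.write().unwrap_or_else(PoisonError::into_inner);
            // ...exclusive section: call the writer (libc::setenv in the patch)...
        }
        let _guard = env_read_lock();
        // ...shared section: call the reader (libc::getenv in the patch)...
    }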
- // ESP-IDF and HorizonOS do not support `futimens` at all and the behavior for those OS is therefore - // the same as for Redox. - #[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon"))] - let omit = libc::timespec { tv_sec: 0, tv_nsec: 0 }; - #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))] - let omit = libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ }; - Self([omit; 2]) + self.modified = Some(t); } } @@ -614,33 +596,69 @@ impl Iterator for ReadDir { }; } - // Only d_reclen bytes of *entry_ptr are valid, so we can't just copy the - // whole thing (#93384). Instead, copy everything except the name. - let mut copy: dirent64 = mem::zeroed(); - // Can't dereference entry_ptr, so use the local entry to get - // offsetof(struct dirent, d_name) - let copy_bytes = &mut copy as *mut _ as *mut u8; - let copy_name = &mut copy.d_name as *mut _ as *mut u8; - let name_offset = copy_name.offset_from(copy_bytes) as usize; - let entry_bytes = entry_ptr as *const u8; - let entry_name = entry_bytes.add(name_offset); - ptr::copy_nonoverlapping(entry_bytes, copy_bytes, name_offset); + // The dirent64 struct is a weird imaginary thing that isn't ever supposed + // to be worked with by value. Its trailing d_name field is declared + // variously as [c_char; 256] or [c_char; 1] on different systems but + // either way that size is meaningless; only the offset of d_name is + // meaningful. The dirent64 pointers that libc returns from readdir64 are + // allowed to point to allocations smaller _or_ LARGER than implied by the + // definition of the struct. + // + // As such, we need to be even more careful with dirent64 than if its + // contents were "simply" partially initialized data. + // + // Like for uninitialized contents, converting entry_ptr to `&dirent64` + // would not be legal. However, unique to dirent64 is that we don't even + // get to use `addr_of!((*entry_ptr).d_name)` because that operation + // requires the full extent of *entry_ptr to be in bounds of the same + // allocation, which is not necessarily the case here. + // + // Absent any other way to obtain a pointer to `(*entry_ptr).d_name` + // legally in Rust analogously to how it would be done in C, we instead + // need to make our own non-libc allocation that conforms to the weird + // imaginary definition of dirent64, and use that for a field offset + // computation. + macro_rules! offset_ptr { + ($entry_ptr:expr, $field:ident) => {{ + const OFFSET: isize = { + let delusion = MaybeUninit::<dirent64>::uninit(); + let entry_ptr = delusion.as_ptr(); + unsafe { + ptr::addr_of!((*entry_ptr).$field) + .cast::<u8>() + .offset_from(entry_ptr.cast::<u8>()) + } + }; + if true { + // Cast to the same type determined by the else branch. + $entry_ptr.byte_offset(OFFSET).cast::<_>() + } else { + #[allow(deref_nullptr)] + { + ptr::addr_of!((*ptr::null::<dirent64>()).$field) + } + } + }}; + } + + // d_name is guaranteed to be null-terminated. + let name = CStr::from_ptr(offset_ptr!(entry_ptr, d_name).cast()); + let name_bytes = name.to_bytes(); + if name_bytes == b"." || name_bytes == b".." { + continue; + } let entry = dirent64_min { - d_ino: copy.d_ino as u64, + d_ino: *offset_ptr!(entry_ptr, d_ino) as u64, #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] - d_type: copy.d_type as u8, + d_type: *offset_ptr!(entry_ptr, d_type) as u8, }; - let ret = DirEntry { + return Some(Ok(DirEntry { entry, - // d_name is guaranteed to be null-terminated. 
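The long comment above explains why the new readdir code computes the d_name offset from a MaybeUninit "delusion" rather than touching the real, possibly shorter entry. A toy reproduction of that offset computation with a made-up Record type; nothing here comes from libc, and the patch evaluates the offset in a const block rather than at runtime:

    use std::mem::MaybeUninit;
    use std::ptr;

    // Stand-in for dirent64: only the offset of `name` matters, never the
    // declared size of the trailing array.
    #[repr(C)]
    struct Record {
        id: u64,
        kind: u8,
        name: [u8; 1], // declared length is meaningless, as for dirent64::d_name
    }

    // Compute the field offset from an uninitialized local, without ever
    // dereferencing or creating a reference to a real record.
    fn name_offset() -> isize {
        let delusion = MaybeUninit::<Record>::uninit();
        let base = delusion.as_ptr();
        unsafe { ptr::addr_of!((*base).name).cast::<u8>().offset_from(base.cast::<u8>()) }
    }

    fn main() {
        println!("offset of `name`: {}", name_offset());
    }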
- name: CStr::from_ptr(entry_name as *const _).to_owned(), + name: name.to_owned(), dir: Arc::clone(&self.inner), - }; - if ret.name_bytes() != b"." && ret.name_bytes() != b".." { - return Some(Ok(ret)); - } + })); } } } @@ -704,7 +722,10 @@ impl DirEntry { self.file_name_os_str().to_os_string() } - #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))] + #[cfg(all( + any(target_os = "linux", target_os = "emscripten", target_os = "android"), + not(miri) + ))] pub fn metadata(&self) -> io::Result<FileAttr> { let fd = cvt(unsafe { dirfd(self.dir.dirp.0) })?; let name = self.name_cstr().as_ptr(); @@ -725,7 +746,10 @@ impl DirEntry { Ok(FileAttr::from_stat64(stat)) } - #[cfg(not(any(target_os = "linux", target_os = "emscripten", target_os = "android")))] + #[cfg(any( + not(any(target_os = "linux", target_os = "emscripten", target_os = "android")), + miri + ))] pub fn metadata(&self) -> io::Result<FileAttr> { lstat(&self.path()) } @@ -829,7 +853,6 @@ impl DirEntry { target_os = "fuchsia", target_os = "redox" )))] - #[cfg_attr(miri, allow(unused))] fn name_cstr(&self) -> &CStr { unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) } } @@ -841,7 +864,6 @@ impl DirEntry { target_os = "fuchsia", target_os = "redox" ))] - #[cfg_attr(miri, allow(unused))] fn name_cstr(&self) -> &CStr { &self.name } @@ -931,8 +953,7 @@ impl OpenOptions { impl File { pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { - let path = cstr(path)?; - File::open_c(&path, opts) + run_path_with_cstr(path, |path| File::open_c(path, opts)) } pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> { @@ -1084,6 +1105,17 @@ impl File { } pub fn set_times(&self, times: FileTimes) -> io::Result<()> { + #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))] + let to_timespec = |time: Option<SystemTime>| { + match time { + Some(time) if let Some(ts) = time.t.to_timespec() => Ok(ts), + Some(time) if time > crate::sys::time::UNIX_EPOCH => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")), + Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too small to set as a file time")), + None => Ok(libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ }), + } + }; + #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))] + let times = [to_timespec(times.accessed)?, to_timespec(times.modified)?]; cfg_if::cfg_if! { if #[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon"))] { // Redox doesn't appear to support `UTIME_OMIT`. @@ -1099,7 +1131,7 @@ impl File { cvt(unsafe { weak!(fn futimens(c_int, *const libc::timespec) -> c_int); match futimens.get() { - Some(futimens) => futimens(self.as_raw_fd(), times.0.as_ptr()), + Some(futimens) => futimens(self.as_raw_fd(), times.as_ptr()), #[cfg(target_os = "macos")] None => { fn ts_to_tv(ts: &libc::timespec) -> libc::timeval { @@ -1108,7 +1140,7 @@ impl File { tv_usec: (ts.tv_nsec / 1000) as _ } } - let timevals = [ts_to_tv(×.0[0]), ts_to_tv(×.0[1])]; + let timevals = [ts_to_tv(×[0]), ts_to_tv(×[1])]; libc::futimes(self.as_raw_fd(), timevals.as_ptr()) } // futimes requires even newer Android. 
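FileTimes now stores the user-supplied SystemTime values and only converts them at the syscall boundary inside set_times, where out-of-range values surface as errors instead of the old expect() panic in the setter. A rough standalone sketch of that shape, using Unix-epoch seconds in place of libc::timespec; the struct and function names are illustrative, the error messages are taken from the hunk:

    use std::io;
    use std::time::{SystemTime, UNIX_EPOCH};

    #[derive(Copy, Clone, Debug, Default)]
    struct FileTimesSketch {
        accessed: Option<SystemTime>,
        modified: Option<SystemTime>,
    }

    // `None` stands for "leave this timestamp unchanged" (UTIME_OMIT in the real
    // implementation); conversion errors are reported as io::Error.
    fn to_unix_secs(t: Option<SystemTime>) -> io::Result<Option<(i64, u32)>> {
        match t {
            None => Ok(None),
            Some(t) => {
                let d = t.duration_since(UNIX_EPOCH).map_err(|_| {
                    io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "timestamp is too small to set as a file time",
                    )
                })?;
                if d.as_secs() > i64::MAX as u64 {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "timestamp is too large to set as a file time",
                    ));
                }
                Ok(Some((d.as_secs() as i64, d.subsec_nanos())))
            }
        }
    }

    fn main() -> io::Result<()> {
        let mut times = FileTimesSketch::default();
        times.modified = Some(SystemTime::now());
        println!("modified: {:?}", to_unix_secs(times.modified)?);
        println!("accessed: {:?}", to_unix_secs(times.accessed)?); // None => omit
        Ok(())
    }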
@@ -1121,7 +1153,7 @@ impl File { })?; Ok(()) } else { - cvt(unsafe { libc::futimens(self.as_raw_fd(), times.0.as_ptr()) })?; + cvt(unsafe { libc::futimens(self.as_raw_fd(), times.as_ptr()) })?; Ok(()) } } @@ -1134,9 +1166,7 @@ impl DirBuilder { } pub fn mkdir(&self, p: &Path) -> io::Result<()> { - let p = cstr(p)?; - cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) })?; - Ok(()) + run_path_with_cstr(p, |p| cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }).map(|_| ())) } pub fn set_mode(&mut self, mode: u32) { @@ -1144,10 +1174,6 @@ impl DirBuilder { } } -fn cstr(path: &Path) -> io::Result<CString> { - Ok(CString::new(path.as_os_str().as_bytes())?) -} - impl AsInner<FileDesc> for File { fn as_inner(&self) -> &FileDesc { &self.0 @@ -1198,7 +1224,12 @@ impl FromRawFd for File { impl fmt::Debug for File { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - #[cfg(any(target_os = "linux", target_os = "netbsd"))] + #[cfg(any( + target_os = "linux", + target_os = "netbsd", + target_os = "illumos", + target_os = "solaris" + ))] fn get_path(fd: c_int) -> Option<PathBuf> { let mut p = PathBuf::from("/proc/self/fd"); p.push(&fd.to_string()); @@ -1253,14 +1284,23 @@ impl fmt::Debug for File { target_os = "macos", target_os = "vxworks", all(target_os = "freebsd", target_arch = "x86_64"), - target_os = "netbsd" + target_os = "netbsd", + target_os = "illumos", + target_os = "solaris" )))] fn get_path(_fd: c_int) -> Option<PathBuf> { // FIXME(#24570): implement this for other Unix platforms None } - #[cfg(any(target_os = "linux", target_os = "macos", target_os = "vxworks"))] + #[cfg(any( + target_os = "linux", + target_os = "macos", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd", + target_os = "vxworks" + ))] fn get_mode(fd: c_int) -> Option<(bool, bool)> { let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) }; if mode == -1 { @@ -1274,7 +1314,14 @@ impl fmt::Debug for File { } } - #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "vxworks")))] + #[cfg(not(any( + target_os = "linux", + target_os = "macos", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd", + target_os = "vxworks" + )))] fn get_mode(_fd: c_int) -> Option<(bool, bool)> { // FIXME(#24570): implement this for other Unix platforms None @@ -1293,173 +1340,170 @@ impl fmt::Debug for File { } } -pub fn readdir(p: &Path) -> io::Result<ReadDir> { - let root = p.to_path_buf(); - let p = cstr(p)?; - unsafe { - let ptr = libc::opendir(p.as_ptr()); - if ptr.is_null() { - Err(Error::last_os_error()) - } else { - let inner = InnerReadDir { dirp: Dir(ptr), root }; - Ok(ReadDir { - inner: Arc::new(inner), - #[cfg(not(any( - target_os = "android", - target_os = "linux", - target_os = "solaris", - target_os = "illumos", - target_os = "fuchsia", - target_os = "redox", - )))] - end_of_stream: false, - }) - } +pub fn readdir(path: &Path) -> io::Result<ReadDir> { + let ptr = run_path_with_cstr(path, |p| unsafe { Ok(libc::opendir(p.as_ptr())) })?; + if ptr.is_null() { + Err(Error::last_os_error()) + } else { + let root = path.to_path_buf(); + let inner = InnerReadDir { dirp: Dir(ptr), root }; + Ok(ReadDir { + inner: Arc::new(inner), + #[cfg(not(any( + target_os = "android", + target_os = "linux", + target_os = "solaris", + target_os = "illumos", + target_os = "fuchsia", + target_os = "redox", + )))] + end_of_stream: false, + }) } } pub fn unlink(p: &Path) -> io::Result<()> { - let p = cstr(p)?; - cvt(unsafe { libc::unlink(p.as_ptr()) })?; - Ok(()) + run_path_with_cstr(p, |p| cvt(unsafe { 
libc::unlink(p.as_ptr()) }).map(|_| ())) } pub fn rename(old: &Path, new: &Path) -> io::Result<()> { - let old = cstr(old)?; - let new = cstr(new)?; - cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })?; - Ok(()) + run_path_with_cstr(old, |old| { + run_path_with_cstr(new, |new| { + cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }).map(|_| ()) + }) + }) } pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> { - let p = cstr(p)?; - cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) })?; - Ok(()) + run_path_with_cstr(p, |p| cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }).map(|_| ())) } pub fn rmdir(p: &Path) -> io::Result<()> { - let p = cstr(p)?; - cvt(unsafe { libc::rmdir(p.as_ptr()) })?; - Ok(()) + run_path_with_cstr(p, |p| cvt(unsafe { libc::rmdir(p.as_ptr()) }).map(|_| ())) } pub fn readlink(p: &Path) -> io::Result<PathBuf> { - let c_path = cstr(p)?; - let p = c_path.as_ptr(); + run_path_with_cstr(p, |c_path| { + let p = c_path.as_ptr(); - let mut buf = Vec::with_capacity(256); + let mut buf = Vec::with_capacity(256); - loop { - let buf_read = - cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })? as usize; + loop { + let buf_read = + cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })? + as usize; - unsafe { - buf.set_len(buf_read); - } + unsafe { + buf.set_len(buf_read); + } - if buf_read != buf.capacity() { - buf.shrink_to_fit(); + if buf_read != buf.capacity() { + buf.shrink_to_fit(); - return Ok(PathBuf::from(OsString::from_vec(buf))); - } + return Ok(PathBuf::from(OsString::from_vec(buf))); + } - // Trigger the internal buffer resizing logic of `Vec` by requiring - // more space than the current capacity. The length is guaranteed to be - // the same as the capacity due to the if statement above. - buf.reserve(1); - } + // Trigger the internal buffer resizing logic of `Vec` by requiring + // more space than the current capacity. The length is guaranteed to be + // the same as the capacity due to the if statement above. + buf.reserve(1); + } + }) } pub fn symlink(original: &Path, link: &Path) -> io::Result<()> { - let original = cstr(original)?; - let link = cstr(link)?; - cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) })?; - Ok(()) + run_path_with_cstr(original, |original| { + run_path_with_cstr(link, |link| { + cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) }).map(|_| ()) + }) + }) } pub fn link(original: &Path, link: &Path) -> io::Result<()> { - let original = cstr(original)?; - let link = cstr(link)?; - cfg_if::cfg_if! { - if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon"))] { - // VxWorks, Redox and ESP-IDF lack `linkat`, so use `link` instead. POSIX leaves - // it implementation-defined whether `link` follows symlinks, so rely on the - // `symlink_hard_link` test in library/std/src/fs/tests.rs to check the behavior. - // Android has `linkat` on newer versions, but we happen to know `link` - // always has the correct behavior, so it's here as well. - cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?; - } else if #[cfg(target_os = "macos")] { - // On MacOS, older versions (<=10.9) lack support for linkat while newer - // versions have it. We want to use linkat if it is available, so we use weak! - // to check. `linkat` is preferable to `link` because it gives us a flag to - // specify how symlinks should be handled. 
We pass 0 as the flags argument, - // meaning it shouldn't follow symlinks. - weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int); - - if let Some(f) = linkat.get() { - cvt(unsafe { f(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?; - } else { - cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?; - }; - } else { - // Where we can, use `linkat` instead of `link`; see the comment above - // this one for details on why. - cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?; - } - } - Ok(()) + run_path_with_cstr(original, |original| { + run_path_with_cstr(link, |link| { + cfg_if::cfg_if! { + if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon"))] { + // VxWorks, Redox and ESP-IDF lack `linkat`, so use `link` instead. POSIX leaves + // it implementation-defined whether `link` follows symlinks, so rely on the + // `symlink_hard_link` test in library/std/src/fs/tests.rs to check the behavior. + // Android has `linkat` on newer versions, but we happen to know `link` + // always has the correct behavior, so it's here as well. + cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?; + } else if #[cfg(target_os = "macos")] { + // On MacOS, older versions (<=10.9) lack support for linkat while newer + // versions have it. We want to use linkat if it is available, so we use weak! + // to check. `linkat` is preferable to `link` because it gives us a flag to + // specify how symlinks should be handled. We pass 0 as the flags argument, + // meaning it shouldn't follow symlinks. + weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int); + + if let Some(f) = linkat.get() { + cvt(unsafe { f(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?; + } else { + cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?; + }; + } else { + // Where we can, use `linkat` instead of `link`; see the comment above + // this one for details on why. + cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?; + } + } + Ok(()) + }) + }) } pub fn stat(p: &Path) -> io::Result<FileAttr> { - let p = cstr(p)?; - - cfg_has_statx! { - if let Some(ret) = unsafe { try_statx( - libc::AT_FDCWD, - p.as_ptr(), - libc::AT_STATX_SYNC_AS_STAT, - libc::STATX_ALL, - ) } { - return ret; + run_path_with_cstr(p, |p| { + cfg_has_statx! { + if let Some(ret) = unsafe { try_statx( + libc::AT_FDCWD, + p.as_ptr(), + libc::AT_STATX_SYNC_AS_STAT, + libc::STATX_ALL, + ) } { + return ret; + } } - } - let mut stat: stat64 = unsafe { mem::zeroed() }; - cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?; - Ok(FileAttr::from_stat64(stat)) + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?; + Ok(FileAttr::from_stat64(stat)) + }) } pub fn lstat(p: &Path) -> io::Result<FileAttr> { - let p = cstr(p)?; - - cfg_has_statx! { - if let Some(ret) = unsafe { try_statx( - libc::AT_FDCWD, - p.as_ptr(), - libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT, - libc::STATX_ALL, - ) } { - return ret; + run_path_with_cstr(p, |p| { + cfg_has_statx! 
{ + if let Some(ret) = unsafe { try_statx( + libc::AT_FDCWD, + p.as_ptr(), + libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT, + libc::STATX_ALL, + ) } { + return ret; + } } - } - let mut stat: stat64 = unsafe { mem::zeroed() }; - cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?; - Ok(FileAttr::from_stat64(stat)) + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?; + Ok(FileAttr::from_stat64(stat)) + }) } pub fn canonicalize(p: &Path) -> io::Result<PathBuf> { - let path = CString::new(p.as_os_str().as_bytes())?; - let buf; - unsafe { - let r = libc::realpath(path.as_ptr(), ptr::null_mut()); - if r.is_null() { - return Err(io::Error::last_os_error()); - } - buf = CStr::from_ptr(r).to_bytes().to_vec(); - libc::free(r as *mut _); + let r = run_path_with_cstr(p, |path| unsafe { + Ok(libc::realpath(path.as_ptr(), ptr::null_mut())) + })?; + if r.is_null() { + return Err(io::Error::last_os_error()); } - Ok(PathBuf::from(OsString::from_vec(buf))) + Ok(PathBuf::from(OsString::from_vec(unsafe { + let buf = CStr::from_ptr(r).to_bytes().to_vec(); + libc::free(r as *mut _); + buf + }))) } fn open_from(from: &Path) -> io::Result<(crate::fs::File, crate::fs::Metadata)> { @@ -1609,9 +1653,9 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { // Opportunistically attempt to create a copy-on-write clone of `from` // using `fclonefileat`. if HAS_FCLONEFILEAT.load(Ordering::Relaxed) { - let to = cstr(to)?; - let clonefile_result = - cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) }); + let clonefile_result = run_path_with_cstr(to, |to| { + cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) }) + }); match clonefile_result { Ok(_) => return Ok(reader_metadata.len()), Err(err) => match err.raw_os_error() { @@ -1655,9 +1699,10 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { } pub fn chown(path: &Path, uid: u32, gid: u32) -> io::Result<()> { - let path = cstr(path)?; - cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?; - Ok(()) + run_path_with_cstr(path, |path| { + cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) }) + .map(|_| ()) + }) } pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> { @@ -1666,16 +1711,15 @@ pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> { } pub fn lchown(path: &Path, uid: u32, gid: u32) -> io::Result<()> { - let path = cstr(path)?; - cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?; - Ok(()) + run_path_with_cstr(path, |path| { + cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) }) + .map(|_| ()) + }) } #[cfg(not(any(target_os = "fuchsia", target_os = "vxworks")))] pub fn chroot(dir: &Path) -> io::Result<()> { - let dir = cstr(dir)?; - cvt(unsafe { libc::chroot(dir.as_ptr()) })?; - Ok(()) + run_path_with_cstr(dir, |dir| cvt(unsafe { libc::chroot(dir.as_ptr()) }).map(|_| ())) } pub use remove_dir_impl::remove_dir_all; @@ -1689,13 +1733,14 @@ mod remove_dir_impl { // Modern implementation using openat(), unlinkat() and fdopendir() #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon", miri)))] mod remove_dir_impl { - use super::{cstr, lstat, Dir, DirEntry, InnerReadDir, ReadDir}; + use super::{lstat, Dir, DirEntry, InnerReadDir, ReadDir}; use crate::ffi::CStr; use crate::io; use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; use crate::os::unix::prelude::{OwnedFd, RawFd}; use 
crate::path::{Path, PathBuf}; use crate::sync::Arc; + use crate::sys::common::small_c_string::run_path_with_cstr; use crate::sys::{cvt, cvt_r}; #[cfg(not(all(target_os = "macos", not(target_arch = "aarch64")),))] @@ -1862,7 +1907,7 @@ mod remove_dir_impl { if attr.file_type().is_symlink() { crate::fs::remove_file(p) } else { - remove_dir_all_recursive(None, &cstr(p)?) + run_path_with_cstr(p, |p| remove_dir_all_recursive(None, &p)) } } diff --git a/library/std/src/sys/unix/io.rs b/library/std/src/sys/unix/io.rs index deb5ee76b..29c340dd3 100644 --- a/library/std/src/sys/unix/io.rs +++ b/library/std/src/sys/unix/io.rs @@ -1,4 +1,5 @@ use crate::marker::PhantomData; +use crate::os::fd::{AsFd, AsRawFd}; use crate::slice; use libc::{c_void, iovec}; @@ -74,3 +75,8 @@ impl<'a> IoSliceMut<'a> { unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) } } } + +pub fn is_terminal(fd: &impl AsFd) -> bool { + let fd = fd.as_fd(); + unsafe { libc::isatty(fd.as_raw_fd()) != 0 } +} diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs index 8f7abb55e..94546ca09 100644 --- a/library/std/src/sys/unix/kernel_copy.rs +++ b/library/std/src/sys/unix/kernel_copy.rs @@ -20,7 +20,7 @@ //! Since those syscalls have requirements that cannot be fully checked in advance and //! gathering additional information about file descriptors would require additional syscalls //! anyway it simply attempts to use them one after another (guided by inaccurate hints) to -//! figure out which one works and and falls back to the generic read-write copy loop if none of them +//! figure out which one works and falls back to the generic read-write copy loop if none of them //! does. //! Once a working syscall is found for a pair of file descriptors it will be called in a loop //! until the copy operation is completed. diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs index f5f92f693..9bb314b70 100644 --- a/library/std/src/sys/unix/locks/mod.rs +++ b/library/std/src/sys/unix/locks/mod.rs @@ -11,21 +11,21 @@ cfg_if::cfg_if! { mod futex_rwlock; mod futex_condvar; pub(crate) use futex_mutex::{Mutex, MovableMutex}; - pub(crate) use futex_rwlock::{RwLock, MovableRwLock}; + pub(crate) use futex_rwlock::MovableRwLock; pub(crate) use futex_condvar::MovableCondvar; } else if #[cfg(target_os = "fuchsia")] { mod fuchsia_mutex; mod futex_rwlock; mod futex_condvar; pub(crate) use fuchsia_mutex::{Mutex, MovableMutex}; - pub(crate) use futex_rwlock::{RwLock, MovableRwLock}; + pub(crate) use futex_rwlock::MovableRwLock; pub(crate) use futex_condvar::MovableCondvar; } else { mod pthread_mutex; mod pthread_rwlock; mod pthread_condvar; pub(crate) use pthread_mutex::{Mutex, MovableMutex}; - pub(crate) use pthread_rwlock::{RwLock, MovableRwLock}; + pub(crate) use pthread_rwlock::MovableRwLock; pub(crate) use pthread_condvar::MovableCondvar; } } diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs index c84e292ea..9055a011c 100644 --- a/library/std/src/sys/unix/mod.rs +++ b/library/std/src/sys/unix/mod.rs @@ -163,17 +163,27 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { // See the other file for docs. NOTE: Make sure to keep them in // sync! 
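The new sys::unix::io::is_terminal helper above is a thin wrapper over isatty(3). A user-level sketch of the same check; it assumes the external libc crate and a Unix target, everything else mirrors the added function:

    use std::os::unix::io::{AsFd, AsRawFd};

    fn is_terminal(fd: &impl AsFd) -> bool {
        // isatty returns 1 when the descriptor refers to a terminal, 0 otherwise.
        unsafe { libc::isatty(fd.as_fd().as_raw_fd()) != 0 }
    }

    fn main() {
        println!("stdout is a terminal: {}", is_terminal(&std::io::stdout()));
        println!("stderr is a terminal: {}", is_terminal(&std::io::stderr()));
    }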
mod sigpipe { + pub const DEFAULT: u8 = 0; pub const INHERIT: u8 = 1; pub const SIG_IGN: u8 = 2; pub const SIG_DFL: u8 = 3; } - let handler = match sigpipe { - sigpipe::INHERIT => None, - sigpipe::SIG_IGN => Some(libc::SIG_IGN), - sigpipe::SIG_DFL => Some(libc::SIG_DFL), + let (sigpipe_attr_specified, handler) = match sigpipe { + sigpipe::DEFAULT => (false, Some(libc::SIG_IGN)), + sigpipe::INHERIT => (true, None), + sigpipe::SIG_IGN => (true, Some(libc::SIG_IGN)), + sigpipe::SIG_DFL => (true, Some(libc::SIG_DFL)), _ => unreachable!(), }; + // The bootstrap compiler doesn't know about sigpipe::DEFAULT, and always passes in + // SIG_IGN. This causes some tests to fail because they expect SIGPIPE to be reset to + // default on process spawning (which doesn't happen if #[unix_sigpipe] is specified). + // Since we can't differentiate between the cases here, treat SIG_IGN as DEFAULT + // unconditionally. + if sigpipe_attr_specified && !(cfg!(bootstrap) && sigpipe == sigpipe::SIG_IGN) { + UNIX_SIGPIPE_ATTR_SPECIFIED.store(true, crate::sync::atomic::Ordering::Relaxed); + } if let Some(handler) = handler { rtassert!(signal(libc::SIGPIPE, handler) != libc::SIG_ERR); } @@ -181,6 +191,26 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { } } +// This is set (up to once) in reset_sigpipe. +#[cfg(not(any( + target_os = "espidf", + target_os = "emscripten", + target_os = "fuchsia", + target_os = "horizon" +)))] +static UNIX_SIGPIPE_ATTR_SPECIFIED: crate::sync::atomic::AtomicBool = + crate::sync::atomic::AtomicBool::new(false); + +#[cfg(not(any( + target_os = "espidf", + target_os = "emscripten", + target_os = "fuchsia", + target_os = "horizon" +)))] +pub(crate) fn unix_sigpipe_attr_specified() -> bool { + UNIX_SIGPIPE_ATTR_SPECIFIED.load(crate::sync::atomic::Ordering::Relaxed) +} + // SAFETY: must be called only once during runtime cleanup. // NOTE: this is not guaranteed to run, for example when the program aborts. pub unsafe fn cleanup() { @@ -352,16 +382,12 @@ cfg_if::cfg_if! { extern "C" {} } else if #[cfg(target_os = "macos")] { #[link(name = "System")] - // res_init and friends require -lresolv on macOS/iOS. 
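reset_sigpipe above now distinguishes "no #[unix_sigpipe] attribute" (DEFAULT) from an explicit choice and records the latter in a process-global flag that the spawn code consults later. A condensed sketch of that bookkeeping; the constants and flag name are copied from the hunk, while the function signatures are simplifications that omit the signal(2) call and the bootstrap special case:

    use std::sync::atomic::{AtomicBool, Ordering::Relaxed};

    const DEFAULT: u8 = 0;
    const INHERIT: u8 = 1;
    const SIG_IGN: u8 = 2;
    const SIG_DFL: u8 = 3;

    // Written at most once during runtime init, read later when spawning children.
    static UNIX_SIGPIPE_ATTR_SPECIFIED: AtomicBool = AtomicBool::new(false);

    fn record_sigpipe_choice(sigpipe: u8) {
        let attr_specified = match sigpipe {
            DEFAULT => false,
            INHERIT | SIG_IGN | SIG_DFL => true,
            _ => unreachable!(),
        };
        if attr_specified {
            UNIX_SIGPIPE_ATTR_SPECIFIED.store(true, Relaxed);
        }
    }

    fn unix_sigpipe_attr_specified() -> bool {
        UNIX_SIGPIPE_ATTR_SPECIFIED.load(Relaxed)
    }

    fn main() {
        record_sigpipe_choice(DEFAULT);
        assert!(!unix_sigpipe_attr_specified());
        record_sigpipe_choice(SIG_DFL);
        assert!(unix_sigpipe_attr_specified());
    }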
- // See #41582 and https://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html - #[link(name = "resolv")] extern "C" {} } else if #[cfg(any(target_os = "ios", target_os = "watchos"))] { #[link(name = "System")] #[link(name = "objc")] #[link(name = "Security", kind = "framework")] #[link(name = "Foundation", kind = "framework")] - #[link(name = "resolv")] extern "C" {} } else if #[cfg(target_os = "fuchsia")] { #[link(name = "zircon")] diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs index 46545a083..2f2663db6 100644 --- a/library/std/src/sys/unix/os.rs +++ b/library/std/src/sys/unix/os.rs @@ -7,6 +7,7 @@ mod tests; use crate::os::unix::prelude::*; +use crate::convert::TryFrom; use crate::error::Error as StdError; use crate::ffi::{CStr, CString, OsStr, OsString}; use crate::fmt; @@ -17,10 +18,11 @@ use crate::path::{self, PathBuf}; use crate::ptr; use crate::slice; use crate::str; +use crate::sync::{PoisonError, RwLock}; +use crate::sys::common::small_c_string::{run_path_with_cstr, run_with_cstr}; use crate::sys::cvt; use crate::sys::fd; use crate::sys::memchr; -use crate::sys_common::rwlock::{StaticRwLock, StaticRwLockReadGuard}; use crate::vec; #[cfg(all(target_env = "gnu", not(target_os = "vxworks")))] @@ -125,7 +127,9 @@ pub fn error_string(errno: i32) -> String { } let p = p as *const _; - str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned() + // We can't always expect a UTF-8 environment. When we don't get that luxury, + // it's better to give a low-quality error message than none at all. + String::from_utf8_lossy(CStr::from_ptr(p).to_bytes()).into() } } @@ -168,12 +172,8 @@ pub fn chdir(p: &path::Path) -> io::Result<()> { #[cfg(not(target_os = "espidf"))] pub fn chdir(p: &path::Path) -> io::Result<()> { - let p: &OsStr = p.as_ref(); - let p = CString::new(p.as_bytes())?; - if unsafe { libc::chdir(p.as_ptr()) } != 0 { - return Err(io::Error::last_os_error()); - } - Ok(()) + let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?; + if result == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } pub struct SplitPaths<'a> { @@ -501,10 +501,10 @@ pub unsafe fn environ() -> *mut *const *const c_char { ptr::addr_of_mut!(environ) } -static ENV_LOCK: StaticRwLock = StaticRwLock::new(); +static ENV_LOCK: RwLock<()> = RwLock::new(()); -pub fn env_read_lock() -> StaticRwLockReadGuard { - ENV_LOCK.read() +pub fn env_read_lock() -> impl Drop { + ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner) } /// Returns a vector of (variable, value) byte-vector pairs for all the @@ -546,35 +546,32 @@ pub fn env() -> Env { pub fn getenv(k: &OsStr) -> Option<OsString> { // environment variables with a nul byte can't be set, so their value is // always None as well - let k = CString::new(k.as_bytes()).ok()?; - unsafe { + let s = run_with_cstr(k.as_bytes(), |k| { let _guard = env_read_lock(); - let s = libc::getenv(k.as_ptr()) as *const libc::c_char; - if s.is_null() { - None - } else { - Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec())) - } + Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char) + }) + .ok()?; + if s.is_null() { + None + } else { + Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec())) } } pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { - let k = CString::new(k.as_bytes())?; - let v = CString::new(v.as_bytes())?; - - unsafe { - let _guard = ENV_LOCK.write(); - cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) - } + run_with_cstr(k.as_bytes(), |k| { + 
run_with_cstr(v.as_bytes(), |v| { + let _guard = ENV_LOCK.write(); + cvt(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop) + }) + }) } pub fn unsetenv(n: &OsStr) -> io::Result<()> { - let nbuf = CString::new(n.as_bytes())?; - - unsafe { + run_with_cstr(n.as_bytes(), |nbuf| { let _guard = ENV_LOCK.write(); - cvt(libc::unsetenv(nbuf.as_ptr())).map(drop) - } + cvt(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop) + }) } #[cfg(not(target_os = "espidf"))] diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs index 2834ee0ac..848adca78 100644 --- a/library/std/src/sys/unix/process/process_common.rs +++ b/library/std/src/sys/unix/process/process_common.rs @@ -39,10 +39,12 @@ cfg_if::cfg_if! { // https://github.com/aosp-mirror/platform_bionic/blob/ad8dcd6023294b646e5a8288c0ed431b0845da49/libc/include/android/legacy_signal_inlines.h cfg_if::cfg_if! { if #[cfg(target_os = "android")] { + #[allow(dead_code)] pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int { set.write_bytes(0u8, 1); return 0; } + #[allow(dead_code)] pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int { use crate::{ diff --git a/library/std/src/sys/unix/process/process_common/tests.rs b/library/std/src/sys/unix/process/process_common/tests.rs index d176b3401..03631e4e3 100644 --- a/library/std/src/sys/unix/process/process_common/tests.rs +++ b/library/std/src/sys/unix/process/process_common/tests.rs @@ -31,41 +31,54 @@ macro_rules! t { ignore )] fn test_process_mask() { - unsafe { - // Test to make sure that a signal mask does not get inherited. - let mut cmd = Command::new(OsStr::new("cat")); - - let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit(); - let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit(); - t!(cvt(sigemptyset(set.as_mut_ptr()))); - t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT))); - t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old_set.as_mut_ptr()))); - - cmd.stdin(Stdio::MakePipe); - cmd.stdout(Stdio::MakePipe); - - let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true)); - let stdin_write = pipes.stdin.take().unwrap(); - let stdout_read = pipes.stdout.take().unwrap(); - - t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut()))); - - t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT))); - // We need to wait until SIGINT is definitely delivered. The - // easiest way is to write something to cat, and try to read it - // back: if SIGINT is unmasked, it'll get delivered when cat is - // next scheduled. - let _ = stdin_write.write(b"Hello"); - drop(stdin_write); - - // Either EOF or failure (EPIPE) is okay. - let mut buf = [0; 5]; - if let Ok(ret) = stdout_read.read(&mut buf) { - assert_eq!(ret, 0); + // Test to make sure that a signal mask *does* get inherited. 
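The reworked test_process_mask (continued below) sets the calling thread's signal mask to just SIGINT before spawning cat and then checks that the child inherits it. A standalone helper showing the pthread_sigmask save/restore dance the test performs; it assumes the external libc crate and a non-Android Unix target, since the patch supplies its own sigemptyset/sigaddset wrappers for Android:

    use std::mem::MaybeUninit;
    use std::ptr;

    fn with_sigint_only_mask<T>(f: impl FnOnce() -> T) -> T {
        unsafe {
            let mut set = MaybeUninit::<libc::sigset_t>::uninit();
            let mut old = MaybeUninit::<libc::sigset_t>::uninit();
            // Build a mask containing only SIGINT.
            assert_eq!(libc::sigemptyset(set.as_mut_ptr()), 0);
            assert_eq!(libc::sigaddset(set.as_mut_ptr(), libc::SIGINT), 0);
            // Replace the current mask (SIG_SETMASK, as in the test) and keep the old one.
            assert_eq!(
                libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old.as_mut_ptr()),
                0
            );
            let result = f();
            // Restore the previous mask.
            assert_eq!(
                libc::pthread_sigmask(libc::SIG_SETMASK, old.as_ptr(), ptr::null_mut()),
                0
            );
            result
        }
    }

    fn main() {
        with_sigint_only_mask(|| println!("SIGINT is blocked in this scope"));
    }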
+ fn test_inner(mut cmd: Command) { + unsafe { + let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit(); + let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit(); + t!(cvt(sigemptyset(set.as_mut_ptr()))); + t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT))); + t!(cvt_nz(libc::pthread_sigmask( + libc::SIG_SETMASK, + set.as_ptr(), + old_set.as_mut_ptr() + ))); + + cmd.stdin(Stdio::MakePipe); + cmd.stdout(Stdio::MakePipe); + + let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true)); + let stdin_write = pipes.stdin.take().unwrap(); + let stdout_read = pipes.stdout.take().unwrap(); + + t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut()))); + + t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT))); + // We need to wait until SIGINT is definitely delivered. The + // easiest way is to write something to cat, and try to read it + // back: if SIGINT is unmasked, it'll get delivered when cat is + // next scheduled. + let _ = stdin_write.write(b"Hello"); + drop(stdin_write); + + // Exactly 5 bytes should be read. + let mut buf = [0; 5]; + let ret = t!(stdout_read.read(&mut buf)); + assert_eq!(ret, 5); + assert_eq!(&buf, b"Hello"); + + t!(cat.wait()); } - - t!(cat.wait()); } + + // A plain `Command::new` uses the posix_spawn path on many platforms. + let cmd = Command::new(OsStr::new("cat")); + test_inner(cmd); + + // Specifying `pre_exec` forces the fork/exec path. + let mut cmd = Command::new(OsStr::new("cat")); + unsafe { cmd.pre_exec(Box::new(|| Ok(()))) }; + test_inner(cmd); } #[test] diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs index 73f5d3a61..66ea3db20 100644 --- a/library/std/src/sys/unix/process/process_fuchsia.rs +++ b/library/std/src/sys/unix/process/process_fuchsia.rs @@ -287,7 +287,7 @@ impl ExitStatus { // SuS and POSIX) say a wait status is, but Fuchsia apparently uses a u64, so it won't // necessarily fit. // - // It seems to me that that the right answer would be to provide std::os::fuchsia with its + // It seems to me that the right answer would be to provide std::os::fuchsia with its // own ExitStatusExt, rather that trying to provide a not very convincing imitation of // Unix. Ie, std::os::unix::process:ExitStatusExt ought not to exist on Fuchsia. But // fixing this up that is beyond the scope of my efforts now. diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs index 26ae62817..56a805cef 100644 --- a/library/std/src/sys/unix/process/process_unix.rs +++ b/library/std/src/sys/unix/process/process_unix.rs @@ -2,7 +2,6 @@ use crate::fmt; use crate::io::{self, Error, ErrorKind}; use crate::mem; use crate::num::NonZeroI32; -use crate::ptr; use crate::sys; use crate::sys::cvt; use crate::sys::process::process_common::*; @@ -310,7 +309,7 @@ impl Command { //FIXME: Redox kernel does not support setgroups yet #[cfg(not(target_os = "redox"))] if libc::getuid() == 0 && self.get_groups().is_none() { - cvt(libc::setgroups(0, ptr::null()))?; + cvt(libc::setgroups(0, crate::ptr::null()))?; } cvt(libc::setuid(u as uid_t))?; } @@ -326,30 +325,26 @@ impl Command { // emscripten has no signal support. #[cfg(not(target_os = "emscripten"))] { - use crate::mem::MaybeUninit; - use crate::sys::cvt_nz; - // Reset signal handling so the child process starts in a - // standardized state. libstd ignores SIGPIPE, and signal-handling - // libraries often set a mask. 
Child processes inherit ignored - // signals and the signal mask from their parent, but most - // UNIX programs do not reset these things on their own, so we - // need to clean things up now to avoid confusing the program - // we're about to run. - let mut set = MaybeUninit::<libc::sigset_t>::uninit(); - cvt(sigemptyset(set.as_mut_ptr()))?; - cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), ptr::null_mut()))?; - - #[cfg(target_os = "android")] // see issue #88585 - { - let mut action: libc::sigaction = mem::zeroed(); - action.sa_sigaction = libc::SIG_DFL; - cvt(libc::sigaction(libc::SIGPIPE, &action, ptr::null_mut()))?; - } - #[cfg(not(target_os = "android"))] - { - let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL); - if ret == libc::SIG_ERR { - return Err(io::Error::last_os_error()); + // Inherit the signal mask from the parent rather than resetting it (i.e. do not call + // pthread_sigmask). + + // If #[unix_sigpipe] is specified, don't reset SIGPIPE to SIG_DFL. + // If #[unix_sigpipe] is not specified, reset SIGPIPE to SIG_DFL for backward compatibility. + // + // #[unix_sigpipe] is an opportunity to change the default here. + if !crate::sys::unix_sigpipe_attr_specified() { + #[cfg(target_os = "android")] // see issue #88585 + { + let mut action: libc::sigaction = mem::zeroed(); + action.sa_sigaction = libc::SIG_DFL; + cvt(libc::sigaction(libc::SIGPIPE, &action, crate::ptr::null_mut()))?; + } + #[cfg(not(target_os = "android"))] + { + let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL); + if ret == libc::SIG_ERR { + return Err(io::Error::last_os_error()); + } } } } @@ -411,7 +406,7 @@ impl Command { envp: Option<&CStringArray>, ) -> io::Result<Option<Process>> { use crate::mem::MaybeUninit; - use crate::sys::{self, cvt_nz}; + use crate::sys::{self, cvt_nz, unix_sigpipe_attr_specified}; if self.get_gid().is_some() || self.get_uid().is_some() @@ -531,13 +526,24 @@ impl Command { cvt_nz(libc::posix_spawnattr_setpgroup(attrs.0.as_mut_ptr(), pgroup))?; } - let mut set = MaybeUninit::<libc::sigset_t>::uninit(); - cvt(sigemptyset(set.as_mut_ptr()))?; - cvt_nz(libc::posix_spawnattr_setsigmask(attrs.0.as_mut_ptr(), set.as_ptr()))?; - cvt(sigaddset(set.as_mut_ptr(), libc::SIGPIPE))?; - cvt_nz(libc::posix_spawnattr_setsigdefault(attrs.0.as_mut_ptr(), set.as_ptr()))?; + // Inherit the signal mask from this process rather than resetting it (i.e. do not call + // posix_spawnattr_setsigmask). + + // If #[unix_sigpipe] is specified, don't reset SIGPIPE to SIG_DFL. + // If #[unix_sigpipe] is not specified, reset SIGPIPE to SIG_DFL for backward compatibility. + // + // #[unix_sigpipe] is an opportunity to change the default here. 
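In the fork/exec child path above, the signal mask is now inherited as-is, and SIGPIPE is only reset to SIG_DFL when #[unix_sigpipe] was not specified, preserving the old default behaviour. The reset itself boils down to a single signal(2) call, roughly the non-Android branch of the hunk; this sketch assumes the external libc crate and a Unix target:

    fn reset_sigpipe_to_default() -> std::io::Result<()> {
        let ret = unsafe { libc::signal(libc::SIGPIPE, libc::SIG_DFL) };
        if ret == libc::SIG_ERR {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }

    fn main() -> std::io::Result<()> {
        reset_sigpipe_to_default()
    }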
+ if !unix_sigpipe_attr_specified() { + let mut default_set = MaybeUninit::<libc::sigset_t>::uninit(); + cvt(sigemptyset(default_set.as_mut_ptr()))?; + cvt(sigaddset(default_set.as_mut_ptr(), libc::SIGPIPE))?; + cvt_nz(libc::posix_spawnattr_setsigdefault( + attrs.0.as_mut_ptr(), + default_set.as_ptr(), + ))?; + flags |= libc::POSIX_SPAWN_SETSIGDEF; + } - flags |= libc::POSIX_SPAWN_SETSIGDEF | libc::POSIX_SPAWN_SETSIGMASK; cvt_nz(libc::posix_spawnattr_setflags(attrs.0.as_mut_ptr(), flags as _))?; // Make sure we synchronize access to the global `environ` resource @@ -822,14 +828,14 @@ impl crate::os::linux::process::ChildExt for crate::process::Child { self.handle .pidfd .as_ref() - .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created.")) + .ok_or_else(|| Error::new(ErrorKind::Uncategorized, "No pidfd was created.")) } fn take_pidfd(&mut self) -> io::Result<PidFd> { self.handle .pidfd .take() - .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created.")) + .ok_or_else(|| Error::new(ErrorKind::Uncategorized, "No pidfd was created.")) } } diff --git a/library/std/src/sys/unix/stdio.rs b/library/std/src/sys/unix/stdio.rs index 329f9433d..b3626c564 100644 --- a/library/std/src/sys/unix/stdio.rs +++ b/library/std/src/sys/unix/stdio.rs @@ -1,6 +1,6 @@ use crate::io::{self, IoSlice, IoSliceMut}; use crate::mem::ManuallyDrop; -use crate::os::unix::io::{AsFd, BorrowedFd, FromRawFd}; +use crate::os::unix::io::FromRawFd; use crate::sys::fd::FileDesc; pub struct Stdin(()); @@ -91,51 +91,3 @@ pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE; pub fn panic_output() -> Option<impl io::Write> { Some(Stderr::new()) } - -#[stable(feature = "io_safety", since = "1.63.0")] -impl AsFd for io::Stdin { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) } - } -} - -#[stable(feature = "io_safety", since = "1.63.0")] -impl<'a> AsFd for io::StdinLock<'a> { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) } - } -} - -#[stable(feature = "io_safety", since = "1.63.0")] -impl AsFd for io::Stdout { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) } - } -} - -#[stable(feature = "io_safety", since = "1.63.0")] -impl<'a> AsFd for io::StdoutLock<'a> { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) } - } -} - -#[stable(feature = "io_safety", since = "1.63.0")] -impl AsFd for io::Stderr { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) } - } -} - -#[stable(feature = "io_safety", since = "1.63.0")] -impl<'a> AsFd for io::StderrLock<'a> { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) } - } -} diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs index f6b627afc..c1d30dd9d 100644 --- a/library/std/src/sys/unix/thread.rs +++ b/library/std/src/sys/unix/thread.rs @@ -137,7 +137,9 @@ impl Thread { unsafe { // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20. let name = truncate_cstr(name, TASK_COMM_LEN); - libc::pthread_setname_np(libc::pthread_self(), name.as_ptr()); + let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr()); + // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. 
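Thread::set_name below now debug-asserts that pthread_setname_np succeeded, since there is no useful way to report the error to the caller. A user-level sketch of the glibc-style call; it assumes the external libc crate and a Linux target, where the real code first truncates the name to TASK_COMM_LEN, and other platforms take different arguments, as the surrounding cfg branches show:

    use std::ffi::CStr;

    fn set_current_thread_name(name: &CStr) {
        unsafe {
            // Linux rejects names longer than 15 bytes plus the NUL; "worker" fits.
            let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
            // No good way to propagate errors, so only check in debug builds.
            debug_assert_eq!(res, 0);
        }
    }

    fn main() {
        set_current_thread_name(CStr::from_bytes_with_nul(b"worker\0").unwrap());
    }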
+ debug_assert_eq!(res, 0); } } @@ -152,20 +154,22 @@ impl Thread { pub fn set_name(name: &CStr) { unsafe { let name = truncate_cstr(name, libc::MAXTHREADNAMESIZE); - libc::pthread_setname_np(name.as_ptr()); + let res = libc::pthread_setname_np(name.as_ptr()); + // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. + debug_assert_eq!(res, 0); } } #[cfg(target_os = "netbsd")] pub fn set_name(name: &CStr) { - use crate::ffi::CString; - let cname = CString::new(&b"%s"[..]).unwrap(); unsafe { - libc::pthread_setname_np( + let cname = CStr::from_bytes_with_nul_unchecked(b"%s\0".as_slice()); + let res = libc::pthread_setname_np( libc::pthread_self(), cname.as_ptr(), name.as_ptr() as *mut libc::c_void, ); + debug_assert_eq!(res, 0); } } @@ -178,9 +182,8 @@ impl Thread { } if let Some(f) = pthread_setname_np.get() { - unsafe { - f(libc::pthread_self(), name.as_ptr()); - } + let res = unsafe { f(libc::pthread_self(), name.as_ptr()) }; + debug_assert_eq!(res, 0); } } @@ -785,6 +788,16 @@ pub mod guard { const GUARD_PAGES: usize = 1; let guard = guardaddr..guardaddr + GUARD_PAGES * page_size; Some(guard) + } else if cfg!(target_os = "openbsd") { + // OpenBSD stack already includes a guard page, and stack is + // immutable. + // + // We'll just note where we expect rlimit to start + // faulting, so our handler can report "stack overflow", and + // trust that the kernel's own stack guard will work. + let stackptr = get_stack_start_aligned()?; + let stackaddr = stackptr.addr(); + Some(stackaddr - page_size..stackaddr) } else { // Reallocate the last page of the stack. // This ensures SIGBUS will be raised on diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs index 6e8be2a91..d7fd2130f 100644 --- a/library/std/src/sys/unix/thread_local_dtor.rs +++ b/library/std/src/sys/unix/thread_local_dtor.rs @@ -17,6 +17,7 @@ target_os = "redox", target_os = "emscripten" ))] +#[cfg_attr(target_family = "wasm", allow(unused))] // might remain unused depending on target details (e.g. wasm32-unknown-emscripten) pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { use crate::mem; use crate::sys_common::thread_local_dtor::register_dtor_fallback; diff --git a/library/std/src/sys/unix/thread_local_key.rs b/library/std/src/sys/unix/thread_local_key.rs index 2c5b94b1e..2b2d079ee 100644 --- a/library/std/src/sys/unix/thread_local_key.rs +++ b/library/std/src/sys/unix/thread_local_key.rs @@ -27,8 +27,3 @@ pub unsafe fn destroy(key: Key) { let r = libc::pthread_key_delete(key); debug_assert_eq!(r, 0); } - -#[inline] -pub fn requires_synchronized_create() -> bool { - false -} diff --git a/library/std/src/sys/unix/thread_parker/darwin.rs b/library/std/src/sys/unix/thread_parker/darwin.rs new file mode 100644 index 000000000..2f5356fe2 --- /dev/null +++ b/library/std/src/sys/unix/thread_parker/darwin.rs @@ -0,0 +1,131 @@ +//! Thread parking for Darwin-based systems. +//! +//! Darwin actually has futex syscalls (`__ulock_wait`/`__ulock_wake`), but they +//! cannot be used in `std` because they are non-public (their use will lead to +//! rejection from the App Store) and because they are only available starting +//! with macOS version 10.12, even though the minimum target version is 10.7. +//! +//! Therefore, we need to look for other synchronization primitives. Luckily, Darwin +//! supports semaphores, which allow us to implement the behaviour we need with +//! 
only one primitive (as opposed to a mutex-condvar pair). We use the semaphore +//! provided by libdispatch, as the underlying Mach semaphore is only dubiously +//! public. + +use crate::pin::Pin; +use crate::sync::atomic::{ + AtomicI8, + Ordering::{Acquire, Release}, +}; +use crate::time::Duration; + +type dispatch_semaphore_t = *mut crate::ffi::c_void; +type dispatch_time_t = u64; + +const DISPATCH_TIME_NOW: dispatch_time_t = 0; +const DISPATCH_TIME_FOREVER: dispatch_time_t = !0; + +// Contained in libSystem.dylib, which is linked by default. +extern "C" { + fn dispatch_time(when: dispatch_time_t, delta: i64) -> dispatch_time_t; + fn dispatch_semaphore_create(val: isize) -> dispatch_semaphore_t; + fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) -> isize; + fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) -> isize; + fn dispatch_release(object: *mut crate::ffi::c_void); +} + +const EMPTY: i8 = 0; +const NOTIFIED: i8 = 1; +const PARKED: i8 = -1; + +pub struct Parker { + semaphore: dispatch_semaphore_t, + state: AtomicI8, +} + +unsafe impl Sync for Parker {} +unsafe impl Send for Parker {} + +impl Parker { + pub unsafe fn new(parker: *mut Parker) { + let semaphore = dispatch_semaphore_create(0); + assert!( + !semaphore.is_null(), + "failed to create dispatch semaphore for thread synchronization" + ); + parker.write(Parker { semaphore, state: AtomicI8::new(EMPTY) }) + } + + // Does not need `Pin`, but other implementation do. + pub unsafe fn park(self: Pin<&Self>) { + // The semaphore counter must be zero at this point, because unparking + // threads will not actually increase it until we signalled that we + // are waiting. + + // Change NOTIFIED to EMPTY and EMPTY to PARKED. + if self.state.fetch_sub(1, Acquire) == NOTIFIED { + return; + } + + // Another thread may increase the semaphore counter from this point on. + // If it is faster than us, we will decrement it again immediately below. + // If we are faster, we wait. + + // Ensure that the semaphore counter has actually been decremented, even + // if the call timed out for some reason. + while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {} + + // At this point, the semaphore counter is zero again. + + // We were definitely woken up, so we don't need to check the state. + // Still, we need to reset the state using a swap to observe the state + // change with acquire ordering. + self.state.swap(EMPTY, Acquire); + } + + // Does not need `Pin`, but other implementation do. + pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { + if self.state.fetch_sub(1, Acquire) == NOTIFIED { + return; + } + + let nanos = dur.as_nanos().try_into().unwrap_or(i64::MAX); + let timeout = dispatch_time(DISPATCH_TIME_NOW, nanos); + + let timeout = dispatch_semaphore_wait(self.semaphore, timeout) != 0; + + let state = self.state.swap(EMPTY, Acquire); + if state == NOTIFIED && timeout { + // If the state was NOTIFIED but semaphore_wait returned without + // decrementing the count because of a timeout, it means another + // thread is about to call semaphore_signal. We must wait for that + // to happen to ensure the semaphore count is reset. + while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {} + } else { + // Either a timeout occurred and we reset the state before any thread + // tried to wake us up, or we were woken up and reset the state, + // making sure to observe the state change with acquire ordering. + // Either way, the semaphore counter is now zero again. 
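[Editorial sketch, not part of the diff: a standalone use of the libdispatch semaphore this parker is built on, with the extern declarations copied from the new file. macOS-only and purely illustrative; error handling is minimal.]

    #![allow(non_camel_case_types)]
    use std::ffi::c_void;
    use std::thread;

    type dispatch_semaphore_t = *mut c_void;
    type dispatch_time_t = u64;
    const DISPATCH_TIME_FOREVER: dispatch_time_t = !0;

    extern "C" {
        fn dispatch_semaphore_create(val: isize) -> dispatch_semaphore_t;
        fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) -> isize;
        fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) -> isize;
        fn dispatch_release(object: *mut c_void);
    }

    fn main() {
        let sema = unsafe { dispatch_semaphore_create(0) };
        assert!(!sema.is_null());
        // Raw pointers are not Send, so hand the address to the other thread as a usize.
        let addr = sema as usize;
        let t = thread::spawn(move || unsafe {
            dispatch_semaphore_signal(addr as dispatch_semaphore_t);
        });
        // Blocks until the spawned thread signals; the parker wraps the same call
        // in a retry loop to park the current thread.
        unsafe { dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER) };
        t.join().unwrap();
        unsafe { dispatch_release(sema) };
    }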
+ } + } + + // Does not need `Pin`, but other implementation do. + pub fn unpark(self: Pin<&Self>) { + let state = self.state.swap(NOTIFIED, Release); + if state == PARKED { + unsafe { + dispatch_semaphore_signal(self.semaphore); + } + } + } +} + +impl Drop for Parker { + fn drop(&mut self) { + // SAFETY: + // We always ensure that the semaphore count is reset, so this will + // never cause an exception. + unsafe { + dispatch_release(self.semaphore); + } + } +} diff --git a/library/std/src/sys/unix/thread_parker/mod.rs b/library/std/src/sys/unix/thread_parker/mod.rs index e2453580d..35f1e68a8 100644 --- a/library/std/src/sys/unix/thread_parker/mod.rs +++ b/library/std/src/sys/unix/thread_parker/mod.rs @@ -11,7 +11,18 @@ )))] cfg_if::cfg_if! { - if #[cfg(target_os = "netbsd")] { + if #[cfg(all( + any( + target_os = "macos", + target_os = "ios", + target_os = "watchos", + target_os = "tvos", + ), + not(miri), + ))] { + mod darwin; + pub use darwin::Parker; + } else if #[cfg(target_os = "netbsd")] { mod netbsd; pub use netbsd::Parker; } else { diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs index dff973f59..cca9c6767 100644 --- a/library/std/src/sys/unix/time.rs +++ b/library/std/src/sys/unix/time.rs @@ -7,6 +7,12 @@ const NSEC_PER_SEC: u64 = 1_000_000_000; pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(transparent)] +#[rustc_layout_scalar_valid_range_start(0)] +#[rustc_layout_scalar_valid_range_end(999_999_999)] +struct Nanoseconds(u32); + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct SystemTime { pub(in crate::sys::unix) t: Timespec, } @@ -14,7 +20,7 @@ pub struct SystemTime { #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub(in crate::sys::unix) struct Timespec { tv_sec: i64, - tv_nsec: i64, + tv_nsec: Nanoseconds, } impl SystemTime { @@ -46,18 +52,20 @@ impl fmt::Debug for SystemTime { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SystemTime") .field("tv_sec", &self.t.tv_sec) - .field("tv_nsec", &self.t.tv_nsec) + .field("tv_nsec", &self.t.tv_nsec.0) .finish() } } impl Timespec { pub const fn zero() -> Timespec { - Timespec { tv_sec: 0, tv_nsec: 0 } + Timespec::new(0, 0) } - fn new(tv_sec: i64, tv_nsec: i64) -> Timespec { - Timespec { tv_sec, tv_nsec } + const fn new(tv_sec: i64, tv_nsec: i64) -> Timespec { + assert!(tv_nsec >= 0 && tv_nsec < NSEC_PER_SEC as i64); + // SAFETY: The assert above checks tv_nsec is within the valid range + Timespec { tv_sec, tv_nsec: unsafe { Nanoseconds(tv_nsec as u32) } } } pub fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> { @@ -75,12 +83,12 @@ impl Timespec { // // Ideally this code could be rearranged such that it more // directly expresses the lower-cost behavior we want from it. - let (secs, nsec) = if self.tv_nsec >= other.tv_nsec { - ((self.tv_sec - other.tv_sec) as u64, (self.tv_nsec - other.tv_nsec) as u32) + let (secs, nsec) = if self.tv_nsec.0 >= other.tv_nsec.0 { + ((self.tv_sec - other.tv_sec) as u64, self.tv_nsec.0 - other.tv_nsec.0) } else { ( (self.tv_sec - other.tv_sec - 1) as u64, - self.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.tv_nsec as u32, + self.tv_nsec.0 + (NSEC_PER_SEC as u32) - other.tv_nsec.0, ) }; @@ -102,7 +110,7 @@ impl Timespec { // Nano calculations can't overflow because nanos are <1B which fit // in a u32. 
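[Editorial sketch, not part of the diff: the invariant the new `Nanoseconds` newtype encodes (always below one billion), restated in stable Rust without the rustc-internal layout-range attributes, plus the nanosecond carry that the checked addition below performs. The `new` and `checked_add` helpers here are illustrative only.]

    const NSEC_PER_SEC: u32 = 1_000_000_000;

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct Nanoseconds(u32);

    impl Nanoseconds {
        fn new(nanos: u32) -> Option<Self> {
            (nanos < NSEC_PER_SEC).then_some(Nanoseconds(nanos))
        }
    }

    /// Adds whole seconds and sub-second nanoseconds to a timespec-like pair,
    /// carrying into the seconds so the nanosecond invariant keeps holding.
    fn checked_add(secs: i64, nsec: Nanoseconds, add_secs: u64, add_nanos: u32) -> Option<(i64, Nanoseconds)> {
        debug_assert!(add_nanos < NSEC_PER_SEC);
        let mut secs = secs.checked_add(i64::try_from(add_secs).ok()?)?;
        // Both operands are below 1e9, so their sum cannot overflow a u32.
        let mut nanos = nsec.0 + add_nanos;
        if nanos >= NSEC_PER_SEC {
            nanos -= NSEC_PER_SEC;
            secs = secs.checked_add(1)?;
        }
        Some((secs, Nanoseconds(nanos)))
    }

    fn main() {
        assert!(Nanoseconds::new(1_000_000_000).is_none());
        assert_eq!(checked_add(0, Nanoseconds(999_999_999), 0, 2), Some((1, Nanoseconds(1))));
    }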
- let mut nsec = other.subsec_nanos() + self.tv_nsec as u32; + let mut nsec = other.subsec_nanos() + self.tv_nsec.0; if nsec >= NSEC_PER_SEC as u32 { nsec -= NSEC_PER_SEC as u32; secs = secs.checked_add(1)?; @@ -118,7 +126,7 @@ impl Timespec { .and_then(|secs| self.tv_sec.checked_sub(secs))?; // Similar to above, nanos can't overflow. - let mut nsec = self.tv_nsec as i32 - other.subsec_nanos() as i32; + let mut nsec = self.tv_nsec.0 as i32 - other.subsec_nanos() as i32; if nsec < 0 { nsec += NSEC_PER_SEC as i32; secs = secs.checked_sub(1)?; @@ -130,7 +138,7 @@ impl Timespec { pub fn to_timespec(&self) -> Option<libc::timespec> { Some(libc::timespec { tv_sec: self.tv_sec.try_into().ok()?, - tv_nsec: self.tv_nsec.try_into().ok()?, + tv_nsec: self.tv_nsec.0.try_into().ok()?, }) } } @@ -293,7 +301,7 @@ mod inner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Instant") .field("tv_sec", &self.t.tv_sec) - .field("tv_nsec", &self.t.tv_nsec) + .field("tv_nsec", &self.t.tv_nsec.0) .finish() } } @@ -334,7 +342,7 @@ mod inner { let mut t = MaybeUninit::uninit(); cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap(); let t = unsafe { t.assume_init() }; - return Timespec { tv_sec: t.tv_sec, tv_nsec: t.tv_nsec as i64 }; + return Timespec::new(t.tv_sec, t.tv_nsec as i64); } } diff --git a/library/std/src/sys/unsupported/io.rs b/library/std/src/sys/unsupported/io.rs index d5f475b43..82610ffab 100644 --- a/library/std/src/sys/unsupported/io.rs +++ b/library/std/src/sys/unsupported/io.rs @@ -45,3 +45,7 @@ impl<'a> IoSliceMut<'a> { self.0 } } + +pub fn is_terminal<T>(_: &T) -> bool { + false +} diff --git a/library/std/src/sys/unsupported/locks/condvar.rs b/library/std/src/sys/unsupported/locks/condvar.rs index e703fd0d2..527a26a12 100644 --- a/library/std/src/sys/unsupported/locks/condvar.rs +++ b/library/std/src/sys/unsupported/locks/condvar.rs @@ -7,6 +7,7 @@ pub type MovableCondvar = Condvar; impl Condvar { #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> Condvar { Condvar {} } diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs index d412ff152..602a2d623 100644 --- a/library/std/src/sys/unsupported/locks/mod.rs +++ b/library/std/src/sys/unsupported/locks/mod.rs @@ -3,4 +3,4 @@ mod mutex; mod rwlock; pub use condvar::{Condvar, MovableCondvar}; pub use mutex::{MovableMutex, Mutex}; -pub use rwlock::{MovableRwLock, RwLock}; +pub use rwlock::MovableRwLock; diff --git a/library/std/src/sys/unsupported/locks/mutex.rs b/library/std/src/sys/unsupported/locks/mutex.rs index 2be0b34b9..87ea475c6 100644 --- a/library/std/src/sys/unsupported/locks/mutex.rs +++ b/library/std/src/sys/unsupported/locks/mutex.rs @@ -12,6 +12,7 @@ unsafe impl Sync for Mutex {} // no threads on this platform impl Mutex { #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> Mutex { Mutex { locked: Cell::new(false) } } diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs index aca5fb715..5292691b9 100644 --- a/library/std/src/sys/unsupported/locks/rwlock.rs +++ b/library/std/src/sys/unsupported/locks/rwlock.rs @@ -12,6 +12,7 @@ unsafe impl Sync for RwLock {} // no threads on this platform impl RwLock { #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> RwLock { RwLock { mode: Cell::new(0) } } diff --git 
a/library/std/src/sys/unsupported/thread_local_dtor.rs b/library/std/src/sys/unsupported/thread_local_dtor.rs index 85d660983..84660ea58 100644 --- a/library/std/src/sys/unsupported/thread_local_dtor.rs +++ b/library/std/src/sys/unsupported/thread_local_dtor.rs @@ -1,5 +1,6 @@ #![unstable(feature = "thread_local_internals", issue = "none")] +#[cfg_attr(target_family = "wasm", allow(unused))] // unused on wasm32-unknown-unknown pub unsafe fn register_dtor(_t: *mut u8, _dtor: unsafe extern "C" fn(*mut u8)) { // FIXME: right now there is no concept of "thread exit", but this is likely // going to show up at some point in the form of an exported symbol that the diff --git a/library/std/src/sys/unsupported/thread_local_key.rs b/library/std/src/sys/unsupported/thread_local_key.rs index c31b61cbf..b6e5e4cd2 100644 --- a/library/std/src/sys/unsupported/thread_local_key.rs +++ b/library/std/src/sys/unsupported/thread_local_key.rs @@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 { pub unsafe fn destroy(_key: Key) { panic!("should not be used on this target"); } - -#[inline] -pub fn requires_synchronized_create() -> bool { - panic!("should not be used on this target"); -} diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs index 510cf36b1..d4866bbc3 100644 --- a/library/std/src/sys/wasi/fs.rs +++ b/library/std/src/sys/wasi/fs.rs @@ -1,7 +1,7 @@ #![deny(unsafe_op_in_unsafe_fn)] use super::fd::WasiFd; -use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::ffi::{CStr, OsStr, OsString}; use crate::fmt; use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom}; use crate::iter; @@ -12,6 +12,7 @@ use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd use crate::path::{Path, PathBuf}; use crate::ptr; use crate::sync::Arc; +use crate::sys::common::small_c_string::run_path_with_cstr; use crate::sys::time::SystemTime; use crate::sys::unsupported; use crate::sys_common::{AsInner, FromInner, IntoInner}; @@ -65,8 +66,8 @@ pub struct FilePermissions { #[derive(Copy, Clone, Debug, Default)] pub struct FileTimes { - accessed: Option<wasi::Timestamp>, - modified: Option<wasi::Timestamp>, + accessed: Option<SystemTime>, + modified: Option<SystemTime>, } #[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)] @@ -120,11 +121,11 @@ impl FilePermissions { impl FileTimes { pub fn set_accessed(&mut self, t: SystemTime) { - self.accessed = Some(t.to_wasi_timestamp_or_panic()); + self.accessed = Some(t); } pub fn set_modified(&mut self, t: SystemTime) { - self.modified = Some(t.to_wasi_timestamp_or_panic()); + self.modified = Some(t); } } @@ -476,9 +477,16 @@ impl File { } pub fn set_times(&self, times: FileTimes) -> io::Result<()> { + let to_timestamp = |time: Option<SystemTime>| { + match time { + Some(time) if let Some(ts) = time.to_wasi_timestamp() => Ok(ts), + Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")), + None => Ok(0), + } + }; self.fd.filestat_set_times( - times.accessed.unwrap_or(0), - times.modified.unwrap_or(0), + to_timestamp(times.accessed)?, + to_timestamp(times.modified)?, times.accessed.map_or(0, |_| wasi::FSTFLAGS_ATIM) | times.modified.map_or(0, |_| wasi::FSTFLAGS_MTIM), ) @@ -687,51 +695,52 @@ fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> { /// Note that this can fail if `p` doesn't look like it can be opened relative /// to any pre-opened file descriptor. 
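[Editorial sketch, not part of the diff: the overflow-checked conversion that the WASI `set_times` change above relies on, restated against std's public API. A WASI timestamp is a u64 count of nanoseconds since the Unix epoch; the free function `to_wasi_timestamp` here is illustrative only.]

    use std::time::{SystemTime, UNIX_EPOCH};

    fn to_wasi_timestamp(t: SystemTime) -> Option<u64> {
        // Returns None for pre-epoch times and for times whose nanosecond count
        // does not fit in a u64, mirroring the InvalidInput error above.
        t.duration_since(UNIX_EPOCH).ok()?.as_nanos().try_into().ok()
    }

    fn main() {
        assert!(to_wasi_timestamp(SystemTime::now()).is_some());
        assert!(to_wasi_timestamp(UNIX_EPOCH - std::time::Duration::from_secs(1)).is_none());
    }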
fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> { - let p = CString::new(p.as_os_str().as_bytes())?; - let mut buf = Vec::<u8>::with_capacity(512); - loop { - unsafe { - let mut relative_path = buf.as_ptr().cast(); - let mut abs_prefix = ptr::null(); - let fd = __wasilibc_find_relpath( - p.as_ptr(), - &mut abs_prefix, - &mut relative_path, - buf.capacity(), - ); - if fd == -1 { - if io::Error::last_os_error().raw_os_error() == Some(libc::ENOMEM) { - // Trigger the internal buffer resizing logic of `Vec` by requiring - // more space than the current capacity. - let cap = buf.capacity(); - buf.set_len(cap); - buf.reserve(1); - continue; - } - let msg = format!( - "failed to find a pre-opened file descriptor \ - through which {:?} could be opened", - p + run_path_with_cstr(p, |p| { + let mut buf = Vec::<u8>::with_capacity(512); + loop { + unsafe { + let mut relative_path = buf.as_ptr().cast(); + let mut abs_prefix = ptr::null(); + let fd = __wasilibc_find_relpath( + p.as_ptr(), + &mut abs_prefix, + &mut relative_path, + buf.capacity(), ); - return Err(io::Error::new(io::ErrorKind::Uncategorized, msg)); - } - let relative = CStr::from_ptr(relative_path).to_bytes().to_vec(); + if fd == -1 { + if io::Error::last_os_error().raw_os_error() == Some(libc::ENOMEM) { + // Trigger the internal buffer resizing logic of `Vec` by requiring + // more space than the current capacity. + let cap = buf.capacity(); + buf.set_len(cap); + buf.reserve(1); + continue; + } + let msg = format!( + "failed to find a pre-opened file descriptor \ + through which {:?} could be opened", + p + ); + return Err(io::Error::new(io::ErrorKind::Uncategorized, msg)); + } + let relative = CStr::from_ptr(relative_path).to_bytes().to_vec(); - return Ok(( - ManuallyDrop::new(WasiFd::from_raw_fd(fd as c_int)), - PathBuf::from(OsString::from_vec(relative)), - )); + return Ok(( + ManuallyDrop::new(WasiFd::from_raw_fd(fd as c_int)), + PathBuf::from(OsString::from_vec(relative)), + )); + } } - } - extern "C" { - pub fn __wasilibc_find_relpath( - path: *const libc::c_char, - abs_prefix: *mut *const libc::c_char, - relative_path: *mut *const libc::c_char, - relative_path_len: libc::size_t, - ) -> libc::c_int; - } + extern "C" { + pub fn __wasilibc_find_relpath( + path: *const libc::c_char, + abs_prefix: *mut *const libc::c_char, + relative_path: *mut *const libc::c_char, + relative_path_len: libc::size_t, + ) -> libc::c_int; + } + }) } pub fn osstr2str(f: &OsStr) -> io::Result<&str> { diff --git a/library/std/src/sys/wasi/io.rs b/library/std/src/sys/wasi/io.rs index ee017d13a..2cd45df88 100644 --- a/library/std/src/sys/wasi/io.rs +++ b/library/std/src/sys/wasi/io.rs @@ -1,6 +1,7 @@ #![deny(unsafe_op_in_unsafe_fn)] use crate::marker::PhantomData; +use crate::os::fd::{AsFd, AsRawFd}; use crate::slice; #[derive(Copy, Clone)] @@ -71,3 +72,8 @@ impl<'a> IoSliceMut<'a> { unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.buf_len) } } } + +pub fn is_terminal(fd: &impl AsFd) -> bool { + let fd = fd.as_fd(); + unsafe { libc::isatty(fd.as_raw_fd()) != 0 } +} diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs index 683a07a34..c8c47763a 100644 --- a/library/std/src/sys/wasi/mod.rs +++ b/library/std/src/sys/wasi/mod.rs @@ -25,6 +25,9 @@ pub mod cmath; pub mod env; pub mod fd; pub mod fs; +#[allow(unused)] +#[path = "../wasm/atomics/futex.rs"] +pub mod futex; pub mod io; #[path = "../unsupported/locks/mod.rs"] pub mod locks; diff --git a/library/std/src/sys/wasi/os.rs 
b/library/std/src/sys/wasi/os.rs index c5229a188..f5513e999 100644 --- a/library/std/src/sys/wasi/os.rs +++ b/library/std/src/sys/wasi/os.rs @@ -1,14 +1,15 @@ #![deny(unsafe_op_in_unsafe_fn)] -use crate::any::Any; use crate::error::Error as StdError; -use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::ffi::{CStr, OsStr, OsString}; use crate::fmt; use crate::io; use crate::marker::PhantomData; +use crate::ops::Drop; use crate::os::wasi::prelude::*; use crate::path::{self, PathBuf}; use crate::str; +use crate::sys::common::small_c_string::{run_path_with_cstr, run_with_cstr}; use crate::sys::memchr; use crate::sys::unsupported; use crate::vec; @@ -23,10 +24,26 @@ mod libc { } } -#[cfg(not(target_feature = "atomics"))] -pub unsafe fn env_lock() -> impl Any { - // No need for a lock if we're single-threaded, but this function will need - // to get implemented for multi-threaded scenarios +cfg_if::cfg_if! { + if #[cfg(target_feature = "atomics")] { + // Access to the environment must be protected by a lock in multi-threaded scenarios. + use crate::sync::{PoisonError, RwLock}; + static ENV_LOCK: RwLock<()> = RwLock::new(()); + pub fn env_read_lock() -> impl Drop { + ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner) + } + pub fn env_write_lock() -> impl Drop { + ENV_LOCK.write().unwrap_or_else(PoisonError::into_inner) + } + } else { + // No need for a lock if we are single-threaded. + pub fn env_read_lock() -> impl Drop { + Box::new(()) + } + pub fn env_write_lock() -> impl Drop { + Box::new(()) + } + } } pub fn errno() -> i32 { @@ -77,13 +94,10 @@ pub fn getcwd() -> io::Result<PathBuf> { } pub fn chdir(p: &path::Path) -> io::Result<()> { - let p: &OsStr = p.as_ref(); - let p = CString::new(p.as_bytes())?; - unsafe { - match libc::chdir(p.as_ptr()) == (0 as libc::c_int) { - true => Ok(()), - false => Err(io::Error::last_os_error()), - } + let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?; + match result == (0 as libc::c_int) { + true => Ok(()), + false => Err(io::Error::last_os_error()), } } @@ -146,7 +160,7 @@ impl Iterator for Env { pub fn env() -> Env { unsafe { - let _guard = env_lock(); + let _guard = env_read_lock(); let mut environ = libc::environ; let mut result = Vec::new(); if !environ.is_null() { @@ -176,35 +190,32 @@ pub fn env() -> Env { } pub fn getenv(k: &OsStr) -> Option<OsString> { - let k = CString::new(k.as_bytes()).ok()?; - unsafe { - let _guard = env_lock(); - let s = libc::getenv(k.as_ptr()) as *const libc::c_char; - if s.is_null() { - None - } else { - Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec())) - } + let s = run_with_cstr(k.as_bytes(), |k| unsafe { + let _guard = env_read_lock(); + Ok(libc::getenv(k.as_ptr()) as *const libc::c_char) + }) + .ok()?; + if s.is_null() { + None + } else { + Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec())) } } pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { - let k = CString::new(k.as_bytes())?; - let v = CString::new(v.as_bytes())?; - - unsafe { - let _guard = env_lock(); - cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) - } + run_with_cstr(k.as_bytes(), |k| { + run_with_cstr(v.as_bytes(), |v| unsafe { + let _guard = env_write_lock(); + cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) + }) + }) } pub fn unsetenv(n: &OsStr) -> io::Result<()> { - let nbuf = CString::new(n.as_bytes())?; - - unsafe { - let _guard = env_lock(); + run_with_cstr(n.as_bytes(), |nbuf| unsafe { + let _guard = env_write_lock(); 
cvt(libc::unsetenv(nbuf.as_ptr())).map(drop) - } + }) } pub fn temp_dir() -> PathBuf { diff --git a/library/std/src/sys/wasi/stdio.rs b/library/std/src/sys/wasi/stdio.rs index d2081771b..4cc0e4ed5 100644 --- a/library/std/src/sys/wasi/stdio.rs +++ b/library/std/src/sys/wasi/stdio.rs @@ -4,7 +4,7 @@ use super::fd::WasiFd; use crate::io::{self, IoSlice, IoSliceMut}; use crate::mem::ManuallyDrop; use crate::os::raw; -use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd}; +use crate::os::wasi::io::{AsRawFd, FromRawFd}; pub struct Stdin; pub struct Stdout; @@ -23,13 +23,6 @@ impl AsRawFd for Stdin { } } -impl AsFd for Stdin { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(0) } - } -} - impl io::Read for Stdin { fn read(&mut self, data: &mut [u8]) -> io::Result<usize> { self.read_vectored(&mut [IoSliceMut::new(data)]) @@ -58,13 +51,6 @@ impl AsRawFd for Stdout { } } -impl AsFd for Stdout { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(1) } - } -} - impl io::Write for Stdout { fn write(&mut self, data: &[u8]) -> io::Result<usize> { self.write_vectored(&[IoSlice::new(data)]) @@ -96,13 +82,6 @@ impl AsRawFd for Stderr { } } -impl AsFd for Stderr { - #[inline] - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(2) } - } -} - impl io::Write for Stderr { fn write(&mut self, data: &[u8]) -> io::Result<usize> { self.write_vectored(&[IoSlice::new(data)]) diff --git a/library/std/src/sys/wasi/time.rs b/library/std/src/sys/wasi/time.rs index 3d326e491..016b06efb 100644 --- a/library/std/src/sys/wasi/time.rs +++ b/library/std/src/sys/wasi/time.rs @@ -47,8 +47,8 @@ impl SystemTime { SystemTime(Duration::from_nanos(ts)) } - pub fn to_wasi_timestamp_or_panic(&self) -> wasi::Timestamp { - self.0.as_nanos().try_into().expect("time does not fit in WASI timestamp") + pub fn to_wasi_timestamp(&self) -> Option<wasi::Timestamp> { + self.0.as_nanos().try_into().ok() } pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs index 4159efe2a..93838390b 100644 --- a/library/std/src/sys/wasm/mod.rs +++ b/library/std/src/sys/wasm/mod.rs @@ -57,7 +57,7 @@ cfg_if::cfg_if! 
{ mod futex_rwlock; pub(crate) use futex_condvar::{Condvar, MovableCondvar}; pub(crate) use futex_mutex::{Mutex, MovableMutex}; - pub(crate) use futex_rwlock::{RwLock, MovableRwLock}; + pub(crate) use futex_rwlock::MovableRwLock; } #[path = "atomics/futex.rs"] pub mod futex; diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs index 89d0ab59b..be6fc2ebb 100644 --- a/library/std/src/sys/windows/c.rs +++ b/library/std/src/sys/windows/c.rs @@ -71,6 +71,7 @@ pub type BCRYPT_ALG_HANDLE = LPVOID; pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE; pub type PLARGE_INTEGER = *mut c_longlong; pub type PSRWLOCK = *mut SRWLOCK; +pub type LPINIT_ONCE = *mut INIT_ONCE; pub type SOCKET = crate::os::windows::raw::SOCKET; pub type socklen_t = c_int; @@ -126,6 +127,10 @@ pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000; pub const FIONBIO: c_ulong = 0x8004667e; +pub const MAX_PATH: usize = 260; + +pub const FILE_TYPE_PIPE: u32 = 3; + #[repr(C)] #[derive(Copy)] pub struct WIN32_FIND_DATAW { @@ -194,6 +199,9 @@ pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002; pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { ptr: ptr::null_mut() }; pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() }; +pub const INIT_ONCE_STATIC_INIT: INIT_ONCE = INIT_ONCE { ptr: ptr::null_mut() }; + +pub const INIT_ONCE_INIT_FAILED: DWORD = 0x00000004; pub const DETACHED_PROCESS: DWORD = 0x00000008; pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200; @@ -279,7 +287,6 @@ pub const STATUS_INVALID_PARAMETER: NTSTATUS = 0xc000000d_u32 as _; pub const STATUS_PENDING: NTSTATUS = 0x103 as _; pub const STATUS_END_OF_FILE: NTSTATUS = 0xC0000011_u32 as _; pub const STATUS_NOT_IMPLEMENTED: NTSTATUS = 0xC0000002_u32 as _; -pub const STATUS_NOT_SUPPORTED: NTSTATUS = 0xC00000BB_u32 as _; // Equivalent to the `NT_SUCCESS` C preprocessor macro. // See: https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/using-ntstatus-values @@ -289,6 +296,7 @@ pub fn nt_success(status: NTSTATUS) -> bool { // "RNG\0" pub const BCRYPT_RNG_ALGORITHM: &[u16] = &[b'R' as u16, b'N' as u16, b'G' as u16, 0]; +pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002; #[repr(C)] pub struct UNICODE_STRING { @@ -535,6 +543,12 @@ pub struct SYMBOLIC_LINK_REPARSE_BUFFER { /// NB: Use carefully! In general using this as a reference is likely to get the /// provenance wrong for the `PathBuffer` field! 
#[repr(C)] +pub struct FILE_NAME_INFO { + pub FileNameLength: DWORD, + pub FileName: [WCHAR; 1], +} + +#[repr(C)] pub struct MOUNT_POINT_REPARSE_BUFFER { pub SubstituteNameOffset: c_ushort, pub SubstituteNameLength: c_ushort, @@ -565,6 +579,10 @@ pub struct CONDITION_VARIABLE { pub struct SRWLOCK { pub ptr: LPVOID, } +#[repr(C)] +pub struct INIT_ONCE { + pub ptr: LPVOID, +} #[repr(C)] pub struct REPARSE_MOUNTPOINT_DATA_BUFFER { @@ -817,10 +835,6 @@ if #[cfg(not(target_vendor = "uwp"))] { #[link(name = "advapi32")] extern "system" { - // Forbidden when targeting UWP - #[link_name = "SystemFunction036"] - pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN; - // Allowed but unused by UWP pub fn OpenProcessToken( ProcessHandle: HANDLE, @@ -959,6 +973,7 @@ extern "system" { pub fn TlsAlloc() -> DWORD; pub fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID; pub fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL; + pub fn TlsFree(dwTlsIndex: DWORD) -> BOOL; pub fn GetLastError() -> DWORD; pub fn QueryPerformanceFrequency(lpFrequency: *mut LARGE_INTEGER) -> BOOL; pub fn QueryPerformanceCounter(lpPerformanceCount: *mut LARGE_INTEGER) -> BOOL; @@ -1101,6 +1116,7 @@ extern "system" { lpFileInformation: LPVOID, dwBufferSize: DWORD, ) -> BOOL; + pub fn GetFileType(hfile: HANDLE) -> DWORD; pub fn SleepConditionVariableSRW( ConditionVariable: PCONDITION_VARIABLE, SRWLock: PSRWLOCK, @@ -1118,6 +1134,14 @@ extern "system" { pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN; pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN; + pub fn InitOnceBeginInitialize( + lpInitOnce: LPINIT_ONCE, + dwFlags: DWORD, + fPending: LPBOOL, + lpContext: *mut LPVOID, + ) -> BOOL; + pub fn InitOnceComplete(lpInitOnce: LPINIT_ONCE, dwFlags: DWORD, lpContext: LPVOID) -> BOOL; + pub fn CompareStringOrdinal( lpString1: LPCWSTR, cchCount1: c_int, diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs index 155d0297e..378098038 100644 --- a/library/std/src/sys/windows/fs.rs +++ b/library/std/src/sys/windows/fs.rs @@ -1,5 +1,6 @@ use crate::os::windows::prelude::*; +use crate::borrow::Cow; use crate::ffi::OsString; use crate::fmt; use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom}; @@ -572,6 +573,14 @@ impl File { "Cannot set file timestamp to 0", )); } + let is_max = + |t: c::FILETIME| t.dwLowDateTime == c::DWORD::MAX && t.dwHighDateTime == c::DWORD::MAX; + if times.accessed.map_or(false, is_max) || times.modified.map_or(false, is_max) { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + "Cannot set file timestamp to 0xFFFF_FFFF_FFFF_FFFF", + )); + } cvt(unsafe { c::SetFileTime(self.as_handle(), None, times.accessed.as_ref(), times.modified.as_ref()) })?; @@ -711,7 +720,7 @@ impl<'a> DirBuffIter<'a> { } } impl<'a> Iterator for DirBuffIter<'a> { - type Item = (&'a [u16], bool); + type Item = (Cow<'a, [u16]>, bool); fn next(&mut self) -> Option<Self::Item> { use crate::mem::size_of; let buffer = &self.buffer?[self.cursor..]; @@ -726,15 +735,19 @@ impl<'a> Iterator for DirBuffIter<'a> { // `FileNameLength` bytes) let (name, is_directory, next_entry) = unsafe { let info = buffer.as_ptr().cast::<c::FILE_ID_BOTH_DIR_INFO>(); - // Guaranteed to be aligned in documentation for + // While this is guaranteed to be aligned in documentation for // https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_both_dir_info - assert!(info.is_aligned()); - let next_entry = (*info).NextEntryOffset as usize; 
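[Editorial sketch, not part of the diff: the replacement lines just below switch from direct field reads (which require the pointer to be aligned) to `ptr::addr_of!` + `read_unaligned`. Here is that technique in isolation; the `Header` struct and `read_header` helper are made up for illustration.]

    use std::{mem, ptr};

    #[repr(C)]
    struct Header {
        next_entry_offset: u32,
        file_name_length: u32,
    }

    /// Reads the header fields even when `bytes` does not start at an address
    /// aligned for `Header`: `addr_of!` forms the field address without creating
    /// a reference, and `read_unaligned` copies the value out byte-wise.
    fn read_header(bytes: &[u8]) -> (u32, u32) {
        assert!(bytes.len() >= mem::size_of::<Header>());
        let p = bytes.as_ptr().cast::<Header>();
        unsafe {
            (
                ptr::addr_of!((*p).next_entry_offset).read_unaligned(),
                ptr::addr_of!((*p).file_name_length).read_unaligned(),
            )
        }
    }

    fn main() {
        // Build a buffer whose payload starts at an odd (misaligned) offset.
        let mut buf = vec![0u8];
        buf.extend_from_slice(&1u32.to_ne_bytes());
        buf.extend_from_slice(&8u32.to_ne_bytes());
        assert_eq!(read_header(&buf[1..]), (1, 8));
    }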
- let name = crate::slice::from_raw_parts( + // it does not seem that reality is so kind, and assuming this + // caused crashes in some cases (https://github.com/rust-lang/rust/issues/104530) + // presumably, this can be blamed on buggy filesystem drivers, but who knows. + let next_entry = ptr::addr_of!((*info).NextEntryOffset).read_unaligned() as usize; + let length = ptr::addr_of!((*info).FileNameLength).read_unaligned() as usize; + let attrs = ptr::addr_of!((*info).FileAttributes).read_unaligned(); + let name = from_maybe_unaligned( ptr::addr_of!((*info).FileName).cast::<u16>(), - (*info).FileNameLength as usize / size_of::<u16>(), + length / size_of::<u16>(), ); - let is_directory = ((*info).FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) != 0; + let is_directory = (attrs & c::FILE_ATTRIBUTE_DIRECTORY) != 0; (name, is_directory, next_entry) }; @@ -747,13 +760,21 @@ impl<'a> Iterator for DirBuffIter<'a> { // Skip `.` and `..` pseudo entries. const DOT: u16 = b'.' as u16; - match name { + match &name[..] { [DOT] | [DOT, DOT] => self.next(), _ => Some((name, is_directory)), } } } +unsafe fn from_maybe_unaligned<'a>(p: *const u16, len: usize) -> Cow<'a, [u16]> { + if p.is_aligned() { + Cow::Borrowed(crate::slice::from_raw_parts(p, len)) + } else { + Cow::Owned((0..len).map(|i| p.add(i).read_unaligned()).collect()) + } +} + /// Open a link relative to the parent directory, ensure no symlinks are followed. fn open_link_no_reparse(parent: &File, name: &[u16], access: u32) -> io::Result<File> { // This is implemented using the lower level `NtCreateFile` function as @@ -1109,13 +1130,13 @@ fn remove_dir_all_iterative(f: &File, delete: fn(&File) -> io::Result<()>) -> io if is_directory { let child_dir = open_link_no_reparse( &dir, - name, + &name, c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY, )?; dirlist.push(child_dir); } else { for i in 1..=MAX_RETRIES { - let result = open_link_no_reparse(&dir, name, c::SYNCHRONIZE | c::DELETE); + let result = open_link_no_reparse(&dir, &name, c::SYNCHRONIZE | c::DELETE); match result { Ok(f) => delete(&f)?, // Already deleted, so skip. diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs index fb06df1f8..2cc34c986 100644 --- a/library/std/src/sys/windows/io.rs +++ b/library/std/src/sys/windows/io.rs @@ -1,6 +1,10 @@ use crate::marker::PhantomData; +use crate::mem::size_of; +use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle}; use crate::slice; -use crate::sys::c; +use crate::sys::{c, Align8}; +use core; +use libc; #[derive(Copy, Clone)] #[repr(transparent)] @@ -78,3 +82,73 @@ impl<'a> IoSliceMut<'a> { unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.len as usize) } } } + +pub fn is_terminal(h: &impl AsHandle) -> bool { + unsafe { handle_is_console(h.as_handle()) } +} + +unsafe fn handle_is_console(handle: BorrowedHandle<'_>) -> bool { + let handle = handle.as_raw_handle(); + + // A null handle means the process has no console. + if handle.is_null() { + return false; + } + + let mut out = 0; + if c::GetConsoleMode(handle, &mut out) != 0 { + // False positives aren't possible. If we got a console then we definitely have a console. + return true; + } + + // At this point, we *could* have a false negative. We can determine that this is a true + // negative if we can detect the presence of a console on any of the standard I/O streams. If + // another stream has a console, then we know we're in a Windows console and can therefore + // trust the negative. 
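[Editorial sketch, not part of the diff: the user-facing API these `sys`-level `is_terminal` helpers back. On nightly toolchains of this era the trait was gated behind `feature(is_terminal)`; it was stabilized in a later release, so treat the gate as an assumption about the toolchain.]

    #![feature(is_terminal)]

    use std::io::{self, IsTerminal};

    fn main() {
        if io::stdout().is_terminal() {
            println!("stdout is a terminal/console; interactive output is reasonable");
        } else {
            println!("stdout is redirected to a pipe or file");
        }
    }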
+ for std_handle in [c::STD_INPUT_HANDLE, c::STD_OUTPUT_HANDLE, c::STD_ERROR_HANDLE] { + let std_handle = c::GetStdHandle(std_handle); + if !std_handle.is_null() + && std_handle != handle + && c::GetConsoleMode(std_handle, &mut out) != 0 + { + return false; + } + } + + // Otherwise, we fall back to an msys hack to see if we can detect the presence of a pty. + msys_tty_on(handle) +} + +unsafe fn msys_tty_on(handle: c::HANDLE) -> bool { + // Early return if the handle is not a pipe. + if c::GetFileType(handle) != c::FILE_TYPE_PIPE { + return false; + } + + const SIZE: usize = size_of::<c::FILE_NAME_INFO>() + c::MAX_PATH * size_of::<c::WCHAR>(); + let mut name_info_bytes = Align8([0u8; SIZE]); + let res = c::GetFileInformationByHandleEx( + handle, + c::FileNameInfo, + name_info_bytes.0.as_mut_ptr() as *mut libc::c_void, + SIZE as u32, + ); + if res == 0 { + return false; + } + let name_info: &c::FILE_NAME_INFO = &*(name_info_bytes.0.as_ptr() as *const c::FILE_NAME_INFO); + let name_len = name_info.FileNameLength as usize / 2; + // Offset to get the `FileName` field. + let name_ptr = name_info_bytes.0.as_ptr().offset(size_of::<c::DWORD>() as isize).cast::<u16>(); + let s = core::slice::from_raw_parts(name_ptr, name_len); + let name = String::from_utf16_lossy(s); + // Get the file name only. + let name = name.rsplit('\\').next().unwrap_or(&name); + // This checks whether 'pty' exists in the file name, which indicates that + // a pseudo-terminal is attached. To mitigate against false positives + // (e.g., an actual file name that contains 'pty'), we also require that + // the file name begins with either the strings 'msys-' or 'cygwin-'.) + let is_msys = name.starts_with("msys-") || name.starts_with("cygwin-"); + let is_pty = name.contains("-pty"); + is_msys && is_pty +} diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs index d412ff152..602a2d623 100644 --- a/library/std/src/sys/windows/locks/mod.rs +++ b/library/std/src/sys/windows/locks/mod.rs @@ -3,4 +3,4 @@ mod mutex; mod rwlock; pub use condvar::{Condvar, MovableCondvar}; pub use mutex::{MovableMutex, Mutex}; -pub use rwlock::{MovableRwLock, RwLock}; +pub use rwlock::MovableRwLock; diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs index 02d5af471..9cbb4ef19 100644 --- a/library/std/src/sys/windows/process.rs +++ b/library/std/src/sys/windows/process.rs @@ -16,6 +16,7 @@ use crate::os::windows::ffi::{OsStrExt, OsStringExt}; use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle}; use crate::path::{Path, PathBuf}; use crate::ptr; +use crate::sync::Mutex; use crate::sys::args::{self, Arg}; use crate::sys::c; use crate::sys::c::NonZeroDWORD; @@ -25,7 +26,6 @@ use crate::sys::handle::Handle; use crate::sys::path; use crate::sys::pipe::{self, AnonPipe}; use crate::sys::stdio; -use crate::sys_common::mutex::StaticMutex; use crate::sys_common::process::{CommandEnv, CommandEnvs}; use crate::sys_common::IntoInner; @@ -301,9 +301,9 @@ impl Command { // // For more information, msdn also has an article about this race: // https://support.microsoft.com/kb/315939 - static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new(); + static CREATE_PROCESS_LOCK: Mutex<()> = Mutex::new(()); - let _guard = unsafe { CREATE_PROCESS_LOCK.lock() }; + let _guard = CREATE_PROCESS_LOCK.lock(); let mut pipes = StdioPipes { stdin: None, stdout: None, stderr: None }; let null = Stdio::Null; diff --git a/library/std/src/sys/windows/rand.rs 
b/library/std/src/sys/windows/rand.rs index d6cd8f802..b5a49489d 100644 --- a/library/std/src/sys/windows/rand.rs +++ b/library/std/src/sys/windows/rand.rs @@ -13,15 +13,12 @@ //! but significant number of users to experience panics caused by a failure of //! this function. See [#94098]. //! -//! The current version changes this to use the `BCRYPT_RNG_ALG_HANDLE` -//! [Pseudo-handle], which gets the default RNG algorithm without querying the -//! system preference thus hopefully avoiding the previous issue. -//! This is only supported on Windows 10+ so a fallback is used for older versions. +//! The current version falls back to using `BCryptOpenAlgorithmProvider` if +//! `BCRYPT_USE_SYSTEM_PREFERRED_RNG` fails for any reason. //! //! [#94098]: https://github.com/rust-lang/rust/issues/94098 //! [`RtlGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom //! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom -//! [Pseudo-handle]: https://docs.microsoft.com/en-us/windows/win32/seccng/cng-algorithm-pseudo-handles use crate::mem; use crate::ptr; use crate::sys::c; @@ -33,37 +30,35 @@ use crate::sys::c; /// [`HashMap`]: crate::collections::HashMap /// [`RandomState`]: crate::collections::hash_map::RandomState pub fn hashmap_random_keys() -> (u64, u64) { - Rng::open().and_then(|rng| rng.gen_random_keys()).unwrap_or_else(fallback_rng) + Rng::SYSTEM.gen_random_keys().unwrap_or_else(fallback_rng) } -struct Rng(c::BCRYPT_ALG_HANDLE); +struct Rng { + algorithm: c::BCRYPT_ALG_HANDLE, + flags: u32, +} impl Rng { - #[cfg(miri)] - fn open() -> Result<Self, c::NTSTATUS> { - const BCRYPT_RNG_ALG_HANDLE: c::BCRYPT_ALG_HANDLE = ptr::invalid_mut(0x81); - let _ = ( - c::BCryptOpenAlgorithmProvider, - c::BCryptCloseAlgorithmProvider, - c::BCRYPT_RNG_ALGORITHM, - c::STATUS_NOT_SUPPORTED, - ); - Ok(Self(BCRYPT_RNG_ALG_HANDLE)) + const SYSTEM: Self = unsafe { Self::new(ptr::null_mut(), c::BCRYPT_USE_SYSTEM_PREFERRED_RNG) }; + + /// Create the RNG from an existing algorithm handle. + /// + /// # Safety + /// + /// The handle must either be null or a valid algorithm handle. + const unsafe fn new(algorithm: c::BCRYPT_ALG_HANDLE, flags: u32) -> Self { + Self { algorithm, flags } } - #[cfg(not(miri))] - // Open a handle to the RNG algorithm. + + /// Open a handle to the RNG algorithm. fn open() -> Result<Self, c::NTSTATUS> { use crate::sync::atomic::AtomicPtr; use crate::sync::atomic::Ordering::{Acquire, Release}; - const ERROR_VALUE: c::LPVOID = ptr::invalid_mut(usize::MAX); // An atomic is used so we don't need to reopen the handle every time. static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(ptr::null_mut()); let mut handle = HANDLE.load(Acquire); - // We use a sentinel value to designate an error occurred last time. 
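[Editorial sketch, not part of the diff: the handle-caching idiom that `Rng::open` keeps using, shown with a heap allocation standing in for the BCrypt provider handle. The `open_cached` helper is made up for illustration; whichever thread loses the compare_exchange race releases its own handle and adopts the winner's.]

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Release}};

    static HANDLE: AtomicPtr<u64> = AtomicPtr::new(ptr::null_mut());

    fn open_cached() -> *mut u64 {
        let handle = HANDLE.load(Acquire);
        if !handle.is_null() {
            return handle;
        }
        // Stand-in for "open the algorithm provider".
        let new = Box::into_raw(Box::new(0xC0FFEE_u64));
        match HANDLE.compare_exchange(ptr::null_mut(), new, Release, Acquire) {
            Ok(_) => new,
            Err(previous) => {
                // Another thread published a handle first: close ours, use theirs.
                drop(unsafe { Box::from_raw(new) });
                previous
            }
        }
    }

    fn main() {
        let a = open_cached();
        let b = open_cached();
        assert_eq!(a, b); // every caller sees the same cached handle
    }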
- if handle == ERROR_VALUE { - Err(c::STATUS_NOT_SUPPORTED) - } else if handle.is_null() { + if handle.is_null() { let status = unsafe { c::BCryptOpenAlgorithmProvider( &mut handle, @@ -80,13 +75,12 @@ impl Rng { unsafe { c::BCryptCloseAlgorithmProvider(handle, 0) }; handle = previous_handle; } - Ok(Self(handle)) + Ok(unsafe { Self::new(handle, 0) }) } else { - HANDLE.store(ERROR_VALUE, Release); Err(status) } } else { - Ok(Self(handle)) + Ok(unsafe { Self::new(handle, 0) }) } } @@ -94,33 +88,19 @@ impl Rng { let mut v = (0, 0); let status = unsafe { let size = mem::size_of_val(&v).try_into().unwrap(); - c::BCryptGenRandom(self.0, ptr::addr_of_mut!(v).cast(), size, 0) + c::BCryptGenRandom(self.algorithm, ptr::addr_of_mut!(v).cast(), size, self.flags) }; if c::nt_success(status) { Ok(v) } else { Err(status) } } } -/// Generate random numbers using the fallback RNG function (RtlGenRandom) -#[cfg(not(target_vendor = "uwp"))] +/// Generate random numbers using the fallback RNG function #[inline(never)] fn fallback_rng(rng_status: c::NTSTATUS) -> (u64, u64) { - let mut v = (0, 0); - let ret = - unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) }; - - if ret != 0 { - v - } else { - panic!( - "RNG broken: {rng_status:#x}, fallback RNG broken: {}", - crate::io::Error::last_os_error() - ) + match Rng::open().and_then(|rng| rng.gen_random_keys()) { + Ok(keys) => keys, + Err(status) => { + panic!("RNG broken: {rng_status:#x}, fallback RNG broken: {status:#x}") + } } } - -/// We can't use RtlGenRandom with UWP, so there is no fallback -#[cfg(target_vendor = "uwp")] -#[inline(never)] -fn fallback_rng(rng_status: c::NTSTATUS) -> (u64, u64) { - panic!("RNG broken: {rng_status:#x} fallback RNG broken: RtlGenRandom() not supported on UWP"); -} diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs index ec670238e..17628b757 100644 --- a/library/std/src/sys/windows/thread_local_key.rs +++ b/library/std/src/sys/windows/thread_local_key.rs @@ -1,11 +1,16 @@ -use crate::mem::ManuallyDrop; +use crate::cell::UnsafeCell; use crate::ptr; -use crate::sync::atomic::AtomicPtr; -use crate::sync::atomic::Ordering::SeqCst; +use crate::sync::atomic::{ + AtomicPtr, AtomicU32, + Ordering::{AcqRel, Acquire, Relaxed, Release}, +}; use crate::sys::c; -pub type Key = c::DWORD; -pub type Dtor = unsafe extern "C" fn(*mut u8); +#[cfg(test)] +mod tests; + +type Key = c::DWORD; +type Dtor = unsafe extern "C" fn(*mut u8); // Turns out, like pretty much everything, Windows is pretty close the // functionality that Unix provides, but slightly different! In the case of @@ -22,60 +27,109 @@ pub type Dtor = unsafe extern "C" fn(*mut u8); // To accomplish this feat, we perform a number of threads, all contained // within this module: // -// * All TLS destructors are tracked by *us*, not the windows runtime. This +// * All TLS destructors are tracked by *us*, not the Windows runtime. This // means that we have a global list of destructors for each TLS key that // we know about. // * When a thread exits, we run over the entire list and run dtors for all // non-null keys. This attempts to match Unix semantics in this regard. // -// This ends up having the overhead of using a global list, having some -// locks here and there, and in general just adding some more code bloat. We -// attempt to optimize runtime by forgetting keys that don't have -// destructors, but this only gets us so far. 
-// // For more details and nitty-gritty, see the code sections below! // // [1]: https://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way -// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base -// /threading/thread_local_storage_win.cc#L42 +// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base/threading/thread_local_storage_win.cc#L42 -// ------------------------------------------------------------------------- -// Native bindings -// -// This section is just raw bindings to the native functions that Windows -// provides, There's a few extra calls to deal with destructors. +pub struct StaticKey { + /// The key value shifted up by one. Since TLS_OUT_OF_INDEXES == DWORD::MAX + /// is not a valid key value, this allows us to use zero as sentinel value + /// without risking overflow. + key: AtomicU32, + dtor: Option<Dtor>, + next: AtomicPtr<StaticKey>, + /// Currently, destructors cannot be unregistered, so we cannot use racy + /// initialization for keys. Instead, we need synchronize initialization. + /// Use the Windows-provided `Once` since it does not require TLS. + once: UnsafeCell<c::INIT_ONCE>, +} -#[inline] -pub unsafe fn create(dtor: Option<Dtor>) -> Key { - let key = c::TlsAlloc(); - assert!(key != c::TLS_OUT_OF_INDEXES); - if let Some(f) = dtor { - register_dtor(key, f); +impl StaticKey { + #[inline] + pub const fn new(dtor: Option<Dtor>) -> StaticKey { + StaticKey { + key: AtomicU32::new(0), + dtor, + next: AtomicPtr::new(ptr::null_mut()), + once: UnsafeCell::new(c::INIT_ONCE_STATIC_INIT), + } } - key -} -#[inline] -pub unsafe fn set(key: Key, value: *mut u8) { - let r = c::TlsSetValue(key, value as c::LPVOID); - debug_assert!(r != 0); -} + #[inline] + pub unsafe fn set(&'static self, val: *mut u8) { + let r = c::TlsSetValue(self.key(), val.cast()); + debug_assert_eq!(r, c::TRUE); + } -#[inline] -pub unsafe fn get(key: Key) -> *mut u8 { - c::TlsGetValue(key) as *mut u8 -} + #[inline] + pub unsafe fn get(&'static self) -> *mut u8 { + c::TlsGetValue(self.key()).cast() + } -#[inline] -pub unsafe fn destroy(_key: Key) { - rtabort!("can't destroy tls keys on windows") -} + #[inline] + unsafe fn key(&'static self) -> Key { + match self.key.load(Acquire) { + 0 => self.init(), + key => key - 1, + } + } + + #[cold] + unsafe fn init(&'static self) -> Key { + if self.dtor.is_some() { + let mut pending = c::FALSE; + let r = c::InitOnceBeginInitialize(self.once.get(), 0, &mut pending, ptr::null_mut()); + assert_eq!(r, c::TRUE); -#[inline] -pub fn requires_synchronized_create() -> bool { - true + if pending == c::FALSE { + // Some other thread initialized the key, load it. + self.key.load(Relaxed) - 1 + } else { + let key = c::TlsAlloc(); + if key == c::TLS_OUT_OF_INDEXES { + // Wakeup the waiting threads before panicking to avoid deadlock. + c::InitOnceComplete(self.once.get(), c::INIT_ONCE_INIT_FAILED, ptr::null_mut()); + panic!("out of TLS indexes"); + } + + self.key.store(key + 1, Release); + register_dtor(self); + + let r = c::InitOnceComplete(self.once.get(), 0, ptr::null_mut()); + debug_assert_eq!(r, c::TRUE); + + key + } + } else { + // If there is no destructor to clean up, we can use racy initialization. + + let key = c::TlsAlloc(); + assert_ne!(key, c::TLS_OUT_OF_INDEXES, "out of TLS indexes"); + + match self.key.compare_exchange(0, key + 1, AcqRel, Acquire) { + Ok(_) => key, + Err(new) => { + // Some other thread completed initialization first, so destroy + // our key and use theirs. 
+ let r = c::TlsFree(key); + debug_assert_eq!(r, c::TRUE); + new - 1 + } + } + } + } } +unsafe impl Send for StaticKey {} +unsafe impl Sync for StaticKey {} + // ------------------------------------------------------------------------- // Dtor registration // @@ -96,29 +150,21 @@ pub fn requires_synchronized_create() -> bool { // Typically processes have a statically known set of TLS keys which is pretty // small, and we'd want to keep this memory alive for the whole process anyway // really. -// -// Perhaps one day we can fold the `Box` here into a static allocation, -// expanding the `StaticKey` structure to contain not only a slot for the TLS -// key but also a slot for the destructor queue on windows. An optimization for -// another day! - -static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut()); - -struct Node { - dtor: Dtor, - key: Key, - next: *mut Node, -} -unsafe fn register_dtor(key: Key, dtor: Dtor) { - let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() })); +static DTORS: AtomicPtr<StaticKey> = AtomicPtr::new(ptr::null_mut()); - let mut head = DTORS.load(SeqCst); +/// Should only be called once per key, otherwise loops or breaks may occur in +/// the linked list. +unsafe fn register_dtor(key: &'static StaticKey) { + let this = <*const StaticKey>::cast_mut(key); + // Use acquire ordering to pass along the changes done by the previously + // registered keys when we store the new head with release ordering. + let mut head = DTORS.load(Acquire); loop { - node.next = head; - match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) { - Ok(_) => return, // nothing to drop, we successfully added the node to the list - Err(cur) => head = cur, + key.next.store(head, Relaxed); + match DTORS.compare_exchange_weak(head, this, Release, Acquire) { + Ok(_) => break, + Err(new) => head = new, } } } @@ -214,25 +260,29 @@ unsafe extern "system" fn on_tls_callback(h: c::LPVOID, dwReason: c::DWORD, pv: unsafe fn reference_tls_used() {} } -#[allow(dead_code)] // actually called above +#[allow(dead_code)] // actually called below unsafe fn run_dtors() { - let mut any_run = true; for _ in 0..5 { - if !any_run { - break; - } - any_run = false; - let mut cur = DTORS.load(SeqCst); + let mut any_run = false; + + // Use acquire ordering to observe key initialization. 
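[Editorial sketch, not part of the diff: the lock-free "push onto an intrusive list" idiom that `register_dtor` uses above, shown with a plain static node type so the ordering choices are easier to see. The `Node`, `push`, and `for_each` names are made up for illustration.]

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Relaxed, Release}};

    struct Node {
        value: u32,
        next: AtomicPtr<Node>,
    }

    static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

    /// Must be called at most once per node, just like `register_dtor` above,
    /// otherwise the list would contain cycles.
    fn push(node: &'static Node) {
        let this = node as *const Node as *mut Node;
        // Acquire pairs with the Release below, so a successful push also
        // publishes the contents of every previously pushed node.
        let mut head = HEAD.load(Acquire);
        loop {
            node.next.store(head, Relaxed);
            match HEAD.compare_exchange_weak(head, this, Release, Acquire) {
                Ok(_) => break,
                Err(new) => head = new,
            }
        }
    }

    fn for_each(mut f: impl FnMut(u32)) {
        let mut cur = HEAD.load(Acquire);
        while !cur.is_null() {
            // SAFETY: nodes are 'static and never removed from the list.
            let node = unsafe { &*cur };
            f(node.value);
            cur = node.next.load(Relaxed);
        }
    }

    fn main() {
        static A: Node = Node { value: 1, next: AtomicPtr::new(ptr::null_mut()) };
        static B: Node = Node { value: 2, next: AtomicPtr::new(ptr::null_mut()) };
        push(&A);
        push(&B);
        let mut seen = Vec::new();
        for_each(|v| seen.push(v));
        assert_eq!(seen, [2, 1]); // LIFO: the most recently pushed node is the head
    }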
+ let mut cur = DTORS.load(Acquire); while !cur.is_null() { - let ptr = c::TlsGetValue((*cur).key); + let key = (*cur).key.load(Relaxed) - 1; + let dtor = (*cur).dtor.unwrap(); + let ptr = c::TlsGetValue(key); if !ptr.is_null() { - c::TlsSetValue((*cur).key, ptr::null_mut()); - ((*cur).dtor)(ptr as *mut _); + c::TlsSetValue(key, ptr::null_mut()); + dtor(ptr as *mut _); any_run = true; } - cur = (*cur).next; + cur = (*cur).next.load(Relaxed); + } + + if !any_run { + break; } } } diff --git a/library/std/src/sys/windows/thread_local_key/tests.rs b/library/std/src/sys/windows/thread_local_key/tests.rs new file mode 100644 index 000000000..c95f383fb --- /dev/null +++ b/library/std/src/sys/windows/thread_local_key/tests.rs @@ -0,0 +1,53 @@ +use super::StaticKey; +use crate::ptr; + +#[test] +fn smoke() { + static K1: StaticKey = StaticKey::new(None); + static K2: StaticKey = StaticKey::new(None); + + unsafe { + assert!(K1.get().is_null()); + assert!(K2.get().is_null()); + K1.set(ptr::invalid_mut(1)); + K2.set(ptr::invalid_mut(2)); + assert_eq!(K1.get() as usize, 1); + assert_eq!(K2.get() as usize, 2); + } +} + +#[test] +fn destructors() { + use crate::mem::ManuallyDrop; + use crate::sync::Arc; + use crate::thread; + + unsafe extern "C" fn destruct(ptr: *mut u8) { + drop(Arc::from_raw(ptr as *const ())); + } + + static KEY: StaticKey = StaticKey::new(Some(destruct)); + + let shared1 = Arc::new(()); + let shared2 = Arc::clone(&shared1); + + unsafe { + assert!(KEY.get().is_null()); + KEY.set(Arc::into_raw(shared1) as *mut u8); + } + + thread::spawn(move || unsafe { + assert!(KEY.get().is_null()); + KEY.set(Arc::into_raw(shared2) as *mut u8); + }) + .join() + .unwrap(); + + // Leak the Arc, let the TLS destructor clean it up. + let shared1 = unsafe { ManuallyDrop::new(Arc::from_raw(KEY.get() as *const ())) }; + assert_eq!( + Arc::strong_count(&shared1), + 1, + "destructor should have dropped the other reference on thread exit" + ); +} diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs index 31164afdc..8807077cb 100644 --- a/library/std/src/sys_common/backtrace.rs +++ b/library/std/src/sys_common/backtrace.rs @@ -7,15 +7,14 @@ use crate::fmt; use crate::io; use crate::io::prelude::*; use crate::path::{self, Path, PathBuf}; -use crate::sys_common::mutex::StaticMutex; +use crate::sync::{Mutex, PoisonError}; /// Max number of frames to print. const MAX_NB_FRAMES: usize = 100; -// SAFETY: Don't attempt to lock this reentrantly. -pub unsafe fn lock() -> impl Drop { - static LOCK: StaticMutex = StaticMutex::new(); - LOCK.lock() +pub fn lock() -> impl Drop { + static LOCK: Mutex<()> = Mutex::new(()); + LOCK.lock().unwrap_or_else(PoisonError::into_inner) } /// Prints the current backtrace. diff --git a/library/std/src/sys_common/condvar.rs b/library/std/src/sys_common/condvar.rs index f3ac1061b..8bc5b2411 100644 --- a/library/std/src/sys_common/condvar.rs +++ b/library/std/src/sys_common/condvar.rs @@ -15,6 +15,7 @@ pub struct Condvar { impl Condvar { /// Creates a new condition variable for use. 
#[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> Self { Self { inner: imp::MovableCondvar::new(), check: CondvarCheck::new() } } diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs index ce8f36704..4ac9e62bf 100644 --- a/library/std/src/sys_common/condvar/check.rs +++ b/library/std/src/sys_common/condvar/check.rs @@ -50,6 +50,7 @@ pub struct NoCheck; #[allow(dead_code)] impl NoCheck { + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> Self { Self } diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs index 80f56bf75..8c19f9332 100644 --- a/library/std/src/sys_common/mod.rs +++ b/library/std/src/sys_common/mod.rs @@ -27,17 +27,25 @@ pub mod io; pub mod lazy_box; pub mod memchr; pub mod mutex; +pub mod once; pub mod process; pub mod remutex; pub mod rwlock; pub mod thread; pub mod thread_info; pub mod thread_local_dtor; -pub mod thread_local_key; pub mod thread_parker; pub mod wtf8; cfg_if::cfg_if! { + if #[cfg(target_os = "windows")] { + pub use crate::sys::thread_local_key; + } else { + pub mod thread_local_key; + } +} + +cfg_if::cfg_if! { if #[cfg(any(target_os = "l4re", target_os = "hermit", feature = "restricted-std", diff --git a/library/std/src/sys_common/mutex.rs b/library/std/src/sys_common/mutex.rs index 48479f5bd..98046f20f 100644 --- a/library/std/src/sys_common/mutex.rs +++ b/library/std/src/sys_common/mutex.rs @@ -1,49 +1,5 @@ use crate::sys::locks as imp; -/// An OS-based mutual exclusion lock, meant for use in static variables. -/// -/// This mutex has a const constructor ([`StaticMutex::new`]), does not -/// implement `Drop` to cleanup resources, and causes UB when used reentrantly. -/// -/// This mutex does not implement poisoning. -/// -/// This is a wrapper around `imp::Mutex` that does *not* call `init()` and -/// `destroy()`. -pub struct StaticMutex(imp::Mutex); - -unsafe impl Sync for StaticMutex {} - -impl StaticMutex { - /// Creates a new mutex for use. - #[inline] - pub const fn new() -> Self { - Self(imp::Mutex::new()) - } - - /// Calls raw_lock() and then returns an RAII guard to guarantee the mutex - /// will be unlocked. - /// - /// It is undefined behaviour to call this function while locked by the - /// same thread. - #[inline] - pub unsafe fn lock(&'static self) -> StaticMutexGuard { - self.0.lock(); - StaticMutexGuard(&self.0) - } -} - -#[must_use] -pub struct StaticMutexGuard(&'static imp::Mutex); - -impl Drop for StaticMutexGuard { - #[inline] - fn drop(&mut self) { - unsafe { - self.0.unlock(); - } - } -} - /// An OS-based mutual exclusion lock. /// /// This mutex cleans up its resources in its `Drop` implementation, may safely @@ -61,6 +17,7 @@ unsafe impl Sync for MovableMutex {} impl MovableMutex { /// Creates a new mutex. 
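[Editorial sketch, not part of the diff: the pattern that replaces `StaticMutex` throughout this diff — a const-constructible `std::sync::Mutex<()>` in a static, handing out its guard as `impl Drop` and ignoring poisoning, as the new `backtrace::lock` above does. The free function `lock` is illustrative only.]

    use std::sync::{Mutex, PoisonError};

    fn lock() -> impl Drop {
        // `Mutex::new` is const since Rust 1.63, so no lazy initialization is needed.
        static LOCK: Mutex<()> = Mutex::new(());
        LOCK.lock().unwrap_or_else(PoisonError::into_inner)
    }

    fn main() {
        let _guard = lock();
        // Critical section: e.g. printing a backtrace without interleaved output.
    }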
#[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> Self { Self(imp::MovableMutex::new()) } diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs index 3ad802afa..fad4a6333 100644 --- a/library/std/src/sys_common/net.rs +++ b/library/std/src/sys_common/net.rs @@ -2,12 +2,13 @@ mod tests; use crate::cmp; -use crate::ffi::CString; +use crate::convert::{TryFrom, TryInto}; use crate::fmt; use crate::io::{self, ErrorKind, IoSlice, IoSliceMut}; use crate::mem; use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr}; use crate::ptr; +use crate::sys::common::small_c_string::run_with_cstr; use crate::sys::net::netc as c; use crate::sys::net::{cvt, cvt_gai, cvt_r, init, wrlen_t, Socket}; use crate::sys_common::{AsInner, FromInner, IntoInner}; @@ -197,14 +198,15 @@ impl<'a> TryFrom<(&'a str, u16)> for LookupHost { fn try_from((host, port): (&'a str, u16)) -> io::Result<LookupHost> { init(); - let c_host = CString::new(host)?; - let mut hints: c::addrinfo = unsafe { mem::zeroed() }; - hints.ai_socktype = c::SOCK_STREAM; - let mut res = ptr::null_mut(); - unsafe { - cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res)) - .map(|_| LookupHost { original: res, cur: res, port }) - } + run_with_cstr(host.as_bytes(), |c_host| { + let mut hints: c::addrinfo = unsafe { mem::zeroed() }; + hints.ai_socktype = c::SOCK_STREAM; + let mut res = ptr::null_mut(); + unsafe { + cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res)) + .map(|_| LookupHost { original: res, cur: res, port }) + } + }) } } diff --git a/library/std/src/sys_common/once/futex.rs b/library/std/src/sys_common/once/futex.rs new file mode 100644 index 000000000..5c7e6c013 --- /dev/null +++ b/library/std/src/sys_common/once/futex.rs @@ -0,0 +1,134 @@ +use crate::cell::Cell; +use crate::sync as public; +use crate::sync::atomic::{ + AtomicU32, + Ordering::{Acquire, Relaxed, Release}, +}; +use crate::sys::futex::{futex_wait, futex_wake_all}; + +// On some platforms, the OS is very nice and handles the waiter queue for us. +// This means we only need one atomic value with 5 states: + +/// No initialization has run yet, and no thread is currently using the Once. +const INCOMPLETE: u32 = 0; +/// Some thread has previously attempted to initialize the Once, but it panicked, +/// so the Once is now poisoned. There are no other threads currently accessing +/// this Once. +const POISONED: u32 = 1; +/// Some thread is currently attempting to run initialization. It may succeed, +/// so all future threads need to wait for it to finish. +const RUNNING: u32 = 2; +/// Some thread is currently attempting to run initialization and there are threads +/// waiting for it to finish. +const QUEUED: u32 = 3; +/// Initialization has completed and all future calls should finish immediately. +const COMPLETE: u32 = 4; + +// Threads wait by setting the state to QUEUED and calling `futex_wait` on the state +// variable. When the running thread finishes, it will wake all waiting threads using +// `futex_wake_all`. 
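For context, the private futex state machine above backs the public `std::sync::Once` type. A minimal usage sketch of that stable API (not of the internal module itself):

```rust
use std::sync::Once;

static INIT: Once = Once::new();

fn main() {
    let mut calls = 0;
    // Only the first `call_once` actually runs its closure; concurrent callers
    // block until it finishes, and later callers return immediately once the
    // state reaches COMPLETE. If the closure panics, the Once is poisoned and
    // subsequent `call_once` calls panic as well (`call_once_force` can recover).
    for _ in 0..3 {
        INIT.call_once(|| calls += 1);
    }
    assert_eq!(calls, 1);
    assert!(INIT.is_completed());
}
```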
+ +pub struct OnceState { + poisoned: bool, + set_state_to: Cell<u32>, +} + +impl OnceState { + #[inline] + pub fn is_poisoned(&self) -> bool { + self.poisoned + } + + #[inline] + pub fn poison(&self) { + self.set_state_to.set(POISONED); + } +} + +struct CompletionGuard<'a> { + state: &'a AtomicU32, + set_state_on_drop_to: u32, +} + +impl<'a> Drop for CompletionGuard<'a> { + fn drop(&mut self) { + // Use release ordering to propagate changes to all threads checking + // up on the Once. `futex_wake_all` does its own synchronization, hence + // we do not need `AcqRel`. + if self.state.swap(self.set_state_on_drop_to, Release) == QUEUED { + futex_wake_all(&self.state); + } + } +} + +pub struct Once { + state: AtomicU32, +} + +impl Once { + #[inline] + pub const fn new() -> Once { + Once { state: AtomicU32::new(INCOMPLETE) } + } + + #[inline] + pub fn is_completed(&self) -> bool { + // Use acquire ordering to make all initialization changes visible to the + // current thread. + self.state.load(Acquire) == COMPLETE + } + + // This uses FnMut to match the API of the generic implementation. As this + // implementation is quite light-weight, it is generic over the closure and + // so avoids the cost of dynamic dispatch. + #[cold] + #[track_caller] + pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { + let mut state = self.state.load(Acquire); + loop { + match state { + POISONED if !ignore_poisoning => { + // Panic to propagate the poison. + panic!("Once instance has previously been poisoned"); + } + INCOMPLETE | POISONED => { + // Try to register the current thread as the one running. + if let Err(new) = + self.state.compare_exchange_weak(state, RUNNING, Acquire, Acquire) + { + state = new; + continue; + } + // `waiter_queue` will manage other waiting threads, and + // wake them up on drop. + let mut waiter_queue = + CompletionGuard { state: &self.state, set_state_on_drop_to: POISONED }; + // Run the function, letting it know if we're poisoned or not. + let f_state = public::OnceState { + inner: OnceState { + poisoned: state == POISONED, + set_state_to: Cell::new(COMPLETE), + }, + }; + f(&f_state); + waiter_queue.set_state_on_drop_to = f_state.inner.set_state_to.get(); + return; + } + RUNNING | QUEUED => { + // Set the state to QUEUED if it is not already. + if state == RUNNING + && let Err(new) = self.state.compare_exchange_weak(RUNNING, QUEUED, Relaxed, Acquire) + { + state = new; + continue; + } + + futex_wait(&self.state, QUEUED, None); + state = self.state.load(Acquire); + } + COMPLETE => return, + _ => unreachable!("state is never set to invalid values"), + } + } + } +} diff --git a/library/std/src/sys_common/once/generic.rs b/library/std/src/sys_common/once/generic.rs new file mode 100644 index 000000000..acf5f2471 --- /dev/null +++ b/library/std/src/sys_common/once/generic.rs @@ -0,0 +1,282 @@ +// Each `Once` has one word of atomic state, and this state is CAS'd on to +// determine what to do. There are four possible state of a `Once`: +// +// * Incomplete - no initialization has run yet, and no thread is currently +// using the Once. +// * Poisoned - some thread has previously attempted to initialize the Once, but +// it panicked, so the Once is now poisoned. There are no other +// threads currently accessing this Once. +// * Running - some thread is currently attempting to run initialization. It may +// succeed, so all future threads need to wait for it to finish. +// Note that this state is accompanied with a payload, described +// below. 
+// * Complete - initialization has completed and all future calls should finish +// immediately. +// +// With 4 states we need 2 bits to encode this, and we use the remaining bits +// in the word we have allocated as a queue of threads waiting for the thread +// responsible for entering the RUNNING state. This queue is just a linked list +// of Waiter nodes which is monotonically increasing in size. Each node is +// allocated on the stack, and whenever the running closure finishes it will +// consume the entire queue and notify all waiters they should try again. +// +// You'll find a few more details in the implementation, but that's the gist of +// it! +// +// Atomic orderings: +// When running `Once` we deal with multiple atomics: +// `Once.state_and_queue` and an unknown number of `Waiter.signaled`. +// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the +// result of the `Once`, and (3) for synchronizing `Waiter` nodes. +// - At the end of the `call` function we have to make sure the result +// of the `Once` is acquired. So every load which can be the only one to +// load COMPLETED must have at least acquire ordering, which means all +// three of them. +// - `WaiterQueue::drop` is the only place that may store COMPLETED, and +// must do so with release ordering to make the result available. +// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and +// needs to make the nodes available with release ordering. The load in +// its `compare_exchange` can be relaxed because it only has to compare +// the atomic, not to read other data. +// - `WaiterQueue::drop` must see the `Waiter` nodes, so it must load +// `state_and_queue` with acquire ordering. +// - There is just one store where `state_and_queue` is used only as a +// state flag, without having to synchronize data: switching the state +// from INCOMPLETE to RUNNING in `call`. This store can be Relaxed, +// but the read has to be Acquire because of the requirements mentioned +// above. +// * `Waiter.signaled` is both used as a flag, and to protect a field with +// interior mutability in `Waiter`. `Waiter.thread` is changed in +// `WaiterQueue::drop` which then sets `signaled` with release ordering. +// After `wait` loads `signaled` with acquire ordering and sees it is true, +// it needs to see the changes to drop the `Waiter` struct correctly. +// * There is one place where the two atomics `Once.state_and_queue` and +// `Waiter.signaled` come together, and might be reordered by the compiler or +// processor. Because both use acquire ordering such a reordering is not +// allowed, so no need for `SeqCst`. + +use crate::cell::Cell; +use crate::fmt; +use crate::ptr; +use crate::sync as public; +use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; +use crate::thread::{self, Thread}; + +type Masked = (); + +pub struct Once { + state_and_queue: AtomicPtr<Masked>, +} + +pub struct OnceState { + poisoned: bool, + set_state_on_drop_to: Cell<*mut Masked>, +} + +// Four states that a Once can be in, encoded into the lower bits of +// `state_and_queue` in the Once structure. +const INCOMPLETE: usize = 0x0; +const POISONED: usize = 0x1; +const RUNNING: usize = 0x2; +const COMPLETE: usize = 0x3; + +// Mask to learn about the state. All other bits are the queue of waiters if +// this is in the RUNNING state. +const STATE_MASK: usize = 0x3; + +// Representation of a node in the linked list of waiters, used while in the +// RUNNING state. 
+// Note: `Waiter` can't hold a mutable pointer to the next thread, because then +// `wait` would both hand out a mutable reference to its `Waiter` node, and keep +// a shared reference to check `signaled`. Instead we hold shared references and +// use interior mutability. +#[repr(align(4))] // Ensure the two lower bits are free to use as state bits. +struct Waiter { + thread: Cell<Option<Thread>>, + signaled: AtomicBool, + next: *const Waiter, +} + +// Head of a linked list of waiters. +// Every node is a struct on the stack of a waiting thread. +// Will wake up the waiters when it gets dropped, i.e. also on panic. +struct WaiterQueue<'a> { + state_and_queue: &'a AtomicPtr<Masked>, + set_state_on_drop_to: *mut Masked, +} + +impl Once { + #[inline] + pub const fn new() -> Once { + Once { state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)) } + } + + #[inline] + pub fn is_completed(&self) -> bool { + // An `Acquire` load is enough because that makes all the initialization + // operations visible to us, and, this being a fast path, weaker + // ordering helps with performance. This `Acquire` synchronizes with + // `Release` operations on the slow path. + self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE + } + + // This is a non-generic function to reduce the monomorphization cost of + // using `call_once` (this isn't exactly a trivial or small implementation). + // + // Additionally, this is tagged with `#[cold]` as it should indeed be cold + // and it helps let LLVM know that calls to this function should be off the + // fast path. Essentially, this should help generate more straight line code + // in LLVM. + // + // Finally, this takes an `FnMut` instead of a `FnOnce` because there's + // currently no way to take an `FnOnce` and call it via virtual dispatch + // without some allocation overhead. + #[cold] + #[track_caller] + pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) { + let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); + loop { + match state_and_queue.addr() { + COMPLETE => break, + POISONED if !ignore_poisoning => { + // Panic to propagate the poison. + panic!("Once instance has previously been poisoned"); + } + POISONED | INCOMPLETE => { + // Try to register this thread as the one RUNNING. + let exchange_result = self.state_and_queue.compare_exchange( + state_and_queue, + ptr::invalid_mut(RUNNING), + Ordering::Acquire, + Ordering::Acquire, + ); + if let Err(old) = exchange_result { + state_and_queue = old; + continue; + } + // `waiter_queue` will manage other waiting threads, and + // wake them up on drop. + let mut waiter_queue = WaiterQueue { + state_and_queue: &self.state_and_queue, + set_state_on_drop_to: ptr::invalid_mut(POISONED), + }; + // Run the initialization function, letting it know if we're + // poisoned or not. + let init_state = public::OnceState { + inner: OnceState { + poisoned: state_and_queue.addr() == POISONED, + set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)), + }, + }; + init(&init_state); + waiter_queue.set_state_on_drop_to = init_state.inner.set_state_on_drop_to.get(); + break; + } + _ => { + // All other values must be RUNNING with possibly a + // pointer to the waiter queue in the more significant bits. 
+ assert!(state_and_queue.addr() & STATE_MASK == RUNNING); + wait(&self.state_and_queue, state_and_queue); + state_and_queue = self.state_and_queue.load(Ordering::Acquire); + } + } + } + } +} + +fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) { + // Note: the following code was carefully written to avoid creating a + // mutable reference to `node` that gets aliased. + loop { + // Don't queue this thread if the status is no longer running, + // otherwise we will not be woken up. + if current_state.addr() & STATE_MASK != RUNNING { + return; + } + + // Create the node for our current thread. + let node = Waiter { + thread: Cell::new(Some(thread::current())), + signaled: AtomicBool::new(false), + next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter, + }; + let me = &node as *const Waiter as *const Masked as *mut Masked; + + // Try to slide in the node at the head of the linked list, making sure + // that another thread didn't just replace the head of the linked list. + let exchange_result = state_and_queue.compare_exchange( + current_state, + me.with_addr(me.addr() | RUNNING), + Ordering::Release, + Ordering::Relaxed, + ); + if let Err(old) = exchange_result { + current_state = old; + continue; + } + + // We have enqueued ourselves, now lets wait. + // It is important not to return before being signaled, otherwise we + // would drop our `Waiter` node and leave a hole in the linked list + // (and a dangling reference). Guard against spurious wakeups by + // reparking ourselves until we are signaled. + while !node.signaled.load(Ordering::Acquire) { + // If the managing thread happens to signal and unpark us before we + // can park ourselves, the result could be this thread never gets + // unparked. Luckily `park` comes with the guarantee that if it got + // an `unpark` just before on an unparked thread it does not park. + thread::park(); + } + break; + } +} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl fmt::Debug for Once { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Once").finish_non_exhaustive() + } +} + +impl Drop for WaiterQueue<'_> { + fn drop(&mut self) { + // Swap out our state with however we finished. + let state_and_queue = + self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); + + // We should only ever see an old state which was RUNNING. + assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING); + + // Walk the entire linked list of waiters and wake them up (in lifo + // order, last to register is first to wake up). + unsafe { + // Right after setting `node.signaled = true` the other thread may + // free `node` if there happens to be has a spurious wakeup. + // So we have to take out the `thread` field and copy the pointer to + // `next` first. + let mut queue = + state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter; + while !queue.is_null() { + let next = (*queue).next; + let thread = (*queue).thread.take().unwrap(); + (*queue).signaled.store(true, Ordering::Release); + // ^- FIXME (maybe): This is another case of issue #55005 + // `store()` has a potentially dangling ref to `signaled`. 
+ queue = next; + thread.unpark(); + } + } + } +} + +impl OnceState { + #[inline] + pub fn is_poisoned(&self) -> bool { + self.poisoned + } + + #[inline] + pub fn poison(&self) { + self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED)); + } +} diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs new file mode 100644 index 000000000..8742e68cc --- /dev/null +++ b/library/std/src/sys_common/once/mod.rs @@ -0,0 +1,43 @@ +// A "once" is a relatively simple primitive, and it's also typically provided +// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS +// primitives, however, tend to have surprising restrictions, such as the Unix +// one doesn't allow an argument to be passed to the function. +// +// As a result, we end up implementing it ourselves in the standard library. +// This also gives us the opportunity to optimize the implementation a bit which +// should help the fast path on call sites. +// +// So to recap, the guarantees of a Once are that it will call the +// initialization closure at most once, and it will never return until the one +// that's running has finished running. This means that we need some form of +// blocking here while the custom callback is running at the very least. +// Additionally, we add on the restriction of **poisoning**. Whenever an +// initialization closure panics, the Once enters a "poisoned" state which means +// that all future calls will immediately panic as well. +// +// So to implement this, one might first reach for a `Mutex`, but those cannot +// be put into a `static`. It also gets a lot harder with poisoning to figure +// out when the mutex needs to be deallocated because it's not after the closure +// finishes, but after the first successful closure finishes. +// +// All in all, this is instead implemented with atomics and lock-free +// operations! Whee! + +cfg_if::cfg_if! { + if #[cfg(any( + target_os = "linux", + target_os = "android", + all(target_arch = "wasm32", target_feature = "atomics"), + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + target_os = "fuchsia", + target_os = "hermit", + ))] { + mod futex; + pub use futex::{Once, OnceState}; + } else { + mod generic; + pub use generic::{Once, OnceState}; + } +} diff --git a/library/std/src/sys_common/rwlock.rs b/library/std/src/sys_common/rwlock.rs index ba56f3a8f..042981dac 100644 --- a/library/std/src/sys_common/rwlock.rs +++ b/library/std/src/sys_common/rwlock.rs @@ -1,65 +1,5 @@ use crate::sys::locks as imp; -/// An OS-based reader-writer lock, meant for use in static variables. -/// -/// This rwlock does not implement poisoning. -/// -/// This rwlock has a const constructor ([`StaticRwLock::new`]), does not -/// implement `Drop` to cleanup resources. -pub struct StaticRwLock(imp::RwLock); - -impl StaticRwLock { - /// Creates a new rwlock for use. - #[inline] - pub const fn new() -> Self { - Self(imp::RwLock::new()) - } - - /// Acquires shared access to the underlying lock, blocking the current - /// thread to do so. - /// - /// The lock is automatically unlocked when the returned guard is dropped. - #[inline] - pub fn read(&'static self) -> StaticRwLockReadGuard { - unsafe { self.0.read() }; - StaticRwLockReadGuard(&self.0) - } - - /// Acquires write access to the underlying lock, blocking the current thread - /// to do so. - /// - /// The lock is automatically unlocked when the returned guard is dropped. 
- #[inline] - pub fn write(&'static self) -> StaticRwLockWriteGuard { - unsafe { self.0.write() }; - StaticRwLockWriteGuard(&self.0) - } -} - -#[must_use] -pub struct StaticRwLockReadGuard(&'static imp::RwLock); - -impl Drop for StaticRwLockReadGuard { - #[inline] - fn drop(&mut self) { - unsafe { - self.0.read_unlock(); - } - } -} - -#[must_use] -pub struct StaticRwLockWriteGuard(&'static imp::RwLock); - -impl Drop for StaticRwLockWriteGuard { - #[inline] - fn drop(&mut self) { - unsafe { - self.0.write_unlock(); - } - } -} - /// An OS-based reader-writer lock. /// /// This rwlock cleans up its resources in its `Drop` implementation and may @@ -75,6 +15,7 @@ pub struct MovableRwLock(imp::MovableRwLock); impl MovableRwLock { /// Creates a new reader-writer lock for use. #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] pub const fn new() -> Self { Self(imp::MovableRwLock::new()) } diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs index 032bf604d..747579f17 100644 --- a/library/std/src/sys_common/thread_local_key.rs +++ b/library/std/src/sys_common/thread_local_key.rs @@ -53,7 +53,6 @@ mod tests; use crate::sync::atomic::{self, AtomicUsize, Ordering}; use crate::sys::thread_local_key as imp; -use crate::sys_common::mutex::StaticMutex; /// A type for TLS keys that are statically allocated. /// @@ -151,25 +150,6 @@ impl StaticKey { } unsafe fn lazy_init(&self) -> usize { - // Currently the Windows implementation of TLS is pretty hairy, and - // it greatly simplifies creation if we just synchronize everything. - // - // Additionally a 0-index of a tls key hasn't been seen on windows, so - // we just simplify the whole branch. - if imp::requires_synchronized_create() { - // We never call `INIT_LOCK.init()`, so it is UB to attempt to - // acquire this mutex reentrantly! - static INIT_LOCK: StaticMutex = StaticMutex::new(); - let _guard = INIT_LOCK.lock(); - let mut key = self.key.load(Ordering::SeqCst); - if key == 0 { - key = imp::create(self.dtor) as usize; - self.key.store(key, Ordering::SeqCst); - } - rtassert!(key != 0); - return key; - } - // POSIX allows the key created here to be 0, but the compare_exchange // below relies on using 0 as a sentinel value to check who won the // race to set the shared TLS key. As far as I know, there is no @@ -232,8 +212,6 @@ impl Key { impl Drop for Key { fn drop(&mut self) { - // Right now Windows doesn't support TLS key destruction, but this also - // isn't used anywhere other than tests, so just leak the TLS key. 
- // unsafe { imp::destroy(self.key) } + unsafe { imp::destroy(self.key) } } } diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs index 8aedfc4a6..5d267891b 100644 --- a/library/std/src/thread/local.rs +++ b/library/std/src/thread/local.rs @@ -95,6 +95,7 @@ use crate::fmt; /// [loader lock]: https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-best-practices /// [`JoinHandle::join`]: crate::thread::JoinHandle::join /// [`with`]: LocalKey::with +#[cfg_attr(not(test), rustc_diagnostic_item = "LocalKey")] #[stable(feature = "rust1", since = "1.0.0")] pub struct LocalKey<T: 'static> { // This outer `LocalKey<T>` type is what's going to be stored in statics, @@ -900,7 +901,7 @@ pub mod statik { } #[doc(hidden)] -#[cfg(target_thread_local)] +#[cfg(all(target_thread_local, not(all(target_family = "wasm", not(target_feature = "atomics"))),))] pub mod fast { use super::lazy::LazyKeyInner; use crate::cell::Cell; @@ -1036,7 +1037,10 @@ pub mod fast { } #[doc(hidden)] -#[cfg(not(target_thread_local))] +#[cfg(all( + not(target_thread_local), + not(all(target_family = "wasm", not(target_feature = "atomics"))), +))] pub mod os { use super::lazy::LazyKeyInner; use crate::cell::Cell; diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index ceea6986e..05023df1b 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -150,6 +150,8 @@ #![stable(feature = "rust1", since = "1.0.0")] #![deny(unsafe_op_in_unsafe_fn)] +// Under `test`, `__FastLocalKeyInner` seems unused. +#![cfg_attr(test, allow(dead_code))] #[cfg(all(test, not(target_os = "emscripten")))] mod tests; @@ -160,7 +162,7 @@ use crate::ffi::{CStr, CString}; use crate::fmt; use crate::io; use crate::marker::PhantomData; -use crate::mem; +use crate::mem::{self, forget}; use crate::num::NonZeroU64; use crate::num::NonZeroUsize; use crate::panic; @@ -192,32 +194,40 @@ pub use scoped::{scope, Scope, ScopedJoinHandle}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::local::{AccessError, LocalKey}; -// Select the type used by the thread_local! macro to access TLS keys. There -// are three types: "static", "fast", "OS". The "OS" thread local key +// Provide the type used by the thread_local! macro to access TLS keys. This +// needs to be kept in sync with the macro itself (in `local.rs`). +// There are three types: "static", "fast", "OS". The "OS" thread local key // type is accessed via platform-specific API calls and is slow, while the "fast" // key type is accessed via code generated via LLVM, where TLS keys are set up // by the elf linker. "static" is for single-threaded platforms where a global // static is sufficient. 
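For context, these internal key types are the backing storage selected by the stable `thread_local!` macro; a minimal usage sketch of that public interface (the `__FastLocalKeyInner`/`__OsLocalKeyInner` re-exports below are implementation details):

```rust
use std::cell::Cell;
use std::thread;

thread_local! {
    // Each thread lazily initializes and owns its own copy of this value.
    static COUNTER: Cell<u32> = Cell::new(0);
}

fn main() {
    COUNTER.with(|c| c.set(c.get() + 1));

    thread::spawn(|| {
        // A freshly spawned thread sees the initial value, not the update above.
        COUNTER.with(|c| assert_eq!(c.get(), 0));
    })
    .join()
    .unwrap();

    COUNTER.with(|c| assert_eq!(c.get(), 1));
}
```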
#[unstable(feature = "libstd_thread_internals", issue = "none")] -#[cfg(target_thread_local)] #[cfg(not(test))] +#[cfg(all( + target_thread_local, + not(all(target_family = "wasm", not(target_feature = "atomics"))), +))] #[doc(hidden)] pub use self::local::fast::Key as __FastLocalKeyInner; -#[unstable(feature = "libstd_thread_internals", issue = "none")] -#[cfg(target_thread_local)] -#[cfg(test)] // when building for tests, use real std's key -pub use realstd::thread::__FastLocalKeyInner; +// when building for tests, use real std's type #[unstable(feature = "libstd_thread_internals", issue = "none")] -#[cfg(target_thread_local)] #[cfg(test)] -pub use self::local::fast::Key as __FastLocalKeyInnerUnused; // we import this anyway to silence 'unused' warnings +#[cfg(all( + target_thread_local, + not(all(target_family = "wasm", not(target_feature = "atomics"))), +))] +pub use realstd::thread::__FastLocalKeyInner; #[unstable(feature = "libstd_thread_internals", issue = "none")] +#[cfg(all( + not(target_thread_local), + not(all(target_family = "wasm", not(target_feature = "atomics"))), +))] #[doc(hidden)] -#[cfg(not(target_thread_local))] pub use self::local::os::Key as __OsLocalKeyInner; + #[unstable(feature = "libstd_thread_internals", issue = "none")] #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))] #[doc(hidden)] @@ -499,6 +509,31 @@ impl Builder { let output_capture = crate::io::set_output_capture(None); crate::io::set_output_capture(output_capture.clone()); + // Pass `f` in `MaybeUninit` because actually that closure might *run longer than the lifetime of `F`*. + // See <https://github.com/rust-lang/rust/issues/101983> for more details. + // To prevent leaks we use a wrapper that drops its contents. + #[repr(transparent)] + struct MaybeDangling<T>(mem::MaybeUninit<T>); + impl<T> MaybeDangling<T> { + fn new(x: T) -> Self { + MaybeDangling(mem::MaybeUninit::new(x)) + } + fn into_inner(self) -> T { + // SAFETY: we are always initiailized. + let ret = unsafe { self.0.assume_init_read() }; + // Make sure we don't drop. + mem::forget(self); + ret + } + } + impl<T> Drop for MaybeDangling<T> { + fn drop(&mut self) { + // SAFETY: we are always initiailized. + unsafe { self.0.assume_init_drop() }; + } + } + + let f = MaybeDangling::new(f); let main = move || { if let Some(name) = their_thread.cname() { imp::Thread::set_name(name); @@ -506,6 +541,8 @@ impl Builder { crate::io::set_output_capture(output_capture); + // SAFETY: we constructed `f` initialized. + let f = f.into_inner(); // SAFETY: the stack guard passed is the one for the current thread. // This means the current thread's stack and the new thread's stack // are properly set and protected from each other. @@ -518,6 +555,12 @@ impl Builder { // same `JoinInner` as this closure meaning the mutation will be // safe (not modify it and affect a value far away). unsafe { *their_packet.result.get() = Some(try_result) }; + // Here `their_packet` gets dropped, and if this is the last `Arc` for that packet that + // will call `decrement_num_running_threads` and therefore signal that this thread is + // done. + drop(their_packet); + // Here, the lifetime `'a` and even `'scope` can end. `main` keeps running for a bit + // after that before returning itself. }; if let Some(scope_data) = &my_packet.scope { @@ -779,6 +822,8 @@ pub fn panicking() -> bool { panicking::panicking() } +/// Use [`sleep`]. +/// /// Puts the current thread to sleep for at least the specified amount of time. 
/// /// The thread may sleep longer than the duration specified due to scheduling @@ -849,10 +894,22 @@ pub fn sleep(dur: Duration) { imp::Thread::sleep(dur) } +/// Used to ensure that `park` and `park_timeout` do not unwind, as that can +/// cause undefined behaviour if not handled correctly (see #102398 for context). +struct PanicGuard; + +impl Drop for PanicGuard { + fn drop(&mut self) { + rtabort!("an irrecoverable error occurred while synchronizing threads") + } +} + /// Blocks unless or until the current thread's token is made available. /// /// A call to `park` does not guarantee that the thread will remain parked -/// forever, and callers should be prepared for this possibility. +/// forever, and callers should be prepared for this possibility. However, +/// it is guaranteed that this function will not panic (it may abort the +/// process if the implementation encounters some rare errors). /// /// # park and unpark /// @@ -937,10 +994,13 @@ pub fn sleep(dur: Duration) { /// [`thread::park_timeout`]: park_timeout #[stable(feature = "rust1", since = "1.0.0")] pub fn park() { + let guard = PanicGuard; // SAFETY: park_timeout is called on the parker owned by this thread. unsafe { current().inner.as_ref().parker().park(); } + // No panic occurred, do not abort. + forget(guard); } /// Use [`park_timeout`]. @@ -1001,10 +1061,13 @@ pub fn park_timeout_ms(ms: u32) { /// ``` #[stable(feature = "park_timeout", since = "1.4.0")] pub fn park_timeout(dur: Duration) { + let guard = PanicGuard; // SAFETY: park_timeout is called on the parker owned by this thread. unsafe { current().inner.as_ref().parker().park_timeout(dur); } + // No panic occurred, do not abort. + forget(guard); } //////////////////////////////////////////////////////////////////////////////// @@ -1065,24 +1128,21 @@ impl ThreadId { } } } else { - use crate::sys_common::mutex::StaticMutex; + use crate::sync::{Mutex, PoisonError}; - // It is UB to attempt to acquire this mutex reentrantly! - static GUARD: StaticMutex = StaticMutex::new(); - static mut COUNTER: u64 = 0; + static COUNTER: Mutex<u64> = Mutex::new(0); - unsafe { - let guard = GUARD.lock(); + let mut counter = COUNTER.lock().unwrap_or_else(PoisonError::into_inner); + let Some(id) = counter.checked_add(1) else { + // in case the panic handler ends up calling `ThreadId::new()`, + // avoid reentrant lock acquire. + drop(counter); + exhausted(); + }; - let Some(id) = COUNTER.checked_add(1) else { - drop(guard); // in case the panic handler ends up calling `ThreadId::new()`, avoid reentrant lock acquire. 
- exhausted(); - }; - - COUNTER = id; - drop(guard); - ThreadId(NonZeroU64::new(id).unwrap()) - } + *counter = id; + drop(counter); + ThreadId(NonZeroU64::new(id).unwrap()) } } } diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs index 777964f04..6c9ce6fa0 100644 --- a/library/std/src/thread/tests.rs +++ b/library/std/src/thread/tests.rs @@ -276,6 +276,28 @@ fn test_try_panic_any_message_unit_struct() { } #[test] +fn test_park_unpark_before() { + for _ in 0..10 { + thread::current().unpark(); + thread::park(); + } +} + +#[test] +fn test_park_unpark_called_other_thread() { + for _ in 0..10 { + let th = thread::current(); + + let _guard = thread::spawn(move || { + super::sleep(Duration::from_millis(50)); + th.unpark(); + }); + + thread::park(); + } +} + +#[test] fn test_park_timeout_unpark_before() { for _ in 0..10 { thread::current().unpark(); diff --git a/library/std/src/time.rs b/library/std/src/time.rs index 759a59e1f..ecd06ebf7 100644 --- a/library/std/src/time.rs +++ b/library/std/src/time.rs @@ -43,8 +43,8 @@ use crate::sys_common::{FromInner, IntoInner}; #[stable(feature = "time", since = "1.3.0")] pub use core::time::Duration; -#[unstable(feature = "duration_checked_float", issue = "83400")] -pub use core::time::FromFloatSecsError; +#[stable(feature = "duration_checked_float", since = "1.66.0")] +pub use core::time::TryFromFloatSecsError; /// A measurement of a monotonically nondecreasing clock. /// Opaque and useful only with [`Duration`]. @@ -356,7 +356,7 @@ impl Instant { /// /// # Panics /// - /// Previous rust versions panicked when self was earlier than the current time. Currently this + /// Previous rust versions panicked when the current time was earlier than self. Currently this /// method returns a Duration of zero in that case. Future versions may reintroduce the panic. /// See [Monotonicity]. 
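A minimal sketch of the zero-saturating behaviour documented in this hunk, using the stable `Instant` API (shown here with `duration_since`; `elapsed` behaves the same way when the clock has not advanced):

```rust
use std::time::{Duration, Instant};

fn main() {
    let earlier = Instant::now();
    let later = Instant::now();
    // When the receiver is not actually later than the argument, current Rust
    // returns a zero Duration instead of panicking, as noted in the docs above.
    assert_eq!(earlier.duration_since(later), Duration::ZERO);
}
```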
/// diff --git a/library/std/tests/run-time-detect.rs b/library/std/tests/run-time-detect.rs index a57a52d9b..02c076f1b 100644 --- a/library/std/tests/run-time-detect.rs +++ b/library/std/tests/run-time-detect.rs @@ -14,77 +14,85 @@ #[cfg(all(target_arch = "arm", any(target_os = "linux", target_os = "android")))] fn arm_linux() { use std::arch::is_arm_feature_detected; + // tidy-alphabetical-start + println!("aes: {}", is_arm_feature_detected!("aes")); + println!("crc: {}", is_arm_feature_detected!("crc")); + println!("crypto: {}", is_arm_feature_detected!("crypto")); println!("neon: {}", is_arm_feature_detected!("neon")); println!("pmull: {}", is_arm_feature_detected!("pmull")); - println!("crypto: {}", is_arm_feature_detected!("crypto")); - println!("crc: {}", is_arm_feature_detected!("crc")); - println!("aes: {}", is_arm_feature_detected!("aes")); println!("sha2: {}", is_arm_feature_detected!("sha2")); + // tidy-alphabetical-end } #[test] #[cfg(all(target_arch = "aarch64", any(target_os = "linux", target_os = "android")))] fn aarch64_linux() { use std::arch::is_aarch64_feature_detected; - println!("neon: {}", is_aarch64_feature_detected!("neon")); + // tidy-alphabetical-start + println!("aes: {}", is_aarch64_feature_detected!("aes")); println!("asimd: {}", is_aarch64_feature_detected!("asimd")); - println!("pmull: {}", is_aarch64_feature_detected!("pmull")); - println!("fp16: {}", is_aarch64_feature_detected!("fp16")); - println!("sve: {}", is_aarch64_feature_detected!("sve")); + println!("bf16: {}", is_aarch64_feature_detected!("bf16")); + println!("bti: {}", is_aarch64_feature_detected!("bti")); println!("crc: {}", is_aarch64_feature_detected!("crc")); - println!("lse: {}", is_aarch64_feature_detected!("lse")); - println!("lse2: {}", is_aarch64_feature_detected!("lse2")); - println!("rdm: {}", is_aarch64_feature_detected!("rdm")); - println!("rcpc: {}", is_aarch64_feature_detected!("rcpc")); - println!("rcpc2: {}", is_aarch64_feature_detected!("rcpc2")); + println!("dit: {}", is_aarch64_feature_detected!("dit")); println!("dotprod: {}", is_aarch64_feature_detected!("dotprod")); - println!("tme: {}", is_aarch64_feature_detected!("tme")); + println!("dpb2: {}", is_aarch64_feature_detected!("dpb2")); + println!("dpb: {}", is_aarch64_feature_detected!("dpb")); + println!("f32mm: {}", is_aarch64_feature_detected!("f32mm")); + println!("f64mm: {}", is_aarch64_feature_detected!("f64mm")); + println!("fcma: {}", is_aarch64_feature_detected!("fcma")); println!("fhm: {}", is_aarch64_feature_detected!("fhm")); - println!("dit: {}", is_aarch64_feature_detected!("dit")); println!("flagm: {}", is_aarch64_feature_detected!("flagm")); - println!("ssbs: {}", is_aarch64_feature_detected!("ssbs")); - println!("sb: {}", is_aarch64_feature_detected!("sb")); - println!("paca: {}", is_aarch64_feature_detected!("paca")); - println!("pacg: {}", is_aarch64_feature_detected!("pacg")); - println!("dpb: {}", is_aarch64_feature_detected!("dpb")); - println!("dpb2: {}", is_aarch64_feature_detected!("dpb2")); - println!("sve2: {}", is_aarch64_feature_detected!("sve2")); - println!("sve2-aes: {}", is_aarch64_feature_detected!("sve2-aes")); - println!("sve2-sm4: {}", is_aarch64_feature_detected!("sve2-sm4")); - println!("sve2-sha3: {}", is_aarch64_feature_detected!("sve2-sha3")); - println!("sve2-bitperm: {}", is_aarch64_feature_detected!("sve2-bitperm")); + println!("fp16: {}", is_aarch64_feature_detected!("fp16")); println!("frintts: {}", is_aarch64_feature_detected!("frintts")); println!("i8mm: {}", 
is_aarch64_feature_detected!("i8mm")); - println!("f32mm: {}", is_aarch64_feature_detected!("f32mm")); - println!("f64mm: {}", is_aarch64_feature_detected!("f64mm")); - println!("bf16: {}", is_aarch64_feature_detected!("bf16")); - println!("rand: {}", is_aarch64_feature_detected!("rand")); - println!("bti: {}", is_aarch64_feature_detected!("bti")); - println!("mte: {}", is_aarch64_feature_detected!("mte")); println!("jsconv: {}", is_aarch64_feature_detected!("jsconv")); - println!("fcma: {}", is_aarch64_feature_detected!("fcma")); - println!("aes: {}", is_aarch64_feature_detected!("aes")); + println!("lse2: {}", is_aarch64_feature_detected!("lse2")); + println!("lse: {}", is_aarch64_feature_detected!("lse")); + println!("mte: {}", is_aarch64_feature_detected!("mte")); + println!("neon: {}", is_aarch64_feature_detected!("neon")); + println!("paca: {}", is_aarch64_feature_detected!("paca")); + println!("pacg: {}", is_aarch64_feature_detected!("pacg")); + println!("pmull: {}", is_aarch64_feature_detected!("pmull")); + println!("rand: {}", is_aarch64_feature_detected!("rand")); + println!("rcpc2: {}", is_aarch64_feature_detected!("rcpc2")); + println!("rcpc: {}", is_aarch64_feature_detected!("rcpc")); + println!("rdm: {}", is_aarch64_feature_detected!("rdm")); + println!("sb: {}", is_aarch64_feature_detected!("sb")); println!("sha2: {}", is_aarch64_feature_detected!("sha2")); println!("sha3: {}", is_aarch64_feature_detected!("sha3")); println!("sm4: {}", is_aarch64_feature_detected!("sm4")); + println!("ssbs: {}", is_aarch64_feature_detected!("ssbs")); + println!("sve2-aes: {}", is_aarch64_feature_detected!("sve2-aes")); + println!("sve2-bitperm: {}", is_aarch64_feature_detected!("sve2-bitperm")); + println!("sve2-sha3: {}", is_aarch64_feature_detected!("sve2-sha3")); + println!("sve2-sm4: {}", is_aarch64_feature_detected!("sve2-sm4")); + println!("sve2: {}", is_aarch64_feature_detected!("sve2")); + println!("sve: {}", is_aarch64_feature_detected!("sve")); + println!("tme: {}", is_aarch64_feature_detected!("tme")); + // tidy-alphabetical-end } #[test] #[cfg(all(target_arch = "powerpc", target_os = "linux"))] fn powerpc_linux() { use std::arch::is_powerpc_feature_detected; + // tidy-alphabetical-start println!("altivec: {}", is_powerpc_feature_detected!("altivec")); - println!("vsx: {}", is_powerpc_feature_detected!("vsx")); println!("power8: {}", is_powerpc_feature_detected!("power8")); + println!("vsx: {}", is_powerpc_feature_detected!("vsx")); + // tidy-alphabetical-end } #[test] #[cfg(all(target_arch = "powerpc64", target_os = "linux"))] fn powerpc64_linux() { use std::arch::is_powerpc64_feature_detected; + // tidy-alphabetical-start println!("altivec: {}", is_powerpc64_feature_detected!("altivec")); - println!("vsx: {}", is_powerpc64_feature_detected!("vsx")); println!("power8: {}", is_powerpc64_feature_detected!("power8")); + println!("vsx: {}", is_powerpc64_feature_detected!("vsx")); + // tidy-alphabetical-end } #[test] @@ -102,9 +110,9 @@ fn x86_all() { // the below is in alphabetical order and matches // the order of X86_ALLOWED_FEATURES in rustc_codegen_ssa's target_features.rs + // tidy-alphabetical-start println!("adx: {:?}", is_x86_feature_detected!("adx")); println!("aes: {:?}", is_x86_feature_detected!("aes")); - println!("avx: {:?}", is_x86_feature_detected!("avx")); println!("avx2: {:?}", is_x86_feature_detected!("avx2")); println!("avx512bf16: {:?}", is_x86_feature_detected!("avx512bf16")); println!("avx512bitalg: {:?}", is_x86_feature_detected!("avx512bitalg")); @@ -117,13 
+125,14 @@ fn x86_all() { println!("avx512ifma: {:?}", is_x86_feature_detected!("avx512ifma")); println!("avx512pf: {:?}", is_x86_feature_detected!("avx512pf")); println!("avx512vaes: {:?}", is_x86_feature_detected!("avx512vaes")); - println!("avx512vbmi: {:?}", is_x86_feature_detected!("avx512vbmi")); println!("avx512vbmi2: {:?}", is_x86_feature_detected!("avx512vbmi2")); + println!("avx512vbmi: {:?}", is_x86_feature_detected!("avx512vbmi")); println!("avx512vl: {:?}", is_x86_feature_detected!("avx512vl")); println!("avx512vnni: {:?}", is_x86_feature_detected!("avx512vnni")); println!("avx512vp2intersect: {:?}", is_x86_feature_detected!("avx512vp2intersect")); println!("avx512vpclmulqdq: {:?}", is_x86_feature_detected!("avx512vpclmulqdq")); println!("avx512vpopcntdq: {:?}", is_x86_feature_detected!("avx512vpopcntdq")); + println!("avx: {:?}", is_x86_feature_detected!("avx")); println!("bmi1: {:?}", is_x86_feature_detected!("bmi1")); println!("bmi2: {:?}", is_x86_feature_detected!("bmi2")); println!("cmpxchg16b: {:?}", is_x86_feature_detected!("cmpxchg16b")); @@ -138,16 +147,17 @@ fn x86_all() { println!("rdseed: {:?}", is_x86_feature_detected!("rdseed")); println!("rtm: {:?}", is_x86_feature_detected!("rtm")); println!("sha: {:?}", is_x86_feature_detected!("sha")); - println!("sse: {:?}", is_x86_feature_detected!("sse")); println!("sse2: {:?}", is_x86_feature_detected!("sse2")); println!("sse3: {:?}", is_x86_feature_detected!("sse3")); println!("sse4.1: {:?}", is_x86_feature_detected!("sse4.1")); println!("sse4.2: {:?}", is_x86_feature_detected!("sse4.2")); println!("sse4a: {:?}", is_x86_feature_detected!("sse4a")); + println!("sse: {:?}", is_x86_feature_detected!("sse")); println!("ssse3: {:?}", is_x86_feature_detected!("ssse3")); println!("tbm: {:?}", is_x86_feature_detected!("tbm")); println!("xsave: {:?}", is_x86_feature_detected!("xsave")); println!("xsavec: {:?}", is_x86_feature_detected!("xsavec")); println!("xsaveopt: {:?}", is_x86_feature_detected!("xsaveopt")); println!("xsaves: {:?}", is_x86_feature_detected!("xsaves")); + // tidy-alphabetical-end } diff --git a/library/stdarch/ci/android-install-sdk.sh b/library/stdarch/ci/android-install-sdk.sh index 1beeb312a..3383dcb7f 100644 --- a/library/stdarch/ci/android-install-sdk.sh +++ b/library/stdarch/ci/android-install-sdk.sh @@ -19,8 +19,8 @@ set -ex # which apparently magically accepts the licenses. 
mkdir sdk -curl --retry 5 https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip -O -unzip -d sdk sdk-tools-linux-3859397.zip +curl --retry 5 https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip -O +unzip -d sdk sdk-tools-linux-4333796.zip case "$1" in arm | armv7) diff --git a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile b/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile index 27bde89c5..6cf9b5061 100644 --- a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile +++ b/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile @@ -1,17 +1,16 @@ -FROM ubuntu:16.04 +FROM ubuntu:22.04 -RUN dpkg --add-architecture i386 && \ - apt-get update && \ +RUN apt-get update && \ apt-get install -y --no-install-recommends \ file \ make \ curl \ ca-certificates \ - python \ + python-is-python3 \ unzip \ expect \ - openjdk-9-jre \ - libstdc++6:i386 \ + openjdk-8-jre \ + libstdc++6-i386-cross \ libpulse0 \ gcc \ libc6-dev diff --git a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile b/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile index 995a9e30e..fb1a0cecf 100644 --- a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile +++ b/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile @@ -1,17 +1,16 @@ -FROM ubuntu:16.04 +FROM ubuntu:22.04 -RUN dpkg --add-architecture i386 && \ - apt-get update && \ +RUN apt-get update && \ apt-get install -y --no-install-recommends \ file \ make \ curl \ ca-certificates \ - python \ + python-is-python3 \ unzip \ expect \ - openjdk-9-jre \ - libstdc++6:i386 \ + openjdk-8-jre \ + libstdc++6-i386-cross \ libpulse0 \ gcc \ libc6-dev diff --git a/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile b/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile index c2830b15f..82119be74 100644 --- a/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile +++ b/library/stdarch/ci/docker/x86_64-linux-android/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:22.04 RUN apt-get update && \ apt-get install -y --no-install-recommends \ @@ -6,7 +6,7 @@ RUN apt-get update && \ curl \ gcc \ libc-dev \ - python \ + python-is-python3 \ unzip \ file \ make diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs index 043f7ed51..0559aea83 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs @@ -12461,30 +12461,30 @@ mod tests { } #[simd_test(enable = "neon,i8mm")] unsafe fn test_vmmlaq_s32() { - let a: i32x4 = i32x4::new(1, 3, 4, 9); - let b: i8x16 = i8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16); - let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16); - let e: i32x4 = i32x4::new(1, 2, 3, 4); + let a = i32x4::new(1, 3, 4, -0x10000); + let b = i8x16::new(1, 21, 31, 14, 5, 6, -128, 8, 9, 13, 15, 12, 13, -1, 20, 16); + let c = i8x16::new(12, 22, 3, 4, -1, 56, 7, 8, 91, 10, -128, 15, 13, 14, 17, 16); + let e = i32x4::new(123, -5353, 690, -65576); let r: i32x4 = transmute(vmmlaq_s32(transmute(a), transmute(b), transmute(c))); assert_eq!(r, e); } #[simd_test(enable = "neon,i8mm")] unsafe fn test_vmmlaq_u32() { - let a: u32x4 = u32x4::new(1, 3, 4, 9); - let b: i8x16 = i8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16); - let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16); - let e: u32x4 = u32x4::new(1, 2, 3, 4); + let a = 
u32x4::new(1, 3, 4, 0xffff0000); + let b = u8x16::new(1, 21, 31, 14, 5, 6, 128, 8, 9, 13, 15, 12, 13, 255, 20, 16); + let c = u8x16::new(12, 22, 3, 4, 255, 56, 7, 8, 91, 10, 128, 15, 13, 14, 17, 16); + let e = u32x4::new(3195, 6935, 18354, 4294909144); let r: u32x4 = transmute(vmmlaq_u32(transmute(a), transmute(b), transmute(c))); assert_eq!(r, e); } #[simd_test(enable = "neon,i8mm")] unsafe fn test_vusmmlaq_s32() { - let a: i32x4 = i32x4::new(1, 3, 4, 9); - let b: i8x16 = i8x16::new(1, 21, 31, 14, 5, 6, 17, 8, 9, 13, 15, 12, 13, 19, 20, 16); - let c: i8x16 = i8x16::new(12, 22, 3, 4, 5, 56, 7, 8, 91, 10, 11, 15, 13, 14, 17, 16); - let e: i32x4 = i32x4::new(1, 2, 3, 4); + let a = i32x4::new(1, 3, 4, -0x10000); + let b = u8x16::new(1, 21, 31, 14, 5, 6, 128, 8, 9, 13, 15, 12, 13, 255, 20, 16); + let c = i8x16::new(12, 22, 3, 4, -1, 56, 7, 8, 91, 10, -128, 15, 13, 14, 17, 16); + let e = i32x4::new(1915, -1001, 15026, -61992); let r: i32x4 = transmute(vusmmlaq_s32(transmute(a), transmute(b), transmute(c))); assert_eq!(r, e); } diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index 9240d0e84..5a9727a0a 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -19,6 +19,7 @@ doc_cfg, tbm_target_feature, sse4a_target_feature, + riscv_target_feature, arm_target_feature, cmpxchg16b_target_feature, avx512_target_feature, @@ -30,8 +31,8 @@ f16c_target_feature, allow_internal_unstable, decl_macro, - bench_black_box, - asm_const + asm_const, + target_feature_11 )] #![cfg_attr(test, feature(test, abi_vectorcall))] #![deny(clippy::missing_inline_in_public_items)] diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index 1e6a3f405..1c917c52b 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ b/library/stdarch/crates/core_arch/src/macros.rs @@ -101,11 +101,11 @@ macro_rules! simd_shuffle2 { const IDX: [u32; 2] = $idx; } - simd_shuffle2($x, $y, ConstParam::<$($imm),+>::IDX) + simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX) }}; ($x:expr, $y:expr, $idx:expr $(,)?) => {{ const IDX: [u32; 2] = $idx; - simd_shuffle2($x, $y, IDX) + simd_shuffle($x, $y, IDX) }}; } @@ -117,11 +117,11 @@ macro_rules! simd_shuffle4 { const IDX: [u32; 4] = $idx; } - simd_shuffle4($x, $y, ConstParam::<$($imm),+>::IDX) + simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX) }}; ($x:expr, $y:expr, $idx:expr $(,)?) => {{ const IDX: [u32; 4] = $idx; - simd_shuffle4($x, $y, IDX) + simd_shuffle($x, $y, IDX) }}; } @@ -133,11 +133,11 @@ macro_rules! simd_shuffle8 { const IDX: [u32; 8] = $idx; } - simd_shuffle8($x, $y, ConstParam::<$($imm),+>::IDX) + simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX) }}; ($x:expr, $y:expr, $idx:expr $(,)?) => {{ const IDX: [u32; 8] = $idx; - simd_shuffle8($x, $y, IDX) + simd_shuffle($x, $y, IDX) }}; } @@ -149,11 +149,11 @@ macro_rules! simd_shuffle16 { const IDX: [u32; 16] = $idx; } - simd_shuffle16($x, $y, ConstParam::<$($imm),+>::IDX) + simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX) }}; ($x:expr, $y:expr, $idx:expr $(,)?) => {{ const IDX: [u32; 16] = $idx; - simd_shuffle16($x, $y, IDX) + simd_shuffle($x, $y, IDX) }}; } @@ -165,11 +165,11 @@ macro_rules! simd_shuffle32 { const IDX: [u32; 32] = $idx; } - simd_shuffle32($x, $y, ConstParam::<$($imm),+>::IDX) + simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX) }}; ($x:expr, $y:expr, $idx:expr $(,)?) 
=> {{ const IDX: [u32; 32] = $idx; - simd_shuffle32($x, $y, IDX) + simd_shuffle($x, $y, IDX) }}; } @@ -181,10 +181,10 @@ macro_rules! simd_shuffle64 { const IDX: [u32; 64] = $idx; } - simd_shuffle64($x, $y, ConstParam::<$($imm),+>::IDX) + simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX) }}; ($x:expr, $y:expr, $idx:expr $(,)?) => {{ const IDX: [u32; 64] = $idx; - simd_shuffle64($x, $y, IDX) + simd_shuffle($x, $y, IDX) }}; } diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs index 20751eeec..2f7af22cb 100644 --- a/library/stdarch/crates/core_arch/src/mod.rs +++ b/library/stdarch/crates/core_arch/src/mod.rs @@ -3,6 +3,9 @@ #[macro_use] mod macros; +#[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))] +mod riscv_shared; + #[cfg(any(target_arch = "arm", target_arch = "aarch64", doc))] mod arm_shared; @@ -276,10 +279,6 @@ mod aarch64; #[doc(cfg(any(target_arch = "arm")))] mod arm; -#[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))] -#[doc(cfg(any(target_arch = "riscv32", target_arch = "riscv64")))] -mod riscv_shared; - #[cfg(any(target_arch = "riscv64", doc))] #[doc(cfg(any(target_arch = "riscv64")))] mod riscv64; diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs index 347735df1..0e35fe1f1 100644 --- a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs +++ b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs @@ -1,4 +1,7 @@ //! Shared RISC-V intrinsics +mod p; + +pub use p::*; use crate::arch::asm; @@ -469,6 +472,17 @@ pub unsafe fn hinval_gvma_vmid(vmid: usize) { asm!(".insn r 0x73, 0, 0x33, x0, x0, {}", in(reg) vmid, options(nostack)) } +/// Invalidate hypervisor translation cache for all virtual machines and guest physical addresses +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate. +/// +/// This fence specifies all guest physical addresses and all virtual machines. +#[inline] +pub unsafe fn hinval_gvma_all() { + asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack)) +} + /// Reads the floating-point control and status register `fcsr` /// /// Register `fcsr` is a 32-bit read/write register that selects the dynamic rounding mode @@ -574,17 +588,6 @@ pub fn fsflags(value: u32) -> u32 { original } -/// Invalidate hypervisor translation cache for all virtual machines and guest physical addresses -/// -/// This instruction invalidates any address-translation cache entries that an -/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate. -/// -/// This fence specifies all guest physical addresses and all virtual machines. -#[inline] -pub unsafe fn hinval_gvma_all() { - asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack)) -} - /// `P0` transformation function as is used in the SM3 hash algorithm /// /// This function is included in `Zksh` extension. It's defined as: @@ -602,12 +605,10 @@ pub unsafe fn hinval_gvma_all() { /// According to RISC-V Cryptography Extensions, Volume I, the execution latency of /// this instruction must always be independent from the data it operates on. 
#[inline] +#[target_feature(enable = "zksh")] pub fn sm3p0(x: u32) -> u32 { let ans: u32; - unsafe { - // asm!("sm3p0 {}, {}", out(reg) ans, in(reg) x, options(nomem, nostack)) - asm!(".insn i 0x13, 0x1, {}, {}, 0x108", out(reg) ans, in(reg) x, options(nomem, nostack)) - }; + unsafe { asm!("sm3p0 {}, {}", lateout(reg) ans, in(reg) x, options(pure, nomem, nostack)) }; ans } @@ -634,12 +635,10 @@ pub fn sm3p0(x: u32) -> u32 { /// According to RISC-V Cryptography Extensions, Volume I, the execution latency of /// this instruction must always be independent from the data it operates on. #[inline] +#[target_feature(enable = "zksh")] pub fn sm3p1(x: u32) -> u32 { let ans: u32; - unsafe { - // asm!("sm3p1 {}, {}", out(reg) ans, in(reg) x, options(nomem, nostack)) - asm!(".insn i 0x13, 0x1, {}, {}, 0x109", out(reg) ans, in(reg) x, options(nomem, nostack)) - }; + unsafe { asm!("sm3p1 {}, {}", lateout(reg) ans, in(reg) x, options(pure, nomem, nostack)) }; ans } @@ -674,33 +673,28 @@ pub fn sm3p1(x: u32) -> u32 { /// It can be implemented by `sm4ed` instruction like: /// /// ```no_run +/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] +/// # fn round_function(x0: u32, x1: u32, x2: u32, x3: u32, rk: u32) -> u32 { +/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ed; +/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ed; /// let a = x1 ^ x2 ^ x3 ^ rk; /// let c0 = sm4ed::<0>(x0, a); /// let c1 = sm4ed::<1>(c0, a); // c1 represents c[0..=1], etc. /// let c2 = sm4ed::<2>(c1, a); /// let c3 = sm4ed::<3>(c2, a); /// return c3; // c3 represents c[0..=3] +/// # } /// ``` /// /// According to RISC-V Cryptography Extensions, Volume I, the execution latency of /// this instruction must always be independent from the data it operates on. +#[inline] +#[target_feature(enable = "zksed")] pub fn sm4ed<const BS: u8>(x: u32, a: u32) -> u32 { static_assert!(BS: u8 where BS <= 3); let ans: u32; - match BS { - 0 => unsafe { - asm!(".insn r 0x33, 0, 0x18, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack)) - }, - 1 => unsafe { - asm!(".insn r 0x33, 0, 0x38, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack)) - }, - 2 => unsafe { - asm!(".insn r 0x33, 0, 0x58, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack)) - }, - 3 => unsafe { - asm!(".insn r 0x33, 0, 0x78, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) a, options(nomem, nostack)) - }, - _ => unreachable!(), + unsafe { + asm!("sm4ed {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) a, const BS, options(pure, nomem, nostack)) }; ans } @@ -739,33 +733,28 @@ pub fn sm4ed<const BS: u8>(x: u32, a: u32) -> u32 { /// Hence, the key schedule operation can be implemented by `sm4ks` instruction like: /// /// ```no_run +/// # #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] +/// # fn key_schedule(k0: u32, k1: u32, k2: u32, k3: u32, ck_i: u32) -> u32 { +/// # #[cfg(target_arch = "riscv32")] use core::arch::riscv32::sm4ks; +/// # #[cfg(target_arch = "riscv64")] use core::arch::riscv64::sm4ks; /// let k = k1 ^ k2 ^ k3 ^ ck_i; /// let c0 = sm4ks::<0>(k0, k); /// let c1 = sm4ks::<1>(c0, k); // c1 represents c[0..=1], etc. /// let c2 = sm4ks::<2>(c1, k); /// let c3 = sm4ks::<3>(c2, k); /// return c3; // c3 represents c[0..=3] +/// # } /// ``` /// /// According to RISC-V Cryptography Extensions, Volume I, the execution latency of /// this instruction must always be independent from the data it operates on. 
+#[inline] +#[target_feature(enable = "zksed")] pub fn sm4ks<const BS: u8>(x: u32, k: u32) -> u32 { static_assert!(BS: u8 where BS <= 3); let ans: u32; - match BS { - 0 => unsafe { - asm!(".insn r 0x33, 0, 0x1A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack)) - }, - 1 => unsafe { - asm!(".insn r 0x33, 0, 0x3A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack)) - }, - 2 => unsafe { - asm!(".insn r 0x33, 0, 0x5A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack)) - }, - 3 => unsafe { - asm!(".insn r 0x33, 0, 0x7A, {}, {}, {}", out(reg) ans, in(reg) x, in(reg) k, options(nomem, nostack)) - }, - _ => unreachable!(), + unsafe { + asm!("sm4ks {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) k, const BS, options(pure, nomem, nostack)) }; ans } diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/p.rs b/library/stdarch/crates/core_arch/src/riscv_shared/p.rs new file mode 100644 index 000000000..a26044aee --- /dev/null +++ b/library/stdarch/crates/core_arch/src/riscv_shared/p.rs @@ -0,0 +1,1061 @@ +//! RISC-V Packed SIMD intrinsics; shared part. +//! +//! RV64 only part is placed in riscv64 folder. +use crate::arch::asm; + +/// Adds packed 16-bit signed numbers, discarding overflow bits +#[inline] +pub fn add16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x20, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the sum of packed 16-bit signed numbers, dropping least bits +#[inline] +pub fn radd16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x00, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the sum of packed 16-bit unsigned numbers, dropping least bits +#[inline] +pub fn uradd16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x10, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Adds packed 16-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn kadd16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x08, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Adds packed 16-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn ukadd16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x18, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts packed 16-bit signed numbers, discarding overflow bits +#[inline] +pub fn sub16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x21, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the subtraction result of packed 16-bit signed numbers, dropping least bits +#[inline] +pub fn rsub16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x01, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the subtraction result of packed 16-bit unsigned numbers, dropping least bits +#[inline] +pub fn ursub16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x11, {}, {}, {}", lateout(reg) value, 
in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts packed 16-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn ksub16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x09, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn uksub16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x19, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross adds and subtracts packed 16-bit signed numbers, discarding overflow bits +#[inline] +pub fn cras16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x22, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross halves of adds and subtracts packed 16-bit signed numbers, dropping least bits +#[inline] +pub fn rcras16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x02, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross halves of adds and subtracts packed 16-bit unsigned numbers, dropping least bits +#[inline] +pub fn urcras16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x12, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross adds and subtracts packed 16-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn kcras16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x0A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross adds and subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn ukcras16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x1A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross subtracts and adds packed 16-bit signed numbers, discarding overflow bits +#[inline] +pub fn crsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x23, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross halves of subtracts and adds packed 16-bit signed numbers, dropping least bits +#[inline] +pub fn rcrsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x03, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross halves of subtracts and adds packed 16-bit unsigned numbers, dropping least bits +#[inline] +pub fn urcrsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x13, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross subtracts and adds packed 16-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn kcrsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x0B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Cross subtracts and adds packed 
16-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn ukcrsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x1B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight adds and subtracts packed 16-bit signed numbers, discarding overflow bits +#[inline] +pub fn stas16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x7A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight halves of adds and subtracts packed 16-bit signed numbers, dropping least bits +#[inline] +pub fn rstas16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x5A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight halves of adds and subtracts packed 16-bit unsigned numbers, dropping least bits +#[inline] +pub fn urstas16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x6A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight adds and subtracts packed 16-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn kstas16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x62, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight adds and subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn ukstas16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x72, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight subtracts and adds packed 16-bit signed numbers, discarding overflow bits +#[inline] +pub fn stsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x7B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight halves of subtracts and adds packed 16-bit signed numbers, dropping least bits +#[inline] +pub fn rstsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x5B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight halves of subtracts and adds packed 16-bit unsigned numbers, dropping least bits +#[inline] +pub fn urstsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x6B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight subtracts and adds packed 16-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn kstsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x63, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Straight subtracts and adds packed 16-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn ukstsa16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x73, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Adds packed 8-bit signed numbers, discarding overflow bits +#[inline] 
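// A software model of the element-wise wrapping arithmetic in this module,
// shown as a sketch for `add8` on a single 32-bit word (the instruction itself
// operates on the full XLEN-wide register; the helper name is illustrative):
fn add8_reference(a: u32, b: u32) -> u32 {
    let mut out = 0u32;
    for lane in 0..4 {
        let x = (a >> (8 * lane)) as u8;
        let y = (b >> (8 * lane)) as u8;
        // each 8-bit lane is added independently, discarding overflow bits
        out |= (x.wrapping_add(y) as u32) << (8 * lane);
    }
    out
}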
+pub fn add8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x24, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the sum of packed 8-bit signed numbers, dropping least bits +#[inline] +pub fn radd8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x04, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the sum of packed 8-bit unsigned numbers, dropping least bits +#[inline] +pub fn uradd8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x14, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Adds packed 8-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn kadd8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x0C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Adds packed 8-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn ukadd8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x1C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts packed 8-bit signed numbers, discarding overflow bits +#[inline] +pub fn sub8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x25, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the subtraction result of packed 8-bit signed numbers, dropping least bits +#[inline] +pub fn rsub8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x05, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Halves the subtraction result of packed 8-bit unsigned numbers, dropping least bits +#[inline] +pub fn ursub8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x15, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts packed 8-bit signed numbers, saturating at the numeric bounds +#[inline] +pub fn ksub8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x0D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts packed 8-bit unsigned numbers, saturating at the numeric bounds +#[inline] +pub fn uksub8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x1D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Arithmetic right shift packed 16-bit elements without rounding up +#[inline] +pub fn sra16(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x28, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Arithmetic right shift packed 16-bit elements with rounding up +#[inline] +pub fn sra16u(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x30, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical right shift packed 16-bit elements without 
rounding up +#[inline] +pub fn srl16(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x29, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical right shift packed 16-bit elements with rounding up +#[inline] +pub fn srl16u(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x31, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical left shift packed 16-bit elements, discarding overflow bits +#[inline] +pub fn sll16(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x2A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical left shift packed 16-bit elements, saturating at the numeric bounds +#[inline] +pub fn ksll16(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x32, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical saturating left then arithmetic right shift packed 16-bit elements +#[inline] +pub fn kslra16(a: usize, b: i32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x2B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical saturating left then arithmetic right shift packed 16-bit elements +#[inline] +pub fn kslra16u(a: usize, b: i32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x33, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Arithmetic right shift packed 8-bit elements without rounding up +#[inline] +pub fn sra8(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x2C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Arithmetic right shift packed 8-bit elements with rounding up +#[inline] +pub fn sra8u(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x34, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical right shift packed 8-bit elements without rounding up +#[inline] +pub fn srl8(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x2D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical right shift packed 8-bit elements with rounding up +#[inline] +pub fn srl8u(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x35, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical left shift packed 8-bit elements, discarding overflow bits +#[inline] +pub fn sll8(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x2E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical left shift packed 8-bit elements, saturating at the numeric bounds +#[inline] +pub fn ksll8(a: usize, b: u32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x36, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical saturating left then arithmetic right shift packed 8-bit elements 
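// A software sketch of the per-lane shifts above, shown for `sra16` on a
// 32-bit word holding two 16-bit lanes; it assumes the shift amount is taken
// from the low 4 bits of `b`, as in the draft P-extension encoding:
fn sra16_reference(a: u32, b: u32) -> u32 {
    let sh = b & 0xf;
    // each 16-bit lane is sign-extended and shifted independently
    let lo = (((a & 0xffff) as u16 as i16) >> sh) as u16 as u32;
    let hi = (((a >> 16) as u16 as i16) >> sh) as u16 as u32;
    (hi << 16) | lo
}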
+#[inline] +pub fn kslra8(a: usize, b: i32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x2F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Logical saturating left then arithmetic right shift packed 8-bit elements +#[inline] +pub fn kslra8u(a: usize, b: i32) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x37, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare equality for packed 16-bit elements +#[inline] +pub fn cmpeq16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x26, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 16-bit packed signed integers are less than the others +#[inline] +pub fn scmplt16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x06, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 16-bit packed signed integers are less than or equal to the others +#[inline] +pub fn scmple16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x0E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 16-bit packed unsigned integers are less than the others +#[inline] +pub fn ucmplt16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x16, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 16-bit packed unsigned integers are less than or equal to the others +#[inline] +pub fn ucmple16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x1E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare equality for packed 8-bit elements +#[inline] +pub fn cmpeq8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x27, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 8-bit packed signed integers are less than the others +#[inline] +pub fn scmplt8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x07, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 8-bit packed signed integers are less than or equal to the others +#[inline] +pub fn scmple8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x0F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 8-bit packed unsigned integers are less than the others +#[inline] +pub fn ucmplt8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x17, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Compare whether 8-bit packed unsigned integers are less than or equal to the others +#[inline] +pub fn ucmple8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x1F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Get minimum 
values from 16-bit packed signed integers +#[inline] +pub fn smin16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x40, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Get minimum values from 16-bit packed unsigned integers +#[inline] +pub fn umin16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x48, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Get maximum values from 16-bit packed signed integers +#[inline] +pub fn smax16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x41, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Get maximum values from 16-bit packed unsigned integers +#[inline] +pub fn umax16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x49, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/* todo: sclip16, uclip16 */ + +/// Compute the absolute value of packed 16-bit signed integers +#[inline] +pub fn kabs16(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAD1", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Count the number of redundant sign bits of the packed 16-bit elements +#[inline] +pub fn clrs16(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAE8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Count the number of leading zero bits of the packed 16-bit elements +#[inline] +pub fn clz16(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAE9", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Swap the 16-bit halfwords within each 32-bit word of a register +#[inline] +pub fn swap16(a: usize) -> usize { + let value: usize; + // this instruction is an alias for `pkbt rd, rs1, rs1`. 
+ unsafe { + asm!(".insn r 0x77, 0x0, 0x0F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Get minimum values from 8-bit packed signed integers +#[inline] +pub fn smin8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x44, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Get minimum values from 8-bit packed unsigned integers +#[inline] +pub fn umin8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x4C, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Get maximum values from 8-bit packed signed integers +#[inline] +pub fn smax8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x45, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Get maximum values from 8-bit packed unsigned integers +#[inline] +pub fn umax8(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x4D, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/* todo: sclip8, uclip8 */ + +/// Compute the absolute value of packed 8-bit signed integers +#[inline] +pub fn kabs8(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAD0", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Count the number of redundant sign bits of the packed 8-bit elements +#[inline] +pub fn clrs8(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAE0", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Count the number of leading zero bits of the packed 8-bit elements +#[inline] +pub fn clz8(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAE1", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Swap the 8-bit bytes within each 16-bit halfword of a register. 
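// A software sketch of what `swap8` computes on a single 32-bit word: the two
// bytes inside each 16-bit halfword trade places (illustrative helper only):
fn swap8_reference(a: u32) -> u32 {
    ((a & 0x00ff_00ff) << 8) | ((a >> 8) & 0x00ff_00ff)
}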
+#[inline] +pub fn swap8(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAD8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack first and zeroth into two 16-bit signed halfwords in each 32-bit chunk +#[inline] +pub fn sunpkd810(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAC8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack second and zeroth into two 16-bit signed halfwords in each 32-bit chunk +#[inline] +pub fn sunpkd820(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAC9", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack third and zeroth into two 16-bit signed halfwords in each 32-bit chunk +#[inline] +pub fn sunpkd830(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xACA", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack third and first into two 16-bit signed halfwords in each 32-bit chunk +#[inline] +pub fn sunpkd831(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xACB", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack third and second into two 16-bit signed halfwords in each 32-bit chunk +#[inline] +pub fn sunpkd832(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAD3", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack first and zeroth into two 16-bit unsigned halfwords in each 32-bit chunk +#[inline] +pub fn zunpkd810(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xACC", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack second and zeroth into two 16-bit unsigned halfwords in each 32-bit chunk +#[inline] +pub fn zunpkd820(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xACD", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack third and zeroth into two 16-bit unsigned halfwords in each 32-bit chunk +#[inline] +pub fn zunpkd830(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xACE", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack third and first into two 16-bit unsigned halfwords in each 32-bit chunk +#[inline] +pub fn zunpkd831(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xACF", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Unpack third and second into two 16-bit unsigned halfwords in each 32-bit chunk +#[inline] +pub fn zunpkd832(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAD7", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +// todo: pkbb16, pktt16 + +/// Pack two 16-bit data from bottom and top half from 32-bit chunks +#[inline] +pub fn pkbt16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x1, 0x0F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Pack two 16-bit data from top and bottom half from 32-bit chunks +#[inline] +pub fn pktb16(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + 
asm!(".insn r 0x77, 0x1, 0x1F, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Count the number of redundant sign bits of the packed 32-bit elements +#[inline] +pub fn clrs32(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAF8", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Count the number of leading zero bits of the packed 32-bit elements +#[inline] +pub fn clz32(a: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn i 0x77, 0x0, {}, {}, 0xAF9", lateout(reg) value, in(reg) a, options(pure, nomem, nostack)) + } + value +} + +/// Calculate the sum of absolute difference of unsigned 8-bit data elements +#[inline] +pub fn pbsad(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x7E, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Calculate and accumulate the sum of absolute difference of unsigned 8-bit data elements +#[inline] +pub fn pbsada(t: usize, a: usize, b: usize) -> usize { + let mut value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x7F, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Multiply signed 8-bit elements and add 16-bit elements on results for packed 32-bit chunks +#[inline] +pub fn smaqa(t: usize, a: usize, b: usize) -> usize { + let mut value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x64, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Multiply unsigned 8-bit elements and add 16-bit elements on results for packed 32-bit chunks +#[inline] +pub fn umaqa(t: usize, a: usize, b: usize) -> usize { + let mut value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x66, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Multiply signed to unsigned 8-bit and add 16-bit elements on results for packed 32-bit chunks +#[inline] +pub fn smaqasu(t: usize, a: usize, b: usize) -> usize { + let mut value: usize; + unsafe { + asm!(".insn r 0x77, 0x0, 0x65, {}, {}, {}", inlateout(reg) t => value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Adds signed lower 16-bit content of two registers with Q15 saturation +#[inline] +pub fn kaddh(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x1, 0x02, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts signed lower 16-bit content of two registers with Q15 saturation +#[inline] +pub fn ksubh(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x1, 0x03, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Adds signed lower 16-bit content of two registers with U16 saturation +#[inline] +pub fn ukaddh(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x1, 0x0A, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} + +/// Subtracts signed lower 16-bit content of two registers with U16 saturation +#[inline] +pub fn uksubh(a: usize, b: usize) -> usize { + let value: usize; + unsafe { + asm!(".insn r 0x77, 0x1, 0x0B, {}, {}, {}", lateout(reg) value, in(reg) a, in(reg) b, options(pure, nomem, nostack)) + } + value +} diff 
--git a/library/stdarch/crates/core_arch/src/simd_llvm.rs b/library/stdarch/crates/core_arch/src/simd_llvm.rs index 1970e5c69..decdecaaf 100644 --- a/library/stdarch/crates/core_arch/src/simd_llvm.rs +++ b/library/stdarch/crates/core_arch/src/simd_llvm.rs @@ -9,13 +9,7 @@ extern "platform-intrinsic" { pub fn simd_gt<T, U>(x: T, y: T) -> U; pub fn simd_ge<T, U>(x: T, y: T) -> U; - pub fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U; - pub fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U; - pub fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U; - pub fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U; - pub fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U; - pub fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U; - pub fn simd_shuffle128<T, U>(x: T, y: T, idx: [u32; 128]) -> U; + pub fn simd_shuffle<T, U, V>(x: T, y: T, idx: U) -> V; #[rustc_const_unstable(feature = "const_simd_insert", issue = "none")] pub fn simd_insert<T, U>(x: T, idx: u32, val: U) -> T; diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs index 24f9c0301..16add3dbb 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx2.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs @@ -2001,7 +2001,7 @@ pub unsafe fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmovmskb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_movemask_epi8(a: __m256i) -> i32 { - pmovmskb(a.as_i8x32()) + simd_bitmask::<_, u32>(a.as_i8x32()) as i32 } /// Computes the sum of absolute differences (SADs) of quadruplets of unsigned @@ -3642,8 +3642,6 @@ extern "C" { fn pminud(a: u32x8, b: u32x8) -> u32x8; #[link_name = "llvm.x86.avx2.pminu.b"] fn pminub(a: u8x32, b: u8x32) -> u8x32; - #[link_name = "llvm.x86.avx2.pmovmskb"] - fn pmovmskb(a: i8x32) -> i32; #[link_name = "llvm.x86.avx2.mpsadbw"] fn mpsadbw(a: u8x32, b: u8x32, imm8: i32) -> u16x16; #[link_name = "llvm.x86.avx2.pmulhu.w"] diff --git a/library/stdarch/crates/core_arch/src/x86/cpuid.rs b/library/stdarch/crates/core_arch/src/x86/cpuid.rs index 6b90295ef..2624e8bdf 100644 --- a/library/stdarch/crates/core_arch/src/x86/cpuid.rs +++ b/library/stdarch/crates/core_arch/src/x86/cpuid.rs @@ -62,27 +62,27 @@ pub unsafe fn __cpuid_count(leaf: u32, sub_leaf: u32) -> CpuidResult { #[cfg(target_arch = "x86")] { asm!( - "movl %ebx, {0}", + "mov {0}, ebx", "cpuid", - "xchgl %ebx, {0}", - lateout(reg) ebx, - inlateout("eax") leaf => eax, - inlateout("ecx") sub_leaf => ecx, - lateout("edx") edx, - options(nostack, preserves_flags, att_syntax), + "xchg {0}, ebx", + out(reg) ebx, + inout("eax") leaf => eax, + inout("ecx") sub_leaf => ecx, + out("edx") edx, + options(nostack, preserves_flags), ); } #[cfg(target_arch = "x86_64")] { asm!( - "movq %rbx, {0:r}", + "mov {0:r}, rbx", "cpuid", - "xchgq %rbx, {0:r}", - lateout(reg) ebx, - inlateout("eax") leaf => eax, - inlateout("ecx") sub_leaf => ecx, - lateout("edx") edx, - options(nostack, preserves_flags, att_syntax), + "xchg {0:r}, rbx", + out(reg) ebx, + inout("eax") leaf => eax, + inout("ecx") sub_leaf => ecx, + out("edx") edx, + options(nostack, preserves_flags), ); } CpuidResult { eax, ebx, ecx, edx } diff --git a/library/stdarch/crates/core_arch/src/x86/mod.rs b/library/stdarch/crates/core_arch/src/x86/mod.rs index 547bfe67d..6b50e95b2 100644 --- a/library/stdarch/crates/core_arch/src/x86/mod.rs +++ b/library/stdarch/crates/core_arch/src/x86/mod.rs @@ -306,7 +306,7 @@ types! 
{ /// 256-bit wide set of 16 'u16' types, x86-specific /// - /// This type is the same as the `__m128bh` type defined by Intel, + /// This type is the same as the `__m256bh` type defined by Intel, /// representing a 256-bit SIMD register which internally is consisted of /// 16 packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. @@ -317,7 +317,7 @@ types! { /// 512-bit wide set of 32 'u16' types, x86-specific /// - /// This type is the same as the `__m128bh` type defined by Intel, + /// This type is the same as the `__m512bh` type defined by Intel, /// representing a 512-bit SIMD register which internally is consisted of /// 32 packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs index d82b8641f..3e79b3539 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse2.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs @@ -1378,7 +1378,7 @@ pub unsafe fn _mm_insert_epi16<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { #[cfg_attr(test, assert_instr(pmovmskb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_movemask_epi8(a: __m128i) -> i32 { - pmovmskb(a.as_i8x16()) + simd_bitmask::<_, u16>(a.as_i8x16()) as u32 as i32 } /// Shuffles 32-bit integers in `a` using the control in `IMM8`. @@ -2856,8 +2856,6 @@ extern "C" { fn packssdw(a: i32x4, b: i32x4) -> i16x8; #[link_name = "llvm.x86.sse2.packuswb.128"] fn packuswb(a: i16x8, b: i16x8) -> u8x16; - #[link_name = "llvm.x86.sse2.pmovmskb.128"] - fn pmovmskb(a: i8x16) -> i32; #[link_name = "llvm.x86.sse2.max.sd"] fn maxsd(a: __m128d, b: __m128d) -> __m128d; #[link_name = "llvm.x86.sse2.max.pd"] diff --git a/library/stdarch/crates/core_arch/src/x86/sse3.rs b/library/stdarch/crates/core_arch/src/x86/sse3.rs index ab0dd38fe..61f8a4e78 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse3.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse3.rs @@ -1,11 +1,7 @@ //! Streaming SIMD Extensions 3 (SSE3) use crate::{ - core_arch::{ - simd::*, - simd_llvm::{simd_shuffle2, simd_shuffle4}, - x86::*, - }, + core_arch::{simd::*, simd_llvm::simd_shuffle, x86::*}, mem::transmute, }; diff --git a/library/stdarch/crates/std_detect/Cargo.toml b/library/stdarch/crates/std_detect/Cargo.toml index 1ca0d9c5d..3a482564e 100644 --- a/library/stdarch/crates/std_detect/Cargo.toml +++ b/library/stdarch/crates/std_detect/Cargo.toml @@ -22,7 +22,7 @@ maintenance = { status = "experimental" } [dependencies] libc = { version = "0.2", optional = true, default-features = false } -cfg-if = "0.1.10" +cfg-if = "1.0.0" # When built as part of libstd core = { version = "1.0.0", optional = true, package = "rustc-std-workspace-core" } diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs index b6a2e5218..6c79ba86d 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs @@ -23,58 +23,62 @@ pub(crate) fn detect_features() -> cache::Initializer { /// The names match those used for cpuinfo. 
/// /// [hwcap]: https://github.com/torvalds/linux/blob/master/arch/arm64/include/uapi/asm/hwcap.h +#[derive(Debug, Default, PartialEq)] struct AtHwcap { - fp: bool, // 0 - asimd: bool, // 1 - // evtstrm: bool, // 2 No LLVM support - aes: bool, // 3 - pmull: bool, // 4 - sha1: bool, // 5 - sha2: bool, // 6 - crc32: bool, // 7 - atomics: bool, // 8 - fphp: bool, // 9 - asimdhp: bool, // 10 - // cpuid: bool, // 11 No LLVM support - asimdrdm: bool, // 12 - jscvt: bool, // 13 - fcma: bool, // 14 - lrcpc: bool, // 15 - dcpop: bool, // 16 - sha3: bool, // 17 - sm3: bool, // 18 - sm4: bool, // 19 - asimddp: bool, // 20 - sha512: bool, // 21 - sve: bool, // 22 - fhm: bool, // 23 - dit: bool, // 24 - uscat: bool, // 25 - ilrcpc: bool, // 26 - flagm: bool, // 27 - ssbs: bool, // 28 - sb: bool, // 29 - paca: bool, // 30 - pacg: bool, // 31 - dcpodp: bool, // 32 - sve2: bool, // 33 - sveaes: bool, // 34 - // svepmull: bool, // 35 No LLVM support - svebitperm: bool, // 36 - svesha3: bool, // 37 - svesm4: bool, // 38 - // flagm2: bool, // 39 No LLVM support - frint: bool, // 40 - // svei8mm: bool, // 41 See i8mm feature - svef32mm: bool, // 42 - svef64mm: bool, // 43 - // svebf16: bool, // 44 See bf16 feature - i8mm: bool, // 45 - bf16: bool, // 46 - // dgh: bool, // 47 No LLVM support - rng: bool, // 48 - bti: bool, // 49 - mte: bool, // 50 + // AT_HWCAP + fp: bool, + asimd: bool, + // evtstrm: No LLVM support. + aes: bool, + pmull: bool, + sha1: bool, + sha2: bool, + crc32: bool, + atomics: bool, + fphp: bool, + asimdhp: bool, + // cpuid: No LLVM support. + asimdrdm: bool, + jscvt: bool, + fcma: bool, + lrcpc: bool, + dcpop: bool, + sha3: bool, + sm3: bool, + sm4: bool, + asimddp: bool, + sha512: bool, + sve: bool, + fhm: bool, + dit: bool, + uscat: bool, + ilrcpc: bool, + flagm: bool, + ssbs: bool, + sb: bool, + paca: bool, + pacg: bool, + + // AT_HWCAP2 + dcpodp: bool, + sve2: bool, + sveaes: bool, + // svepmull: No LLVM support. + svebitperm: bool, + svesha3: bool, + svesm4: bool, + // flagm2: No LLVM support. + frint: bool, + // svei8mm: See i8mm feature. + svef32mm: bool, + svef64mm: bool, + // svebf16: See bf16 feature. + i8mm: bool, + bf16: bool, + // dgh: No LLVM support. 
+ rng: bool, + bti: bool, + mte: bool, } impl From<auxvec::AuxVec> for AtHwcap { @@ -113,25 +117,25 @@ impl From<auxvec::AuxVec> for AtHwcap { sb: bit::test(auxv.hwcap, 29), paca: bit::test(auxv.hwcap, 30), pacg: bit::test(auxv.hwcap, 31), - dcpodp: bit::test(auxv.hwcap, 32), - sve2: bit::test(auxv.hwcap, 33), - sveaes: bit::test(auxv.hwcap, 34), - // svepmull: bit::test(auxv.hwcap, 35), - svebitperm: bit::test(auxv.hwcap, 36), - svesha3: bit::test(auxv.hwcap, 37), - svesm4: bit::test(auxv.hwcap, 38), - // flagm2: bit::test(auxv.hwcap, 39), - frint: bit::test(auxv.hwcap, 40), - // svei8mm: bit::test(auxv.hwcap, 41), - svef32mm: bit::test(auxv.hwcap, 42), - svef64mm: bit::test(auxv.hwcap, 43), - // svebf16: bit::test(auxv.hwcap, 44), - i8mm: bit::test(auxv.hwcap, 45), - bf16: bit::test(auxv.hwcap, 46), - // dgh: bit::test(auxv.hwcap, 47), - rng: bit::test(auxv.hwcap, 48), - bti: bit::test(auxv.hwcap, 49), - mte: bit::test(auxv.hwcap, 50), + dcpodp: bit::test(auxv.hwcap2, 0), + sve2: bit::test(auxv.hwcap2, 1), + sveaes: bit::test(auxv.hwcap2, 2), + // svepmull: bit::test(auxv.hwcap2, 3), + svebitperm: bit::test(auxv.hwcap2, 4), + svesha3: bit::test(auxv.hwcap2, 5), + svesm4: bit::test(auxv.hwcap2, 6), + // flagm2: bit::test(auxv.hwcap2, 7), + frint: bit::test(auxv.hwcap2, 8), + // svei8mm: bit::test(auxv.hwcap2, 9), + svef32mm: bit::test(auxv.hwcap2, 10), + svef64mm: bit::test(auxv.hwcap2, 11), + // svebf16: bit::test(auxv.hwcap2, 12), + i8mm: bit::test(auxv.hwcap2, 13), + bf16: bit::test(auxv.hwcap2, 14), + // dgh: bit::test(auxv.hwcap2, 15), + rng: bit::test(auxv.hwcap2, 16), + bti: bit::test(auxv.hwcap2, 17), + mte: bit::test(auxv.hwcap2, 18), } } } @@ -288,3 +292,86 @@ impl AtHwcap { value } } + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(feature = "std_detect_file_io")] + mod auxv_from_file { + use super::auxvec::auxv_from_file; + use super::*; + // The baseline hwcaps used in the (artificial) auxv test files. + fn baseline_hwcaps() -> AtHwcap { + AtHwcap { + fp: true, + asimd: true, + aes: true, + pmull: true, + sha1: true, + sha2: true, + crc32: true, + atomics: true, + fphp: true, + asimdhp: true, + asimdrdm: true, + lrcpc: true, + dcpop: true, + asimddp: true, + ssbs: true, + ..AtHwcap::default() + } + } + + #[test] + fn linux_empty_hwcap2_aarch64() { + let file = concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv" + ); + println!("file: {}", file); + let v = auxv_from_file(file).unwrap(); + println!("HWCAP : 0x{:0x}", v.hwcap); + println!("HWCAP2: 0x{:0x}", v.hwcap2); + assert_eq!(AtHwcap::from(v), baseline_hwcaps()); + } + #[test] + fn linux_no_hwcap2_aarch64() { + let file = concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv" + ); + println!("file: {}", file); + let v = auxv_from_file(file).unwrap(); + println!("HWCAP : 0x{:0x}", v.hwcap); + println!("HWCAP2: 0x{:0x}", v.hwcap2); + assert_eq!(AtHwcap::from(v), baseline_hwcaps()); + } + #[test] + fn linux_hwcap2_aarch64() { + let file = concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/detect/test_data/linux-hwcap2-aarch64.auxv" + ); + println!("file: {}", file); + let v = auxv_from_file(file).unwrap(); + println!("HWCAP : 0x{:0x}", v.hwcap); + println!("HWCAP2: 0x{:0x}", v.hwcap2); + assert_eq!( + AtHwcap::from(v), + AtHwcap { + // Some other HWCAP bits. + paca: true, + pacg: true, + // HWCAP2-only bits. 
+ dcpodp: true, + frint: true, + rng: true, + bti: true, + mte: true, + ..baseline_hwcaps() + } + ); + } + } +} diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs index e6447d0cd..c903903bd 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs @@ -7,6 +7,7 @@ pub(crate) const AT_NULL: usize = 0; pub(crate) const AT_HWCAP: usize = 16; /// Key to access the CPU Hardware capabilities 2 bitfield. #[cfg(any( + target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" @@ -21,6 +22,7 @@ pub(crate) const AT_HWCAP2: usize = 26; pub(crate) struct AuxVec { pub hwcap: usize, #[cfg(any( + target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" @@ -64,13 +66,14 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> { if let Ok(hwcap) = getauxval(AT_HWCAP) { // Targets with only AT_HWCAP: #[cfg(any( - target_arch = "aarch64", target_arch = "riscv32", target_arch = "riscv64", target_arch = "mips", target_arch = "mips64" ))] { + // Zero could indicate that no features were detected, but it's also used to + // indicate an error. In either case, try the fallback. if hwcap != 0 { return Ok(AuxVec { hwcap }); } @@ -78,13 +81,18 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> { // Targets with AT_HWCAP and AT_HWCAP2: #[cfg(any( + target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" ))] { if let Ok(hwcap2) = getauxval(AT_HWCAP2) { - if hwcap != 0 && hwcap2 != 0 { + // Zero could indicate that no features were detected, but it's also used to + // indicate an error. In particular, on many platforms AT_HWCAP2 will be + // legitimately zero, since it contains the most recent feature flags. Use the + // fallback only if no features were detected at all. + if hwcap != 0 || hwcap2 != 0 { return Ok(AuxVec { hwcap, hwcap2 }); } } @@ -97,7 +105,6 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> { { // Targets with only AT_HWCAP: #[cfg(any( - target_arch = "aarch64", target_arch = "riscv32", target_arch = "riscv64", target_arch = "mips", @@ -105,6 +112,8 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> { ))] { let hwcap = unsafe { libc::getauxval(AT_HWCAP as libc::c_ulong) as usize }; + // Zero could indicate that no features were detected, but it's also used to indicate + // an error. In either case, try the fallback. if hwcap != 0 { return Ok(AuxVec { hwcap }); } @@ -112,6 +121,7 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> { // Targets with AT_HWCAP and AT_HWCAP2: #[cfg(any( + target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" @@ -119,7 +129,11 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> { { let hwcap = unsafe { libc::getauxval(AT_HWCAP as libc::c_ulong) as usize }; let hwcap2 = unsafe { libc::getauxval(AT_HWCAP2 as libc::c_ulong) as usize }; - if hwcap != 0 && hwcap2 != 0 { + // Zero could indicate that no features were detected, but it's also used to indicate + // an error. In particular, on many platforms AT_HWCAP2 will be legitimately zero, + // since it contains the most recent feature flags. Use the fallback only if no + // features were detected at all. + if hwcap != 0 || hwcap2 != 0 { return Ok(AuxVec { hwcap, hwcap2 }); } } @@ -158,7 +172,7 @@ fn getauxval(key: usize) -> Result<usize, ()> { /// Tries to read the auxiliary vector from the `file`. 
If this fails, this /// function returns `Err`. #[cfg(feature = "std_detect_file_io")] -fn auxv_from_file(file: &str) -> Result<AuxVec, ()> { +pub(super) fn auxv_from_file(file: &str) -> Result<AuxVec, ()> { let file = super::read_file(file)?; // See <https://github.com/torvalds/linux/blob/v3.19/include/uapi/linux/auxvec.h>. @@ -181,7 +195,6 @@ fn auxv_from_file(file: &str) -> Result<AuxVec, ()> { fn auxv_from_buf(buf: &[usize; 64]) -> Result<AuxVec, ()> { // Targets with only AT_HWCAP: #[cfg(any( - target_arch = "aarch64", target_arch = "riscv32", target_arch = "riscv64", target_arch = "mips", @@ -198,23 +211,25 @@ fn auxv_from_buf(buf: &[usize; 64]) -> Result<AuxVec, ()> { } // Targets with AT_HWCAP and AT_HWCAP2: #[cfg(any( + target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" ))] { let mut hwcap = None; - let mut hwcap2 = None; + // For some platforms, AT_HWCAP2 was added recently, so let it default to zero. + let mut hwcap2 = 0; for el in buf.chunks(2) { match el[0] { AT_NULL => break, AT_HWCAP => hwcap = Some(el[1]), - AT_HWCAP2 => hwcap2 = Some(el[1]), + AT_HWCAP2 => hwcap2 = el[1], _ => (), } } - if let (Some(hwcap), Some(hwcap2)) = (hwcap, hwcap2) { + if let Some(hwcap) = hwcap { return Ok(AuxVec { hwcap, hwcap2 }); } } @@ -256,7 +271,6 @@ mod tests { // FIXME: on mips/mips64 getauxval returns 0, and /proc/self/auxv // does not always contain the AT_HWCAP key under qemu. #[cfg(any( - target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" @@ -271,6 +285,7 @@ mod tests { // Targets with AT_HWCAP and AT_HWCAP2: #[cfg(any( + target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" @@ -305,24 +320,31 @@ mod tests { } #[test] - #[should_panic] fn linux_macos_vb() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv"); println!("file: {}", file); + // The file contains HWCAP but not HWCAP2. In that case, we treat HWCAP2 as zero. let v = auxv_from_file(file).unwrap(); - // this file is incomplete (contains hwcap but not hwcap2), we - // want to fall back to /proc/cpuinfo in this case, so - // reading should fail. assert_eq!(v.hwcap, 126614527); - // assert_eq!(v.hwcap2, 0); - let _ = v; + assert_eq!(v.hwcap, 126614527); + assert_eq!(v.hwcap2, 0); } } else if #[cfg(target_arch = "aarch64")] { #[test] - fn linux_x64() { - let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-x64-i7-6850k.auxv"); + fn linux_artificial_aarch64() { + let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-artificial-aarch64.auxv"); println!("file: {}", file); let v = auxv_from_file(file).unwrap(); - assert_eq!(v.hwcap, 3219913727); + assert_eq!(v.hwcap, 0x0123456789abcdef); + assert_eq!(v.hwcap2, 0x02468ace13579bdf); + } + #[test] + fn linux_no_hwcap2_aarch64() { + let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv"); + println!("file: {}", file); + let v = auxv_from_file(file).unwrap(); + // An absent HWCAP2 is treated as zero, and does not prevent acceptance of HWCAP. 
+ assert_ne!(v.hwcap, 0); + assert_eq!(v.hwcap2, 0); } } } @@ -353,6 +375,7 @@ mod tests { // Targets with AT_HWCAP and AT_HWCAP2: #[cfg(any( + target_arch = "aarch64", target_arch = "arm", target_arch = "powerpc", target_arch = "powerpc64" diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv Binary files differnew file mode 100644 index 000000000..ec826afcf --- /dev/null +++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-artificial-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv Binary files differnew file mode 100644 index 000000000..95537b73f --- /dev/null +++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv Binary files differnew file mode 100644 index 000000000..1d87264b2 --- /dev/null +++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-hwcap2-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv Binary files differnew file mode 100644 index 000000000..35f01cc76 --- /dev/null +++ b/library/stdarch/crates/std_detect/src/detect/test_data/linux-no-hwcap2-aarch64.auxv diff --git a/library/stdarch/crates/std_detect/src/detect/test_data/linux-x64-i7-6850k.auxv b/library/stdarch/crates/std_detect/src/detect/test_data/linux-x64-i7-6850k.auxv Binary files differdeleted file mode 100644 index 6afe1b3b4..000000000 --- a/library/stdarch/crates/std_detect/src/detect/test_data/linux-x64-i7-6850k.auxv +++ /dev/null diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml index 9ac1057be..012b4e959 100644 --- a/library/stdarch/crates/stdarch-test/Cargo.toml +++ b/library/stdarch/crates/stdarch-test/Cargo.toml @@ -10,7 +10,7 @@ simd-test-macro = { path = "../simd-test-macro" } cc = "1.0" lazy_static = "1.0" rustc-demangle = "0.1.8" -cfg-if = "0.1" +cfg-if = "1.0" # We use a crates.io dependency to disassemble wasm binaries to look for # instructions for `#[assert_instr]`. Note that we use an `=` dependency here diff --git a/library/stdarch/crates/stdarch-test/src/lib.rs b/library/stdarch/crates/stdarch-test/src/lib.rs index 078736c66..eba17771c 100644 --- a/library/stdarch/crates/stdarch-test/src/lib.rs +++ b/library/stdarch/crates/stdarch-test/src/lib.rs @@ -3,7 +3,6 @@ //! This basically just disassembles the current executable and then parses the //! output once globally and then provides the `assert` function which makes //! assertions about the disassembly of a function. 
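// The `bench_black_box` gate removed below is no longer needed: `black_box`
// is available as stable `std::hint::black_box` in this release. A minimal
// use, for reference only:
fn keep_alive(v: &[u8]) -> &[u8] {
    std::hint::black_box(v)
}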
-#![feature(bench_black_box)] // For black_box #![deny(rust_2018_idioms)] #![allow(clippy::missing_docs_in_private_items, clippy::print_stdout)] diff --git a/library/test/src/bench.rs b/library/test/src/bench.rs index 7869ba2c0..23925e6ea 100644 --- a/library/test/src/bench.rs +++ b/library/test/src/bench.rs @@ -49,12 +49,12 @@ impl Bencher { self.summary = Some(iter(&mut inner)); } - pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary> + pub fn bench<F>(&mut self, mut f: F) -> Result<Option<stats::Summary>, String> where - F: FnMut(&mut Bencher), + F: FnMut(&mut Bencher) -> Result<(), String>, { - f(self); - self.summary + let result = f(self); + result.map(|_| self.summary) } } @@ -195,7 +195,7 @@ pub fn benchmark<F>( nocapture: bool, f: F, ) where - F: FnMut(&mut Bencher), + F: FnMut(&mut Bencher) -> Result<(), String>, { let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 }; @@ -211,14 +211,14 @@ pub fn benchmark<F>( let test_result = match result { //bs.bench(f) { - Ok(Some(ns_iter_summ)) => { + Ok(Ok(Some(ns_iter_summ))) => { let ns_iter = cmp::max(ns_iter_summ.median as u64, 1); let mb_s = bs.bytes * 1000 / ns_iter; let bs = BenchSamples { ns_iter_summ, mb_s: mb_s as usize }; TestResult::TrBench(bs) } - Ok(None) => { + Ok(Ok(None)) => { // iter not called, so no data. // FIXME: error in this case? let samples: &mut [f64] = &mut [0.0_f64; 1]; @@ -226,6 +226,7 @@ pub fn benchmark<F>( TestResult::TrBench(bs) } Err(_) => TestResult::TrFailed, + Ok(Err(_)) => TestResult::TrFailed, }; let stdout = data.lock().unwrap().to_vec(); @@ -233,10 +234,10 @@ pub fn benchmark<F>( monitor_ch.send(message).unwrap(); } -pub fn run_once<F>(f: F) +pub fn run_once<F>(f: F) -> Result<(), String> where - F: FnMut(&mut Bencher), + F: FnMut(&mut Bencher) -> Result<(), String>, { let mut bs = Bencher { mode: BenchMode::Single, summary: None, bytes: 0 }; - bs.bench(f); + bs.bench(f).map(|_| ()) } diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs index f981b9c49..8be32183f 100644 --- a/library/test/src/cli.rs +++ b/library/test/src/cli.rs @@ -3,9 +3,9 @@ use std::env; use std::path::PathBuf; -use super::helpers::isatty; use super::options::{ColorConfig, Options, OutputFormat, RunIgnored}; use super::time::TestTimeOptions; +use std::io::{self, IsTerminal}; #[derive(Debug)] pub struct TestOpts { @@ -32,7 +32,7 @@ pub struct TestOpts { impl TestOpts { pub fn use_color(&self) -> bool { match self.color { - ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(), + ColorConfig::AutoColor => !self.nocapture && io::stdout().is_terminal(), ColorConfig::AlwaysColor => true, ColorConfig::NeverColor => false, } diff --git a/library/test/src/console.rs b/library/test/src/console.rs index e9dda9896..b1270c272 100644 --- a/library/test/src/console.rs +++ b/library/test/src/console.rs @@ -147,7 +147,7 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Res let mut ntest = 0; let mut nbench = 0; - for test in filter_tests(&opts, tests) { + for test in filter_tests(&opts, tests).into_iter() { use crate::TestFn::*; let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test; diff --git a/library/test/src/helpers/isatty.rs b/library/test/src/helpers/isatty.rs deleted file mode 100644 index 874ecc376..000000000 --- a/library/test/src/helpers/isatty.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! Helper module which provides a function to test -//! if stdout is a tty. - -cfg_if::cfg_if! 
{ - if #[cfg(unix)] { - pub fn stdout_isatty() -> bool { - unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 } - } - } else if #[cfg(windows)] { - pub fn stdout_isatty() -> bool { - type DWORD = u32; - type BOOL = i32; - type HANDLE = *mut u8; - type LPDWORD = *mut u32; - const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD; - extern "system" { - fn GetStdHandle(which: DWORD) -> HANDLE; - fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL; - } - unsafe { - let handle = GetStdHandle(STD_OUTPUT_HANDLE); - let mut out = 0; - GetConsoleMode(handle, &mut out) != 0 - } - } - } else { - // FIXME: Implement isatty on SGX - pub fn stdout_isatty() -> bool { - false - } - } -} diff --git a/library/test/src/helpers/mod.rs b/library/test/src/helpers/mod.rs index 049cadf86..6f366a911 100644 --- a/library/test/src/helpers/mod.rs +++ b/library/test/src/helpers/mod.rs @@ -3,6 +3,5 @@ pub mod concurrency; pub mod exit_code; -pub mod isatty; pub mod metrics; pub mod shuffle; diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs index 3b7193adc..141f16d17 100644 --- a/library/test/src/lib.rs +++ b/library/test/src/lib.rs @@ -6,7 +6,8 @@ //! benchmarks themselves) should be done via the `#[test]` and //! `#[bench]` attributes. //! -//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details. +//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more +//! details. // Currently, not much of this is meant for users. It is intended to // support the simplest interface possible for representing and @@ -15,10 +16,11 @@ #![unstable(feature = "test", issue = "50297")] #![doc(test(attr(deny(warnings))))] -#![feature(bench_black_box)] #![feature(internal_output_capture)] +#![feature(is_terminal)] #![feature(staged_api)] #![feature(process_exitcode_internals)] +#![feature(panic_can_unwind)] #![feature(test)] // Public reexports @@ -53,6 +55,7 @@ use std::{ collections::VecDeque, env, io, io::prelude::Write, + mem::ManuallyDrop, panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}, process::{self, Command, Termination}, sync::mpsc::{channel, Sender}, @@ -77,6 +80,7 @@ mod types; #[cfg(test)] mod tests; +use core::any::Any; use event::{CompletedTest, TestEvent}; use helpers::concurrency::get_concurrency; use helpers::exit_code::get_exit_code; @@ -110,6 +114,29 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Opt process::exit(ERROR_EXIT_CODE); } } else { + if !opts.nocapture { + // If we encounter a non-unwinding panic, flush any captured output from the current test, + // and stop capturing output to ensure that the non-unwinding panic message is visible. + // We also acquire the locks for both output streams to prevent output from other threads + // from interleaving with the panic message or appearing after it. + let builtin_panic_hook = panic::take_hook(); + let hook = Box::new({ + move |info: &'_ PanicInfo<'_>| { + if !info.can_unwind() { + std::mem::forget(std::io::stderr().lock()); + let mut stdout = ManuallyDrop::new(std::io::stdout().lock()); + if let Some(captured) = io::set_output_capture(None) { + if let Ok(data) = captured.lock() { + let _ = stdout.write_all(&data); + let _ = stdout.flush(); + } + } + } + builtin_panic_hook(info); + } + }); + panic::set_hook(hook); + } match console::run_tests_console(&opts, tests) { Ok(true) => {} Ok(false) => process::exit(ERROR_EXIT_CODE), @@ -176,17 +203,20 @@ fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn { } } -/// Invoked when unit tests terminate. 
Should panic if the unit -/// Tests is considered a failure. By default, invokes `report()` -/// and checks for a `0` result. -pub fn assert_test_result<T: Termination>(result: T) { +/// Invoked when unit tests terminate. Returns `Result::Err` if the test is +/// considered a failure. By default, invokes `report() and checks for a `0` +/// result. +pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> { let code = result.report().to_i32(); - assert_eq!( - code, 0, - "the test returned a termination value with a non-zero status code ({}) \ - which indicates a failure", - code - ); + if code == 0 { + Ok(()) + } else { + Err(format!( + "the test returned a termination value with a non-zero status code \ + ({}) which indicates a failure", + code + )) + } } pub fn run_tests<F>( @@ -242,7 +272,7 @@ where let event = TestEvent::TeFiltered(filtered_descs, shuffle_seed); notify_about_test_event(event)?; - let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests + let (mut filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests .into_iter() .enumerate() .map(|(i, e)| (TestId(i), e)) @@ -250,12 +280,12 @@ where let concurrency = opts.test_threads.unwrap_or_else(get_concurrency); - let mut remaining = filtered_tests; if let Some(shuffle_seed) = shuffle_seed { - shuffle_tests(shuffle_seed, &mut remaining); - } else { - remaining.reverse(); + shuffle_tests(shuffle_seed, &mut filtered_tests); } + // Store the tests in a VecDeque so we can efficiently remove the first element to run the + // tests in the order they were passed (unless shuffled). + let mut remaining = VecDeque::from(filtered_tests); let mut pending = 0; let (tx, rx) = channel::<CompletedTest>(); @@ -295,7 +325,7 @@ where if concurrency == 1 { while !remaining.is_empty() { - let (id, test) = remaining.pop().unwrap(); + let (id, test) = remaining.pop_front().unwrap(); let event = TestEvent::TeWait(test.desc.clone()); notify_about_test_event(event)?; let join_handle = @@ -309,7 +339,7 @@ where } else { while pending > 0 || !remaining.is_empty() { while pending < concurrency && !remaining.is_empty() { - let (id, test) = remaining.pop().unwrap(); + let (id, test) = remaining.pop_front().unwrap(); let timeout = time::get_default_test_timeout(); let desc = test.desc.clone(); @@ -421,9 +451,6 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA RunIgnored::No => {} } - // Sort the tests alphabetically - filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice())); - filtered } @@ -479,7 +506,7 @@ pub fn run_test( id: TestId, desc: TestDesc, monitor_ch: Sender<CompletedTest>, - testfn: Box<dyn FnOnce() + Send>, + testfn: Box<dyn FnOnce() -> Result<(), String> + Send>, opts: TestRunOpts, ) -> Option<thread::JoinHandle<()>> { let concurrency = opts.concurrency; @@ -568,11 +595,11 @@ pub fn run_test( /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. 
#[inline(never)] -fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) { - f(); +fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T { + let result = f(); // prevent this frame from being tail-call optimised away - black_box(()); + black_box(result) } fn run_test_in_process( @@ -580,7 +607,7 @@ fn run_test_in_process( desc: TestDesc, nocapture: bool, report_time: bool, - testfn: Box<dyn FnOnce() + Send>, + testfn: Box<dyn FnOnce() -> Result<(), String> + Send>, monitor_ch: Sender<CompletedTest>, time_opts: Option<time::TestTimeOptions>, ) { @@ -592,7 +619,7 @@ fn run_test_in_process( } let start = report_time.then(Instant::now); - let result = catch_unwind(AssertUnwindSafe(testfn)); + let result = fold_err(catch_unwind(AssertUnwindSafe(testfn))); let exec_time = start.map(|start| { let duration = start.elapsed(); TestExecTime(duration) @@ -609,6 +636,19 @@ fn run_test_in_process( monitor_ch.send(message).unwrap(); } +fn fold_err<T, E>( + result: Result<Result<T, E>, Box<dyn Any + Send>>, +) -> Result<T, Box<dyn Any + Send>> +where + E: Send + 'static, +{ + match result { + Ok(Err(e)) => Err(Box::new(e)), + Ok(Ok(v)) => Ok(v), + Err(e) => Err(e), + } +} + fn spawn_test_subprocess( id: TestId, desc: TestDesc, @@ -664,7 +704,10 @@ fn spawn_test_subprocess( monitor_ch.send(message).unwrap(); } -fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! { +fn run_test_in_spawned_subprocess( + desc: TestDesc, + testfn: Box<dyn FnOnce() -> Result<(), String> + Send>, +) -> ! { let builtin_panic_hook = panic::take_hook(); let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| { let test_result = match panic_info { @@ -690,7 +733,9 @@ fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Sen }); let record_result2 = record_result.clone(); panic::set_hook(Box::new(move |info| record_result2(Some(&info)))); - testfn(); + if let Err(message) = testfn() { + panic!("{}", message); + } record_result(None); unreachable!("panic=abort callback should have exited the process") } diff --git a/library/test/src/term.rs b/library/test/src/term.rs index b256ab7b8..a14b0d4f5 100644 --- a/library/test/src/term.rs +++ b/library/test/src/term.rs @@ -39,7 +39,7 @@ pub(crate) fn stdout() -> Option<Box<StdoutTerminal>> { pub(crate) fn stdout() -> Option<Box<StdoutTerminal>> { TerminfoTerminal::new(io::stdout()) .map(|t| Box::new(t) as Box<StdoutTerminal>) - .or_else(|| WinConsole::new(io::stdout()).ok().map(|t| Box::new(t) as Box<StdoutTerminal>)) + .or_else(|| Some(Box::new(WinConsole::new(io::stdout())) as Box<StdoutTerminal>)) } /// Terminal color definitions diff --git a/library/test/src/term/win.rs b/library/test/src/term/win.rs index 4bdbd6ee7..55020141a 100644 --- a/library/test/src/term/win.rs +++ b/library/test/src/term/win.rs @@ -113,8 +113,7 @@ impl<T: Write + Send + 'static> WinConsole<T> { } } - /// Returns `None` whenever the terminal cannot be created for some reason. 
- pub(crate) fn new(out: T) -> io::Result<WinConsole<T>> { + pub(crate) fn new(out: T) -> WinConsole<T> { use std::mem::MaybeUninit; let fg; @@ -132,13 +131,13 @@ impl<T: Write + Send + 'static> WinConsole<T> { bg = color::BLACK; } } - Ok(WinConsole { + WinConsole { buf: out, def_foreground: fg, def_background: bg, foreground: fg, background: bg, - }) + } } } diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs index 0b81aff59..b54be64ef 100644 --- a/library/test/src/tests.rs +++ b/library/test/src/tests.rs @@ -67,7 +67,7 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> { no_run: false, test_type: TestType::Unknown, }, - testfn: DynTestFn(Box::new(move || {})), + testfn: DynTestFn(Box::new(move || Ok(()))), }, TestDescAndFn { desc: TestDesc { @@ -79,14 +79,14 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> { no_run: false, test_type: TestType::Unknown, }, - testfn: DynTestFn(Box::new(move || {})), + testfn: DynTestFn(Box::new(move || Ok(()))), }, ] } #[test] pub fn do_not_run_ignored_tests() { - fn f() { + fn f() -> Result<(), String> { panic!(); } let desc = TestDescAndFn { @@ -109,7 +109,9 @@ pub fn do_not_run_ignored_tests() { #[test] pub fn ignored_tests_result_in_ignored() { - fn f() {} + fn f() -> Result<(), String> { + Ok(()) + } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), @@ -132,7 +134,7 @@ pub fn ignored_tests_result_in_ignored() { #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic() { - fn f() { + fn f() -> Result<(), String> { panic!(); } let desc = TestDescAndFn { @@ -157,7 +159,7 @@ fn test_should_panic() { #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_good_message() { - fn f() { + fn f() -> Result<(), String> { panic!("an error message"); } let desc = TestDescAndFn { @@ -183,7 +185,7 @@ fn test_should_panic_good_message() { #[cfg(not(target_os = "emscripten"))] fn test_should_panic_bad_message() { use crate::tests::TrFailedMsg; - fn f() { + fn f() -> Result<(), String> { panic!("an error message"); } let expected = "foobar"; @@ -214,7 +216,7 @@ fn test_should_panic_bad_message() { fn test_should_panic_non_string_message_type() { use crate::tests::TrFailedMsg; use std::any::TypeId; - fn f() { + fn f() -> Result<(), String> { std::panic::panic_any(1i32); } let expected = "foobar"; @@ -249,7 +251,9 @@ fn test_should_panic_but_succeeds() { let should_panic_variants = [ShouldPanic::Yes, ShouldPanic::YesWithMessage("error message")]; for &should_panic in should_panic_variants.iter() { - fn f() {} + fn f() -> Result<(), String> { + Ok(()) + } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), @@ -283,7 +287,9 @@ fn test_should_panic_but_succeeds() { } fn report_time_test_template(report_time: bool) -> Option<TestExecTime> { - fn f() {} + fn f() -> Result<(), String> { + Ok(()) + } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), @@ -318,7 +324,9 @@ fn test_should_report_time() { } fn time_test_failure_template(test_type: TestType) -> TestResult { - fn f() {} + fn f() -> Result<(), String> { + Ok(()) + } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), @@ -480,7 +488,7 @@ pub fn exclude_should_panic_option() { no_run: false, test_type: TestType::Unknown, }, - testfn: DynTestFn(Box::new(move || {})), + testfn: DynTestFn(Box::new(move || Ok(()))), }); let filtered = filter_tests(&opts, tests); @@ -504,7 +512,7 @@ pub fn exact_filter_match() { no_run: false, test_type: TestType::Unknown, }, - 
testfn: DynTestFn(Box::new(move || {})), + testfn: DynTestFn(Box::new(move || Ok(()))), }) .collect() } @@ -580,7 +588,9 @@ fn sample_tests() -> Vec<TestDescAndFn> { "test::run_include_ignored_option".to_string(), "test::sort_tests".to_string(), ]; - fn testfn() {} + fn testfn() -> Result<(), String> { + Ok(()) + } let mut tests = Vec::new(); for name in &names { let test = TestDescAndFn { @@ -601,33 +611,6 @@ fn sample_tests() -> Vec<TestDescAndFn> { } #[test] -pub fn sort_tests() { - let mut opts = TestOpts::new(); - opts.run_tests = true; - - let tests = sample_tests(); - let filtered = filter_tests(&opts, tests); - - let expected = vec![ - "isize::test_pow".to_string(), - "isize::test_to_str".to_string(), - "sha1::test".to_string(), - "test::do_not_run_ignored_tests".to_string(), - "test::filter_for_ignored_option".to_string(), - "test::first_free_arg_should_be_a_filter".to_string(), - "test::ignored_tests_result_in_ignored".to_string(), - "test::parse_ignored_flag".to_string(), - "test::parse_include_ignored_flag".to_string(), - "test::run_include_ignored_option".to_string(), - "test::sort_tests".to_string(), - ]; - - for (a, b) in expected.iter().zip(filtered) { - assert_eq!(*a, b.desc.name.to_string()); - } -} - -#[test] pub fn shuffle_tests() { let mut opts = TestOpts::new(); opts.shuffle = true; @@ -717,21 +700,26 @@ pub fn test_metricmap_compare() { #[test] pub fn test_bench_once_no_iter() { - fn f(_: &mut Bencher) {} - bench::run_once(f); + fn f(_: &mut Bencher) -> Result<(), String> { + Ok(()) + } + bench::run_once(f).unwrap(); } #[test] pub fn test_bench_once_iter() { - fn f(b: &mut Bencher) { - b.iter(|| {}) + fn f(b: &mut Bencher) -> Result<(), String> { + b.iter(|| {}); + Ok(()) } - bench::run_once(f); + bench::run_once(f).unwrap(); } #[test] pub fn test_bench_no_iter() { - fn f(_: &mut Bencher) {} + fn f(_: &mut Bencher) -> Result<(), String> { + Ok(()) + } let (tx, rx) = channel(); @@ -751,8 +739,9 @@ pub fn test_bench_no_iter() { #[test] pub fn test_bench_iter() { - fn f(b: &mut Bencher) { - b.iter(|| {}) + fn f(b: &mut Bencher) -> Result<(), String> { + b.iter(|| {}); + Ok(()) } let (tx, rx) = channel(); @@ -821,3 +810,33 @@ fn should_sort_failures_before_printing_them() { let bpos = s.find("b").unwrap(); assert!(apos < bpos); } + +#[test] +#[cfg(not(target_os = "emscripten"))] +fn test_dyn_bench_returning_err_fails_when_run_as_test() { + fn f(_: &mut Bencher) -> Result<(), String> { + Result::Err("An error".into()) + } + let desc = TestDescAndFn { + desc: TestDesc { + name: StaticTestName("whatever"), + ignore: false, + ignore_message: None, + should_panic: ShouldPanic::No, + compile_fail: false, + no_run: false, + test_type: TestType::Unknown, + }, + testfn: DynBenchFn(Box::new(f)), + }; + let (tx, rx) = channel(); + let notify = move |event: TestEvent| { + if let TestEvent::TeResult(result) = event { + tx.send(result).unwrap(); + } + Ok(()) + }; + run_tests(&TestOpts { run_tests: true, ..TestOpts::new() }, vec![desc], notify).unwrap(); + let result = rx.recv().unwrap().result; + assert_eq!(result, TrFailed); +} diff --git a/library/test/src/types.rs b/library/test/src/types.rs index ffb1efe18..888afff79 100644 --- a/library/test/src/types.rs +++ b/library/test/src/types.rs @@ -75,14 +75,15 @@ impl fmt::Display for TestName { } // A function that runs a test. If the function returns successfully, -// the test succeeds; if the function panics then the test fails. 
We -// may need to come up with a more clever definition of test in order -// to support isolation of tests into threads. +// the test succeeds; if the function panics or returns Result::Err +// then the test fails. We may need to come up with a more clever +// definition of test in order to support isolation of tests into +// threads. pub enum TestFn { - StaticTestFn(fn()), - StaticBenchFn(fn(&mut Bencher)), - DynTestFn(Box<dyn FnOnce() + Send>), - DynBenchFn(Box<dyn Fn(&mut Bencher) + Send>), + StaticTestFn(fn() -> Result<(), String>), + StaticBenchFn(fn(&mut Bencher) -> Result<(), String>), + DynTestFn(Box<dyn FnOnce() -> Result<(), String> + Send>), + DynBenchFn(Box<dyn Fn(&mut Bencher) -> Result<(), String> + Send>), } impl TestFn {
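The sketches that follow are editorial illustrations of the libtest changes in the hunks above; every identifier that is not quoted from the diff (MiniBencher, Summary, MiniTestFn, and so on) is an invented stand-in, not a real libtest item. First, the bench.rs hunk: Bencher::bench now threads the callback's Result through instead of discarding it, and run_once propagates it to its caller. A minimal standalone mirror of that control flow, assuming a dummy summary in place of the real timing statistics:

```rust
#[derive(Clone, Copy, Debug)]
struct Summary {
    median_ns: u64,
}

#[derive(Default)]
struct MiniBencher {
    summary: Option<Summary>,
}

impl MiniBencher {
    fn iter<T>(&mut self, mut inner: impl FnMut() -> T) {
        // A real bencher would time repeated calls; this stub runs the body
        // once and records a dummy summary.
        let _ = inner();
        self.summary = Some(Summary { median_ns: 1 });
    }

    // Mirrors the new signature: an Err from the callback propagates instead
    // of being dropped; on success the recorded summary (if any) is returned.
    fn bench<F>(&mut self, mut f: F) -> Result<Option<Summary>, String>
    where
        F: FnMut(&mut MiniBencher) -> Result<(), String>,
    {
        let result = f(self);
        result.map(|_| self.summary)
    }
}

fn run_once<F>(f: F) -> Result<(), String>
where
    F: FnMut(&mut MiniBencher) -> Result<(), String>,
{
    let mut bs = MiniBencher::default();
    bs.bench(f).map(|_| ())
}

fn main() {
    let mut bs = MiniBencher::default();
    let summary = bs
        .bench(|b| {
            b.iter(|| 2 + 2);
            Ok(())
        })
        .expect("callback succeeded");
    println!("median: {:?}", summary.map(|s| s.median_ns));

    // Errors now surface to the caller instead of being swallowed.
    assert!(run_once(|_| Err("setup failed".to_string())).is_err());
}
```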
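The cli.rs hunk replaces the hand-rolled helpers/isatty module with std's IsTerminal trait. Below is a sketch of the same color-detection logic; is_terminal was still nightly-gated when this diff landed (hence the feature(is_terminal) line added to lib.rs) but has since been stabilized (Rust 1.70), so the sketch compiles on current stable:

```rust
// Color detection using std::io::IsTerminal, the replacement for the deleted
// helpers/isatty module. ColorConfig and use_color mimic the TestOpts logic.
use std::io::{self, IsTerminal};

#[derive(Clone, Copy)]
enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

fn use_color(color: ColorConfig, nocapture: bool) -> bool {
    match color {
        // Same decision as TestOpts::use_color after the change.
        ColorConfig::AutoColor => !nocapture && io::stdout().is_terminal(),
        ColorConfig::AlwaysColor => true,
        ColorConfig::NeverColor => false,
    }
}

fn main() {
    for (name, cfg) in [
        ("auto", ColorConfig::AutoColor),
        ("always", ColorConfig::AlwaysColor),
        ("never", ColorConfig::NeverColor),
    ] {
        println!("{name}: {}", use_color(cfg, false));
    }
}
```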
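The lib.rs hunk in test_main installs a wrapper around the built-in panic hook so that, for non-unwinding panics, captured test output is flushed before the panic message appears. The real code relies on the unstable panic_can_unwind and internal_output_capture features; the stable sketch below only demonstrates the take_hook/set_hook chaining pattern it is built on, with a plain stdout flush standing in for the capture handling:

```rust
// Sketch of the hook-chaining pattern: take the existing hook, wrap it, and
// reinstall the wrapper so the original behaviour still runs afterwards.
use std::io::Write;
use std::panic;

fn install_flushing_hook() {
    let builtin_panic_hook = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        // Make buffered stdout visible before the panic message hits stderr.
        let _ = std::io::stdout().flush();
        builtin_panic_hook(info);
    }));
}

fn main() {
    install_flushing_hook();
    print!("buffered output without a newline ...");
    let result = panic::catch_unwind(|| panic!("boom"));
    assert!(result.is_err());
    println!(" recovered");
}
```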
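assert_test_result now reports a non-zero termination code as Result::Err instead of asserting. The real function goes through Termination::report() and the unstable to_i32() helper, which cannot be reproduced on stable Rust, so the sketch below substitutes a hypothetical Report trait to show the same decision logic:

```rust
// `Report` is an illustrative stand-in for std::process::Termination.
trait Report {
    fn report(self) -> i32;
}

impl Report for () {
    fn report(self) -> i32 {
        0
    }
}

impl Report for Result<(), String> {
    fn report(self) -> i32 {
        match self {
            Ok(()) => 0,
            Err(_) => 1,
        }
    }
}

// Same shape as the new assert_test_result: failure becomes data, not a panic.
fn assert_test_result<T: Report>(result: T) -> Result<(), String> {
    let code = result.report();
    if code == 0 {
        Ok(())
    } else {
        Err(format!(
            "the test returned a termination value with a non-zero status code ({code}) \
             which indicates a failure"
        ))
    }
}

fn main() {
    assert!(assert_test_result(()).is_ok());
    let failed = assert_test_result(Err::<(), String>("boom".to_string()));
    println!("{}", failed.unwrap_err());
}
```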
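run_tests now queues the filtered tests in a VecDeque and takes them with pop_front, and filter_tests no longer sorts names alphabetically (the sort_tests unit test is removed accordingly), so tests execute in the order they were passed unless a shuffle seed is set. A small sketch of that FIFO scheduling, with run_in_order as an invented stand-in for the scheduler loop:

```rust
use std::collections::VecDeque;

fn run_in_order(names: Vec<&str>) -> Vec<String> {
    // Previously the Vec was reversed and popped from the back; pop_front on
    // a VecDeque keeps declaration order without the reversal trick.
    let mut remaining: VecDeque<&str> = VecDeque::from(names);
    let mut executed = Vec::new();
    while let Some(name) = remaining.pop_front() {
        // Stand-in for spawning the test; we just record the order.
        executed.push(name.to_string());
    }
    executed
}

fn main() {
    let order = run_in_order(vec!["zeta::works", "alpha::works", "mid::works"]);
    // Declaration order is preserved; nothing is sorted alphabetically.
    assert_eq!(order, ["zeta::works", "alpha::works", "mid::works"]);
    println!("{order:?}");
}
```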
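__rust_begin_short_backtrace is made generic so it can wrap the new Result-returning test bodies, and it routes the result through black_box to keep the frame from being tail-call optimised away. std::hint::black_box was stabilised in Rust 1.66, which is also why the bench_black_box feature gate disappears at the top of this section. A standalone version of the shim (renamed begin_short_backtrace here):

```rust
use std::hint::black_box;

#[inline(never)]
fn begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    let result = f();
    // Prevent this frame from being tail-call optimised away, so backtrace
    // trimming can still find it.
    black_box(result)
}

fn main() {
    let value = begin_short_backtrace(|| 40 + 2);
    assert_eq!(value, 42);

    let outcome: Result<(), String> = begin_short_backtrace(|| Ok(()));
    assert!(outcome.is_ok());
}
```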
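run_test_in_process now has two failure channels: the test body can return Err, and it can still panic inside catch_unwind. The new fold_err helper collapses both into one Result; below is a self-contained copy of it with a small demonstration (the empty panic hook is only there to keep the demo's output quiet):

```rust
use std::any::Any;
use std::panic::{self, catch_unwind, AssertUnwindSafe};

// Verbatim shape of the helper added in the diff: flatten an explicit Err
// from the test body into the same channel as a panic payload.
fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        Ok(Err(e)) => Err(Box::new(e)),
        Ok(Ok(v)) => Ok(v),
        Err(e) => Err(e),
    }
}

fn main() {
    // A test that fails by returning Err ...
    let returned_err = fold_err(catch_unwind(AssertUnwindSafe(|| {
        Err::<(), String>("explicit failure".to_string())
    })));
    assert!(returned_err.is_err());

    // ... and one that fails by panicking end up in the same error channel.
    panic::set_hook(Box::new(|_| {})); // silence the default panic message
    let panicked = fold_err(catch_unwind(AssertUnwindSafe(
        || -> Result<(), String> { panic!("boom") },
    )));
    assert!(panicked.is_err());

    // A passing test flows through as Ok.
    let passed = fold_err(catch_unwind(AssertUnwindSafe(|| Ok::<(), String>(()))));
    assert!(passed.is_ok());
    println!("fold_err behaves as expected");
}
```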
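The term/win.rs hunk makes WinConsole::new infallible: the io::Result wrapper never carried a real error, because a failed console query already fell back to default colours. A platform-neutral sketch of that constructor pattern, with Console and query_console_attributes as invented stand-ins for the Windows-specific code:

```rust
struct Console {
    foreground: u32,
    background: u32,
}

fn query_console_attributes() -> Option<(u32, u32)> {
    // Stand-in for the platform query; pretend it failed.
    None
}

impl Console {
    // No Result in the return type: a failed query falls back to defaults.
    fn new() -> Console {
        let (fg, bg) = query_console_attributes().unwrap_or((7, 0)); // white on black
        Console { foreground: fg, background: bg }
    }
}

fn main() {
    let c = Console::new();
    println!("fg={} bg={}", c.foreground, c.background);
}
```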
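Finally, the types.rs hunk changes every TestFn variant to return Result<(), String>, which is what lets the new test_dyn_bench_returning_err_fails_when_run_as_test case in tests.rs expect TrFailed when a dynamic bench body returns Err. A minimal mirror of that enum (MiniTestFn and MiniBencher are illustrative; the real enum also has a StaticBenchFn variant):

```rust
struct MiniBencher;

impl MiniBencher {
    fn iter<T>(&mut self, mut inner: impl FnMut() -> T) {
        let _ = inner();
    }
}

enum MiniTestFn {
    StaticTestFn(fn() -> Result<(), String>),
    DynTestFn(Box<dyn FnOnce() -> Result<(), String> + Send>),
    DynBenchFn(Box<dyn Fn(&mut MiniBencher) -> Result<(), String> + Send>),
}

fn run_as_test(testfn: MiniTestFn) -> Result<(), String> {
    match testfn {
        MiniTestFn::StaticTestFn(f) => f(),
        MiniTestFn::DynTestFn(f) => f(),
        // In test (not bench) mode the bench body runs once; its Err is a failure.
        MiniTestFn::DynBenchFn(f) => f(&mut MiniBencher),
    }
}

fn passing_static() -> Result<(), String> {
    Ok(())
}

fn failing_bench_body(b: &mut MiniBencher) -> Result<(), String> {
    b.iter(|| ());
    Err("An error".into())
}

fn main() {
    assert!(run_as_test(MiniTestFn::StaticTestFn(passing_static)).is_ok());
    assert!(run_as_test(MiniTestFn::DynTestFn(Box::new(|| Ok(())))).is_ok());
    assert!(run_as_test(MiniTestFn::DynBenchFn(Box::new(failing_bench_body))).is_err());
    println!("an Err from a dyn bench body is reported as a test failure");
}
```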