From 2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 17 Apr 2024 14:19:50 +0200 Subject: Merging upstream version 1.69.0+dfsg1. Signed-off-by: Daniel Baumann --- library/core/benches/array.rs | 19 + library/core/benches/char/methods.rs | 40 +- library/core/benches/lib.rs | 1 + .../core/benches/num/flt2dec/strategy/dragon.rs | 18 +- library/core/benches/num/flt2dec/strategy/grisu.rs | 18 +- library/core/src/alloc/global.rs | 9 +- library/core/src/any.rs | 2 +- library/core/src/array/drain.rs | 76 + library/core/src/array/equality.rs | 73 +- library/core/src/array/mod.rs | 273 ++- library/core/src/cell.rs | 35 +- library/core/src/cell/once.rs | 4 + library/core/src/char/convert.rs | 28 +- library/core/src/char/decode.rs | 6 +- library/core/src/char/methods.rs | 24 +- library/core/src/char/mod.rs | 2 +- library/core/src/cmp.rs | 81 +- library/core/src/cmp/bytewise.rs | 83 + library/core/src/const_closure.rs | 78 - library/core/src/convert/mod.rs | 2 +- library/core/src/error.rs | 19 +- library/core/src/ffi/c_str.rs | 27 +- library/core/src/ffi/mod.rs | 1 + library/core/src/fmt/mod.rs | 87 +- library/core/src/fmt/rt/v1.rs | 18 + library/core/src/future/mod.rs | 45 - library/core/src/hint.rs | 3 +- library/core/src/intrinsics.rs | 6 +- library/core/src/intrinsics/mir.rs | 18 +- library/core/src/iter/adapters/array_chunks.rs | 20 +- library/core/src/iter/adapters/by_ref_sized.rs | 19 +- library/core/src/iter/adapters/cloned.rs | 15 +- library/core/src/iter/adapters/filter_map.rs | 2 +- library/core/src/iter/adapters/flatten.rs | 4 +- library/core/src/iter/adapters/map.rs | 15 +- library/core/src/iter/adapters/zip.rs | 9 +- library/core/src/iter/mod.rs | 8 +- library/core/src/iter/range.rs | 1 - library/core/src/iter/traits/double_ended.rs | 2 +- library/core/src/iter/traits/exact_size.rs | 10 + library/core/src/iter/traits/iterator.rs | 121 +- library/core/src/iter/traits/marker.rs | 11 + library/core/src/iter/traits/mod.rs | 3 + library/core/src/iter/traits/unchecked_iterator.rs | 36 + library/core/src/lib.rs | 15 +- library/core/src/marker.rs | 73 +- library/core/src/mem/mod.rs | 1 - library/core/src/net/display_buffer.rs | 40 + library/core/src/net/ip_addr.rs | 2070 ++++++++++++++++++++ library/core/src/net/mod.rs | 24 + library/core/src/net/parser.rs | 498 +++++ library/core/src/net/socket_addr.rs | 664 +++++++ library/core/src/num/dec2flt/mod.rs | 17 +- library/core/src/num/error.rs | 57 +- library/core/src/num/int_log10.rs | 8 + library/core/src/num/int_macros.rs | 80 +- library/core/src/num/mod.rs | 396 +++- library/core/src/num/shells/i128.rs | 4 +- library/core/src/num/shells/i16.rs | 4 +- library/core/src/num/shells/i32.rs | 4 +- library/core/src/num/shells/i64.rs | 4 +- library/core/src/num/shells/i8.rs | 4 +- library/core/src/num/shells/isize.rs | 4 +- library/core/src/num/shells/u128.rs | 4 +- library/core/src/num/shells/u16.rs | 4 +- library/core/src/num/shells/u32.rs | 4 +- library/core/src/num/shells/u64.rs | 4 +- library/core/src/num/shells/u8.rs | 4 +- library/core/src/num/shells/usize.rs | 4 +- library/core/src/num/uint_macros.rs | 63 +- library/core/src/ops/arith.rs | 22 +- library/core/src/ops/control_flow.rs | 43 - library/core/src/ops/range.rs | 98 +- library/core/src/ops/try_trait.rs | 26 +- library/core/src/option.rs | 141 +- library/core/src/panicking.rs | 5 +- library/core/src/primitive_docs.rs | 6 +- library/core/src/ptr/alignment.rs | 2 +- library/core/src/ptr/const_ptr.rs | 22 +- library/core/src/ptr/mut_ptr.rs | 24 
+- library/core/src/result.rs | 48 +- library/core/src/slice/cmp.rs | 27 +- library/core/src/slice/iter.rs | 4 +- library/core/src/slice/iter/macros.rs | 9 + library/core/src/slice/memchr.rs | 15 +- library/core/src/slice/mod.rs | 70 +- library/core/src/slice/sort.rs | 526 ++--- library/core/src/str/iter.rs | 10 +- library/core/src/str/traits.rs | 4 - library/core/src/sync/atomic.rs | 196 +- library/core/src/task/poll.rs | 5 + library/core/src/task/wake.rs | 2 +- library/core/tests/array.rs | 25 + library/core/tests/iter/adapters/mod.rs | 2 +- library/core/tests/iter/range.rs | 1 - library/core/tests/iter/traits/iterator.rs | 3 + library/core/tests/lib.rs | 6 + library/core/tests/net/ip_addr.rs | 1035 ++++++++++ library/core/tests/net/mod.rs | 13 + library/core/tests/net/parser.rs | 149 ++ library/core/tests/net/socket_addr.rs | 233 +++ library/core/tests/num/dec2flt/mod.rs | 2 +- library/core/tests/ptr.rs | 6 +- 103 files changed, 6864 insertions(+), 1342 deletions(-) create mode 100644 library/core/benches/array.rs create mode 100644 library/core/src/array/drain.rs create mode 100644 library/core/src/cmp/bytewise.rs delete mode 100644 library/core/src/const_closure.rs create mode 100644 library/core/src/iter/traits/unchecked_iterator.rs create mode 100644 library/core/src/net/display_buffer.rs create mode 100644 library/core/src/net/ip_addr.rs create mode 100644 library/core/src/net/mod.rs create mode 100644 library/core/src/net/parser.rs create mode 100644 library/core/src/net/socket_addr.rs create mode 100644 library/core/tests/net/ip_addr.rs create mode 100644 library/core/tests/net/mod.rs create mode 100644 library/core/tests/net/parser.rs create mode 100644 library/core/tests/net/socket_addr.rs (limited to 'library/core') diff --git a/library/core/benches/array.rs b/library/core/benches/array.rs new file mode 100644 index 000000000..d8cc44d05 --- /dev/null +++ b/library/core/benches/array.rs @@ -0,0 +1,19 @@ +use test::black_box; +use test::Bencher; + +macro_rules! 
map_array { + ($func_name:ident, $start_item: expr, $map_item: expr, $arr_size: expr) => { + #[bench] + fn $func_name(b: &mut Bencher) { + let arr = [$start_item; $arr_size]; + b.iter(|| black_box(arr).map(|_| black_box($map_item))); + } + }; +} + +map_array!(map_8byte_8byte_8, 0u64, 1u64, 80); +map_array!(map_8byte_8byte_64, 0u64, 1u64, 640); +map_array!(map_8byte_8byte_256, 0u64, 1u64, 2560); + +map_array!(map_8byte_256byte_256, 0u64, [0u64; 4], 2560); +map_array!(map_256byte_8byte_256, [0u64; 4], 0u64, 2560); diff --git a/library/core/benches/char/methods.rs b/library/core/benches/char/methods.rs index 9408f83c3..5d4df1ac8 100644 --- a/library/core/benches/char/methods.rs +++ b/library/core/benches/char/methods.rs @@ -1,26 +1,26 @@ -use test::Bencher; +use test::{black_box, Bencher}; const CHARS: [char; 9] = ['0', 'x', '2', '5', 'A', 'f', '7', '8', '9']; const RADIX: [u32; 5] = [2, 8, 10, 16, 32]; #[bench] fn bench_to_digit_radix_2(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(2)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(2)).min()) } #[bench] fn bench_to_digit_radix_10(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(10)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(10)).min()) } #[bench] fn bench_to_digit_radix_16(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(16)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(16)).min()) } #[bench] fn bench_to_digit_radix_36(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(36)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(36)).min()) } #[bench] @@ -31,47 +31,59 @@ fn bench_to_digit_radix_var(b: &mut Bencher) { .cycle() .zip(RADIX.iter().cycle()) .take(10_000) - .map(|(c, radix)| c.to_digit(*radix)) + .map(|(c, radix)| black_box(c).to_digit(*radix)) .min() }) } #[bench] fn bench_to_ascii_uppercase(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_ascii_uppercase()).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_ascii_uppercase()).min()) } #[bench] fn bench_to_ascii_lowercase(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_ascii_lowercase()).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_ascii_lowercase()).min()) } #[bench] fn bench_ascii_mix_to_uppercase(b: &mut Bencher) { - b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (0..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_ascii_mix_to_lowercase(b: &mut Bencher) { - b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (0..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } #[bench] fn bench_ascii_char_to_uppercase(b: &mut Bencher) { - b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (0..=127).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_ascii_char_to_lowercase(b: &mut Bencher) { - b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (0..=127).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } 
#[bench] fn bench_non_ascii_char_to_uppercase(b: &mut Bencher) { - b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (128..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_non_ascii_char_to_lowercase(b: &mut Bencher) { - b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (128..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs index f1244d932..e4100120d 100644 --- a/library/core/benches/lib.rs +++ b/library/core/benches/lib.rs @@ -9,6 +9,7 @@ extern crate test; mod any; +mod array; mod ascii; mod char; mod fmt; diff --git a/library/core/benches/num/flt2dec/strategy/dragon.rs b/library/core/benches/num/flt2dec/strategy/dragon.rs index 319b9773e..377c99eff 100644 --- a/library/core/benches/num/flt2dec/strategy/dragon.rs +++ b/library/core/benches/num/flt2dec/strategy/dragon.rs @@ -1,14 +1,14 @@ use super::super::*; use core::num::flt2dec::strategy::dragon::*; use std::mem::MaybeUninit; -use test::Bencher; +use test::{black_box, Bencher}; #[bench] fn bench_small_shortest(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -17,7 +17,7 @@ fn bench_big_shortest(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -26,7 +26,7 @@ fn bench_small_exact_3(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -35,7 +35,7 @@ fn bench_big_exact_3(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -44,7 +44,7 @@ fn bench_small_exact_12(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -53,7 +53,7 @@ fn bench_big_exact_12(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -62,7 +62,7 @@ fn bench_small_exact_inf(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -71,6 +71,6 @@ fn bench_big_exact_inf(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } diff --git a/library/core/benches/num/flt2dec/strategy/grisu.rs b/library/core/benches/num/flt2dec/strategy/grisu.rs index 8e47a046c..6bea5e55d 100644 --- a/library/core/benches/num/flt2dec/strategy/grisu.rs +++ 
b/library/core/benches/num/flt2dec/strategy/grisu.rs @@ -1,7 +1,7 @@ use super::super::*; use core::num::flt2dec::strategy::grisu::*; use std::mem::MaybeUninit; -use test::Bencher; +use test::{black_box, Bencher}; pub fn decode_finite(v: T) -> Decoded { match decode(v).1 { @@ -15,7 +15,7 @@ fn bench_small_shortest(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -24,7 +24,7 @@ fn bench_big_shortest(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -33,7 +33,7 @@ fn bench_small_exact_3(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -42,7 +42,7 @@ fn bench_big_exact_3(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -51,7 +51,7 @@ fn bench_small_exact_12(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -60,7 +60,7 @@ fn bench_big_exact_12(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -69,7 +69,7 @@ fn bench_small_exact_inf(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -78,6 +78,6 @@ fn bench_big_exact_inf(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs index 1d80b8bf9..18da70451 100644 --- a/library/core/src/alloc/global.rs +++ b/library/core/src/alloc/global.rs @@ -203,7 +203,7 @@ pub unsafe trait GlobalAlloc { ptr } - /// Shrink or grow a block of memory to the given `new_size`. + /// Shrink or grow a block of memory to the given `new_size` in bytes. /// The block is described by the given `ptr` pointer and `layout`. /// /// If this returns a non-null pointer, then ownership of the memory block @@ -211,10 +211,11 @@ pub unsafe trait GlobalAlloc { /// Any access to the old `ptr` is Undefined Behavior, even if the /// allocation remained in-place. The newly returned pointer is the only valid pointer /// for accessing this memory now. + /// /// The new memory block is allocated with `layout`, - /// but with the `size` updated to `new_size`. This new layout must be - /// used when deallocating the new memory block with `dealloc`. The range - /// `0..min(layout.size(), new_size)` of the new memory block is + /// but with the `size` updated to `new_size` in bytes. 
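The clarified `realloc` contract above (sizes are in bytes, the prefix `0..min(layout.size(), new_size)` keeps its values, and deallocation must use the layout with the updated size) can be exercised through the system allocator. A small sketch, assuming nothing beyond the stable `GlobalAlloc` API:

```rust
use std::alloc::{GlobalAlloc, Layout, System};

fn main() {
    unsafe {
        let layout = Layout::array::<u8>(4).unwrap();
        let p = System.alloc(layout);
        assert!(!p.is_null());
        p.copy_from_nonoverlapping(b"abcd".as_ptr(), 4);

        // Grow to 8 bytes: the first min(4, 8) = 4 bytes keep their values,
        // and the new block must later be freed with the *new* size's layout.
        let q = System.realloc(p, layout, 8);
        assert!(!q.is_null());
        assert_eq!(std::slice::from_raw_parts(q, 4), b"abcd");

        System.dealloc(q, Layout::array::<u8>(8).unwrap());
    }
}
```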
+ /// This new layout must be used when deallocating the new memory block with `dealloc`. + /// The range `0..min(layout.size(), new_size)` of the new memory block is /// guaranteed to have the same values as the original block. /// /// If this method returns null, then ownership of the memory diff --git a/library/core/src/any.rs b/library/core/src/any.rs index c0fb0d993..c27646b8f 100644 --- a/library/core/src/any.rs +++ b/library/core/src/any.rs @@ -56,7 +56,7 @@ //! let value_any = value as &dyn Any; //! //! // Try to convert our value to a `String`. If successful, we want to -//! // output the String`'s length as well as its value. If not, it's a +//! // output the `String`'s length as well as its value. If not, it's a //! // different type: just print it out unadorned. //! match value_any.downcast_ref::() { //! Some(as_string) => { diff --git a/library/core/src/array/drain.rs b/library/core/src/array/drain.rs new file mode 100644 index 000000000..5fadf907b --- /dev/null +++ b/library/core/src/array/drain.rs @@ -0,0 +1,76 @@ +use crate::iter::{TrustedLen, UncheckedIterator}; +use crate::mem::ManuallyDrop; +use crate::ptr::drop_in_place; +use crate::slice; + +/// A situationally-optimized version of `array.into_iter().for_each(func)`. +/// +/// [`crate::array::IntoIter`]s are great when you need an owned iterator, but +/// storing the entire array *inside* the iterator like that can sometimes +/// pessimize code. Notable, it can be more bytes than you really want to move +/// around, and because the array accesses index into it SRoA has a harder time +/// optimizing away the type than it does iterators that just hold a couple pointers. +/// +/// Thus this function exists, which gives a way to get *moved* access to the +/// elements of an array using a small iterator -- no bigger than a slice iterator. +/// +/// The function-taking-a-closure structure makes it safe, as it keeps callers +/// from looking at already-dropped elements. +pub(crate) fn drain_array_with( + array: [T; N], + func: impl for<'a> FnOnce(Drain<'a, T>) -> R, +) -> R { + let mut array = ManuallyDrop::new(array); + // SAFETY: Now that the local won't drop it, it's ok to construct the `Drain` which will. + let drain = Drain(array.iter_mut()); + func(drain) +} + +/// See [`drain_array_with`] -- this is `pub(crate)` only so it's allowed to be +/// mentioned in the signature of that method. (Otherwise it hits `E0446`.) +// INVARIANT: It's ok to drop the remainder of the inner iterator. +pub(crate) struct Drain<'a, T>(slice::IterMut<'a, T>); + +impl Drop for Drain<'_, T> { + fn drop(&mut self) { + // SAFETY: By the type invariant, we're allowed to drop all these. + unsafe { drop_in_place(self.0.as_mut_slice()) } + } +} + +impl Iterator for Drain<'_, T> { + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + let p: *const T = self.0.next()?; + // SAFETY: The iterator was already advanced, so we won't drop this later. + Some(unsafe { p.read() }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let n = self.len(); + (n, Some(n)) + } +} + +impl ExactSizeIterator for Drain<'_, T> { + #[inline] + fn len(&self) -> usize { + self.0.len() + } +} + +// SAFETY: This is a 1:1 wrapper for a slice iterator, which is also `TrustedLen`. +unsafe impl TrustedLen for Drain<'_, T> {} + +impl UncheckedIterator for Drain<'_, T> { + unsafe fn next_unchecked(&mut self) -> T { + // SAFETY: `Drain` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. 
+ let p: *const T = unsafe { self.0.next_unchecked() }; + // SAFETY: The iterator was already advanced, so we won't drop this later. + unsafe { p.read() } + } +} diff --git a/library/core/src/array/equality.rs b/library/core/src/array/equality.rs index b2c895f88..d749865f7 100644 --- a/library/core/src/array/equality.rs +++ b/library/core/src/array/equality.rs @@ -1,6 +1,5 @@ +use crate::cmp::BytewiseEq; use crate::convert::TryInto; -use crate::num::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize}; -use crate::num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize}; #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq<[B; N]> for [A; N] @@ -144,74 +143,14 @@ impl, Other, const N: usize> SpecArrayEq for T { } } -impl, U, const N: usize> SpecArrayEq for T { +impl, U, const N: usize> SpecArrayEq for T { fn spec_eq(a: &[T; N], b: &[U; N]) -> bool { - // SAFETY: This is why `IsRawEqComparable` is an `unsafe trait`. - unsafe { - let b = &*b.as_ptr().cast::<[T; N]>(); - crate::intrinsics::raw_eq(a, b) - } + // SAFETY: Arrays are compared element-wise, and don't add any padding + // between elements, so when the elements are `BytewiseEq`, we can + // compare the entire array at once. + unsafe { crate::intrinsics::raw_eq(a, crate::mem::transmute(b)) } } fn spec_ne(a: &[T; N], b: &[U; N]) -> bool { !Self::spec_eq(a, b) } } - -/// `U` exists on here mostly because `min_specialization` didn't let me -/// repeat the `T` type parameter in the above specialization, so instead -/// the `T == U` constraint comes from the impls on this. -/// # Safety -/// - Neither `Self` nor `U` has any padding. -/// - `Self` and `U` have the same layout. -/// - `Self: PartialEq` is byte-wise (this means no floats, among other things) -#[rustc_specialization_trait] -unsafe trait IsRawEqComparable: PartialEq {} - -macro_rules! is_raw_eq_comparable { - ($($t:ty),+ $(,)?) => {$( - unsafe impl IsRawEqComparable<$t> for $t {} - )+}; -} - -// SAFETY: All the ordinary integer types have no padding, and are not pointers. -is_raw_eq_comparable!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize); - -// SAFETY: bool and char have *niches*, but no *padding* (and these are not pointer types), so this -// is sound -is_raw_eq_comparable!(bool, char); - -// SAFETY: Similarly, the non-zero types have a niche, but no undef and no pointers, -// and they compare like their underlying numeric type. -is_raw_eq_comparable!( - NonZeroU8, - NonZeroU16, - NonZeroU32, - NonZeroU64, - NonZeroU128, - NonZeroUsize, - NonZeroI8, - NonZeroI16, - NonZeroI32, - NonZeroI64, - NonZeroI128, - NonZeroIsize, -); - -// SAFETY: The NonZero types have the "null" optimization guaranteed, and thus -// are also safe to equality-compare bitwise inside an `Option`. -// The way `PartialOrd` is defined for `Option` means that this wouldn't work -// for `<` or `>` on the signed types, but since we only do `==` it's fine. 
-is_raw_eq_comparable!( - Option, - Option, - Option, - Option, - Option, - Option, - Option, - Option, - Option, - Option, - Option, - Option, -); diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index 2825e0bbb..1643842d6 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -10,16 +10,19 @@ use crate::convert::{Infallible, TryFrom}; use crate::error::Error; use crate::fmt; use crate::hash::{self, Hash}; -use crate::iter::TrustedLen; +use crate::iter::UncheckedIterator; use crate::mem::{self, MaybeUninit}; use crate::ops::{ ChangeOutputType, ControlFlow, FromResidual, Index, IndexMut, NeverShortCircuit, Residual, Try, }; use crate::slice::{Iter, IterMut}; +mod drain; mod equality; mod iter; +pub(crate) use drain::drain_array_with; + #[stable(feature = "array_value_iter", since = "1.51.0")] pub use iter::IntoIter; @@ -52,16 +55,11 @@ pub use iter::IntoIter; /// ``` #[inline] #[stable(feature = "array_from_fn", since = "1.63.0")] -pub fn from_fn(mut cb: F) -> [T; N] +pub fn from_fn(cb: F) -> [T; N] where F: FnMut(usize) -> T, { - let mut idx = 0; - [(); N].map(|_| { - let res = cb(idx); - idx += 1; - res - }) + try_from_fn(NeverShortCircuit::wrap_mut_1(cb)).0 } /// Creates an array `[T; N]` where each fallible array element `T` is returned by the `cb` call. @@ -101,9 +99,14 @@ where R: Try, R::Residual: Residual<[R::Output; N]>, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { try_collect_into_array_unchecked(&mut (0..N).map(cb)) } + let mut array = MaybeUninit::uninit_array::(); + match try_from_fn_erased(&mut array, cb) { + ControlFlow::Break(r) => FromResidual::from_residual(r), + ControlFlow::Continue(()) => { + // SAFETY: All elements of the array were populated. + try { unsafe { MaybeUninit::array_assume_init(array) } } + } + } } /// Converts a reference to `T` into a reference to an array of length 1 (without copying). @@ -131,7 +134,8 @@ pub struct TryFromSliceError(()); impl fmt::Display for TryFromSliceError { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.__description(), f) + #[allow(deprecated)] + self.description().fmt(f) } } @@ -139,20 +143,6 @@ impl fmt::Display for TryFromSliceError { impl Error for TryFromSliceError { #[allow(deprecated)] fn description(&self) -> &str { - self.__description() - } -} - -impl TryFromSliceError { - #[unstable( - feature = "array_error_internals", - reason = "available through Error trait and this method should not \ - be exposed publicly", - issue = "none" - )] - #[inline] - #[doc(hidden)] - pub fn __description(&self) -> &str { "could not convert slice to array" } } @@ -427,9 +417,7 @@ trait SpecArrayClone: Clone { impl SpecArrayClone for T { #[inline] default fn clone(array: &[T; N]) -> [T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut array.iter().cloned()) } + from_trusted_iterator(array.iter().cloned()) } } @@ -513,9 +501,7 @@ impl [T; N] { where F: FnMut(T) -> U, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. 
- unsafe { collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) } + self.try_map(NeverShortCircuit::wrap_mut_1(f)).0 } /// A fallible function `f` applied to each element on array `self` in order to @@ -552,9 +538,7 @@ impl [T; N] { R: Try, R::Residual: Residual<[R::Output; N]>, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { try_collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) } + drain_array_with(self, |iter| try_from_trusted_iterator(iter.map(f))) } /// 'Zips up' two arrays into a single array of pairs. @@ -575,11 +559,9 @@ impl [T; N] { /// ``` #[unstable(feature = "array_zip", issue = "80094")] pub fn zip(self, rhs: [U; N]) -> [(T, U); N] { - let mut iter = IntoIterator::into_iter(self).zip(rhs); - - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut iter) } + drain_array_with(self, |lhs| { + drain_array_with(rhs, |rhs| from_trusted_iterator(crate::iter::zip(lhs, rhs))) + }) } /// Returns a slice containing the entire array. Equivalent to `&s[..]`. @@ -626,9 +608,7 @@ impl [T; N] { /// ``` #[unstable(feature = "array_methods", issue = "76118")] pub fn each_ref(&self) -> [&T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut self.iter()) } + from_trusted_iterator(self.iter()) } /// Borrows each element mutably and returns an array of mutable references @@ -648,9 +628,7 @@ impl [T; N] { /// ``` #[unstable(feature = "array_methods", issue = "76118")] pub fn each_mut(&mut self) -> [&mut T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut self.iter_mut()) } + from_trusted_iterator(self.iter_mut()) } /// Divides one array reference into two at an index. @@ -810,105 +788,71 @@ impl [T; N] { } } -/// Pulls `N` items from `iter` and returns them as an array. If the iterator -/// yields fewer than `N` items, this function exhibits undefined behavior. +/// Populate an array from the first `N` elements of `iter` /// -/// See [`try_collect_into_array`] for more information. +/// # Panics /// +/// If the iterator doesn't actually have enough items. /// -/// # Safety -/// -/// It is up to the caller to guarantee that `iter` yields at least `N` items. -/// Violating this condition causes undefined behavior. -unsafe fn try_collect_into_array_unchecked(iter: &mut I) -> R::TryType -where - // Note: `TrustedLen` here is somewhat of an experiment. This is just an - // internal function, so feel free to remove if this bound turns out to be a - // bad idea. In that case, remember to also remove the lower bound - // `debug_assert!` below! - I: Iterator + TrustedLen, - I::Item: Try, - R: Residual<[T; N]>, -{ - debug_assert!(N <= iter.size_hint().1.unwrap_or(usize::MAX)); - debug_assert!(N <= iter.size_hint().0); - - // SAFETY: covered by the function contract. - unsafe { try_collect_into_array(iter).unwrap_unchecked() } +/// By depending on `TrustedLen`, however, we can do that check up-front (where +/// it easily optimizes away) so it doesn't impact the loop that fills the array. +#[inline] +fn from_trusted_iterator(iter: impl UncheckedIterator) -> [T; N] { + try_from_trusted_iterator(iter.map(NeverShortCircuit)).0 } -// Infallible version of `try_collect_into_array_unchecked`. 
-unsafe fn collect_into_array_unchecked(iter: &mut I) -> [I::Item; N] +#[inline] +fn try_from_trusted_iterator( + iter: impl UncheckedIterator, +) -> ChangeOutputType where - I: Iterator + TrustedLen, + R: Try, + R::Residual: Residual<[T; N]>, { - let mut map = iter.map(NeverShortCircuit); - - // SAFETY: The same safety considerations w.r.t. the iterator length - // apply for `try_collect_into_array_unchecked` as for - // `collect_into_array_unchecked` - match unsafe { try_collect_into_array_unchecked(&mut map) } { - NeverShortCircuit(array) => array, + assert!(iter.size_hint().0 >= N); + fn next(mut iter: impl UncheckedIterator) -> impl FnMut(usize) -> T { + move |_| { + // SAFETY: We know that `from_fn` will call this at most N times, + // and we checked to ensure that we have at least that many items. + unsafe { iter.next_unchecked() } + } } + + try_from_fn(next(iter)) } -/// Pulls `N` items from `iter` and returns them as an array. If the iterator -/// yields fewer than `N` items, `Err` is returned containing an iterator over -/// the already yielded items. +/// Version of [`try_from_fn`] using a passed-in slice in order to avoid +/// needing to monomorphize for every array length. /// -/// Since the iterator is passed as a mutable reference and this function calls -/// `next` at most `N` times, the iterator can still be used afterwards to -/// retrieve the remaining items. +/// This takes a generator rather than an iterator so that *at the type level* +/// it never needs to worry about running out of items. When combined with +/// an infallible `Try` type, that means the loop canonicalizes easily, allowing +/// it to optimize well. /// -/// If `iter.next()` panicks, all items already yielded by the iterator are -/// dropped. +/// It would be *possible* to unify this and [`iter_next_chunk_erased`] into one +/// function that does the union of both things, but last time it was that way +/// it resulted in poor codegen from the "are there enough source items?" checks +/// not optimizing away. So if you give it a shot, make sure to watch what +/// happens in the codegen tests. #[inline] -fn try_collect_into_array( - iter: &mut I, -) -> Result> +fn try_from_fn_erased( + buffer: &mut [MaybeUninit], + mut generator: impl FnMut(usize) -> R, +) -> ControlFlow where - I: Iterator, - I::Item: Try, - R: Residual<[T; N]>, + R: Try, { - if N == 0 { - // SAFETY: An empty array is always inhabited and has no validity invariants. - return Ok(Try::from_output(unsafe { mem::zeroed() })); - } + let mut guard = Guard { array_mut: buffer, initialized: 0 }; - let mut array = MaybeUninit::uninit_array::(); - let mut guard = Guard { array_mut: &mut array, initialized: 0 }; - - for _ in 0..N { - match iter.next() { - Some(item_rslt) => { - let item = match item_rslt.branch() { - ControlFlow::Break(r) => { - return Ok(FromResidual::from_residual(r)); - } - ControlFlow::Continue(elem) => elem, - }; - - // SAFETY: `guard.initialized` starts at 0, which means push can be called - // at most N times, which this loop does. - unsafe { - guard.push_unchecked(item); - } - } - None => { - let alive = 0..guard.initialized; - mem::forget(guard); - // SAFETY: `array` was initialized with exactly `initialized` - // number of elements. 
- return Err(unsafe { IntoIter::new_unchecked(array, alive) }); - } - } + while guard.initialized < guard.array_mut.len() { + let item = generator(guard.initialized).branch()?; + + // SAFETY: The loop condition ensures we have space to push the item + unsafe { guard.push_unchecked(item) }; } mem::forget(guard); - // SAFETY: All elements of the array were populated in the loop above. - let output = unsafe { array.transpose().assume_init() }; - Ok(Try::from_output(output)) + ControlFlow::Continue(()) } /// Panic guard for incremental initialization of arrays. @@ -922,14 +866,14 @@ where /// /// To minimize indirection fields are still pub but callers should at least use /// `push_unchecked` to signal that something unsafe is going on. -pub(crate) struct Guard<'a, T, const N: usize> { +struct Guard<'a, T> { /// The array to be initialized. - pub array_mut: &'a mut [MaybeUninit; N], + pub array_mut: &'a mut [MaybeUninit], /// The number of items that have been initialized so far. pub initialized: usize, } -impl Guard<'_, T, N> { +impl Guard<'_, T> { /// Adds an item to the array and updates the initialized item counter. /// /// # Safety @@ -947,28 +891,73 @@ impl Guard<'_, T, N> { } } -impl Drop for Guard<'_, T, N> { +impl Drop for Guard<'_, T> { fn drop(&mut self) { - debug_assert!(self.initialized <= N); + debug_assert!(self.initialized <= self.array_mut.len()); // SAFETY: this slice will contain only initialized objects. unsafe { crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut( - &mut self.array_mut.get_unchecked_mut(..self.initialized), + self.array_mut.get_unchecked_mut(..self.initialized), )); } } } -/// Returns the next chunk of `N` items from the iterator or errors with an -/// iterator over the remainder. Used for `Iterator::next_chunk`. +/// Pulls `N` items from `iter` and returns them as an array. If the iterator +/// yields fewer than `N` items, `Err` is returned containing an iterator over +/// the already yielded items. +/// +/// Since the iterator is passed as a mutable reference and this function calls +/// `next` at most `N` times, the iterator can still be used afterwards to +/// retrieve the remaining items. +/// +/// If `iter.next()` panicks, all items already yielded by the iterator are +/// dropped. +/// +/// Used for [`Iterator::next_chunk`]. +#[inline] +pub(crate) fn iter_next_chunk( + iter: &mut impl Iterator, +) -> Result<[T; N], IntoIter> { + let mut array = MaybeUninit::uninit_array::(); + let r = iter_next_chunk_erased(&mut array, iter); + match r { + Ok(()) => { + // SAFETY: All elements of `array` were populated. + Ok(unsafe { MaybeUninit::array_assume_init(array) }) + } + Err(initialized) => { + // SAFETY: Only the first `initialized` elements were populated + Err(unsafe { IntoIter::new_unchecked(array, 0..initialized) }) + } + } +} + +/// Version of [`iter_next_chunk`] using a passed-in slice in order to avoid +/// needing to monomorphize for every array length. +/// +/// Unfortunately this loop has two exit conditions, the buffer filling up +/// or the iterator running out of items, making it tend to optimize poorly. 
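The `Guard` used by these helpers is a drop guard: if the generator or the iterator panics partway through, only the already-initialized prefix of the buffer is dropped. A self-contained sketch of that pattern (the helper name `fill` is purely illustrative and this is not the actual implementation above):

```rust
use std::array;
use std::mem::{forget, MaybeUninit};

/// Fill a `[T; N]` one element at a time. If `f` panics, the drop guard
/// frees only the prefix that was already initialized (the same job the
/// internal `Guard` performs for `from_fn`/`next_chunk`).
fn fill<T, const N: usize>(mut f: impl FnMut(usize) -> T) -> [T; N] {
    struct Guard<'a, T> {
        slots: &'a mut [MaybeUninit<T>],
        initialized: usize,
    }
    impl<T> Drop for Guard<'_, T> {
        fn drop(&mut self) {
            for slot in &mut self.slots[..self.initialized] {
                // SAFETY: exactly `initialized` leading slots were written below.
                unsafe { slot.assume_init_drop() };
            }
        }
    }

    let mut slots: [MaybeUninit<T>; N] = array::from_fn(|_| MaybeUninit::uninit());
    let mut guard = Guard { slots: &mut slots, initialized: 0 };
    for i in 0..N {
        guard.slots[i].write(f(i)); // `f` may panic here; the guard cleans up
        guard.initialized = i + 1;
    }
    forget(guard); // success: the caller now owns every element
    // SAFETY: the loop initialized all `N` slots.
    slots.map(|slot| unsafe { slot.assume_init() })
}

fn main() {
    let squares: [u32; 4] = fill(|i| (i as u32) * (i as u32));
    assert_eq!(squares, [0, 1, 4, 9]);
}
```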
#[inline] -pub(crate) fn iter_next_chunk( - iter: &mut I, -) -> Result<[I::Item; N], IntoIter> -where - I: Iterator, -{ - let mut map = iter.map(NeverShortCircuit); - try_collect_into_array(&mut map).map(|NeverShortCircuit(arr)| arr) +fn iter_next_chunk_erased( + buffer: &mut [MaybeUninit], + iter: &mut impl Iterator, +) -> Result<(), usize> { + let mut guard = Guard { array_mut: buffer, initialized: 0 }; + while guard.initialized < guard.array_mut.len() { + let Some(item) = iter.next() else { + // Unlike `try_from_fn_erased`, we want to keep the partial results, + // so we need to defuse the guard instead of using `?`. + let initialized = guard.initialized; + mem::forget(guard); + return Err(initialized) + }; + + // SAFETY: The loop condition ensures we have space to push the item + unsafe { guard.push_unchecked(item) }; + } + + mem::forget(guard); + Ok(()) } diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index 129213fde..897d03595 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -196,7 +196,7 @@ use crate::cmp::Ordering; use crate::fmt::{self, Debug, Display}; use crate::marker::{PhantomData, Unsize}; use crate::mem; -use crate::ops::{CoerceUnsized, Deref, DerefMut}; +use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn}; use crate::ptr::{self, NonNull}; mod lazy; @@ -571,6 +571,16 @@ impl Cell { #[unstable(feature = "coerce_unsized", issue = "18598")] impl, U> CoerceUnsized> for Cell {} +// Allow types that wrap `Cell` to also implement `DispatchFromDyn` +// and become object safe method receivers. +// Note that currently `Cell` itself cannot be a method receiver +// because it does not implement Deref. +// In other words: +// `self: Cell<&Self>` won't work +// `self: CellWrapper` becomes possible +#[unstable(feature = "dispatch_from_dyn", issue = "none")] +impl, U> DispatchFromDyn> for Cell {} + impl Cell<[T]> { /// Returns a `&[Cell]` from a `&Cell<[T]>` /// @@ -622,7 +632,7 @@ pub struct RefCell { // Stores the location of the earliest currently active borrow. // This gets updated whenever we go from having zero borrows // to having a single borrow. When a borrow occurs, this gets included - // in the generated `BorrowError/`BorrowMutError` + // in the generated `BorrowError`/`BorrowMutError` #[cfg(feature = "debug_refcell")] borrowed_at: Cell>>, value: UnsafeCell, @@ -2078,6 +2088,16 @@ impl const From for UnsafeCell { #[unstable(feature = "coerce_unsized", issue = "18598")] impl, U> CoerceUnsized> for UnsafeCell {} +// Allow types that wrap `UnsafeCell` to also implement `DispatchFromDyn` +// and become object safe method receivers. +// Note that currently `UnsafeCell` itself cannot be a method receiver +// because it does not implement Deref. +// In other words: +// `self: UnsafeCell<&Self>` won't work +// `self: UnsafeCellWrapper` becomes possible +#[unstable(feature = "dispatch_from_dyn", issue = "none")] +impl, U> DispatchFromDyn> for UnsafeCell {} + /// [`UnsafeCell`], but [`Sync`]. /// /// This is just an `UnsafeCell`, except it implements `Sync` @@ -2169,6 +2189,17 @@ impl const From for SyncUnsafeCell { //#[unstable(feature = "sync_unsafe_cell", issue = "95439")] impl, U> CoerceUnsized> for SyncUnsafeCell {} +// Allow types that wrap `SyncUnsafeCell` to also implement `DispatchFromDyn` +// and become object safe method receivers. +// Note that currently `SyncUnsafeCell` itself cannot be a method receiver +// because it does not implement Deref. 
+// In other words: +// `self: SyncUnsafeCell<&Self>` won't work +// `self: SyncUnsafeCellWrapper` becomes possible +#[unstable(feature = "dispatch_from_dyn", issue = "none")] +//#[unstable(feature = "sync_unsafe_cell", issue = "95439")] +impl, U> DispatchFromDyn> for SyncUnsafeCell {} + #[allow(unused)] fn assert_coerce_unsized( a: UnsafeCell<&i32>, diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs index 7757068a4..f74e563f1 100644 --- a/library/core/src/cell/once.rs +++ b/library/core/src/cell/once.rs @@ -298,3 +298,7 @@ impl const From for OnceCell { OnceCell { inner: UnsafeCell::new(Some(value)) } } } + +// Just like for `Cell` this isn't needed, but results in nicer error messages. +#[unstable(feature = "once_cell", issue = "74465")] +impl !Sync for OnceCell {} diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs index f1a51a550..136bbcb8b 100644 --- a/library/core/src/char/convert.rs +++ b/library/core/src/char/convert.rs @@ -2,6 +2,7 @@ use crate::char::TryFromCharError; use crate::convert::TryFrom; +use crate::error::Error; use crate::fmt; use crate::mem::transmute; use crate::str::FromStr; @@ -150,14 +151,16 @@ pub struct ParseCharError { kind: CharErrorKind, } -impl ParseCharError { - #[unstable( - feature = "char_error_internals", - reason = "this method should not be available publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +enum CharErrorKind { + EmptyString, + TooManyChars, +} + +#[stable(feature = "char_from_str", since = "1.20.0")] +impl Error for ParseCharError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { CharErrorKind::EmptyString => "cannot parse char from empty string", CharErrorKind::TooManyChars => "too many characters in string", @@ -165,16 +168,11 @@ impl ParseCharError { } } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -enum CharErrorKind { - EmptyString, - TooManyChars, -} - #[stable(feature = "char_from_str", since = "1.20.0")] impl fmt::Display for ParseCharError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs index eeb088030..dbfe251f2 100644 --- a/library/core/src/char/decode.rs +++ b/library/core/src/char/decode.rs @@ -3,8 +3,6 @@ use crate::error::Error; use crate::fmt; -use super::from_u32_unchecked; - /// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s. /// /// This `struct` is created by the [`decode_utf16`] method on [`char`]. See its @@ -49,7 +47,7 @@ impl> Iterator for DecodeUtf16 { if !u.is_utf16_surrogate() { // SAFETY: not a surrogate - Some(Ok(unsafe { from_u32_unchecked(u as u32) })) + Some(Ok(unsafe { char::from_u32_unchecked(u as u32) })) } else if u >= 0xDC00 { // a trailing surrogate Some(Err(DecodeUtf16Error { code: u })) @@ -69,7 +67,7 @@ impl> Iterator for DecodeUtf16 { // all ok, so lets decode it. 
let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000; // SAFETY: we checked that it's a legal unicode value - Some(Ok(unsafe { from_u32_unchecked(c) })) + Some(Ok(unsafe { char::from_u32_unchecked(c) })) } } diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs index 3e7383b4c..9bc97ea0b 100644 --- a/library/core/src/char/methods.rs +++ b/library/core/src/char/methods.rs @@ -53,15 +53,13 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char::decode_utf16; - /// /// // 𝄞music /// let v = [ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834, /// ]; /// /// assert_eq!( - /// decode_utf16(v) + /// char::decode_utf16(v) /// .map(|r| r.map_err(|e| e.unpaired_surrogate())) /// .collect::>(), /// vec![ @@ -77,16 +75,14 @@ impl char { /// A lossy decoder can be obtained by replacing `Err` results with the replacement character: /// /// ``` - /// use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; - /// /// // 𝄞music /// let v = [ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834, /// ]; /// /// assert_eq!( - /// decode_utf16(v) - /// .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)) + /// char::decode_utf16(v) + /// .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) /// .collect::(), /// "𝄞mus�ic�" /// ); @@ -123,8 +119,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = char::from_u32(0x2764); /// /// assert_eq!(Some('❤'), c); @@ -133,8 +127,6 @@ impl char { /// Returning `None` when the input is not a valid `char`: /// /// ``` - /// use std::char; - /// /// let c = char::from_u32(0x110000); /// /// assert_eq!(None, c); @@ -176,8 +168,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = unsafe { char::from_u32_unchecked(0x2764) }; /// /// assert_eq!('❤', c); @@ -210,8 +200,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = char::from_digit(4, 10); /// /// assert_eq!(Some('4'), c); @@ -225,8 +213,6 @@ impl char { /// Returning `None` when the input is not a digit: /// /// ``` - /// use std::char; - /// /// let c = char::from_digit(20, 10); /// /// assert_eq!(None, c); @@ -235,8 +221,6 @@ impl char { /// Passing a large radix, causing a panic: /// /// ```should_panic - /// use std::char; - /// /// // this panics /// let _c = char::from_digit(1, 37); /// ``` @@ -1786,7 +1770,7 @@ pub fn encode_utf16_raw(mut code: u32, dst: &mut [u16]) -> &mut [u16] { } else { panic!( "encode_utf16: need {} units to encode U+{:X}, but the buffer has {}", - from_u32_unchecked(code).len_utf16(), + char::from_u32_unchecked(code).len_utf16(), code, dst.len(), ) diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs index af98059cf..8ec78e887 100644 --- a/library/core/src/char/mod.rs +++ b/library/core/src/char/mod.rs @@ -189,7 +189,7 @@ impl Iterator for EscapeUnicode { } EscapeUnicodeState::Value => { let hex_digit = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf; - let c = from_digit(hex_digit, 16).unwrap(); + let c = char::from_digit(hex_digit, 16).unwrap(); if self.hex_digit_idx == 0 { self.state = EscapeUnicodeState::RightBrace; } else { diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index a7d6fec7d..068637d1a 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -22,7 +22,9 @@ #![stable(feature = "rust1", since = "1.0.0")] -use crate::const_closure::ConstFnMutClosure; +mod bytewise; +pub(crate) use bytewise::BytewiseEq; + use crate::marker::Destruct; use 
self::Ordering::*; @@ -798,12 +800,6 @@ pub trait Ord: Eq + PartialOrd { Self: Sized, Self: ~const Destruct, { - #[cfg(not(bootstrap))] - { - max_by(self, other, Ord::cmp) - } - - #[cfg(bootstrap)] match self.cmp(&other) { Ordering::Less | Ordering::Equal => other, Ordering::Greater => self, @@ -828,12 +824,6 @@ pub trait Ord: Eq + PartialOrd { Self: Sized, Self: ~const Destruct, { - #[cfg(not(bootstrap))] - { - min_by(self, other, Ord::cmp) - } - - #[cfg(bootstrap)] match self.cmp(&other) { Ordering::Less | Ordering::Equal => self, Ordering::Greater => other, @@ -1200,12 +1190,7 @@ pub const fn min(v1: T, v2: T) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn min_by Ordering>(v1: T, v2: T, compare: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, -{ +pub fn min_by Ordering>(v1: T, v2: T, compare: F) -> T { match compare(&v1, &v2) { Ordering::Less | Ordering::Equal => v1, Ordering::Greater => v2, @@ -1227,30 +1212,8 @@ where #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn min_by_key K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, - K: ~const Destruct, -{ - cfg_if! { - if #[cfg(bootstrap)] { - const fn imp K, K: ~const Ord>( - f: &mut F, - (v1, v2): (&T, &T), - ) -> Ordering - where - T: ~const Destruct, - K: ~const Destruct, - { - f(v1).cmp(&f(v2)) - } - min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) - } else { - min_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2))) - } - } +pub fn min_by_key K, K: Ord>(v1: T, v2: T, mut f: F) -> T { + min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2))) } /// Compares and returns the maximum of two values. 
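Behaviorally, `min_by`, `max_by`, and the `_by_key` variants are unchanged by dropping the `const` machinery; the `_by_key` forms are still defined in terms of comparing the extracted keys. For example:

```rust
use std::cmp;

fn main() {
    // min_by_key picks the value whose key compares smaller.
    assert_eq!(cmp::min_by_key(-2_i32, 1, |x| x.abs()), 1);
    // max_by with an explicit comparator; on a tie it returns the second argument.
    assert_eq!(cmp::max_by(3.0_f64, 7.0, |a, b| a.partial_cmp(b).unwrap()), 7.0);
}
```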
@@ -1291,12 +1254,7 @@ pub const fn max(v1: T, v2: T) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn max_by Ordering>(v1: T, v2: T, compare: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, -{ +pub fn max_by Ordering>(v1: T, v2: T, compare: F) -> T { match compare(&v1, &v2) { Ordering::Less | Ordering::Equal => v2, Ordering::Greater => v1, @@ -1318,24 +1276,8 @@ where #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn max_by_key K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, - K: ~const Destruct, -{ - const fn imp K, K: ~const Ord>( - f: &mut F, - (v1, v2): (&T, &T), - ) -> Ordering - where - T: ~const Destruct, - K: ~const Destruct, - { - f(v1).cmp(&f(v2)) - } - max_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) +pub fn max_by_key K, K: Ord>(v1: T, v2: T, mut f: F) -> T { + max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2))) } // Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types @@ -1536,9 +1478,10 @@ mod impls { } } #[stable(feature = "rust1", since = "1.0.0")] - impl PartialOrd<&B> for &A + #[rustc_const_unstable(feature = "const_cmp", issue = "92391")] + impl const PartialOrd<&B> for &A where - A: PartialOrd, + A: ~const PartialOrd, { #[inline] fn partial_cmp(&self, other: &&B) -> Option { diff --git a/library/core/src/cmp/bytewise.rs b/library/core/src/cmp/bytewise.rs new file mode 100644 index 000000000..2548d9e24 --- /dev/null +++ b/library/core/src/cmp/bytewise.rs @@ -0,0 +1,83 @@ +use crate::num::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize}; +use crate::num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize}; + +/// Types where `==` & `!=` are equivalent to comparing their underlying bytes. +/// +/// Importantly, this means no floating-point types, as those have different +/// byte representations (like `-0` and `+0`) which compare as the same. +/// Since byte arrays are `Eq`, that implies that these types are probably also +/// `Eq`, but that's not technically required to use this trait. +/// +/// `Rhs` is *de facto* always `Self`, but the separate parameter is important +/// to avoid the `specializing impl repeats parameter` error when consuming this. +/// +/// # Safety +/// +/// - `Self` and `Rhs` have no padding. +/// - `Self` and `Rhs` have the same layout (size and alignment). +/// - Neither `Self` nor `Rhs` have provenance, so integer comparisons are correct. +/// - `>::{eq,ne}` are equivalent to comparing the bytes. +#[rustc_specialization_trait] +pub(crate) unsafe trait BytewiseEq: PartialEq + Sized {} + +macro_rules! is_bytewise_comparable { + ($($t:ty),+ $(,)?) => {$( + unsafe impl BytewiseEq for $t {} + )+}; +} + +// SAFETY: All the ordinary integer types have no padding, and are not pointers. +is_bytewise_comparable!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize); + +// SAFETY: These have *niches*, but no *padding* and no *provenance*, +// so we can compare them directly. +is_bytewise_comparable!(bool, char, super::Ordering); + +// SAFETY: Similarly, the non-zero types have a niche, but no undef and no pointers, +// and they compare like their underlying numeric type. 
+is_bytewise_comparable!( + NonZeroU8, + NonZeroU16, + NonZeroU32, + NonZeroU64, + NonZeroU128, + NonZeroUsize, + NonZeroI8, + NonZeroI16, + NonZeroI32, + NonZeroI64, + NonZeroI128, + NonZeroIsize, +); + +// SAFETY: The NonZero types have the "null" optimization guaranteed, and thus +// are also safe to equality-compare bitwise inside an `Option`. +// The way `PartialOrd` is defined for `Option` means that this wouldn't work +// for `<` or `>` on the signed types, but since we only do `==` it's fine. +is_bytewise_comparable!( + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, +); + +macro_rules! is_bytewise_comparable_array_length { + ($($n:literal),+ $(,)?) => {$( + // SAFETY: Arrays have no padding between elements, so if the elements are + // `BytewiseEq`, then the whole array can be too. + unsafe impl, U> BytewiseEq<[U; $n]> for [T; $n] {} + )+}; +} + +// Frustratingly, this can't be made const-generic as it gets +// error: specializing impl repeats parameter `N` +// so just do it for a couple of plausibly-common ones. +is_bytewise_comparable_array_length!(0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64); diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs deleted file mode 100644 index 97900a486..000000000 --- a/library/core/src/const_closure.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::marker::Destruct; -use crate::marker::Tuple; - -/// Struct representing a closure with mutably borrowed data. -/// -/// Example: -/// ```no_build -/// #![feature(const_mut_refs)] -/// use crate::const_closure::ConstFnMutClosure; -/// const fn imp(state: &mut i32, (arg,): (i32,)) -> i32 { -/// *state += arg; -/// *state -/// } -/// let mut i = 5; -/// let mut cl = ConstFnMutClosure::new(&mut i, imp); -/// -/// assert!(7 == cl(2)); -/// assert!(8 == cl(1)); -/// ``` -pub(crate) struct ConstFnMutClosure { - /// The Data captured by the Closure. - /// Must be either a (mutable) reference or a tuple of (mutable) references. - pub data: CapturedData, - /// The Function of the Closure, must be: Fn(CapturedData, ClosureArgs) -> ClosureReturn - pub func: Function, -} -impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData, Function> { - /// Function for creating a new closure. - /// - /// `data` is the a mutable borrow of data that is captured from the environment. - /// If you want Data to be a tuple of mutable Borrows, the struct must be constructed manually. - /// - /// `func` is the function of the closure, it gets the data and a tuple of the arguments closure - /// and return the return value of the closure. - pub(crate) const fn new( - data: &'a mut CapturedData, - func: Function, - ) -> Self - where - Function: ~const Fn(&mut CapturedData, ClosureArguments) -> ClosureReturnValue, - { - Self { data, func } - } -} - -macro_rules! 
impl_fn_mut_tuple { - ($($var:ident)*) => { - #[allow(unused_parens)] - impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const - FnOnce for ConstFnMutClosure<($(&'a mut $var),*), Function> - where - Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct, - { - type Output = ClosureReturnValue; - - extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output { - self.call_mut(args) - } - } - #[allow(unused_parens)] - impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const - FnMut for ConstFnMutClosure<($(&'a mut $var),*), Function> - where - Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue, - { - extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output { - #[allow(non_snake_case)] - let ($($var),*) = &mut self.data; - (self.func)(($($var),*), args) - } - } - }; -} -impl_fn_mut_tuple!(A); -impl_fn_mut_tuple!(A B); -impl_fn_mut_tuple!(A B C); -impl_fn_mut_tuple!(A B C D); -impl_fn_mut_tuple!(A B C D E); diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs index f95b880df..805354be0 100644 --- a/library/core/src/convert/mod.rs +++ b/library/core/src/convert/mod.rs @@ -542,7 +542,7 @@ pub trait Into: Sized { #[const_trait] pub trait From: Sized { /// Converts to this type from the input type. - #[lang = "from"] + #[rustc_diagnostic_item = "from_fn"] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn from(value: T) -> Self; diff --git a/library/core/src/error.rs b/library/core/src/error.rs index 7152300ab..d4103183c 100644 --- a/library/core/src/error.rs +++ b/library/core/src/error.rs @@ -28,6 +28,7 @@ use crate::fmt::{Debug, Display}; #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "Error")] #[rustc_has_incoherent_inherent_impls] +#[cfg_attr(not(bootstrap), allow(multiple_supertrait_upcastable))] pub trait Error: Debug + Display { /// The lower-level source of this error, if any. 
/// @@ -485,26 +486,10 @@ impl Error for crate::char::CharTryFromError { } } -#[stable(feature = "char_from_str", since = "1.20.0")] -impl Error for crate::char::ParseCharError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - #[stable(feature = "duration_checked_float", since = "1.66.0")] impl Error for crate::time::TryFromFloatSecsError {} -#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] -impl Error for crate::ffi::FromBytesWithNulError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] impl Error for crate::ffi::FromBytesUntilNulError {} #[unstable(feature = "get_many_mut", issue = "104642")] diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs index 15dd9ea7e..fe8abdf7f 100644 --- a/library/core/src/ffi/c_str.rs +++ b/library/core/src/ffi/c_str.rs @@ -1,4 +1,5 @@ use crate::cmp::Ordering; +use crate::error::Error; use crate::ffi::c_char; use crate::fmt; use crate::intrinsics; @@ -129,10 +130,12 @@ impl FromBytesWithNulError { const fn not_nul_terminated() -> FromBytesWithNulError { FromBytesWithNulError { kind: FromBytesWithNulErrorKind::NotNulTerminated } } +} - #[doc(hidden)] - #[unstable(feature = "cstr_internals", issue = "none")] - pub fn __description(&self) -> &str { +#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] +impl Error for FromBytesWithNulError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { FromBytesWithNulErrorKind::InteriorNul(..) => { "data provided contains an interior nul byte" @@ -150,10 +153,10 @@ impl FromBytesWithNulError { /// This error is created by the [`CStr::from_bytes_until_nul`] method. 
/// #[derive(Clone, PartialEq, Eq, Debug)] -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] pub struct FromBytesUntilNulError(()); -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] impl fmt::Display for FromBytesUntilNulError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "data provided does not contain a nul") @@ -180,7 +183,7 @@ impl Default for &CStr { impl fmt::Display for FromBytesWithNulError { #[allow(deprecated, deprecated_in_future)] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.__description())?; + f.write_str(self.description())?; if let FromBytesWithNulErrorKind::InteriorNul(pos) = self.kind { write!(f, " at byte pos {pos}")?; } @@ -306,8 +309,6 @@ impl CStr { /// /// # Examples /// ``` - /// #![feature(cstr_from_bytes_until_nul)] - /// /// use std::ffi::CStr; /// /// let mut buffer = [0u8; 16]; @@ -322,8 +323,9 @@ impl CStr { /// assert_eq!(c_str.to_str().unwrap(), "AAAAAAAA"); /// ``` /// - #[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] - #[rustc_const_unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] + #[rustc_allow_const_fn_unstable(const_slice_index)] + #[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] + #[rustc_const_stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] pub const fn from_bytes_until_nul(bytes: &[u8]) -> Result<&CStr, FromBytesUntilNulError> { let nul_pos = memchr::memchr(0, bytes); match nul_pos { @@ -455,6 +457,10 @@ impl CStr { /// to a contiguous region of memory terminated with a 0 byte to represent /// the end of the string. /// + /// The type of the returned pointer is + /// [`*const c_char`][crate::ffi::c_char], and whether it's + /// an alias for `*const i8` or `*const u8` is platform-specific. + /// /// **WARNING** /// /// The returned pointer is read-only; writing to it (including passing it @@ -468,6 +474,7 @@ impl CStr { /// # #![allow(unused_must_use)] #![allow(temporary_cstring_as_ptr)] /// use std::ffi::CString; /// + /// // Do not do this: /// let ptr = CString::new("Hello").expect("CString::new failed").as_ptr(); /// unsafe { /// // `ptr` is dangling diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs index 76daceecd..27f665904 100644 --- a/library/core/src/ffi/mod.rs +++ b/library/core/src/ffi/mod.rs @@ -144,6 +144,7 @@ mod c_char_definition { ) ), all(target_os = "fuchsia", target_arch = "aarch64"), + all(target_os = "nto", target_arch = "aarch64"), target_os = "horizon" ))] { pub type c_char = u8; diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs index fa5073e33..c9821bf81 100644 --- a/library/core/src/fmt/mod.rs +++ b/library/core/src/fmt/mod.rs @@ -267,6 +267,7 @@ extern "C" { /// family of functions. It contains a function to format the given value. At /// compile time it is ensured that the function and the value have the correct /// types, and then this struct is used to canonicalize arguments to one type. +#[cfg_attr(not(bootstrap), lang = "format_argument")] #[derive(Copy, Clone)] #[allow(missing_debug_implementations)] #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")] @@ -279,6 +280,7 @@ pub struct ArgumentV1<'a> { /// This struct represents the unsafety of constructing an `Arguments`. 
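The `as_ptr` warning above is about calling it on a temporary; the usual fix is to keep the `CString` in a named binding for as long as the pointer is used. A small sketch of that pattern (not taken from this patch):

```rust
use std::ffi::{c_char, CString};

fn main() {
    // Bind the CString so the allocation outlives the pointer; calling
    // `as_ptr` on a temporary is what produces the dangling pointer
    // warned about above.
    let hello = CString::new("Hello").expect("CString::new failed");
    let ptr = hello.as_ptr();
    unsafe {
        // `hello` is still alive here, so reading through `ptr` is sound.
        assert_eq!(*ptr, b'H' as c_char);
    }
}
```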
/// It exists, rather than an unsafe function, in order to simplify the expansion /// of `format_args!(..)` and reduce the scope of the `unsafe` block. +#[cfg_attr(not(bootstrap), lang = "format_unsafe_arg")] #[allow(missing_debug_implementations)] #[doc(hidden)] #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")] @@ -473,8 +475,8 @@ impl<'a> Arguments<'a> { /// ``` /// /// [`format()`]: ../../std/fmt/fn.format.html +#[cfg_attr(not(bootstrap), lang = "format_arguments")] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(test), rustc_diagnostic_item = "Arguments")] #[derive(Copy, Clone)] pub struct Arguments<'a> { // Format string pieces to print. @@ -489,9 +491,26 @@ pub struct Arguments<'a> { } impl<'a> Arguments<'a> { - /// Get the formatted string, if it has no arguments to be formatted. + /// Get the formatted string, if it has no arguments to be formatted at runtime. /// - /// This can be used to avoid allocations in the most trivial case. + /// This can be used to avoid allocations in some cases. + /// + /// # Guarantees + /// + /// For `format_args!("just a literal")`, this function is guaranteed to + /// return `Some("just a literal")`. + /// + /// For most cases with placeholders, this function will return `None`. + /// + /// However, the compiler may perform optimizations that can cause this + /// function to return `Some(_)` even if the format string contains + /// placeholders. For example, `format_args!("Hello, {}!", "world")` may be + /// optimized to `format_args!("Hello, world!")`, such that `as_str()` + /// returns `Some("Hello, world!")`. + /// + /// The behavior for anything but the trivial case (without placeholders) + /// is not guaranteed, and should not be relied upon for anything other + /// than optimization. 
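One plausible use of the guarantee spelled out above is to skip the formatting machinery for literal-only `format_args!`. A hedged sketch (the `render` helper is illustrative, not std API):

```rust
use std::borrow::Cow;
use std::fmt::Arguments;

// `as_str` returns `Option<&'static str>`, so the literal case can be
// borrowed without any formatting pass or allocation.
fn render(args: Arguments<'_>) -> Cow<'static, str> {
    match args.as_str() {
        Some(s) => Cow::Borrowed(s),
        None => Cow::Owned(args.to_string()),
    }
}

fn main() {
    assert_eq!(render(format_args!("just a literal")), "just a literal");
    assert_eq!(render(format_args!("{} items", 3)), "3 items");
}
```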
/// /// # Examples /// @@ -512,7 +531,7 @@ impl<'a> Arguments<'a> { /// ```rust /// assert_eq!(format_args!("hello").as_str(), Some("hello")); /// assert_eq!(format_args!("").as_str(), Some("")); - /// assert_eq!(format_args!("{}", 1).as_str(), None); + /// assert_eq!(format_args!("{:?}", std::env::current_dir()).as_str(), None); /// ``` #[stable(feature = "fmt_as_str", since = "1.52.0")] #[rustc_const_unstable(feature = "const_arguments_as_str", issue = "103900")] @@ -1355,11 +1374,11 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{}", Foo::new(2)), "2"); - /// assert_eq!(&format!("{}", Foo::new(-1)), "-1"); - /// assert_eq!(&format!("{}", Foo::new(0)), "0"); - /// assert_eq!(&format!("{:#}", Foo::new(-1)), "-Foo 1"); - /// assert_eq!(&format!("{:0>#8}", Foo::new(-1)), "00-Foo 1"); + /// assert_eq!(format!("{}", Foo::new(2)), "2"); + /// assert_eq!(format!("{}", Foo::new(-1)), "-1"); + /// assert_eq!(format!("{}", Foo::new(0)), "0"); + /// assert_eq!(format!("{:#}", Foo::new(-1)), "-Foo 1"); + /// assert_eq!(format!("{:0>#8}", Foo::new(-1)), "00-Foo 1"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn pad_integral(&mut self, is_nonnegative: bool, prefix: &str, buf: &str) -> Result { @@ -1452,8 +1471,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{Foo:<4}"), "Foo "); - /// assert_eq!(&format!("{Foo:0>4}"), "0Foo"); + /// assert_eq!(format!("{Foo:<4}"), "Foo "); + /// assert_eq!(format!("{Foo:0>4}"), "0Foo"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn pad(&mut self, s: &str) -> Result { @@ -1636,8 +1655,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{Foo}"), "Foo"); - /// assert_eq!(&format!("{Foo:0>8}"), "Foo"); + /// assert_eq!(format!("{Foo}"), "Foo"); + /// assert_eq!(format!("{Foo:0>8}"), "Foo"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn write_str(&mut self, data: &str) -> Result { @@ -1659,8 +1678,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{}", Foo(-1)), "Foo -1"); - /// assert_eq!(&format!("{:0>8}", Foo(2)), "Foo 2"); + /// assert_eq!(format!("{}", Foo(-1)), "Foo -1"); + /// assert_eq!(format!("{:0>8}", Foo(2)), "Foo 2"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn write_fmt(&mut self, fmt: Arguments<'_>) -> Result { @@ -1703,8 +1722,8 @@ impl<'a> Formatter<'a> { /// } /// /// // We set alignment to the right with ">". 
- /// assert_eq!(&format!("{Foo:G>3}"), "GGG"); - /// assert_eq!(&format!("{Foo:t>6}"), "tttttt"); + /// assert_eq!(format!("{Foo:G>3}"), "GGG"); + /// assert_eq!(format!("{Foo:t>6}"), "tttttt"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1738,10 +1757,10 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{Foo:<}"), "left"); - /// assert_eq!(&format!("{Foo:>}"), "right"); - /// assert_eq!(&format!("{Foo:^}"), "center"); - /// assert_eq!(&format!("{Foo}"), "into the void"); + /// assert_eq!(format!("{Foo:<}"), "left"); + /// assert_eq!(format!("{Foo:>}"), "right"); + /// assert_eq!(format!("{Foo:^}"), "center"); + /// assert_eq!(format!("{Foo}"), "into the void"); /// ``` #[must_use] #[stable(feature = "fmt_flags_align", since = "1.28.0")] @@ -1767,7 +1786,7 @@ impl<'a> Formatter<'a> { /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { /// if let Some(width) = formatter.width() { /// // If we received a width, we use it - /// write!(formatter, "{:width$}", &format!("Foo({})", self.0), width = width) + /// write!(formatter, "{:width$}", format!("Foo({})", self.0), width = width) /// } else { /// // Otherwise we do nothing special /// write!(formatter, "Foo({})", self.0) @@ -1775,8 +1794,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:10}", Foo(23)), "Foo(23) "); - /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{:10}", Foo(23)), "Foo(23) "); + /// assert_eq!(format!("{}", Foo(23)), "Foo(23)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1806,8 +1825,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:.4}", Foo(23.2)), "Foo(23.2000)"); - /// assert_eq!(&format!("{}", Foo(23.2)), "Foo(23.20)"); + /// assert_eq!(format!("{:.4}", Foo(23.2)), "Foo(23.2000)"); + /// assert_eq!(format!("{}", Foo(23.2)), "Foo(23.20)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1837,9 +1856,9 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:+}", Foo(23)), "Foo(+23)"); - /// assert_eq!(&format!("{:+}", Foo(-23)), "Foo(-23)"); - /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{:+}", Foo(23)), "Foo(+23)"); + /// assert_eq!(format!("{:+}", Foo(-23)), "Foo(-23)"); + /// assert_eq!(format!("{}", Foo(23)), "Foo(23)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1867,8 +1886,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:-}", Foo(23)), "-Foo(23)"); - /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{:-}", Foo(23)), "-Foo(23)"); + /// assert_eq!(format!("{}", Foo(23)), "Foo(23)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1895,8 +1914,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:#}", Foo(23)), "Foo(23)"); - /// assert_eq!(&format!("{}", Foo(23)), "23"); + /// assert_eq!(format!("{:#}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{}", Foo(23)), "23"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1922,7 +1941,7 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:04}", Foo(23)), "23"); + /// assert_eq!(format!("{:04}", Foo(23)), "23"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] diff --git a/library/core/src/fmt/rt/v1.rs b/library/core/src/fmt/rt/v1.rs index 37202b277..11a50951a 100644 --- a/library/core/src/fmt/rt/v1.rs +++ 
b/library/core/src/fmt/rt/v1.rs @@ -5,7 +5,9 @@ //! these can be statically allocated and are slightly optimized for the runtime #![allow(missing_debug_implementations)] +#[cfg_attr(not(bootstrap), lang = "format_placeholder")] #[derive(Copy, Clone)] +// FIXME: Rename this to Placeholder pub struct Argument { pub position: usize, pub format: FormatSpec, @@ -20,7 +22,22 @@ pub struct FormatSpec { pub width: Count, } +impl Argument { + #[inline(always)] + pub const fn new( + position: usize, + fill: char, + align: Alignment, + flags: u32, + precision: Count, + width: Count, + ) -> Self { + Self { position, format: FormatSpec { fill, align, flags, precision, width } } + } +} + /// Possible alignments that can be requested as part of a formatting directive. +#[cfg_attr(not(bootstrap), lang = "format_alignment")] #[derive(Copy, Clone, PartialEq, Eq)] pub enum Alignment { /// Indication that contents should be left-aligned. @@ -34,6 +51,7 @@ pub enum Alignment { } /// Used by [width](https://doc.rust-lang.org/std/fmt/#width) and [precision](https://doc.rust-lang.org/std/fmt/#precision) specifiers. +#[cfg_attr(not(bootstrap), lang = "format_count")] #[derive(Copy, Clone)] pub enum Count { /// Specified with a literal number, stores the value diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs index c4fb36209..46cbcd435 100644 --- a/library/core/src/future/mod.rs +++ b/library/core/src/future/mod.rs @@ -56,51 +56,6 @@ unsafe impl Send for ResumeTy {} #[unstable(feature = "gen_future", issue = "50547")] unsafe impl Sync for ResumeTy {} -/// Wrap a generator in a future. -/// -/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give -/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`). -// This is `const` to avoid extra errors after we recover from `const async fn` -#[doc(hidden)] -#[unstable(feature = "gen_future", issue = "50547")] -#[rustc_const_unstable(feature = "gen_future", issue = "50547")] -#[inline] -pub const fn from_generator(gen: T) -> impl Future -where - T: crate::ops::Generator, -{ - use crate::{ - ops::{Generator, GeneratorState}, - pin::Pin, - task::Poll, - }; - - #[rustc_diagnostic_item = "gen_future"] - struct GenFuture>(T); - - // We rely on the fact that async/await futures are immovable in order to create - // self-referential borrows in the underlying generator. - impl> !Unpin for GenFuture {} - - impl> Future for GenFuture { - type Output = T::Return; - #[track_caller] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // SAFETY: Safe because we're !Unpin + !Drop, and this is just a field projection. - let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) }; - - // Resume the generator, turning the `&mut Context` into a `NonNull` raw pointer. The - // `.await` lowering will safely cast that back to a `&mut Context`. - match gen.resume(ResumeTy(NonNull::from(cx).cast::>())) { - GeneratorState::Yielded(()) => Poll::Pending, - GeneratorState::Complete(x) => Poll::Ready(x), - } - } - } - - GenFuture(gen) -} - #[lang = "get_context"] #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs index 5a76e8669..ee13dae60 100644 --- a/library/core/src/hint.rs +++ b/library/core/src/hint.rs @@ -216,7 +216,8 @@ pub fn spin_loop() { /// /// Note however, that `black_box` is only (and can only be) provided on a "best-effort" basis. 
The /// extent to which it can block optimisations may vary depending upon the platform and code-gen -/// backend used. Programs cannot rely on `black_box` for *correctness* in any way. +/// backend used. Programs cannot rely on `black_box` for *correctness*, beyond it behaving as the +/// identity function. /// /// [`std::convert::identity`]: crate::convert::identity /// diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index a315a28fb..18a90599c 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -58,7 +58,6 @@ use crate::marker::DiscriminantKind; use crate::marker::Tuple; use crate::mem; -#[cfg(not(bootstrap))] pub mod mir; // These imports are used for simplifying intra-doc links @@ -963,7 +962,6 @@ extern "rust-intrinsic" { /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")] #[rustc_safe_intrinsic] - #[cfg(not(bootstrap))] pub fn assert_mem_uninitialized_valid(); /// Gets a reference to a static `Location` indicating where it was called. @@ -2095,6 +2093,10 @@ extern "rust-intrinsic" { /// Above some backend-decided threshold this will emit calls to `memcmp`, /// like slice equality does, instead of causing massive code size. /// + /// Since this works by comparing the underlying bytes, the actual `T` is + /// not particularly important. It will be used for its size and alignment, + /// but any validity restrictions will be ignored, not enforced. + /// /// # Safety /// /// It's UB to call this if any of the *bytes* in `*a` or `*b` are uninitialized or carry a diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs index e3157b669..72db1d87c 100644 --- a/library/core/src/intrinsics/mir.rs +++ b/library/core/src/intrinsics/mir.rs @@ -42,7 +42,7 @@ //! another function. The `dialect` and `phase` parameters indicate which [version of MIR][dialect //! docs] you are inserting here. Generally you'll want to use `#![custom_mir(dialect = "built")]` //! if you want your MIR to be modified by the full MIR pipeline, or `#![custom_mir(dialect = -//! "runtime", phase = "optimized")] if you don't. +//! "runtime", phase = "optimized")]` if you don't. //! //! [dialect docs]: //! https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/mir/enum.MirPhase.html @@ -60,8 +60,7 @@ //! //! # Examples //! -#![cfg_attr(bootstrap, doc = "```rust,compile_fail")] -#![cfg_attr(not(bootstrap), doc = "```rust")] +//! ```rust //! #![feature(core_intrinsics, custom_mir)] //! //! extern crate core; @@ -211,13 +210,16 @@ //! //! #### Statements //! - Assign statements work via normal Rust assignment. -//! - [`Retag`] statements have an associated function. +//! - [`Retag`], [`StorageLive`], [`StorageDead`], [`Deinit`] statements have an associated function. //! //! #### Rvalues //! //! - Operands implicitly convert to `Use` rvalues. //! - `&`, `&mut`, `addr_of!`, and `addr_of_mut!` all work to create their associated rvalue. -//! - [`Discriminant`] has an associated function. +//! - [`Discriminant`] and [`Len`] have associated functions. +//! - Unary and binary operations use their normal Rust syntax - `a * b`, `!c`, etc. +//! - Checked binary operations are represented by wrapping the associated binop in [`Checked`]. +//! - Array repetition syntax (`[foo; 10]`) creates the associated rvalue. //! //! #### Terminators //! 
@@ -261,6 +263,9 @@ define!("mir_drop_and_replace", fn DropAndReplace(place: T, value: T, goto: B define!("mir_call", fn Call(place: T, goto: BasicBlock, call: T)); define!("mir_storage_live", fn StorageLive(local: T)); define!("mir_storage_dead", fn StorageDead(local: T)); +define!("mir_deinit", fn Deinit(place: T)); +define!("mir_checked", fn Checked(binop: T) -> (T, bool)); +define!("mir_len", fn Len(place: T) -> usize); define!("mir_retag", fn Retag(place: T)); define!("mir_move", fn Move(place: T) -> T); define!("mir_static", fn Static(s: T) -> &'static T); @@ -294,8 +299,7 @@ define!( /// /// # Examples /// - #[cfg_attr(bootstrap, doc = "```rust,compile_fail")] - #[cfg_attr(not(bootstrap), doc = "```rust")] + /// ```rust /// #![feature(custom_mir, core_intrinsics)] /// /// extern crate core; diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs index 5e4211058..13719c727 100644 --- a/library/core/src/iter/adapters/array_chunks.rs +++ b/library/core/src/iter/adapters/array_chunks.rs @@ -1,7 +1,5 @@ use crate::array; -use crate::const_closure::ConstFnMutClosure; use crate::iter::{ByRefSized, FusedIterator, Iterator, TrustedRandomAccessNoCoerce}; -use crate::mem::{self, MaybeUninit}; use crate::ops::{ControlFlow, NeverShortCircuit, Try}; /// An iterator over `N` elements of the iterator at a time. @@ -189,13 +187,12 @@ where I: Iterator, { #[inline] - default fn fold(mut self, init: B, mut f: F) -> B + default fn fold(mut self, init: B, f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { - let fold = ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp); - self.try_fold(init, fold).0 + self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0 } } @@ -214,19 +211,14 @@ where let mut i = 0; // Use a while loop because (0..len).step_by(N) doesn't optimize well. while inner_len - i >= N { - let mut chunk = MaybeUninit::uninit_array(); - let mut guard = array::Guard { array_mut: &mut chunk, initialized: 0 }; - while guard.initialized < N { + let chunk = crate::array::from_fn(|local| { // SAFETY: The method consumes the iterator and the loop condition ensures that // all accesses are in bounds and only happen once. unsafe { - let idx = i + guard.initialized; - guard.push_unchecked(self.iter.__iterator_get_unchecked(idx)); + let idx = i + local; + self.iter.__iterator_get_unchecked(idx) } - } - mem::forget(guard); - // SAFETY: The loop above initialized all elements - let chunk = unsafe { MaybeUninit::array_assume_init(chunk) }; + }); accum = f(accum, chunk); i += N; } diff --git a/library/core/src/iter/adapters/by_ref_sized.rs b/library/core/src/iter/adapters/by_ref_sized.rs index 1945e402f..477e7117c 100644 --- a/library/core/src/iter/adapters/by_ref_sized.rs +++ b/library/core/src/iter/adapters/by_ref_sized.rs @@ -1,7 +1,4 @@ -use crate::{ - const_closure::ConstFnMutClosure, - ops::{NeverShortCircuit, Try}, -}; +use crate::ops::{NeverShortCircuit, Try}; /// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics. /// @@ -39,13 +36,12 @@ impl Iterator for ByRefSized<'_, I> { } #[inline] - fn fold(self, init: B, mut f: F) -> B + fn fold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `fold` needs ownership, so this can't forward directly. 
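`NeverShortCircuit::wrap_mut_2`, used in the hunks above, is internal to `core`; the same fold-in-terms-of-`try_fold` idea can be sketched on stable with `ControlFlow` and `Infallible` (the function below is illustrative only):

```rust
use std::convert::Infallible;
use std::ops::ControlFlow;

// Express `fold` via `try_fold` using a "try" type that can never
// short-circuit, mirroring the role `NeverShortCircuit` plays here.
fn fold_via_try_fold<I, B, F>(mut iter: I, init: B, mut f: F) -> B
where
    I: Iterator,
    F: FnMut(B, I::Item) -> B,
{
    match iter.try_fold(init, |acc, x| ControlFlow::<Infallible, B>::Continue(f(acc, x))) {
        ControlFlow::Continue(acc) => acc,
        // The break value is uninhabited, so this arm can never run.
        ControlFlow::Break(never) => match never {},
    }
}

fn main() {
    assert_eq!(fold_via_try_fold([1, 2, 3].into_iter(), 0, |a, b| a + b), 6);
}
```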
- I::try_fold(self.0, init, ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp)) - .0 + I::try_fold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 } #[inline] @@ -76,17 +72,12 @@ impl DoubleEndedIterator for ByRefSized<'_, I> { } #[inline] - fn rfold(self, init: B, mut f: F) -> B + fn rfold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `rfold` needs ownership, so this can't forward directly. - I::try_rfold( - self.0, - init, - ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp), - ) - .0 + I::try_rfold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 } #[inline] diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs index aba24a79d..914ff86c1 100644 --- a/library/core/src/iter/adapters/cloned.rs +++ b/library/core/src/iter/adapters/cloned.rs @@ -1,7 +1,7 @@ use crate::iter::adapters::{ zip::try_get_unchecked, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; -use crate::iter::{FusedIterator, TrustedLen}; +use crate::iter::{FusedIterator, TrustedLen, UncheckedIterator}; use crate::ops::Try; /// An iterator that clones the elements of an underlying iterator. @@ -140,3 +140,16 @@ where T: Clone, { } + +impl<'a, I, T: 'a> UncheckedIterator for Cloned +where + I: UncheckedIterator, + T: Clone, +{ + unsafe fn next_unchecked(&mut self) -> T { + // SAFETY: `Cloned` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. + let item = unsafe { self.it.next_unchecked() }; + item.clone() + } +} diff --git a/library/core/src/iter/adapters/filter_map.rs b/library/core/src/iter/adapters/filter_map.rs index e0d665c9e..6bdf53f7f 100644 --- a/library/core/src/iter/adapters/filter_map.rs +++ b/library/core/src/iter/adapters/filter_map.rs @@ -99,7 +99,7 @@ where ) -> impl FnMut((), T) -> ControlFlow + '_ { move |(), x| match f(x) { Some(x) => ControlFlow::Break(x), - None => ControlFlow::CONTINUE, + None => ControlFlow::Continue(()), } } diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs index 307016c26..b040a0ea9 100644 --- a/library/core/src/iter/adapters/flatten.rs +++ b/library/core/src/iter/adapters/flatten.rs @@ -539,7 +539,7 @@ where #[rustc_inherit_overflow_checks] fn advance(n: usize, iter: &mut U) -> ControlFlow<(), usize> { match iter.advance_by(n) { - Ok(()) => ControlFlow::BREAK, + Ok(()) => ControlFlow::Break(()), Err(advanced) => ControlFlow::Continue(n - advanced), } } @@ -629,7 +629,7 @@ where #[rustc_inherit_overflow_checks] fn advance(n: usize, iter: &mut U) -> ControlFlow<(), usize> { match iter.advance_back_by(n) { - Ok(()) => ControlFlow::BREAK, + Ok(()) => ControlFlow::Break(()), Err(advanced) => ControlFlow::Continue(n - advanced), } } diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs index 9e25dbe46..31d02a4da 100644 --- a/library/core/src/iter/adapters/map.rs +++ b/library/core/src/iter/adapters/map.rs @@ -2,7 +2,7 @@ use crate::fmt; use crate::iter::adapters::{ zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; -use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen}; +use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen, UncheckedIterator}; use crate::ops::Try; /// An iterator that maps the values of `iter` with `f`. 
@@ -187,6 +187,19 @@ where { } +impl UncheckedIterator for Map +where + I: UncheckedIterator, + F: FnMut(I::Item) -> B, +{ + unsafe fn next_unchecked(&mut self) -> B { + // SAFETY: `Map` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. + let item = unsafe { self.iter.next_unchecked() }; + (self.f)(item) + } +} + #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] unsafe impl TrustedRandomAccess for Map where I: TrustedRandomAccess {} diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs index 8153c8cfe..b6b0c90cb 100644 --- a/library/core/src/iter/adapters/zip.rs +++ b/library/core/src/iter/adapters/zip.rs @@ -1,7 +1,7 @@ use crate::cmp; use crate::fmt::{self, Debug}; use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator}; -use crate::iter::{InPlaceIterable, SourceIter, TrustedLen}; +use crate::iter::{InPlaceIterable, SourceIter, TrustedLen, UncheckedIterator}; /// An iterator that iterates two other iterators simultaneously. /// @@ -417,6 +417,13 @@ where { } +impl UncheckedIterator for Zip +where + A: UncheckedIterator, + B: UncheckedIterator, +{ +} + // Arbitrarily selects the left side of the zip iteration as extractable "source" // it would require negative trait bounds to be able to try both #[unstable(issue = "none", feature = "inplace_iteration")] diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs index bb35d50b4..ae00232c1 100644 --- a/library/core/src/iter/mod.rs +++ b/library/core/src/iter/mod.rs @@ -278,6 +278,7 @@ //! //! ``` //! # #![allow(unused_must_use)] +//! # #![cfg_attr(not(bootstrap), allow(map_unit_fn))] //! let v = vec![1, 2, 3, 4, 5]; //! v.iter().map(|x| println!("{x}")); //! ``` @@ -362,15 +363,13 @@ macro_rules! 
impl_fold_via_try_fold { }; (@internal $fold:ident -> $try_fold:ident) => { #[inline] - fn $fold(mut self, init: AAA, mut fold: FFF) -> AAA + fn $fold(mut self, init: AAA, fold: FFF) -> AAA where FFF: FnMut(AAA, Self::Item) -> AAA, { - use crate::const_closure::ConstFnMutClosure; use crate::ops::NeverShortCircuit; - let fold = ConstFnMutClosure::new(&mut fold, NeverShortCircuit::wrap_mut_2_imp); - self.$try_fold(init, fold).0 + self.$try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0 } }; } @@ -452,6 +451,7 @@ pub use self::adapters::{ pub use self::adapters::{Intersperse, IntersperseWith}; pub(crate) use self::adapters::try_process; +pub(crate) use self::traits::UncheckedIterator; mod adapters; mod range; diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs index b5739f2f3..78e27d730 100644 --- a/library/core/src/iter/range.rs +++ b/library/core/src/iter/range.rs @@ -1,4 +1,3 @@ -use crate::char; use crate::convert::TryFrom; use crate::mem; use crate::ops::{self, Try}; diff --git a/library/core/src/iter/traits/double_ended.rs b/library/core/src/iter/traits/double_ended.rs index bdf94c792..ed23873cd 100644 --- a/library/core/src/iter/traits/double_ended.rs +++ b/library/core/src/iter/traits/double_ended.rs @@ -352,7 +352,7 @@ pub trait DoubleEndedIterator: Iterator { #[inline] fn check(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow { move |(), x| { - if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE } + if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) } } } diff --git a/library/core/src/iter/traits/exact_size.rs b/library/core/src/iter/traits/exact_size.rs index 1757e37ec..908830d8a 100644 --- a/library/core/src/iter/traits/exact_size.rs +++ b/library/core/src/iter/traits/exact_size.rs @@ -21,6 +21,16 @@ /// /// [`len`]: ExactSizeIterator::len /// +/// # When *shouldn't* an adapter be `ExactSizeIterator`? +/// +/// If an adapter makes an iterator *longer*, then it's usually incorrect for +/// that adapter to implement `ExactSizeIterator`. The inner exact-sized +/// iterator might already be `usize::MAX`-long, and thus the length of the +/// longer adapted iterator would no longer be exactly representable in `usize`. +/// +/// This is why [`Chain`](crate::iter::Chain) isn't `ExactSizeIterator`, +/// even when `A` and `B` are both `ExactSizeIterator`. +/// /// # Examples /// /// Basic usage: diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index a4a665d48..b8e7d0a68 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -69,6 +69,7 @@ fn _assert_is_object_safe(_: &dyn Iterator) {} #[doc(notable_trait)] #[rustc_diagnostic_item = "Iterator"] #[must_use = "iterators are lazy and do nothing unless consumed"] +#[cfg_attr(not(bootstrap), const_trait)] pub trait Iterator { /// The type of the elements being iterated over. 
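The new `ExactSizeIterator` guidance above is directly observable: `Chain` only exposes `size_hint`, never `len`. A quick illustration (not from this patch):

```rust
fn main() {
    let a = 0..3;   // ExactSizeIterator
    let b = 10..13; // ExactSizeIterator
    let chained = a.chain(b);

    // `chained.len()` would not compile: Chain is deliberately not
    // ExactSizeIterator, because the combined length could exceed usize::MAX.
    assert_eq!(chained.size_hint(), (6, Some(6)));
    assert_eq!(chained.count(), 6);
}
```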
#[rustc_diagnostic_item = "IteratorItem"] @@ -141,6 +142,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_next_chunk", reason = "recently added", issue = "98326")] + #[rustc_do_not_const_check] fn next_chunk( &mut self, ) -> Result<[Self::Item; N], array::IntoIter> @@ -218,6 +220,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn size_hint(&self) -> (usize, Option) { (0, None) } @@ -255,6 +258,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn count(self) -> usize where Self: Sized, @@ -285,6 +289,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn last(self) -> Option where Self: Sized, @@ -331,6 +336,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")] + #[rustc_do_not_const_check] fn advance_by(&mut self, n: usize) -> Result<(), usize> { for i in 0..n { self.next().ok_or(i)?; @@ -379,6 +385,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn nth(&mut self, n: usize) -> Option { self.advance_by(n).ok()?; self.next() @@ -431,6 +438,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_step_by", since = "1.28.0")] + #[rustc_do_not_const_check] fn step_by(self, step: usize) -> StepBy where Self: Sized, @@ -502,6 +510,7 @@ pub trait Iterator { /// [`OsStr`]: ../../std/ffi/struct.OsStr.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn chain(self, other: U) -> Chain where Self: Sized, @@ -620,6 +629,7 @@ pub trait Iterator { /// [`zip`]: crate::iter::zip #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn zip(self, other: U) -> Zip where Self: Sized, @@ -662,6 +672,7 @@ pub trait Iterator { /// [`intersperse_with`]: Iterator::intersperse_with #[inline] #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")] + #[rustc_do_not_const_check] fn intersperse(self, separator: Self::Item) -> Intersperse where Self: Sized, @@ -720,6 +731,7 @@ pub trait Iterator { /// [`intersperse`]: Iterator::intersperse #[inline] #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")] + #[rustc_do_not_const_check] fn intersperse_with(self, separator: G) -> IntersperseWith where Self: Sized, @@ -777,8 +789,10 @@ pub trait Iterator { /// println!("{x}"); /// } /// ``` + #[rustc_diagnostic_item = "IteratorMap"] #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn map(self, f: F) -> Map where Self: Sized, @@ -824,6 +838,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_for_each", since = "1.21.0")] + #[rustc_do_not_const_check] fn for_each(self, f: F) where Self: Sized, @@ -899,6 +914,7 @@ pub trait Iterator { /// Note that `iter.filter(f).next()` is equivalent to `iter.find(f)`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn filter
<P>
(self, predicate: P) -> Filter where Self: Sized, @@ -944,6 +960,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn filter_map(self, f: F) -> FilterMap where Self: Sized, @@ -990,6 +1007,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn enumerate(self) -> Enumerate where Self: Sized, @@ -1061,6 +1079,7 @@ pub trait Iterator { /// [`next`]: Iterator::next #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn peekable(self) -> Peekable where Self: Sized, @@ -1126,6 +1145,7 @@ pub trait Iterator { #[inline] #[doc(alias = "drop_while")] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn skip_while
<P>
(self, predicate: P) -> SkipWhile where Self: Sized, @@ -1207,6 +1227,7 @@ pub trait Iterator { /// the iteration should stop, but wasn't placed back into the iterator. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn take_while
<P>
(self, predicate: P) -> TakeWhile where Self: Sized, @@ -1295,6 +1316,7 @@ pub trait Iterator { /// [`fuse`]: Iterator::fuse #[inline] #[stable(feature = "iter_map_while", since = "1.57.0")] + #[rustc_do_not_const_check] fn map_while(self, predicate: P) -> MapWhile where Self: Sized, @@ -1326,6 +1348,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn skip(self, n: usize) -> Skip where Self: Sized, @@ -1379,6 +1402,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn take(self, n: usize) -> Take where Self: Sized, @@ -1428,6 +1452,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn scan(self, initial_state: St, f: F) -> Scan where Self: Sized, @@ -1468,6 +1493,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn flat_map(self, f: F) -> FlatMap where Self: Sized, @@ -1552,6 +1578,7 @@ pub trait Iterator { /// [`flat_map()`]: Iterator::flat_map #[inline] #[stable(feature = "iterator_flatten", since = "1.29.0")] + #[rustc_do_not_const_check] fn flatten(self) -> Flatten where Self: Sized, @@ -1620,6 +1647,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn fuse(self) -> Fuse where Self: Sized, @@ -1704,6 +1732,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn inspect(self, f: F) -> Inspect where Self: Sized, @@ -1734,6 +1763,7 @@ pub trait Iterator { /// assert_eq!(of_rust, vec!["of", "Rust"]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn by_ref(&mut self) -> &mut Self where Self: Sized, @@ -1853,6 +1883,7 @@ pub trait Iterator { #[stable(feature = "rust1", since = "1.0.0")] #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"] #[cfg_attr(not(test), rustc_diagnostic_item = "iterator_collect_fn")] + #[rustc_do_not_const_check] fn collect>(self) -> B where Self: Sized, @@ -1931,6 +1962,7 @@ pub trait Iterator { /// [`collect`]: Iterator::collect #[inline] #[unstable(feature = "iterator_try_collect", issue = "94047")] + #[rustc_do_not_const_check] fn try_collect(&mut self) -> ChangeOutputType where Self: Sized, @@ -2004,6 +2036,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_collect_into", reason = "new API", issue = "94780")] + #[rustc_do_not_const_check] fn collect_into>(self, collection: &mut E) -> &mut E where Self: Sized, @@ -2038,6 +2071,7 @@ pub trait Iterator { /// assert_eq!(odd, vec![1, 3]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn partition(self, f: F) -> (B, B) where Self: Sized, @@ -2100,6 +2134,7 @@ pub trait Iterator { /// assert!(a[i..].iter().all(|&n| n % 2 == 1)); // odds /// ``` #[unstable(feature = "iter_partition_in_place", reason = "new API", issue = "62543")] + #[rustc_do_not_const_check] fn partition_in_place<'a, T: 'a, P>(mut self, ref mut predicate: P) -> usize where Self: Sized + DoubleEndedIterator, @@ -2157,6 +2192,7 @@ pub trait Iterator { /// assert!(!"IntoIterator".chars().is_partitioned(char::is_uppercase)); /// ``` #[unstable(feature = "iter_is_partitioned", reason = "new API", issue = "62544")] + #[rustc_do_not_const_check] fn is_partitioned
<P>
(mut self, mut predicate: P) -> bool where Self: Sized, @@ -2251,6 +2287,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] + #[rustc_do_not_const_check] fn try_fold(&mut self, init: B, mut f: F) -> R where Self: Sized, @@ -2309,6 +2346,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] + #[rustc_do_not_const_check] fn try_for_each(&mut self, f: F) -> R where Self: Sized, @@ -2428,6 +2466,7 @@ pub trait Iterator { #[doc(alias = "inject", alias = "foldl")] #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn fold(mut self, init: B, mut f: F) -> B where Self: Sized, @@ -2465,6 +2504,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_fold_self", since = "1.51.0")] + #[rustc_do_not_const_check] fn reduce(mut self, f: F) -> Option where Self: Sized, @@ -2536,6 +2576,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iterator_try_reduce", reason = "new API", issue = "87053")] + #[rustc_do_not_const_check] fn try_reduce(&mut self, f: F) -> ChangeOutputType> where Self: Sized, @@ -2593,6 +2634,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn all(&mut self, f: F) -> bool where Self: Sized, @@ -2601,10 +2643,10 @@ pub trait Iterator { #[inline] fn check(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> { move |(), x| { - if f(x) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } + if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) } } } - self.try_fold((), check(f)) == ControlFlow::CONTINUE + self.try_fold((), check(f)) == ControlFlow::Continue(()) } /// Tests if any element of the iterator matches a predicate. @@ -2646,6 +2688,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn any(&mut self, f: F) -> bool where Self: Sized, @@ -2654,11 +2697,11 @@ pub trait Iterator { #[inline] fn check(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> { move |(), x| { - if f(x) { ControlFlow::BREAK } else { ControlFlow::CONTINUE } + if f(x) { ControlFlow::Break(()) } else { ControlFlow::Continue(()) } } } - self.try_fold((), check(f)) == ControlFlow::BREAK + self.try_fold((), check(f)) == ControlFlow::Break(()) } /// Searches for an element of an iterator that satisfies a predicate. @@ -2709,6 +2752,7 @@ pub trait Iterator { /// Note that `iter.find(f)` is equivalent to `iter.filter(f).next()`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn find
<P>
(&mut self, predicate: P) -> Option where Self: Sized, @@ -2717,7 +2761,7 @@ pub trait Iterator { #[inline] fn check(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow { move |(), x| { - if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE } + if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) } } } @@ -2740,6 +2784,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_find_map", since = "1.30.0")] + #[rustc_do_not_const_check] fn find_map(&mut self, f: F) -> Option where Self: Sized, @@ -2749,7 +2794,7 @@ pub trait Iterator { fn check(mut f: impl FnMut(T) -> Option) -> impl FnMut((), T) -> ControlFlow { move |(), x| match f(x) { Some(x) => ControlFlow::Break(x), - None => ControlFlow::CONTINUE, + None => ControlFlow::Continue(()), } } @@ -2796,6 +2841,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "try_find", reason = "new API", issue = "63178")] + #[rustc_do_not_const_check] fn try_find(&mut self, f: F) -> ChangeOutputType> where Self: Sized, @@ -2812,7 +2858,7 @@ pub trait Iterator { R: Residual>, { move |(), x| match f(&x).branch() { - ControlFlow::Continue(false) => ControlFlow::CONTINUE, + ControlFlow::Continue(false) => ControlFlow::Continue(()), ControlFlow::Continue(true) => ControlFlow::Break(Try::from_output(Some(x))), ControlFlow::Break(r) => ControlFlow::Break(FromResidual::from_residual(r)), } @@ -2878,6 +2924,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn position
<P>
(&mut self, predicate: P) -> Option where Self: Sized, @@ -2935,6 +2982,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn rposition
<P>
(&mut self, predicate: P) -> Option where P: FnMut(Self::Item) -> bool, @@ -2986,6 +3034,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn max(self) -> Option where Self: Sized, @@ -3024,6 +3073,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn min(self) -> Option where Self: Sized, @@ -3046,6 +3096,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + #[rustc_do_not_const_check] fn max_by_key(self, f: F) -> Option where Self: Sized, @@ -3079,6 +3130,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_max_by", since = "1.15.0")] + #[rustc_do_not_const_check] fn max_by(self, compare: F) -> Option where Self: Sized, @@ -3106,6 +3158,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + #[rustc_do_not_const_check] fn min_by_key(self, f: F) -> Option where Self: Sized, @@ -3139,6 +3192,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_min_by", since = "1.15.0")] + #[rustc_do_not_const_check] fn min_by(self, compare: F) -> Option where Self: Sized, @@ -3176,6 +3230,7 @@ pub trait Iterator { #[inline] #[doc(alias = "reverse")] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn rev(self) -> Rev where Self: Sized + DoubleEndedIterator, @@ -3214,6 +3269,7 @@ pub trait Iterator { /// assert_eq!(z, [3, 6]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn unzip(self) -> (FromA, FromB) where FromA: Default + Extend, @@ -3246,6 +3302,7 @@ pub trait Iterator { /// assert_eq!(v_map, vec![1, 2, 3]); /// ``` #[stable(feature = "iter_copied", since = "1.36.0")] + #[rustc_do_not_const_check] fn copied<'a, T: 'a>(self) -> Copied where Self: Sized + Iterator, @@ -3293,6 +3350,7 @@ pub trait Iterator { /// assert_eq!(&[vec![23]], &faster[..]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn cloned<'a, T: 'a>(self) -> Cloned where Self: Sized + Iterator, @@ -3327,6 +3385,7 @@ pub trait Iterator { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[rustc_do_not_const_check] fn cycle(self) -> Cycle where Self: Sized + Clone, @@ -3370,6 +3429,7 @@ pub trait Iterator { /// ``` #[track_caller] #[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")] + #[rustc_do_not_const_check] fn array_chunks(self) -> ArrayChunks where Self: Sized, @@ -3400,6 +3460,7 @@ pub trait Iterator { /// assert_eq!(sum, 6); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] + #[rustc_do_not_const_check] fn sum(self) -> S where Self: Sized, @@ -3429,6 +3490,7 @@ pub trait Iterator { /// assert_eq!(factorial(5), 120); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] + #[rustc_do_not_const_check] fn product
<P>
(self) -> P where Self: Sized, @@ -3450,6 +3512,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().cmp([1].iter()), Ordering::Greater); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn cmp(self, other: I) -> Ordering where I: IntoIterator, @@ -3479,6 +3542,7 @@ pub trait Iterator { /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn cmp_by(self, other: I, cmp: F) -> Ordering where Self: Sized, @@ -3491,7 +3555,7 @@ pub trait Iterator { F: FnMut(X, Y) -> Ordering, { move |x, y| match cmp(x, y) { - Ordering::Equal => ControlFlow::CONTINUE, + Ordering::Equal => ControlFlow::Continue(()), non_eq => ControlFlow::Break(non_eq), } } @@ -3502,8 +3566,10 @@ pub trait Iterator { } } - /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those - /// of another. + /// [Lexicographically](Ord#lexicographical-comparison) compares the [`PartialOrd`] elements of + /// this [`Iterator`] with those of another. The comparison works like short-circuit + /// evaluation, returning a result without comparing the remaining elements. + /// As soon as an order can be determined, the evaluation stops and a result is returned. /// /// # Examples /// @@ -3513,10 +3579,27 @@ pub trait Iterator { /// assert_eq!([1.].iter().partial_cmp([1.].iter()), Some(Ordering::Equal)); /// assert_eq!([1.].iter().partial_cmp([1., 2.].iter()), Some(Ordering::Less)); /// assert_eq!([1., 2.].iter().partial_cmp([1.].iter()), Some(Ordering::Greater)); + /// ``` /// + /// For floating-point numbers, NaN does not have a total order and will result + /// in `None` when compared: + /// + /// ``` /// assert_eq!([f64::NAN].iter().partial_cmp([1.].iter()), None); /// ``` + /// + /// The results are determined by the order of evaluation. 
+ /// + /// ``` + /// use std::cmp::Ordering; + /// + /// assert_eq!([1.0, f64::NAN].iter().partial_cmp([2.0, f64::NAN].iter()), Some(Ordering::Less)); + /// assert_eq!([2.0, f64::NAN].iter().partial_cmp([1.0, f64::NAN].iter()), Some(Ordering::Greater)); + /// assert_eq!([f64::NAN, 1.0].iter().partial_cmp([f64::NAN, 2.0].iter()), None); + /// ``` + /// #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn partial_cmp(self, other: I) -> Option where I: IntoIterator, @@ -3555,6 +3638,7 @@ pub trait Iterator { /// ); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn partial_cmp_by(self, other: I, partial_cmp: F) -> Option where Self: Sized, @@ -3567,7 +3651,7 @@ pub trait Iterator { F: FnMut(X, Y) -> Option, { move |x, y| match partial_cmp(x, y) { - Some(Ordering::Equal) => ControlFlow::CONTINUE, + Some(Ordering::Equal) => ControlFlow::Continue(()), non_eq => ControlFlow::Break(non_eq), } } @@ -3588,6 +3672,7 @@ pub trait Iterator { /// assert_eq!([1].iter().eq([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn eq(self, other: I) -> bool where I: IntoIterator, @@ -3613,6 +3698,7 @@ pub trait Iterator { /// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y)); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn eq_by(self, other: I, eq: F) -> bool where Self: Sized, @@ -3625,7 +3711,7 @@ pub trait Iterator { F: FnMut(X, Y) -> bool, { move |x, y| { - if eq(x, y) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } + if eq(x, y) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) } } } @@ -3645,6 +3731,7 @@ pub trait Iterator { /// assert_eq!([1].iter().ne([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn ne(self, other: I) -> bool where I: IntoIterator, @@ -3666,6 +3753,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().lt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn lt(self, other: I) -> bool where I: IntoIterator, @@ -3687,6 +3775,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().le([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn le(self, other: I) -> bool where I: IntoIterator, @@ -3708,6 +3797,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().gt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn gt(self, other: I) -> bool where I: IntoIterator, @@ -3729,6 +3819,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().ge([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn ge(self, other: I) -> bool where I: IntoIterator, @@ -3760,6 +3851,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted(self) -> bool where Self: Sized, @@ -3788,6 +3880,7 @@ pub trait Iterator { /// /// [`is_sorted`]: Iterator::is_sorted #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted_by(mut self, compare: F) -> bool where Self: Sized, @@ -3834,6 +3927,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted_by_key(self, f: F) 
-> bool where Self: Sized, @@ -3849,6 +3943,7 @@ pub trait Iterator { #[inline] #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] + #[rustc_do_not_const_check] unsafe fn __iterator_get_unchecked(&mut self, _idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, @@ -3859,7 +3954,7 @@ pub trait Iterator { /// Compares two iterators element-wise using the given function. /// -/// If `ControlFlow::CONTINUE` is returned from the function, the comparison moves on to the next +/// If `ControlFlow::Continue(())` is returned from the function, the comparison moves on to the next /// elements of both iterators. Returning `ControlFlow::Break(x)` short-circuits the iteration and /// returns `ControlFlow::Break(x)`. If one of the iterators runs out of elements, /// `ControlFlow::Continue(ord)` is returned where `ord` is the result of comparing the lengths of diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs index da7537457..af0284823 100644 --- a/library/core/src/iter/traits/marker.rs +++ b/library/core/src/iter/traits/marker.rs @@ -31,6 +31,17 @@ impl FusedIterator for &mut I {} /// The iterator must produce exactly the number of elements it reported /// or diverge before reaching the end. /// +/// # When *shouldn't* an adapter be `TrustedLen`? +/// +/// If an adapter makes an iterator *shorter* by a given amount, then it's +/// usually incorrect for that adapter to implement `TrustedLen`. The inner +/// iterator might return more than `usize::MAX` items, but there's no way to +/// know what `k` elements less than that will be, since the `size_hint` from +/// the inner iterator has already saturated and lost that information. +/// +/// This is why [`Skip`](crate::iter::Skip) isn't `TrustedLen`, even when +/// `I` implements `TrustedLen`. +/// /// # Safety /// /// This trait must only be implemented when the contract is upheld. Consumers diff --git a/library/core/src/iter/traits/mod.rs b/library/core/src/iter/traits/mod.rs index ed0fb634d..41ea29e6a 100644 --- a/library/core/src/iter/traits/mod.rs +++ b/library/core/src/iter/traits/mod.rs @@ -4,6 +4,7 @@ mod double_ended; mod exact_size; mod iterator; mod marker; +mod unchecked_iterator; #[stable(feature = "rust1", since = "1.0.0")] pub use self::{ @@ -19,3 +20,5 @@ pub use self::{ pub use self::marker::InPlaceIterable; #[unstable(feature = "trusted_step", issue = "85731")] pub use self::marker::TrustedStep; + +pub(crate) use self::unchecked_iterator::UncheckedIterator; diff --git a/library/core/src/iter/traits/unchecked_iterator.rs b/library/core/src/iter/traits/unchecked_iterator.rs new file mode 100644 index 000000000..ae4bfcad4 --- /dev/null +++ b/library/core/src/iter/traits/unchecked_iterator.rs @@ -0,0 +1,36 @@ +use crate::iter::TrustedLen; + +/// [`TrustedLen`] cannot have methods, so this allows augmenting it. +/// +/// It currently requires `TrustedLen` because it's unclear whether it's +/// reasonably possible to depend on the `size_hint` of anything else. +pub(crate) trait UncheckedIterator: TrustedLen { + /// Gets the next item from a non-empty iterator. + /// + /// Because there's always a value to return, that means it can return + /// the `Item` type directly, without wrapping it in an `Option`. + /// + /// # Safety + /// + /// This can only be called if `size_hint().0 != 0`, guaranteeing that + /// there's at least one item available. + /// + /// Otherwise (aka when `size_hint().1 == Some(0)`), this is UB. 
+ /// + /// # Note to Implementers + /// + /// This has a default implementation using [`Option::unwrap_unchecked`]. + /// That's probably sufficient if your `next` *always* returns `Some`, + /// such as for infinite iterators. In more complicated situations, however, + /// sometimes there can still be `insertvalue`/`assume`/`extractvalue` + /// instructions remaining in the IR from the `Option` handling, at which + /// point you might want to implement this manually instead. + #[unstable(feature = "trusted_len_next_unchecked", issue = "37572")] + #[inline] + unsafe fn next_unchecked(&mut self) -> Self::Item { + let opt = self.next(); + // SAFETY: The caller promised that we're not empty, and + // `Self: TrustedLen` so we can actually trust the `size_hint`. + unsafe { opt.unwrap_unchecked() } + } +} diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 8790649ab..24bad799f 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -95,6 +95,7 @@ #![warn(missing_docs)] #![allow(explicit_outlives_requirements)] #![allow(incomplete_features)] +#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))] // // Library features: #![feature(const_align_offset)] @@ -123,6 +124,8 @@ #![feature(const_inherent_unchecked_arith)] #![feature(const_int_unchecked_arith)] #![feature(const_intrinsic_forget)] +#![feature(const_ipv4)] +#![feature(const_ipv6)] #![feature(const_likely)] #![feature(const_maybe_uninit_uninit_array)] #![feature(const_maybe_uninit_as_mut_ptr)] @@ -133,6 +136,7 @@ #![feature(const_option)] #![feature(const_option_ext)] #![feature(const_pin)] +#![feature(const_pointer_byte_offsets)] #![feature(const_pointer_is_aligned)] #![feature(const_ptr_sub_ptr)] #![feature(const_replace)] @@ -178,6 +182,7 @@ #![feature(const_slice_index)] #![feature(const_is_char_boundary)] #![feature(const_cstr_methods)] +#![feature(ip)] #![feature(is_ascii_octdigit)] // // Language features: @@ -191,8 +196,9 @@ #![feature(cfg_sanitize)] #![feature(cfg_target_has_atomic)] #![feature(cfg_target_has_atomic_equal_alignment)] -#![cfg_attr(not(bootstrap), feature(const_closures))] +#![feature(const_closures)] #![feature(const_fn_floating_point_arithmetic)] +#![feature(const_for)] #![feature(const_mut_refs)] #![feature(const_precise_live_drops)] #![feature(const_refs_to_cell)] @@ -235,11 +241,11 @@ #![feature(unsized_fn_params)] #![feature(asm_const)] #![feature(const_transmute_copy)] +#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))] // // Target features: #![feature(arm_target_feature)] #![feature(avx512_target_feature)] -#![feature(cmpxchg16b_target_feature)] #![feature(hexagon_target_feature)] #![feature(mips_target_feature)] #![feature(powerpc_target_feature)] @@ -248,7 +254,7 @@ #![feature(sse4a_target_feature)] #![feature(tbm_target_feature)] #![feature(wasm_target_feature)] -#![cfg_attr(bootstrap, feature(f16c_target_feature))] +#![cfg_attr(bootstrap, feature(cmpxchg16b_target_feature))] // allow using `core::` in intra-doc links #[allow(unused_extern_crates)] @@ -347,6 +353,7 @@ pub mod cell; pub mod char; pub mod ffi; pub mod iter; +pub mod net; pub mod option; pub mod panic; pub mod panicking; @@ -375,8 +382,6 @@ mod bool; mod tuple; mod unit; -mod const_closure; - #[stable(feature = "core_primitive", since = "1.43.0")] pub mod primitive; diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index 1326fc9ab..520ae0edb 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -97,6 +97,7 @@ unsafe impl Send for &T {} 
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable #[rustc_specialization_trait] #[rustc_deny_explicit_impl] +#[cfg_attr(not(bootstrap), rustc_coinductive)] pub trait Sized { // Empty. } @@ -469,6 +470,62 @@ pub macro Copy($item:item) { #[cfg_attr(not(test), rustc_diagnostic_item = "Sync")] #[lang = "sync"] #[rustc_on_unimplemented( + on( + _Self = "std::cell::OnceCell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::OnceLock` instead" + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU8` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU16` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU32` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU64` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicUsize` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI8` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI16` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI32` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI64` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicIsize` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicBool` instead", + ), + on( + _Self = "std::cell::Cell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock`", + ), + on( + _Self = "std::cell::RefCell", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` instead", + ), message = "`{Self}` cannot be shared between threads safely", label = "`{Self}` cannot be shared between threads safely" )] @@ -815,14 +872,18 @@ pub trait Destruct {} #[rustc_deny_explicit_impl] pub trait Tuple {} -/// A marker for things -#[unstable(feature = "pointer_sized_trait", issue = "none")] -#[lang = "pointer_sized"] +/// A marker for pointer-like types. +/// +/// All types that have the same size and alignment as a `usize` or +/// `*const ()` automatically implement this trait. 
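The `rustc_on_unimplemented` notes added to `Sync` above steer users who try to share a `Cell` or `RefCell` across threads toward the thread-safe alternatives. A minimal sketch of the suggested fix (not part of the patch), using `AtomicU32` where a `Cell<u32>` would be rejected:

```rust
use std::sync::atomic::{AtomicU32, Ordering};
use std::thread;

// A `static` holding `Cell<u32>` would not compile, because `Cell<u32>: !Sync`;
// the diagnostic above points at `AtomicU32` as the shareable replacement.
static COUNTER: AtomicU32 = AtomicU32::new(0);

fn main() {
    let handles: Vec<_> = (0..4)
        .map(|_| thread::spawn(|| COUNTER.fetch_add(1, Ordering::Relaxed)))
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(COUNTER.load(Ordering::Relaxed), 4);
}
```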
+#[unstable(feature = "pointer_like_trait", issue = "none")] +#[cfg_attr(bootstrap, lang = "pointer_sized")] +#[cfg_attr(not(bootstrap), lang = "pointer_like")] #[rustc_on_unimplemented( - message = "`{Self}` needs to be a pointer-sized type", - label = "`{Self}` needs to be a pointer-sized type" + message = "`{Self}` needs to have the same alignment and size as a pointer", + label = "`{Self}` needs to be a pointer-like type" )] -pub trait PointerSized {} +pub trait PointerLike {} /// Implementations of `Copy` for primitive types. /// diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index 5e01ccc07..a67df7ed5 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -682,7 +682,6 @@ pub unsafe fn zeroed() -> T { pub unsafe fn uninitialized() -> T { // SAFETY: the caller must guarantee that an uninitialized value is valid for `T`. unsafe { - #[cfg(not(bootstrap))] // If the compiler hits this itself then it deserves the UB. intrinsics::assert_mem_uninitialized_valid::(); let mut val = MaybeUninit::::uninit(); diff --git a/library/core/src/net/display_buffer.rs b/library/core/src/net/display_buffer.rs new file mode 100644 index 000000000..7aadf06e9 --- /dev/null +++ b/library/core/src/net/display_buffer.rs @@ -0,0 +1,40 @@ +use crate::fmt; +use crate::mem::MaybeUninit; +use crate::str; + +/// Used for slow path in `Display` implementations when alignment is required. +pub struct DisplayBuffer { + buf: [MaybeUninit; SIZE], + len: usize, +} + +impl DisplayBuffer { + #[inline] + pub const fn new() -> Self { + Self { buf: MaybeUninit::uninit_array(), len: 0 } + } + + #[inline] + pub fn as_str(&self) -> &str { + // SAFETY: `buf` is only written to by the `fmt::Write::write_str` implementation + // which writes a valid UTF-8 string to `buf` and correctly sets `len`. + unsafe { + let s = MaybeUninit::slice_assume_init_ref(&self.buf[..self.len]); + str::from_utf8_unchecked(s) + } + } +} + +impl fmt::Write for DisplayBuffer { + fn write_str(&mut self, s: &str) -> fmt::Result { + let bytes = s.as_bytes(); + + if let Some(buf) = self.buf.get_mut(self.len..(self.len + bytes.len())) { + MaybeUninit::write_slice(buf, bytes); + self.len += bytes.len(); + Ok(()) + } else { + Err(fmt::Error) + } + } +} diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs new file mode 100644 index 000000000..954d88d54 --- /dev/null +++ b/library/core/src/net/ip_addr.rs @@ -0,0 +1,2070 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write}; +use crate::mem::transmute; + +use super::display_buffer::DisplayBuffer; + +/// An IP address, either IPv4 or IPv6. +/// +/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their +/// respective documentation for more details. +/// +/// # Examples +/// +/// ``` +/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +/// +/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); +/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); +/// +/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4)); +/// assert_eq!("::1".parse(), Ok(localhost_v6)); +/// +/// assert_eq!(localhost_v4.is_ipv6(), false); +/// assert_eq!(localhost_v4.is_ipv4(), true); +/// ``` +#[cfg_attr(not(test), rustc_diagnostic_item = "IpAddr")] +#[stable(feature = "ip_addr", since = "1.7.0")] +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { + /// An IPv4 address. 
+ #[stable(feature = "ip_addr", since = "1.7.0")] + V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr), + /// An IPv6 address. + #[stable(feature = "ip_addr", since = "1.7.0")] + V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr), +} + +/// An IPv4 address. +/// +/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791]. +/// They are usually represented as four octets. +/// +/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses. +/// +/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791 +/// +/// # Textual representation +/// +/// `Ipv4Addr` provides a [`FromStr`] implementation. The four octets are in decimal +/// notation, divided by `.` (this is called "dot-decimal notation"). +/// Notably, octal numbers (which are indicated with a leading `0`) and hexadecimal numbers (which +/// are indicated with a leading `0x`) are not allowed per [IETF RFC 6943]. +/// +/// [IETF RFC 6943]: https://tools.ietf.org/html/rfc6943#section-3.1.1 +/// [`FromStr`]: crate::str::FromStr +/// +/// # Examples +/// +/// ``` +/// use std::net::Ipv4Addr; +/// +/// let localhost = Ipv4Addr::new(127, 0, 0, 1); +/// assert_eq!("127.0.0.1".parse(), Ok(localhost)); +/// assert_eq!(localhost.is_loopback(), true); +/// assert!("012.004.002.000".parse::().is_err()); // all octets are in octal +/// assert!("0000000.0.0.0".parse::().is_err()); // first octet is a zero in octal +/// assert!("0xcb.0x0.0x71.0x00".parse::().is_err()); // all octets are in hex +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Ipv4Addr { + octets: [u8; 4], +} + +/// An IPv6 address. +/// +/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291]. +/// They are usually represented as eight 16-bit segments. +/// +/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 +/// +/// # Embedding IPv4 Addresses +/// +/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses. +/// +/// To assist in the transition from IPv4 to IPv6 two types of IPv6 addresses that embed an IPv4 address were defined: +/// IPv4-compatible and IPv4-mapped addresses. Of these IPv4-compatible addresses have been officially deprecated. +/// +/// Both types of addresses are not assigned any special meaning by this implementation, +/// other than what the relevant standards prescribe. This means that an address like `::ffff:127.0.0.1`, +/// while representing an IPv4 loopback address, is not itself an IPv6 loopback address; only `::1` is. +/// To handle these so called "IPv4-in-IPv6" addresses, they have to first be converted to their canonical IPv4 address. +/// +/// ### IPv4-Compatible IPv6 Addresses +/// +/// IPv4-compatible IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.1], and have been officially deprecated. +/// The RFC describes the format of an "IPv4-Compatible IPv6 address" as follows: +/// +/// ```text +/// | 80 bits | 16 | 32 bits | +/// +--------------------------------------+--------------------------+ +/// |0000..............................0000|0000| IPv4 address | +/// +--------------------------------------+----+---------------------+ +/// ``` +/// So `::a.b.c.d` would be an IPv4-compatible IPv6 address representing the IPv4 address `a.b.c.d`. +/// +/// To convert from an IPv4 address to an IPv4-compatible IPv6 address, use [`Ipv4Addr::to_ipv6_compatible`]. +/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-compatible IPv6 address to the canonical IPv4 address. 
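The embedding rules described above map directly onto the conversion methods of these types. A short, self-contained illustration (stable APIs only, not taken from the patch) of the mapped and compatible forms, including the `::1` caveat spelled out in the IPv4-mapped section that follows:

```rust
use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    let v4 = Ipv4Addr::new(192, 0, 2, 1);

    // IPv4-mapped form `::ffff:192.0.2.1` and back again.
    let mapped: Ipv6Addr = v4.to_ipv6_mapped();
    assert_eq!(mapped, Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x0201));
    assert_eq!(mapped.to_ipv4(), Some(v4));
    assert_eq!(mapped.to_ipv4_mapped(), Some(v4));

    // Deprecated IPv4-compatible form `::192.0.2.1`: only `to_ipv4` accepts it.
    let compat: Ipv6Addr = v4.to_ipv6_compatible();
    assert_eq!(compat.to_ipv4(), Some(v4));
    assert_eq!(compat.to_ipv4_mapped(), None);

    // `to_ipv4` even turns the IPv6 loopback `::1` into `0.0.0.1`;
    // `to_ipv4_mapped` does not, which is why the docs suggest it instead.
    assert_eq!(Ipv6Addr::LOCALHOST.to_ipv4(), Some(Ipv4Addr::new(0, 0, 0, 1)));
    assert_eq!(Ipv6Addr::LOCALHOST.to_ipv4_mapped(), None);
}
```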
+/// +/// [IETF RFC 4291 Section 2.5.5.1]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.1 +/// +/// ### IPv4-Mapped IPv6 Addresses +/// +/// IPv4-mapped IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.2]. +/// The RFC describes the format of an "IPv4-Mapped IPv6 address" as follows: +/// +/// ```text +/// | 80 bits | 16 | 32 bits | +/// +--------------------------------------+--------------------------+ +/// |0000..............................0000|FFFF| IPv4 address | +/// +--------------------------------------+----+---------------------+ +/// ``` +/// So `::ffff:a.b.c.d` would be an IPv4-mapped IPv6 address representing the IPv4 address `a.b.c.d`. +/// +/// To convert from an IPv4 address to an IPv4-mapped IPv6 address, use [`Ipv4Addr::to_ipv6_mapped`]. +/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-mapped IPv6 address to the canonical IPv4 address. +/// Note that this will also convert the IPv6 loopback address `::1` to `0.0.0.1`. Use +/// [`Ipv6Addr::to_ipv4_mapped`] to avoid this. +/// +/// [IETF RFC 4291 Section 2.5.5.2]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2 +/// +/// # Textual representation +/// +/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent +/// an IPv6 address in text, but in general, each segments is written in hexadecimal +/// notation, and segments are separated by `:`. For more information, see +/// [IETF RFC 5952]. +/// +/// [`FromStr`]: crate::str::FromStr +/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952 +/// +/// # Examples +/// +/// ``` +/// use std::net::Ipv6Addr; +/// +/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); +/// assert_eq!("::1".parse(), Ok(localhost)); +/// assert_eq!(localhost.is_loopback(), true); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Ipv6Addr { + octets: [u8; 16], +} + +/// Scope of an [IPv6 multicast address] as defined in [IETF RFC 7346 section 2]. +/// +/// # Stability Guarantees +/// +/// Not all possible values for a multicast scope have been assigned. +/// Future RFCs may introduce new scopes, which will be added as variants to this enum; +/// because of this the enum is marked as `#[non_exhaustive]`. +/// +/// # Examples +/// ``` +/// #![feature(ip)] +/// +/// use std::net::Ipv6Addr; +/// use std::net::Ipv6MulticastScope::*; +/// +/// // An IPv6 multicast address with global scope (`ff0e::`). +/// let address = Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0); +/// +/// // Will print "Global scope". +/// match address.multicast_scope() { +/// Some(InterfaceLocal) => println!("Interface-Local scope"), +/// Some(LinkLocal) => println!("Link-Local scope"), +/// Some(RealmLocal) => println!("Realm-Local scope"), +/// Some(AdminLocal) => println!("Admin-Local scope"), +/// Some(SiteLocal) => println!("Site-Local scope"), +/// Some(OrganizationLocal) => println!("Organization-Local scope"), +/// Some(Global) => println!("Global scope"), +/// Some(_) => println!("Unknown scope"), +/// None => println!("Not a multicast address!") +/// } +/// +/// ``` +/// +/// [IPv6 multicast address]: Ipv6Addr +/// [IETF RFC 7346 section 2]: https://tools.ietf.org/html/rfc7346#section-2 +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[unstable(feature = "ip", issue = "27709")] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + /// Interface-Local scope. + InterfaceLocal, + /// Link-Local scope. + LinkLocal, + /// Realm-Local scope. + RealmLocal, + /// Admin-Local scope. 
+ AdminLocal, + /// Site-Local scope. + SiteLocal, + /// Organization-Local scope. + OrganizationLocal, + /// Global scope. + Global, +} + +impl IpAddr { + /// Returns [`true`] for the special 'unspecified' address. + /// + /// See the documentation for [`Ipv4Addr::is_unspecified()`] and + /// [`Ipv6Addr::is_unspecified()`] for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + + /// Returns [`true`] if this is a loopback address. + /// + /// See the documentation for [`Ipv4Addr::is_loopback()`] and + /// [`Ipv6Addr::is_loopback()`] for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_loopback(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + + /// Returns [`true`] if the address appears to be globally routable. + /// + /// See the documentation for [`Ipv4Addr::is_global()`] and + /// [`Ipv6Addr::is_global()`] for more details. + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ip", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + + /// Returns [`true`] if this is a multicast address. + /// + /// See the documentation for [`Ipv4Addr::is_multicast()`] and + /// [`Ipv6Addr::is_multicast()`] for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + + /// Returns [`true`] if this address is in a range designated for documentation. + /// + /// See the documentation for [`Ipv4Addr::is_documentation()`] and + /// [`Ipv6Addr::is_documentation()`] for more details. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true); + /// assert_eq!( + /// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(), + /// true + /// ); + /// ``` + #[rustc_const_unstable(feature = "const_ip", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_documentation(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + + /// Returns [`true`] if this address is in a range designated for benchmarking. + /// + /// See the documentation for [`Ipv4Addr::is_benchmarking()`] and + /// [`Ipv6Addr::is_benchmarking()`] for more details. + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(198, 19, 255, 255)).is_benchmarking(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0)).is_benchmarking(), true); + /// ``` + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_benchmarking(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + + /// Returns [`true`] if this address is an [`IPv4` address], and [`false`] + /// otherwise. + /// + /// [`IPv4` address]: IpAddr::V4 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ipaddr_checker", since = "1.16.0")] + #[must_use] + #[inline] + pub const fn is_ipv4(&self) -> bool { + matches!(self, IpAddr::V4(_)) + } + + /// Returns [`true`] if this address is an [`IPv6` address], and [`false`] + /// otherwise. + /// + /// [`IPv6` address]: IpAddr::V6 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ipaddr_checker", since = "1.16.0")] + #[must_use] + #[inline] + pub const fn is_ipv6(&self) -> bool { + matches!(self, IpAddr::V6(_)) + } + + /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped IPv6 addresses, otherwise it + /// return `self` as-is. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_canonical().is_loopback(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).is_loopback(), false); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).to_canonical().is_loopback(), true); + /// ``` + #[inline] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_const_unstable(feature = "const_ip", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + pub const fn to_canonical(&self) -> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} + +impl Ipv4Addr { + /// Creates a new IPv4 address from four eight-bit octets. + /// + /// The result will represent the IP address `a`.`b`.`c`.`d`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::new(127, 0, 0, 1); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { octets: [a, b, c, d] } + } + + /// An IPv4 address with the address pointing to localhost: `127.0.0.1` + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::LOCALHOST; + /// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1)); + /// ``` + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + + /// An IPv4 address representing an unspecified address: `0.0.0.0` + /// + /// This corresponds to the constant `INADDR_ANY` in other languages. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::UNSPECIFIED; + /// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0)); + /// ``` + #[doc(alias = "INADDR_ANY")] + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + + /// An IPv4 address representing the broadcast address: `255.255.255.255` + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::BROADCAST; + /// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255)); + /// ``` + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + + /// Returns the four eight-bit integers that make up this address. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::new(127, 0, 0, 1); + /// assert_eq!(addr.octets(), [127, 0, 0, 1]); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn octets(&self) -> [u8; 4] { + self.octets + } + + /// Returns [`true`] for the special 'unspecified' address (`0.0.0.0`). + /// + /// This property is defined in _UNIX Network Programming, Second Edition_, + /// W. Richard Stevens, p. 891; see also [ip7]. 
+ /// + /// [ip7]: https://man7.org/linux/man-pages/man7/ip.7.html + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true); + /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_unspecified(&self) -> bool { + u32::from_be_bytes(self.octets) == 0 + } + + /// Returns [`true`] if this is a loopback address (`127.0.0.0/8`). + /// + /// This property is defined by [IETF RFC 1122]. + /// + /// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true); + /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_loopback(&self) -> bool { + self.octets()[0] == 127 + } + + /// Returns [`true`] if this is a private address. + /// + /// The private address ranges are defined in [IETF RFC 1918] and include: + /// + /// - `10.0.0.0/8` + /// - `172.16.0.0/12` + /// - `192.168.0.0/16` + /// + /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true); + /// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true); + /// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true); + /// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true); + /// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false); + /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true); + /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] => true, + _ => false, + } + } + + /// Returns [`true`] if the address is link-local (`169.254.0.0/16`). + /// + /// This property is defined by [IETF RFC 3927]. + /// + /// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true); + /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true); + /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + + /// Returns [`true`] if the address appears to be globally reachable + /// as specified by the [IANA IPv4 Special-Purpose Address Registry]. + /// Whether or not an address is practically reachable will depend on your network configuration. + /// + /// Most IPv4 addresses are globally reachable; + /// unless they are specifically defined as *not* globally reachable. 
+ /// + /// Non-exhaustive list of notable addresses that are not globally reachable: + /// + /// - The [unspecified address] ([`is_unspecified`](Ipv4Addr::is_unspecified)) + /// - Addresses reserved for private use ([`is_private`](Ipv4Addr::is_private)) + /// - Addresses in the shared address space ([`is_shared`](Ipv4Addr::is_shared)) + /// - Loopback addresses ([`is_loopback`](Ipv4Addr::is_loopback)) + /// - Link-local addresses ([`is_link_local`](Ipv4Addr::is_link_local)) + /// - Addresses reserved for documentation ([`is_documentation`](Ipv4Addr::is_documentation)) + /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv4Addr::is_benchmarking)) + /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) + /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) + /// + /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. + /// + /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml + /// [unspecified address]: Ipv4Addr::UNSPECIFIED + /// [broadcast address]: Ipv4Addr::BROADCAST + + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv4Addr; + /// + /// // Most IPv4 addresses are globally reachable: + /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true); + /// + /// // However some addresses have been assigned a special meaning + /// // that makes them not globally reachable. Some examples are: + /// + /// // The unspecified address (`0.0.0.0`) + /// assert_eq!(Ipv4Addr::UNSPECIFIED.is_global(), false); + /// + /// // Addresses reserved for private use (`10.0.0.0/8`, `172.16.0.0/12`, 192.168.0.0/16) + /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false); + /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false); + /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false); + /// + /// // Addresses in the shared address space (`100.64.0.0/10`) + /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false); + /// + /// // The loopback addresses (`127.0.0.0/8`) + /// assert_eq!(Ipv4Addr::LOCALHOST.is_global(), false); + /// + /// // Link-local addresses (`169.254.0.0/16`) + /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false); + /// + /// // Addresses reserved for documentation (`192.0.2.0/24`, `198.51.100.0/24`, `203.0.113.0/24`) + /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false); + /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false); + /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false); + /// + /// // Addresses reserved for benchmarking (`198.18.0.0/15`) + /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false); + /// + /// // Reserved addresses (`240.0.0.0/4`) + /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false); + /// + /// // The broadcast address (`255.255.255.255`) + /// assert_eq!(Ipv4Addr::BROADCAST.is_global(), false); + /// + /// // For a complete overview see the IANA IPv4 Special-Purpose Address Registry. 
+ /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_global(&self) -> bool { + !(self.octets()[0] == 0 // "This network" + || self.is_private() + || self.is_shared() + || self.is_loopback() + || self.is_link_local() + // addresses reserved for future protocols (`192.0.0.0/24`) + ||(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) + || self.is_documentation() + || self.is_benchmarking() + || self.is_reserved() + || self.is_broadcast()) + } + + /// Returns [`true`] if this address is part of the Shared Address Space defined in + /// [IETF RFC 6598] (`100.64.0.0/10`). + /// + /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true); + /// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true); + /// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false); + /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + + /// Returns [`true`] if this address part of the `198.18.0.0/15` range, which is reserved for + /// network devices benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0` + /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`. + /// + /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544 + /// [errata 423]: https://www.rfc-editor.org/errata/eid423 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false); + /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true); + /// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true); + /// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false); + /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + + /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112] + /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the + /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since + /// it is obviously not reserved for future use. + /// + /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112 + /// + /// # Warning + /// + /// As IANA assigns new addresses, this method will be + /// updated. This may result in non-reserved addresses being + /// treated as reserved in code that relies on an outdated version + /// of this method. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true); + /// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true); + /// + /// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false); + /// // The broadcast address is not considered as reserved for future use by this implementation + /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false); + /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + + /// Returns [`true`] if this is a multicast address (`224.0.0.0/4`). + /// + /// Multicast addresses have a most significant octet between `224` and `239`, + /// and is defined by [IETF RFC 5771]. + /// + /// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true); + /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true); + /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + + /// Returns [`true`] if this is a broadcast address (`255.255.255.255`). + /// + /// A broadcast address has all octets set to `255` as defined in [IETF RFC 919]. + /// + /// [IETF RFC 919]: https://tools.ietf.org/html/rfc919 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true); + /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) + } + + /// Returns [`true`] if this address is in a range designated for documentation. + /// + /// This is defined in [IETF RFC 5737]: + /// + /// - `192.0.2.0/24` (TEST-NET-1) + /// - `198.51.100.0/24` (TEST-NET-2) + /// - `203.0.113.0/24` (TEST-NET-3) + /// + /// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true); + /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true); + /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true); + /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_documentation(&self) -> bool { + matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _]) + } + + /// Converts this address to an [IPv4-compatible] [`IPv6` address]. + /// + /// `a.b.c.d` becomes `::a.b.c.d` + /// + /// Note that IPv4-compatible addresses have been officially deprecated. 
+ /// If you don't explicitly need an IPv4-compatible address for legacy reasons, consider using `to_ipv6_mapped` instead. + /// + /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses + /// [`IPv6` address]: Ipv6Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!( + /// Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(), + /// Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x2ff) + /// ); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d] } + } + + /// Converts this address to an [IPv4-mapped] [`IPv6` address]. + /// + /// `a.b.c.d` becomes `::ffff:a.b.c.d` + /// + /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses + /// [`IPv6` address]: Ipv6Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(), + /// Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff)); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d] } + } +} + +#[stable(feature = "ip_addr", since = "1.7.0")] +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} + +#[stable(feature = "ip_addr", since = "1.7.0")] +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "ip_from_ip", since = "1.16.0")] +impl From for IpAddr { + /// Copies this address to a new `IpAddr::V4`. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr}; + /// + /// let addr = Ipv4Addr::new(127, 0, 0, 1); + /// + /// assert_eq!( + /// IpAddr::V4(addr), + /// IpAddr::from(addr) + /// ) + /// ``` + #[inline] + fn from(ipv4: Ipv4Addr) -> IpAddr { + IpAddr::V4(ipv4) + } +} + +#[stable(feature = "ip_from_ip", since = "1.16.0")] +impl From for IpAddr { + /// Copies this address to a new `IpAddr::V6`. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr}; + /// + /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff); + /// + /// assert_eq!( + /// IpAddr::V6(addr), + /// IpAddr::from(addr) + /// ); + /// ``` + #[inline] + fn from(ipv6: Ipv6Addr) -> IpAddr { + IpAddr::V6(ipv6) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + + // If there are no alignment requirements, write the IP address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. 
+ if fmt.precision().is_none() && fmt.width().is_none() { + write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) + } else { + const LONGEST_IPV4_ADDR: &str = "255.255.255.255"; + + let mut buf = DisplayBuffer::<{ LONGEST_IPV4_ADDR.len() }>::new(); + // Buffer is long enough for the longest possible IPv4 address, so this should never fail. + write!(buf, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap(); + + fmt.pad(buf.as_str()) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq for IpAddr { + #[inline] + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { + IpAddr::V4(v4) => v4 == other, + IpAddr::V6(_) => false, + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq for Ipv4Addr { + #[inline] + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(v4) => self == v4, + IpAddr::V6(_) => false, + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialOrd for Ipv4Addr { + #[inline] + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + Some(self.cmp(other)) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd for IpAddr { + #[inline] + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd for Ipv4Addr { + #[inline] + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Ord for Ipv4Addr { + #[inline] + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + self.octets.cmp(&other.octets) + } +} + +#[stable(feature = "ip_u32", since = "1.1.0")] +impl From for u32 { + /// Converts an `Ipv4Addr` into a host byte order `u32`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78); + /// assert_eq!(0x12345678, u32::from(addr)); + /// ``` + #[inline] + fn from(ip: Ipv4Addr) -> u32 { + u32::from_be_bytes(ip.octets) + } +} + +#[stable(feature = "ip_u32", since = "1.1.0")] +impl From for Ipv4Addr { + /// Converts a host byte order `u32` into an `Ipv4Addr`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::from(0x12345678); + /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr); + /// ``` + #[inline] + fn from(ip: u32) -> Ipv4Addr { + Ipv4Addr { octets: ip.to_be_bytes() } + } +} + +#[stable(feature = "from_slice_v4", since = "1.9.0")] +impl From<[u8; 4]> for Ipv4Addr { + /// Creates an `Ipv4Addr` from a four element byte array. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]); + /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr); + /// ``` + #[inline] + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr { octets } + } +} + +#[stable(feature = "ip_from_slice", since = "1.17.0")] +impl From<[u8; 4]> for IpAddr { + /// Creates an `IpAddr::V4` from a four element byte array. 
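The `Display` impl above takes the direct `write!` path only when no width or precision is requested; otherwise it renders into the stack-allocated `DisplayBuffer` and hands the finished string to `Formatter::pad`, so fill and alignment apply to the whole address. A small demonstration of the observable difference (standard formatting syntax, nothing from the patch itself):

```rust
use std::net::Ipv4Addr;

fn main() {
    let addr = Ipv4Addr::new(8, 8, 8, 8);

    // No width or precision: the fast path writes the octets straight through.
    assert_eq!(format!("{addr}"), "8.8.8.8");

    // With a width, the address is first rendered into the fixed-size buffer
    // (at most "255.255.255.255", 15 bytes) and then padded as one unit.
    assert_eq!(format!("{addr:>11}"), "    8.8.8.8");
    assert_eq!(format!("{addr:<11}!"), "8.8.8.8    !");
}
```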
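The mixed `PartialEq`/`PartialOrd` impls above make every V4 address order before every V6 address, and allow an `IpAddr` to be compared directly against a bare `Ipv4Addr`. A brief check (stable APIs only):

```rust
use std::cmp::Ordering;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

fn main() {
    let v4: IpAddr = Ipv4Addr::new(255, 255, 255, 255).into();
    let v6: IpAddr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into();

    // Even the highest V4 address orders before the lowest V6 one.
    assert_eq!(v4.cmp(&v6), Ordering::Less);
    assert!(v4 < v6);

    // The cross-type impls let an `IpAddr` be compared with a bare `Ipv4Addr`.
    assert!(v4 == Ipv4Addr::new(255, 255, 255, 255));
    assert!(v6 != Ipv4Addr::new(255, 255, 255, 255));
}
```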
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr}; + /// + /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]); + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr); + /// ``` + #[inline] + fn from(octets: [u8; 4]) -> IpAddr { + IpAddr::V4(Ipv4Addr::from(octets)) + } +} + +impl Ipv6Addr { + /// Creates a new IPv6 address from eight 16-bit segments. + /// + /// The result will represent the IP address `a:b:c:d:e:f:g:h`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr { + let addr16 = [ + a.to_be(), + b.to_be(), + c.to_be(), + d.to_be(), + e.to_be(), + f.to_be(), + g.to_be(), + h.to_be(), + ]; + Ipv6Addr { + // All elements in `addr16` are big endian. + // SAFETY: `[u16; 8]` is always safe to transmute to `[u8; 16]`. + octets: unsafe { transmute::<_, [u8; 16]>(addr16) }, + } + } + + /// An IPv6 address representing localhost: `::1`. + /// + /// This corresponds to constant `IN6ADDR_LOOPBACK_INIT` or `in6addr_loopback` in other + /// languages. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::LOCALHOST; + /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + /// ``` + #[doc(alias = "IN6ADDR_LOOPBACK_INIT")] + #[doc(alias = "in6addr_loopback")] + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + /// An IPv6 address representing the unspecified address: `::` + /// + /// This corresponds to constant `IN6ADDR_ANY_INIT` or `in6addr_any` in other languages. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::UNSPECIFIED; + /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)); + /// ``` + #[doc(alias = "IN6ADDR_ANY_INIT")] + #[doc(alias = "in6addr_any")] + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + + /// Returns the eight 16-bit segments that make up this address. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(), + /// [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn segments(&self) -> [u16; 8] { + // All elements in `self.octets` must be big endian. + // SAFETY: `[u8; 16]` is always safe to transmute to `[u16; 8]`. + let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.octets) }; + // We want native endian u16 + [ + u16::from_be(a), + u16::from_be(b), + u16::from_be(c), + u16::from_be(d), + u16::from_be(e), + u16::from_be(f), + u16::from_be(g), + u16::from_be(h), + ] + } + + /// Returns [`true`] for the special 'unspecified' address (`::`). + /// + /// This property is defined in [IETF RFC 4291]. 
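As the `SAFETY` comments in `new` and `segments` above note, the sixteen bytes are stored big-endian, so the segment view and the octet view of one address line up byte-for-byte. A small check of that round trip (stable APIs only; `octets()` is the existing accessor on `Ipv6Addr`):

```rust
use std::net::Ipv6Addr;

fn main() {
    let addr = Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0, 0x0001);

    // Each 16-bit segment contributes its two bytes in big-endian order.
    assert_eq!(
        addr.octets(),
        [0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00, 0x01]
    );
    // `segments()` converts the same bytes back to native-endian `u16`s.
    assert_eq!(addr.segments(), [0x2001, 0x0db8, 0, 0, 0, 0, 0, 0x0001]);
}
```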
+ /// + /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_unspecified(&self) -> bool { + u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) + } + + /// Returns [`true`] if this is the [loopback address] (`::1`), + /// as defined in [IETF RFC 4291 section 2.5.3]. + /// + /// Contrary to IPv4, in IPv6 there is only one loopback address. + /// + /// [loopback address]: Ipv6Addr::LOCALHOST + /// [IETF RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_loopback(&self) -> bool { + u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) + } + + /// Returns [`true`] if the address appears to be globally reachable + /// as specified by the [IANA IPv6 Special-Purpose Address Registry]. + /// Whether or not an address is practically reachable will depend on your network configuration. + /// + /// Most IPv6 addresses are globally reachable; + /// unless they are specifically defined as *not* globally reachable. + /// + /// Non-exhaustive list of notable addresses that are not globally reachable: + /// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified)) + /// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback)) + /// - IPv4-mapped addresses + /// - Addresses reserved for benchmarking + /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation)) + /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local)) + /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) + /// + /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry]. + /// + /// Note that an address having global scope is not the same as being globally reachable, + /// and there is no direct relation between the two concepts: There exist addresses with global scope + /// that are not globally reachable (for example unique local addresses), + /// and addresses that are globally reachable without having global scope + /// (multicast addresses with non-global scope). 
+ /// + /// [IANA IPv6 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + /// [unspecified address]: Ipv6Addr::UNSPECIFIED + /// [loopback address]: Ipv6Addr::LOCALHOST + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// // Most IPv6 addresses are globally reachable: + /// assert_eq!(Ipv6Addr::new(0x26, 0, 0x1c9, 0, 0, 0xafc8, 0x10, 0x1).is_global(), true); + /// + /// // However some addresses have been assigned a special meaning + /// // that makes them not globally reachable. Some examples are: + /// + /// // The unspecified address (`::`) + /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_global(), false); + /// + /// // The loopback address (`::1`) + /// assert_eq!(Ipv6Addr::LOCALHOST.is_global(), false); + /// + /// // IPv4-mapped addresses (`::ffff:0:0/96`) + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), false); + /// + /// // Addresses reserved for benchmarking (`2001:2::/48`) + /// assert_eq!(Ipv6Addr::new(0x2001, 2, 0, 0, 0, 0, 0, 1,).is_global(), false); + /// + /// // Addresses reserved for documentation (`2001:db8::/32`) + /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // Unique local addresses (`fc00::/7`) + /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // Unicast addresses with link-local scope (`fe80::/10`) + /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // For a complete overview see the IANA IPv6 Special-Purpose Address Registry. + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_global(&self) -> bool { + !(self.is_unspecified() + || self.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(self.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) + || matches!(self.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) + // Discard-Only Address Block (`100::/64`) + || matches!(self.segments(), [0x100, 0, 0, 0, _, _, _, _]) + // IETF Protocol Assignments (`2001::/23`) + || (matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) + && !( + // Port Control Protocol Anycast (`2001:1::1`) + u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 + // Traversal Using Relays around NAT Anycast (`2001:1::2`) + || u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 + // AMT (`2001:3::/32`) + || matches!(self.segments(), [0x2001, 3, _, _, _, _, _, _]) + // AS112-v6 (`2001:4:112::/48`) + || matches!(self.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) + // ORCHIDv2 (`2001:20::/28`) + || matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F) + )) + || self.is_documentation() + || self.is_unique_local() + || self.is_unicast_link_local()) + } + + /// Returns [`true`] if this is a unique local address (`fc00::/7`). + /// + /// This property is defined in [IETF RFC 4193]. 
+ /// + /// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false); + /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_unique_local(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } + + /// Returns [`true`] if this is a unicast address, as defined by [IETF RFC 4291]. + /// Any address that is not a [multicast address] (`ff00::/8`) is unicast. + /// + /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 + /// [multicast address]: Ipv6Addr::is_multicast + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// // The unspecified and loopback addresses are unicast. + /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_unicast(), true); + /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast(), true); + /// + /// // Any address that is not a multicast address (`ff00::/8`) is unicast. + /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast(), true); + /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_unicast(), false); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_unicast(&self) -> bool { + !self.is_multicast() + } + + /// Returns `true` if the address is a unicast address with link-local scope, + /// as defined in [RFC 4291]. + /// + /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4]. + /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6], + /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format: + /// + /// ```text + /// | 10 bits | 54 bits | 64 bits | + /// +----------+-------------------------+----------------------------+ + /// |1111111010| 0 | interface ID | + /// +----------+-------------------------+----------------------------+ + /// ``` + /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`, + /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated, + /// and those addresses will have link-local scope. + /// + /// Also note that while [RFC 4291 section 2.5.3] mentions about the [loopback address] (`::1`) that "it is treated as having Link-Local scope", + /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it. + /// + /// [RFC 4291]: https://tools.ietf.org/html/rfc4291 + /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4 + /// [RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3 + /// [RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6 + /// [loopback address]: Ipv6Addr::LOCALHOST + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// // The loopback address (`::1`) does not actually have link-local scope. + /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast_link_local(), false); + /// + /// // Only addresses in `fe80::/10` have link-local scope. 
+ /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), false); + /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true); + /// + /// // Addresses outside the stricter `fe80::/64` also have link-local scope. + /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0).is_unicast_link_local(), true); + /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_unicast_link_local(&self) -> bool { + (self.segments()[0] & 0xffc0) == 0xfe80 + } + + /// Returns [`true`] if this is an address reserved for documentation + /// (`2001:db8::/32`). + /// + /// This property is defined in [IETF RFC 3849]. + /// + /// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false); + /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_documentation(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } + + /// Returns [`true`] if this is an address reserved for benchmarking (`2001:2::/48`). + /// + /// This property is defined in [IETF RFC 5180], where it is mistakenly specified as covering the range `2001:0200::/48`. + /// This is corrected in [IETF RFC Errata 1752] to `2001:0002::/48`. + /// + /// [IETF RFC 5180]: https://tools.ietf.org/html/rfc5180 + /// [IETF RFC Errata 1752]: https://www.rfc-editor.org/errata_search.php?eid=1752 + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc613, 0x0).is_benchmarking(), false); + /// assert_eq!(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0).is_benchmarking(), true); + /// ``` + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_benchmarking(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0) + } + + /// Returns [`true`] if the address is a globally routable unicast address. + /// + /// The following return false: + /// + /// - the loopback address + /// - the link-local addresses + /// - unique local addresses + /// - the unspecified address + /// - the address range reserved for documentation + /// + /// This method returns [`true`] for site-local addresses as per [RFC 4291 section 2.5.7] + /// + /// ```no_rust + /// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer + /// be supported in new implementations (i.e., new implementations must treat this prefix as + /// Global Unicast). 
+ /// ``` + /// + /// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() + && !self.is_loopback() + && !self.is_unicast_link_local() + && !self.is_unique_local() + && !self.is_unspecified() + && !self.is_documentation() + && !self.is_benchmarking() + } + + /// Returns the address's multicast scope if the address is multicast. + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{Ipv6Addr, Ipv6MulticastScope}; + /// + /// assert_eq!( + /// Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(), + /// Some(Ipv6MulticastScope::Global) + /// ); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn multicast_scope(&self) -> Option { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { + None + } + } + + /// Returns [`true`] if this is a multicast address (`ff00::/8`). + /// + /// This property is defined by [IETF RFC 4291]. + /// + /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + + /// Converts this address to an [`IPv4` address] if it's an [IPv4-mapped] address, + /// as defined in [IETF RFC 4291 section 2.5.5.2], otherwise returns [`None`]. + /// + /// `::ffff:a.b.c.d` becomes `a.b.c.d`. + /// All addresses *not* starting with `::ffff` will return `None`. 
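The mapped-prefix check described here boils down to a pattern over the 16 octets: the first ten must be zero and the next two must be `0xff`. A minimal illustrative sketch follows; the `is_ipv4_mapped` helper is hypothetical and not part of the standard library API (the real check lives inside `to_ipv4_mapped` itself).

```rust
use std::net::Ipv6Addr;

// Hypothetical helper: an address is IPv4-mapped exactly when its first
// 12 octets are `00 .. 00 ff ff`.
fn is_ipv4_mapped(addr: &Ipv6Addr) -> bool {
    matches!(addr.octets(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, ..])
}

fn main() {
    assert!(is_ipv4_mapped(&Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff)));
    assert!(!is_ipv4_mapped(&Ipv6Addr::LOCALHOST)); // `::1` is not mapped
}
```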
+ /// + /// [`IPv4` address]: Ipv4Addr + /// [IPv4-mapped]: Ipv6Addr + /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2 + /// + /// # Examples + /// + /// ``` + /// use std::net::{Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4_mapped(), None); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4_mapped(), + /// Some(Ipv4Addr::new(192, 10, 2, 255))); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_ipv4_mapped(&self) -> Option { + match self.octets() { + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Some(Ipv4Addr::new(a, b, c, d)) + } + _ => None, + } + } + + /// Converts this address to an [`IPv4` address] if it is either + /// an [IPv4-compatible] address as defined in [IETF RFC 4291 section 2.5.5.1], + /// or an [IPv4-mapped] address as defined in [IETF RFC 4291 section 2.5.5.2], + /// otherwise returns [`None`]. + /// + /// Note that this will return an [`IPv4` address] for the IPv6 loopback address `::1`. Use + /// [`Ipv6Addr::to_ipv4_mapped`] to avoid this. + /// + /// `::a.b.c.d` and `::ffff:a.b.c.d` become `a.b.c.d`. `::1` becomes `0.0.0.1`. + /// All addresses *not* starting with either all zeroes or `::ffff` will return `None`. + /// + /// [`IPv4` address]: Ipv4Addr + /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses + /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses + /// [IETF RFC 4291 section 2.5.5.1]: https://tools.ietf.org/html/rfc4291#section-2.5.5.1 + /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2 + /// + /// # Examples + /// + /// ``` + /// use std::net::{Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(), + /// Some(Ipv4Addr::new(192, 10, 2, 255))); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(), + /// Some(Ipv4Addr::new(0, 0, 0, 1))); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_ipv4(&self) -> Option { + if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { + let [a, b] = ab.to_be_bytes(); + let [c, d] = cd.to_be_bytes(); + Some(Ipv4Addr::new(a, b, c, d)) + } else { + None + } + } + + /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped addresses, otherwise it + /// returns self wrapped in an `IpAddr::V6`. 
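Why this conversion is built on the mapped form rather than the compatible form can be seen by comparing the two stable conversions on the loopback address; a short sketch using only stable `std::net` API:

```rust
use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    let loopback = Ipv6Addr::LOCALHOST; // ::1
    // `to_ipv4` also accepts IPv4-compatible addresses, so `::1` converts...
    assert_eq!(loopback.to_ipv4(), Some(Ipv4Addr::new(0, 0, 0, 1)));
    // ...while `to_ipv4_mapped` only accepts `::ffff:a.b.c.d` and rejects it.
    assert_eq!(loopback.to_ipv4_mapped(), None);

    let mapped = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
    assert_eq!(mapped.to_ipv4_mapped(), Some(Ipv4Addr::new(192, 10, 2, 255)));
}
```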
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + + /// Returns the sixteen eight-bit integers the IPv6 address consists of. + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(), + /// [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "ipv6_to_octets", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn octets(&self) -> [u8; 16] { + self.octets + } +} + +/// Write an Ipv6Addr, conforming to the canonical style described by +/// [RFC 5952](https://tools.ietf.org/html/rfc5952). +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for Ipv6Addr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // If there are no alignment requirements, write the IP address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + + // Special case for :: and ::1; otherwise they get written with the + // IPv4 formatter + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + // IPv4 Compatible address + 0 => write!(f, "::{}", ipv4), + // IPv4 Mapped address + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { + #[derive(Copy, Clone, Default)] + struct Span { + start: usize, + len: usize, + } + + // Find the inner 0 span + let zeroes = { + let mut longest = Span::default(); + let mut current = Span::default(); + + for (i, &segment) in segments.iter().enumerate() { + if segment == 0 { + if current.len == 0 { + current.start = i; + } + + current.len += 1; + + if current.len > longest.len { + longest = current; + } + } else { + current = Span::default(); + } + } + + longest + }; + + /// Write a colon-separated part of the address + #[inline] + fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result { + if let Some((first, tail)) = chunk.split_first() { + write!(f, "{:x}", first)?; + for segment in tail { + f.write_char(':')?; + write!(f, "{:x}", segment)?; + } + } + Ok(()) + } + + if zeroes.len > 1 { + fmt_subslice(f, &segments[..zeroes.start])?; + f.write_str("::")?; + fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) + } else { + fmt_subslice(f, &segments) + } + } + } else { + const LONGEST_IPV6_ADDR: &str = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"; + + let mut buf = DisplayBuffer::<{ LONGEST_IPV6_ADDR.len() }>::new(); + // Buffer is long enough for the longest possible IPv6 address, so this should never fail. 
+ write!(buf, "{}", self).unwrap(); + + f.pad(buf.as_str()) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq for Ipv6Addr { + #[inline] + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => self == v6, + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq for IpAddr { + #[inline] + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => v6 == other, + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialOrd for Ipv6Addr { + #[inline] + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + Some(self.cmp(other)) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd for IpAddr { + #[inline] + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd for Ipv6Addr { + #[inline] + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Ord for Ipv6Addr { + #[inline] + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} + +#[stable(feature = "i128", since = "1.26.0")] +impl From for u128 { + /// Convert an `Ipv6Addr` into a host byte order `u128`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::new( + /// 0x1020, 0x3040, 0x5060, 0x7080, + /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D, + /// ); + /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr)); + /// ``` + #[inline] + fn from(ip: Ipv6Addr) -> u128 { + u128::from_be_bytes(ip.octets) + } +} +#[stable(feature = "i128", since = "1.26.0")] +impl From for Ipv6Addr { + /// Convert a host byte order `u128` into an `Ipv6Addr`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128); + /// assert_eq!( + /// Ipv6Addr::new( + /// 0x1020, 0x3040, 0x5060, 0x7080, + /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D, + /// ), + /// addr); + /// ``` + #[inline] + fn from(ip: u128) -> Ipv6Addr { + Ipv6Addr::from(ip.to_be_bytes()) + } +} + +#[stable(feature = "ipv6_from_octets", since = "1.9.0")] +impl From<[u8; 16]> for Ipv6Addr { + /// Creates an `Ipv6Addr` from a sixteen element byte array. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::from([ + /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8, + /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8, + /// ]); + /// assert_eq!( + /// Ipv6Addr::new( + /// 0x1918, 0x1716, + /// 0x1514, 0x1312, + /// 0x1110, 0x0f0e, + /// 0x0d0c, 0x0b0a + /// ), + /// addr + /// ); + /// ``` + #[inline] + fn from(octets: [u8; 16]) -> Ipv6Addr { + Ipv6Addr { octets } + } +} + +#[stable(feature = "ipv6_from_segments", since = "1.16.0")] +impl From<[u16; 8]> for Ipv6Addr { + /// Creates an `Ipv6Addr` from an eight element 16-bit array. 
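The `Display` implementation above compresses the longest run of zero groups into `::`, as required by RFC 5952. A standalone sketch of that scan, with an illustrative `longest_zero_run` function that mirrors the `Span` bookkeeping but is not the standard library's code path:

```rust
// Illustrative re-implementation: find the start and length of the longest run
// of zero groups, which the formatter replaces with `::` when the run is > 1.
fn longest_zero_run(segments: &[u16; 8]) -> (usize, usize) {
    let (mut best_start, mut best_len) = (0, 0);
    let (mut cur_start, mut cur_len) = (0, 0);
    for (i, &seg) in segments.iter().enumerate() {
        if seg == 0 {
            if cur_len == 0 {
                cur_start = i;
            }
            cur_len += 1;
            if cur_len > best_len {
                best_start = cur_start;
                best_len = cur_len;
            }
        } else {
            cur_len = 0;
        }
    }
    (best_start, best_len)
}

fn main() {
    // 2001:db8::1 -> the zero run covers groups 2..7
    assert_eq!(longest_zero_run(&[0x2001, 0xdb8, 0, 0, 0, 0, 0, 1]), (2, 5));
    // Runs of length 1 are found too, but only runs longer than 1 are compressed.
    assert_eq!(longest_zero_run(&[1, 0, 2, 0, 0, 3, 4, 5]), (3, 2));
}
```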
+ /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::from([ + /// 525u16, 524u16, 523u16, 522u16, + /// 521u16, 520u16, 519u16, 518u16, + /// ]); + /// assert_eq!( + /// Ipv6Addr::new( + /// 0x20d, 0x20c, + /// 0x20b, 0x20a, + /// 0x209, 0x208, + /// 0x207, 0x206 + /// ), + /// addr + /// ); + /// ``` + #[inline] + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} + +#[stable(feature = "ip_from_slice", since = "1.17.0")] +impl From<[u8; 16]> for IpAddr { + /// Creates an `IpAddr::V6` from a sixteen element byte array. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr}; + /// + /// let addr = IpAddr::from([ + /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8, + /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8, + /// ]); + /// assert_eq!( + /// IpAddr::V6(Ipv6Addr::new( + /// 0x1918, 0x1716, + /// 0x1514, 0x1312, + /// 0x1110, 0x0f0e, + /// 0x0d0c, 0x0b0a + /// )), + /// addr + /// ); + /// ``` + #[inline] + fn from(octets: [u8; 16]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(octets)) + } +} + +#[stable(feature = "ip_from_slice", since = "1.17.0")] +impl From<[u16; 8]> for IpAddr { + /// Creates an `IpAddr::V6` from an eight element 16-bit array. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr}; + /// + /// let addr = IpAddr::from([ + /// 525u16, 524u16, 523u16, 522u16, + /// 521u16, 520u16, 519u16, 518u16, + /// ]); + /// assert_eq!( + /// IpAddr::V6(Ipv6Addr::new( + /// 0x20d, 0x20c, + /// 0x20b, 0x20a, + /// 0x209, 0x208, + /// 0x207, 0x206 + /// )), + /// addr + /// ); + /// ``` + #[inline] + fn from(segments: [u16; 8]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff --git a/library/core/src/net/mod.rs b/library/core/src/net/mod.rs new file mode 100644 index 000000000..31f5f5d3c --- /dev/null +++ b/library/core/src/net/mod.rs @@ -0,0 +1,24 @@ +//! Networking primitives for IP communication. +//! +//! This module provides types for IP and socket addresses. +//! +//! # Organization +//! +//! * [`IpAddr`] represents IP addresses of either IPv4 or IPv6; [`Ipv4Addr`] and +//! [`Ipv6Addr`] are respectively IPv4 and IPv6 addresses +//! * [`SocketAddr`] represents socket addresses of either IPv4 or IPv6; [`SocketAddrV4`] +//! and [`SocketAddrV6`] are respectively IPv4 and IPv6 socket addresses + +#![unstable(feature = "ip_in_core", issue = "108443")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::ip_addr::{IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope}; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::parser::AddrParseError; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::socket_addr::{SocketAddr, SocketAddrV4, SocketAddrV6}; + +mod display_buffer; +mod ip_addr; +mod parser; +mod socket_addr; diff --git a/library/core/src/net/parser.rs b/library/core/src/net/parser.rs new file mode 100644 index 000000000..a08d2792d --- /dev/null +++ b/library/core/src/net/parser.rs @@ -0,0 +1,498 @@ +//! A private parser implementation of IPv4, IPv6, and socket addresses. +//! +//! This module is "publicly exported" through the `FromStr` implementations +//! below. 
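Since this parser is only reachable through the `FromStr` implementations, the stable entry point is `str::parse`; the byte-slice `parse_ascii` methods further down are still unstable. A brief usage sketch of the stable surface:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    // The parser in this module is reached through `str::parse` / `FromStr`.
    let ip: IpAddr = "127.0.0.1".parse().unwrap();
    assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));

    let sock: SocketAddr = "[::1]:8080".parse().unwrap();
    assert_eq!(sock.port(), 8080);
    assert!(sock.is_ipv6());

    // Anything that is not a complete, valid address is rejected.
    assert!("127.0.0.1:".parse::<SocketAddr>().is_err());
}
```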
+ +use crate::convert::TryInto; +use crate::error::Error; +use crate::fmt; +use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use crate::str::FromStr; + +trait ReadNumberHelper: crate::marker::Sized { + const ZERO: Self; + fn checked_mul(&self, other: u32) -> Option; + fn checked_add(&self, other: u32) -> Option; +} + +macro_rules! impl_helper { + ($($t:ty)*) => ($(impl ReadNumberHelper for $t { + const ZERO: Self = 0; + #[inline] + fn checked_mul(&self, other: u32) -> Option { + Self::checked_mul(*self, other.try_into().ok()?) + } + #[inline] + fn checked_add(&self, other: u32) -> Option { + Self::checked_add(*self, other.try_into().ok()?) + } + })*) +} + +impl_helper! { u8 u16 u32 } + +struct Parser<'a> { + // Parsing as ASCII, so can use byte array. + state: &'a [u8], +} + +impl<'a> Parser<'a> { + fn new(input: &'a [u8]) -> Parser<'a> { + Parser { state: input } + } + + /// Run a parser, and restore the pre-parse state if it fails. + fn read_atomically(&mut self, inner: F) -> Option + where + F: FnOnce(&mut Parser<'_>) -> Option, + { + let state = self.state; + let result = inner(self); + if result.is_none() { + self.state = state; + } + result + } + + /// Run a parser, but fail if the entire input wasn't consumed. + /// Doesn't run atomically. + fn parse_with(&mut self, inner: F, kind: AddrKind) -> Result + where + F: FnOnce(&mut Parser<'_>) -> Option, + { + let result = inner(self); + if self.state.is_empty() { result } else { None }.ok_or(AddrParseError(kind)) + } + + /// Peek the next character from the input + fn peek_char(&self) -> Option { + self.state.first().map(|&b| char::from(b)) + } + + /// Read the next character from the input + fn read_char(&mut self) -> Option { + self.state.split_first().map(|(&b, tail)| { + self.state = tail; + char::from(b) + }) + } + + #[must_use] + /// Read the next character from the input if it matches the target. + fn read_given_char(&mut self, target: char) -> Option<()> { + self.read_atomically(|p| { + p.read_char().and_then(|c| if c == target { Some(()) } else { None }) + }) + } + + /// Helper for reading separators in an indexed loop. Reads the separator + /// character iff index > 0, then runs the parser. When used in a loop, + /// the separator character will only be read on index > 0 (see + /// read_ipv4_addr for an example) + fn read_separator(&mut self, sep: char, index: usize, inner: F) -> Option + where + F: FnOnce(&mut Parser<'_>) -> Option, + { + self.read_atomically(move |p| { + if index > 0 { + p.read_given_char(sep)?; + } + inner(p) + }) + } + + // Read a number off the front of the input in the given radix, stopping + // at the first non-digit character or eof. Fails if the number has more + // digits than max_digits or if there is no number. + fn read_number( + &mut self, + radix: u32, + max_digits: Option, + allow_zero_prefix: bool, + ) -> Option { + self.read_atomically(move |p| { + let mut result = T::ZERO; + let mut digit_count = 0; + let has_leading_zero = p.peek_char() == Some('0'); + + while let Some(digit) = p.read_atomically(|p| p.read_char()?.to_digit(radix)) { + result = result.checked_mul(radix)?; + result = result.checked_add(digit)?; + digit_count += 1; + if let Some(max_digits) = max_digits { + if digit_count > max_digits { + return None; + } + } + } + + if digit_count == 0 { + None + } else if !allow_zero_prefix && has_leading_zero && digit_count > 1 { + None + } else { + Some(result) + } + }) + } + + /// Read an IPv4 address. 
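The `read_atomically` helper above implements the parser's backtracking: save the remaining input, run a sub-parser, and restore the input if it fails, so a failed attempt consumes nothing. A minimal sketch of that pattern; `MiniParser` and `read_byte` are hypothetical and much simpler than the real `Parser`:

```rust
// Hypothetical, stripped-down illustration of the save/restore pattern.
struct MiniParser<'a> {
    state: &'a [u8],
}

impl<'a> MiniParser<'a> {
    fn read_atomically<T>(&mut self, inner: impl FnOnce(&mut Self) -> Option<T>) -> Option<T> {
        let saved = self.state;
        let result = inner(self);
        if result.is_none() {
            // Roll back so a failed sub-parser consumes nothing.
            self.state = saved;
        }
        result
    }

    fn read_byte(&mut self, want: u8) -> Option<()> {
        self.read_atomically(|p| {
            let (&b, tail) = p.state.split_first()?;
            if b == want {
                p.state = tail;
                Some(())
            } else {
                None
            }
        })
    }
}

fn main() {
    let input: &[u8] = b"ab";
    let mut p = MiniParser { state: input };
    assert!(p.read_byte(b'x').is_none());
    assert_eq!(p.state, b"ab"); // the failed read left the input untouched
    assert!(p.read_byte(b'a').is_some());
    assert_eq!(p.state, b"b");
}
```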
+ fn read_ipv4_addr(&mut self) -> Option { + self.read_atomically(|p| { + let mut groups = [0; 4]; + + for (i, slot) in groups.iter_mut().enumerate() { + *slot = p.read_separator('.', i, |p| { + // Disallow octal number in IP string. + // https://tools.ietf.org/html/rfc6943#section-3.1.1 + p.read_number(10, Some(3), false) + })?; + } + + Some(groups.into()) + }) + } + + /// Read an IPv6 Address. + fn read_ipv6_addr(&mut self) -> Option { + /// Read a chunk of an IPv6 address into `groups`. Returns the number + /// of groups read, along with a bool indicating if an embedded + /// trailing IPv4 address was read. Specifically, read a series of + /// colon-separated IPv6 groups (0x0000 - 0xFFFF), with an optional + /// trailing embedded IPv4 address. + fn read_groups(p: &mut Parser<'_>, groups: &mut [u16]) -> (usize, bool) { + let limit = groups.len(); + + for (i, slot) in groups.iter_mut().enumerate() { + // Try to read a trailing embedded IPv4 address. There must be + // at least two groups left. + if i < limit - 1 { + let ipv4 = p.read_separator(':', i, |p| p.read_ipv4_addr()); + + if let Some(v4_addr) = ipv4 { + let [one, two, three, four] = v4_addr.octets(); + groups[i + 0] = u16::from_be_bytes([one, two]); + groups[i + 1] = u16::from_be_bytes([three, four]); + return (i + 2, true); + } + } + + let group = p.read_separator(':', i, |p| p.read_number(16, Some(4), true)); + + match group { + Some(g) => *slot = g, + None => return (i, false), + } + } + (groups.len(), false) + } + + self.read_atomically(|p| { + // Read the front part of the address; either the whole thing, or up + // to the first :: + let mut head = [0; 8]; + let (head_size, head_ipv4) = read_groups(p, &mut head); + + if head_size == 8 { + return Some(head.into()); + } + + // IPv4 part is not allowed before `::` + if head_ipv4 { + return None; + } + + // Read `::` if previous code parsed less than 8 groups. + // `::` indicates one or more groups of 16 bits of zeros. + p.read_given_char(':')?; + p.read_given_char(':')?; + + // Read the back part of the address. The :: must contain at least one + // set of zeroes, so our max length is 7. + let mut tail = [0; 7]; + let limit = 8 - (head_size + 1); + let (tail_size, _) = read_groups(p, &mut tail[..limit]); + + // Concat the head and tail of the IP address + head[(8 - tail_size)..8].copy_from_slice(&tail[..tail_size]); + + Some(head.into()) + }) + } + + /// Read an IP Address, either IPv4 or IPv6. + fn read_ip_addr(&mut self) -> Option { + self.read_ipv4_addr().map(IpAddr::V4).or_else(move || self.read_ipv6_addr().map(IpAddr::V6)) + } + + /// Read a `:` followed by a port in base 10. + fn read_port(&mut self) -> Option { + self.read_atomically(|p| { + p.read_given_char(':')?; + p.read_number(10, None, true) + }) + } + + /// Read a `%` followed by a scope ID in base 10. + fn read_scope_id(&mut self) -> Option { + self.read_atomically(|p| { + p.read_given_char('%')?; + p.read_number(10, None, true) + }) + } + + /// Read an IPv4 address with a port. + fn read_socket_addr_v4(&mut self) -> Option { + self.read_atomically(|p| { + let ip = p.read_ipv4_addr()?; + let port = p.read_port()?; + Some(SocketAddrV4::new(ip, port)) + }) + } + + /// Read an IPv6 address with a port. 
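The digit rules enforced by `read_number` and the head/tail handling around `::` are observable through the stable `FromStr` implementations. A usage sketch of that behavior (stable API only):

```rust
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV6};

fn main() {
    // IPv4 components: base 10, at most 3 digits, no leading zeros (RFC 6943 section 3.1.1).
    assert!("192.168.0.01".parse::<Ipv4Addr>().is_err());
    assert!("192.168.0.1".parse::<Ipv4Addr>().is_ok());

    // IPv6 groups: base 16, at most 4 digits, leading zeros allowed; `::` fills
    // in the missing zero groups between the head and the tail.
    let a: Ipv6Addr = "2001:0db8::1".parse().unwrap();
    assert_eq!(a.segments(), [0x2001, 0xdb8, 0, 0, 0, 0, 0, 1]);
    assert!("2001:00db8::1".parse::<Ipv6Addr>().is_err());

    // A numeric `%scope_id` may appear inside the brackets of a socket address.
    let s: SocketAddrV6 = "[fe80::1%7]:443".parse().unwrap();
    assert_eq!(s.scope_id(), 7);
    assert_eq!(s.port(), 443);
}
```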
+ fn read_socket_addr_v6(&mut self) -> Option { + self.read_atomically(|p| { + p.read_given_char('[')?; + let ip = p.read_ipv6_addr()?; + let scope_id = p.read_scope_id().unwrap_or(0); + p.read_given_char(']')?; + + let port = p.read_port()?; + Some(SocketAddrV6::new(ip, port, 0, scope_id)) + }) + } + + /// Read an IP address with a port + fn read_socket_addr(&mut self) -> Option { + self.read_socket_addr_v4() + .map(SocketAddr::V4) + .or_else(|| self.read_socket_addr_v6().map(SocketAddr::V6)) + } +} + +impl IpAddr { + /// Parse an IP address from a slice of bytes. + /// + /// ``` + /// #![feature(addr_parse_ascii)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + /// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + /// + /// assert_eq!(IpAddr::parse_ascii(b"127.0.0.1"), Ok(localhost_v4)); + /// assert_eq!(IpAddr::parse_ascii(b"::1"), Ok(localhost_v6)); + /// ``` + #[unstable(feature = "addr_parse_ascii", issue = "101035")] + pub fn parse_ascii(b: &[u8]) -> Result { + Parser::new(b).parse_with(|p| p.read_ip_addr(), AddrKind::Ip) + } +} + +#[stable(feature = "ip_addr", since = "1.7.0")] +impl FromStr for IpAddr { + type Err = AddrParseError; + fn from_str(s: &str) -> Result { + Self::parse_ascii(s.as_bytes()) + } +} + +impl Ipv4Addr { + /// Parse an IPv4 address from a slice of bytes. + /// + /// ``` + /// #![feature(addr_parse_ascii)] + /// + /// use std::net::Ipv4Addr; + /// + /// let localhost = Ipv4Addr::new(127, 0, 0, 1); + /// + /// assert_eq!(Ipv4Addr::parse_ascii(b"127.0.0.1"), Ok(localhost)); + /// ``` + #[unstable(feature = "addr_parse_ascii", issue = "101035")] + pub fn parse_ascii(b: &[u8]) -> Result { + // don't try to parse if too long + if b.len() > 15 { + Err(AddrParseError(AddrKind::Ipv4)) + } else { + Parser::new(b).parse_with(|p| p.read_ipv4_addr(), AddrKind::Ipv4) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl FromStr for Ipv4Addr { + type Err = AddrParseError; + fn from_str(s: &str) -> Result { + Self::parse_ascii(s.as_bytes()) + } +} + +impl Ipv6Addr { + /// Parse an IPv6 address from a slice of bytes. + /// + /// ``` + /// #![feature(addr_parse_ascii)] + /// + /// use std::net::Ipv6Addr; + /// + /// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + /// + /// assert_eq!(Ipv6Addr::parse_ascii(b"::1"), Ok(localhost)); + /// ``` + #[unstable(feature = "addr_parse_ascii", issue = "101035")] + pub fn parse_ascii(b: &[u8]) -> Result { + Parser::new(b).parse_with(|p| p.read_ipv6_addr(), AddrKind::Ipv6) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl FromStr for Ipv6Addr { + type Err = AddrParseError; + fn from_str(s: &str) -> Result { + Self::parse_ascii(s.as_bytes()) + } +} + +impl SocketAddrV4 { + /// Parse an IPv4 socket address from a slice of bytes. 
+ /// + /// ``` + /// #![feature(addr_parse_ascii)] + /// + /// use std::net::{Ipv4Addr, SocketAddrV4}; + /// + /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// + /// assert_eq!(SocketAddrV4::parse_ascii(b"127.0.0.1:8080"), Ok(socket)); + /// ``` + #[unstable(feature = "addr_parse_ascii", issue = "101035")] + pub fn parse_ascii(b: &[u8]) -> Result { + Parser::new(b).parse_with(|p| p.read_socket_addr_v4(), AddrKind::SocketV4) + } +} + +#[stable(feature = "socket_addr_from_str", since = "1.5.0")] +impl FromStr for SocketAddrV4 { + type Err = AddrParseError; + fn from_str(s: &str) -> Result { + Self::parse_ascii(s.as_bytes()) + } +} + +impl SocketAddrV6 { + /// Parse an IPv6 socket address from a slice of bytes. + /// + /// ``` + /// #![feature(addr_parse_ascii)] + /// + /// use std::net::{Ipv6Addr, SocketAddrV6}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// + /// assert_eq!(SocketAddrV6::parse_ascii(b"[2001:db8::1]:8080"), Ok(socket)); + /// ``` + #[unstable(feature = "addr_parse_ascii", issue = "101035")] + pub fn parse_ascii(b: &[u8]) -> Result { + Parser::new(b).parse_with(|p| p.read_socket_addr_v6(), AddrKind::SocketV6) + } +} + +#[stable(feature = "socket_addr_from_str", since = "1.5.0")] +impl FromStr for SocketAddrV6 { + type Err = AddrParseError; + fn from_str(s: &str) -> Result { + Self::parse_ascii(s.as_bytes()) + } +} + +impl SocketAddr { + /// Parse a socket address from a slice of bytes. + /// + /// ``` + /// #![feature(addr_parse_ascii)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + /// + /// let socket_v4 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// let socket_v6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080); + /// + /// assert_eq!(SocketAddr::parse_ascii(b"127.0.0.1:8080"), Ok(socket_v4)); + /// assert_eq!(SocketAddr::parse_ascii(b"[::1]:8080"), Ok(socket_v6)); + /// ``` + #[unstable(feature = "addr_parse_ascii", issue = "101035")] + pub fn parse_ascii(b: &[u8]) -> Result { + Parser::new(b).parse_with(|p| p.read_socket_addr(), AddrKind::Socket) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl FromStr for SocketAddr { + type Err = AddrParseError; + fn from_str(s: &str) -> Result { + Self::parse_ascii(s.as_bytes()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum AddrKind { + Ip, + Ipv4, + Ipv6, + Socket, + SocketV4, + SocketV6, +} + +/// An error which can be returned when parsing an IP address or a socket address. +/// +/// This error is used as the error type for the [`FromStr`] implementation for +/// [`IpAddr`], [`Ipv4Addr`], [`Ipv6Addr`], [`SocketAddr`], [`SocketAddrV4`], and +/// [`SocketAddrV6`]. +/// +/// # Potential causes +/// +/// `AddrParseError` may be thrown because the provided string does not parse as the given type, +/// often because it includes information only handled by a different address type. +/// +/// ```should_panic +/// use std::net::IpAddr; +/// let _foo: IpAddr = "127.0.0.1:8080".parse().expect("Cannot handle the socket port"); +/// ``` +/// +/// [`IpAddr`] doesn't handle the port. Use [`SocketAddr`] instead. +/// +/// ``` +/// use std::net::SocketAddr; +/// +/// // No problem, the `panic!` message has disappeared. 
+/// let _foo: SocketAddr = "127.0.0.1:8080".parse().expect("unreachable panic"); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct AddrParseError(AddrKind); + +#[stable(feature = "addr_parse_error_error", since = "1.4.0")] +impl fmt::Display for AddrParseError { + #[allow(deprecated, deprecated_in_future)] + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.write_str(self.description()) + } +} + +#[stable(feature = "addr_parse_error_error", since = "1.4.0")] +impl Error for AddrParseError { + #[allow(deprecated)] + fn description(&self) -> &str { + match self.0 { + AddrKind::Ip => "invalid IP address syntax", + AddrKind::Ipv4 => "invalid IPv4 address syntax", + AddrKind::Ipv6 => "invalid IPv6 address syntax", + AddrKind::Socket => "invalid socket address syntax", + AddrKind::SocketV4 => "invalid IPv4 socket address syntax", + AddrKind::SocketV6 => "invalid IPv6 socket address syntax", + } + } +} diff --git a/library/core/src/net/socket_addr.rs b/library/core/src/net/socket_addr.rs new file mode 100644 index 000000000..2d48e2715 --- /dev/null +++ b/library/core/src/net/socket_addr.rs @@ -0,0 +1,664 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write}; +use crate::hash; +use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + +use super::display_buffer::DisplayBuffer; + +/// An internet socket address, either IPv4 or IPv6. +/// +/// Internet socket addresses consist of an [IP address], a 16-bit port number, as well +/// as possibly some version-dependent additional information. See [`SocketAddrV4`]'s and +/// [`SocketAddrV6`]'s respective documentation for more details. +/// +/// The size of a `SocketAddr` instance may vary depending on the target operating +/// system. +/// +/// [IP address]: IpAddr +/// +/// # Examples +/// +/// ``` +/// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +/// +/// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); +/// +/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket)); +/// assert_eq!(socket.port(), 8080); +/// assert_eq!(socket.is_ipv4(), true); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[stable(feature = "rust1", since = "1.0.0")] +pub enum SocketAddr { + /// An IPv4 socket address. + #[stable(feature = "rust1", since = "1.0.0")] + V4(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV4), + /// An IPv6 socket address. + #[stable(feature = "rust1", since = "1.0.0")] + V6(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV6), +} + +/// An IPv4 socket address. +/// +/// IPv4 socket addresses consist of an [`IPv4` address] and a 16-bit port number, as +/// stated in [IETF RFC 793]. +/// +/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses. +/// +/// The size of a `SocketAddrV4` struct may vary depending on the target operating +/// system. Do not assume that this type has the same memory layout as the underlying +/// system representation. 
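`SocketAddr` is a plain two-variant enum, so callers typically dispatch on the variant after parsing. A small sketch; the `describe` helper is illustrative only:

```rust
use std::net::SocketAddr;

// Illustrative helper: dispatch on the two `SocketAddr` variants.
fn describe(addr: SocketAddr) -> String {
    match addr {
        SocketAddr::V4(v4) => format!("IPv4 {} on port {}", v4.ip(), v4.port()),
        SocketAddr::V6(v6) => format!("IPv6 {} on port {}", v6.ip(), v6.port()),
    }
}

fn main() {
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    assert_eq!(describe(addr), "IPv4 127.0.0.1 on port 8080");
}
```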
+/// +/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793 +/// [`IPv4` address]: Ipv4Addr +/// +/// # Examples +/// +/// ``` +/// use std::net::{Ipv4Addr, SocketAddrV4}; +/// +/// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); +/// +/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket)); +/// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1)); +/// assert_eq!(socket.port(), 8080); +/// ``` +#[derive(Copy, Clone, Eq, PartialEq)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct SocketAddrV4 { + ip: Ipv4Addr, + port: u16, +} + +/// An IPv6 socket address. +/// +/// IPv6 socket addresses consist of an [`IPv6` address], a 16-bit port number, as well +/// as fields containing the traffic class, the flow label, and a scope identifier +/// (see [IETF RFC 2553, Section 3.3] for more details). +/// +/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses. +/// +/// The size of a `SocketAddrV6` struct may vary depending on the target operating +/// system. Do not assume that this type has the same memory layout as the underlying +/// system representation. +/// +/// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 +/// [`IPv6` address]: Ipv6Addr +/// +/// # Examples +/// +/// ``` +/// use std::net::{Ipv6Addr, SocketAddrV6}; +/// +/// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0); +/// +/// assert_eq!("[2001:db8::1]:8080".parse(), Ok(socket)); +/// assert_eq!(socket.ip(), &Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)); +/// assert_eq!(socket.port(), 8080); +/// ``` +#[derive(Copy, Clone, Eq, PartialEq)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct SocketAddrV6 { + ip: Ipv6Addr, + port: u16, + flowinfo: u32, + scope_id: u32, +} + +impl SocketAddr { + /// Creates a new socket address from an [IP address] and a port number. + /// + /// [IP address]: IpAddr + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[stable(feature = "ip_addr", since = "1.7.0")] + #[must_use] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn new(ip: IpAddr, port: u16) -> SocketAddr { + match ip { + IpAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(a, port)), + IpAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new(a, port, 0, 0)), + } + } + + /// Returns the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + /// ``` + #[must_use] + #[stable(feature = "ip_addr", since = "1.7.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn ip(&self) -> IpAddr { + match *self { + SocketAddr::V4(ref a) => IpAddr::V4(*a.ip()), + SocketAddr::V6(ref a) => IpAddr::V6(*a.ip()), + } + } + + /// Changes the IP address associated with this socket address. 
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// socket.set_ip(IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1))); + /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1))); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_ip(&mut self, new_ip: IpAddr) { + // `match (*self, new_ip)` would have us mutate a copy of self only to throw it away. + match (self, new_ip) { + (&mut SocketAddr::V4(ref mut a), IpAddr::V4(new_ip)) => a.set_ip(new_ip), + (&mut SocketAddr::V6(ref mut a), IpAddr::V6(new_ip)) => a.set_ip(new_ip), + (self_, new_ip) => *self_ = Self::new(new_ip, self_.port()), + } + } + + /// Returns the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn port(&self) -> u16 { + match *self { + SocketAddr::V4(ref a) => a.port(), + SocketAddr::V6(ref a) => a.port(), + } + } + + /// Changes the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// socket.set_port(1025); + /// assert_eq!(socket.port(), 1025); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_port(&mut self, new_port: u16) { + match *self { + SocketAddr::V4(ref mut a) => a.set_port(new_port), + SocketAddr::V6(ref mut a) => a.set_port(new_port), + } + } + + /// Returns [`true`] if the [IP address] in this `SocketAddr` is an + /// [`IPv4` address], and [`false`] otherwise. + /// + /// [IP address]: IpAddr + /// [`IPv4` address]: IpAddr::V4 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.is_ipv4(), true); + /// assert_eq!(socket.is_ipv6(), false); + /// ``` + #[must_use] + #[stable(feature = "sockaddr_checker", since = "1.16.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn is_ipv4(&self) -> bool { + matches!(*self, SocketAddr::V4(_)) + } + + /// Returns [`true`] if the [IP address] in this `SocketAddr` is an + /// [`IPv6` address], and [`false`] otherwise. + /// + /// [IP address]: IpAddr + /// [`IPv6` address]: IpAddr::V6 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 0, 1)), 8080); + /// assert_eq!(socket.is_ipv4(), false); + /// assert_eq!(socket.is_ipv6(), true); + /// ``` + #[must_use] + #[stable(feature = "sockaddr_checker", since = "1.16.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn is_ipv6(&self) -> bool { + matches!(*self, SocketAddr::V6(_)) + } +} + +impl SocketAddrV4 { + /// Creates a new socket address from an [`IPv4` address] and a port number. 
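The fallback arm in `set_ip` above rebuilds the whole address when the new IP belongs to the other family, keeping the existing port. A short sketch of that behavior using the stable API:

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};

fn main() {
    let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
    assert!(socket.is_ipv4());

    // Setting an IP of the other family switches the variant but keeps the port.
    socket.set_ip(IpAddr::V6(Ipv6Addr::LOCALHOST));
    assert!(socket.is_ipv6());
    assert_eq!(socket.port(), 8080);
}
```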
+ /// + /// [`IPv4` address]: Ipv4Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn new(ip: Ipv4Addr, port: u16) -> SocketAddrV4 { + SocketAddrV4 { ip, port } + } + + /// Returns the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1)); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn ip(&self) -> &Ipv4Addr { + &self.ip + } + + /// Changes the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// socket.set_ip(Ipv4Addr::new(192, 168, 0, 1)); + /// assert_eq!(socket.ip(), &Ipv4Addr::new(192, 168, 0, 1)); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_ip(&mut self, new_ip: Ipv4Addr) { + self.ip = new_ip; + } + + /// Returns the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn port(&self) -> u16 { + self.port + } + + /// Changes the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// socket.set_port(4242); + /// assert_eq!(socket.port(), 4242); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_port(&mut self, new_port: u16) { + self.port = new_port; + } +} + +impl SocketAddrV6 { + /// Creates a new socket address from an [`IPv6` address], a 16-bit port number, + /// and the `flowinfo` and `scope_id` fields. + /// + /// For more information on the meaning and layout of the `flowinfo` and `scope_id` + /// parameters, see [IETF RFC 2553, Section 3.3]. + /// + /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 + /// [`IPv6` address]: Ipv6Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn new(ip: Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> SocketAddrV6 { + SocketAddrV6 { ip, port, flowinfo, scope_id } + } + + /// Returns the IP address associated with this socket address. 
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// assert_eq!(socket.ip(), &Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn ip(&self) -> &Ipv6Addr { + &self.ip + } + + /// Changes the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// socket.set_ip(Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0)); + /// assert_eq!(socket.ip(), &Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0)); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_ip(&mut self, new_ip: Ipv6Addr) { + self.ip = new_ip; + } + + /// Returns the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn port(&self) -> u16 { + self.port + } + + /// Changes the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// socket.set_port(4242); + /// assert_eq!(socket.port(), 4242); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_port(&mut self, new_port: u16) { + self.port = new_port; + } + + /// Returns the flow information associated with this address. + /// + /// This information corresponds to the `sin6_flowinfo` field in C's `netinet/in.h`, + /// as specified in [IETF RFC 2553, Section 3.3]. + /// It combines information about the flow label and the traffic class as specified + /// in [IETF RFC 2460], respectively [Section 6] and [Section 7]. + /// + /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 + /// [IETF RFC 2460]: https://tools.ietf.org/html/rfc2460 + /// [Section 6]: https://tools.ietf.org/html/rfc2460#section-6 + /// [Section 7]: https://tools.ietf.org/html/rfc2460#section-7 + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0); + /// assert_eq!(socket.flowinfo(), 10); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn flowinfo(&self) -> u32 { + self.flowinfo + } + + /// Changes the flow information associated with this socket address. + /// + /// See [`SocketAddrV6::flowinfo`]'s documentation for more details. 
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0); + /// socket.set_flowinfo(56); + /// assert_eq!(socket.flowinfo(), 56); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_flowinfo(&mut self, new_flowinfo: u32) { + self.flowinfo = new_flowinfo; + } + + /// Returns the scope ID associated with this address. + /// + /// This information corresponds to the `sin6_scope_id` field in C's `netinet/in.h`, + /// as specified in [IETF RFC 2553, Section 3.3]. + /// + /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78); + /// assert_eq!(socket.scope_id(), 78); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn scope_id(&self) -> u32 { + self.scope_id + } + + /// Changes the scope ID associated with this socket address. + /// + /// See [`SocketAddrV6::scope_id`]'s documentation for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78); + /// socket.set_scope_id(42); + /// assert_eq!(socket.scope_id(), 42); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_scope_id(&mut self, new_scope_id: u32) { + self.scope_id = new_scope_id; + } +} + +#[stable(feature = "ip_from_ip", since = "1.16.0")] +impl From for SocketAddr { + /// Converts a [`SocketAddrV4`] into a [`SocketAddr::V4`]. + fn from(sock4: SocketAddrV4) -> SocketAddr { + SocketAddr::V4(sock4) + } +} + +#[stable(feature = "ip_from_ip", since = "1.16.0")] +impl From for SocketAddr { + /// Converts a [`SocketAddrV6`] into a [`SocketAddr::V6`]. + fn from(sock6: SocketAddrV6) -> SocketAddr { + SocketAddr::V6(sock6) + } +} + +#[stable(feature = "addr_from_into_ip", since = "1.17.0")] +impl> From<(I, u16)> for SocketAddr { + /// Converts a tuple struct (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`]. + /// + /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`] + /// and creates a [`SocketAddr::V6`] for an [`IpAddr::V6`]. + /// + /// `u16` is treated as port of the newly created [`SocketAddr`]. + fn from(pieces: (I, u16)) -> SocketAddr { + SocketAddr::new(pieces.0.into(), pieces.1) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for SocketAddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + SocketAddr::V4(ref a) => a.fmt(f), + SocketAddr::V6(ref a) => a.fmt(f), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for SocketAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for SocketAddrV4 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // If there are no alignment requirements, write the socket address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. 
+ if f.precision().is_none() && f.width().is_none() { + write!(f, "{}:{}", self.ip(), self.port()) + } else { + const LONGEST_IPV4_SOCKET_ADDR: &str = "255.255.255.255:65536"; + + let mut buf = DisplayBuffer::<{ LONGEST_IPV4_SOCKET_ADDR.len() }>::new(); + // Buffer is long enough for the longest possible IPv4 socket address, so this should never fail. + write!(buf, "{}:{}", self.ip(), self.port()).unwrap(); + + f.pad(buf.as_str()) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for SocketAddrV4 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for SocketAddrV6 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // If there are no alignment requirements, write the socket address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. + if f.precision().is_none() && f.width().is_none() { + match self.scope_id() { + 0 => write!(f, "[{}]:{}", self.ip(), self.port()), + scope_id => write!(f, "[{}%{}]:{}", self.ip(), scope_id, self.port()), + } + } else { + const LONGEST_IPV6_SOCKET_ADDR: &str = + "[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%4294967296]:65536"; + + let mut buf = DisplayBuffer::<{ LONGEST_IPV6_SOCKET_ADDR.len() }>::new(); + match self.scope_id() { + 0 => write!(buf, "[{}]:{}", self.ip(), self.port()), + scope_id => write!(buf, "[{}%{}]:{}", self.ip(), scope_id, self.port()), + } + // Buffer is long enough for the longest possible IPv6 socket address, so this should never fail. + .unwrap(); + + f.pad(buf.as_str()) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for SocketAddrV6 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl PartialOrd for SocketAddrV4 { + fn partial_cmp(&self, other: &SocketAddrV4) -> Option { + Some(self.cmp(other)) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl PartialOrd for SocketAddrV6 { + fn partial_cmp(&self, other: &SocketAddrV6) -> Option { + Some(self.cmp(other)) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl Ord for SocketAddrV4 { + fn cmp(&self, other: &SocketAddrV4) -> Ordering { + self.ip().cmp(other.ip()).then(self.port().cmp(&other.port())) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl Ord for SocketAddrV6 { + fn cmp(&self, other: &SocketAddrV6) -> Ordering { + self.ip().cmp(other.ip()).then(self.port().cmp(&other.port())) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl hash::Hash for SocketAddrV4 { + fn hash(&self, s: &mut H) { + (self.port, self.ip).hash(s) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl hash::Hash for SocketAddrV6 { + fn hash(&self, s: &mut H) { + (self.port, &self.ip, self.flowinfo, self.scope_id).hash(s) + } +} diff --git a/library/core/src/num/dec2flt/mod.rs b/library/core/src/num/dec2flt/mod.rs index a888ced49..f8d493e8b 100644 --- a/library/core/src/num/dec2flt/mod.rs +++ b/library/core/src/num/dec2flt/mod.rs @@ -75,6 +75,7 @@ issue = "none" )] +use crate::error::Error; use crate::fmt; use crate::str::FromStr; @@ -182,15 +183,10 @@ enum FloatErrorKind { Invalid, } -impl ParseFloatError { - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub 
fn __description(&self) -> &str { +#[stable(feature = "rust1", since = "1.0.0")] +impl Error for ParseFloatError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { FloatErrorKind::Empty => "cannot parse float from empty string", FloatErrorKind::Invalid => "invalid float literal", @@ -201,7 +197,8 @@ impl ParseFloatError { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for ParseFloatError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } diff --git a/library/core/src/num/error.rs b/library/core/src/num/error.rs index 768dd8781..1bae4efe7 100644 --- a/library/core/src/num/error.rs +++ b/library/core/src/num/error.rs @@ -9,23 +9,19 @@ use crate::fmt; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TryFromIntError(pub(crate) ()); -impl TryFromIntError { - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { - "out of range integral type conversion attempted" +#[stable(feature = "try_from", since = "1.34.0")] +impl fmt::Display for TryFromIntError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + #[allow(deprecated)] + self.description().fmt(fmt) } } #[stable(feature = "try_from", since = "1.34.0")] -impl fmt::Display for TryFromIntError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(fmt) +impl Error for TryFromIntError { + #[allow(deprecated)] + fn description(&self) -> &str { + "out of range integral type conversion attempted" } } @@ -121,28 +117,13 @@ impl ParseIntError { pub fn kind(&self) -> &IntErrorKind { &self.kind } - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { - match self.kind { - IntErrorKind::Empty => "cannot parse integer from empty string", - IntErrorKind::InvalidDigit => "invalid digit found in string", - IntErrorKind::PosOverflow => "number too large to fit in target type", - IntErrorKind::NegOverflow => "number too small to fit in target type", - IntErrorKind::Zero => "number would be zero for non-zero type", - } - } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for ParseIntError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } @@ -150,14 +131,12 @@ impl fmt::Display for ParseIntError { impl Error for ParseIntError { #[allow(deprecated)] fn description(&self) -> &str { - self.__description() - } -} - -#[stable(feature = "try_from", since = "1.34.0")] -impl Error for TryFromIntError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() + match self.kind { + IntErrorKind::Empty => "cannot parse integer from empty string", + IntErrorKind::InvalidDigit => "invalid digit found in string", + IntErrorKind::PosOverflow => "number too large to fit in target type", + IntErrorKind::NegOverflow => "number too small to fit in target type", + IntErrorKind::Zero => "number would be zero for non-zero type", + } } } diff --git a/library/core/src/num/int_log10.rs b/library/core/src/num/int_log10.rs index 80472528f..0ce31b40a 100644 --- a/library/core/src/num/int_log10.rs +++ 
b/library/core/src/num/int_log10.rs @@ -138,3 +138,11 @@ pub const fn i64(val: i64) -> u32 { pub const fn i128(val: i128) -> u32 { u128(val as u128) } + +/// Instantiate this panic logic once, rather than for all the ilog methods +/// on every single primitive type. +#[cold] +#[track_caller] +pub const fn panic_for_nonpositive_argument() -> ! { + panic!("argument of integer logarithm must be positive") +} diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index 2cae98b8e..aec15212d 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -1,11 +1,31 @@ macro_rules! int_impl { - ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr, $BITS_MINUS_ONE:expr, $Min:expr, $Max:expr, - $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr, - $reversed:expr, $le_bytes:expr, $be_bytes:expr, - $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr, - $bound_condition:expr) => { + ( + Self = $SelfT:ty, + ActualT = $ActualT:ident, + UnsignedT = $UnsignedT:ty, + + // There are all for use *only* in doc comments. + // As such, they're all passed as literals -- passing them as a string + // literal is fine if they need to be multiple code tokens. + // In non-comments, use the associated constants rather than these. + BITS = $BITS:literal, + BITS_MINUS_ONE = $BITS_MINUS_ONE:literal, + Min = $Min:literal, + Max = $Max:literal, + rot = $rot:literal, + rot_op = $rot_op:literal, + rot_result = $rot_result:literal, + swap_op = $swap_op:literal, + swapped = $swapped:literal, + reversed = $reversed:literal, + le_bytes = $le_bytes:literal, + be_bytes = $be_bytes:literal, + to_xe_bytes_doc = $to_xe_bytes_doc:expr, + from_xe_bytes_doc = $from_xe_bytes_doc:expr, + bound_condition = $bound_condition:literal, + ) => { /// The smallest value that can be represented by this integer type - #[doc = concat!("(−2", $BITS_MINUS_ONE, "", $bound_condition, ")")] + #[doc = concat!("(−2", $BITS_MINUS_ONE, "", $bound_condition, ").")] /// /// # Examples /// @@ -15,10 +35,10 @@ macro_rules! int_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN, ", stringify!($Min), ");")] /// ``` #[stable(feature = "assoc_int_consts", since = "1.43.0")] - pub const MIN: Self = !0 ^ ((!0 as $UnsignedT) >> 1) as Self; + pub const MIN: Self = !Self::MAX; /// The largest value that can be represented by this integer type - #[doc = concat!("(2", $BITS_MINUS_ONE, " − 1", $bound_condition, ")")] + #[doc = concat!("(2", $BITS_MINUS_ONE, " − 1", $bound_condition, ").")] /// /// # Examples /// @@ -28,7 +48,7 @@ macro_rules! int_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX, ", stringify!($Max), ");")] /// ``` #[stable(feature = "assoc_int_consts", since = "1.43.0")] - pub const MAX: Self = !Self::MIN; + pub const MAX: Self = (<$UnsignedT>::MAX >> 1) as Self; /// The size of this integer type in bits. /// @@ -38,7 +58,7 @@ macro_rules! int_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");")] /// ``` #[stable(feature = "int_bits_const", since = "1.53.0")] - pub const BITS: u32 = $BITS; + pub const BITS: u32 = <$UnsignedT>::BITS; /// Converts a string slice in a given base to an integer. /// @@ -1365,7 +1385,7 @@ macro_rules! int_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shl(rhs & ($BITS - 1)) + self.unchecked_shl(rhs & (Self::BITS - 1)) } } @@ -1395,7 +1415,7 @@ macro_rules! 
int_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shr(rhs & ($BITS - 1)) + self.unchecked_shr(rhs & (Self::BITS - 1)) } } @@ -1901,7 +1921,7 @@ macro_rules! int_impl { without modifying the original"] #[inline] pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shl(rhs), rhs >= Self::BITS) } /// Shifts self right by `rhs` bits. @@ -1924,7 +1944,7 @@ macro_rules! int_impl { without modifying the original"] #[inline] pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shr(rhs), rhs >= Self::BITS) } /// Computes the absolute value of `self`. @@ -2331,14 +2351,17 @@ macro_rules! int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog(self, base: Self) -> u32 { assert!(base >= 2, "base of integer logarithm must be at least 2"); - self.checked_ilog(base).expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog(base) { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 2 logarithm of the number, rounded down. @@ -2354,13 +2377,16 @@ macro_rules! int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog2(self) -> u32 { - self.checked_ilog2().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog2() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 10 logarithm of the number, rounded down. @@ -2376,13 +2402,16 @@ macro_rules! int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog10(self) -> u32 { - self.checked_ilog10().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog10() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the logarithm of the number with respect to an arbitrary base, @@ -2574,12 +2603,13 @@ macro_rules! int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(const_cmp)] pub const fn signum(self) -> Self { - match self { - n if n > 0 => 1, - 0 => 0, - _ => -1, - } + // Picking the right way to phrase this is complicated + // () + // so delegate it to `Ord` which is already producing -1/0/+1 + // exactly like we need and can be the place to deal with the complexity. 
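// Illustrative note, not part of the upstream patch: the delegation below relies on
// `core::cmp::Ordering` being `#[repr(i8)]` with discriminants -1, 0 and 1, so the
// cast reproduces signum exactly:
//
//     use core::cmp::Ordering;
//     assert_eq!(Ordering::Less as i8, -1);
//     assert_eq!(Ordering::Equal as i8, 0);
//     assert_eq!(Ordering::Greater as i8, 1);
//     assert_eq!((-7i32).cmp(&0) as i32, (-7i32).signum());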
+ self.cmp(&0) as _ } /// Returns `true` if `self` is positive and `false` if the number is zero or diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index ac7f579eb..a50c91579 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -9,9 +9,6 @@ use crate::mem; use crate::ops::{Add, Mul, Sub}; use crate::str::FromStr; -#[cfg(not(no_fp_fmt_parse))] -use crate::error::Error; - // Used because the `?` operator is not allowed in a const context. macro_rules! try_opt { ($e:expr) => { @@ -61,15 +58,6 @@ pub use wrapping::Wrapping; #[cfg(not(no_fp_fmt_parse))] pub use dec2flt::ParseFloatError; -#[cfg(not(no_fp_fmt_parse))] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for ParseFloatError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - #[stable(feature = "rust1", since = "1.0.0")] pub use error::ParseIntError; @@ -238,72 +226,217 @@ macro_rules! widening_impl { } impl i8 { - int_impl! { i8, i8, u8, 8, 7, -128, 127, 2, "-0x7e", "0xa", "0x12", "0x12", "0x48", - "[0x12]", "[0x12]", "", "", "" } + int_impl! { + Self = i8, + ActualT = i8, + UnsignedT = u8, + BITS = 8, + BITS_MINUS_ONE = 7, + Min = -128, + Max = 127, + rot = 2, + rot_op = "-0x7e", + rot_result = "0xa", + swap_op = "0x12", + swapped = "0x12", + reversed = "0x48", + le_bytes = "[0x12]", + be_bytes = "[0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i16 { - int_impl! { i16, i16, u16, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234", "0x3412", - "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" } + int_impl! { + Self = i16, + ActualT = i16, + UnsignedT = u16, + BITS = 16, + BITS_MINUS_ONE = 15, + Min = -32768, + Max = 32767, + rot = 4, + rot_op = "-0x5ffd", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i32 { - int_impl! { i32, i32, u32, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301", - "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78]", "", "", "" } + int_impl! { + Self = i32, + ActualT = i32, + UnsignedT = u32, + BITS = 32, + BITS_MINUS_ONE = 31, + Min = -2147483648, + Max = 2147483647, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i64 { - int_impl! { i64, i64, u64, 64, 63, -9223372036854775808, 9223372036854775807, 12, - "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", - "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", "", "", "" } + int_impl! { + Self = i64, + ActualT = i64, + UnsignedT = u64, + BITS = 64, + BITS_MINUS_ONE = 63, + Min = -9223372036854775808, + Max = 9223372036854775807, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i128 { - int_impl! 
{ i128, i128, u128, 128, 127, -170141183460469231731687303715884105728, - 170141183460469231731687303715884105727, 16, - "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", - "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48", - "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ - 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ - 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", "", "", "" } + int_impl! { + Self = i128, + ActualT = i128, + UnsignedT = u128, + BITS = 128, + BITS_MINUS_ONE = 127, + Min = -170141183460469231731687303715884105728, + Max = 170141183460469231731687303715884105727, + rot = 16, + rot_op = "0x13f40000000000000000000000004f76", + rot_result = "0x4f7613f4", + swap_op = "0x12345678901234567890123456789012", + swapped = "0x12907856341290785634129078563412", + reversed = "0x48091e6a2c48091e6a2c48091e6a2c48", + le_bytes = "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ + 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ + 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } #[cfg(target_pointer_width = "16")] impl isize { - int_impl! { isize, i16, usize, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234", - "0x3412", "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 16-bit targets" } + int_impl! { + Self = isize, + ActualT = i16, + UnsignedT = usize, + BITS = 16, + BITS_MINUS_ONE = 15, + Min = -32768, + Max = 32767, + rot = 4, + rot_op = "-0x5ffd", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 16-bit targets", + } } #[cfg(target_pointer_width = "32")] impl isize { - int_impl! { isize, i32, usize, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301", - "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 32-bit targets" } + int_impl! { + Self = isize, + ActualT = i32, + UnsignedT = usize, + BITS = 32, + BITS_MINUS_ONE = 31, + Min = -2147483648, + Max = 2147483647, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 32-bit targets", + } } #[cfg(target_pointer_width = "64")] impl isize { - int_impl! { isize, i64, usize, 64, 63, -9223372036854775808, 9223372036854775807, - 12, "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", - "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 64-bit targets" } + int_impl! 
{ + Self = isize, + ActualT = i64, + UnsignedT = usize, + BITS = 64, + BITS_MINUS_ONE = 63, + Min = -9223372036854775808, + Max = 9223372036854775807, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 64-bit targets", + } } /// If 6th bit set ascii is upper case. const ASCII_CASE_MASK: u8 = 0b0010_0000; impl u8 { - uint_impl! { u8, u8, i8, NonZeroU8, 8, 255, 2, "0x82", "0xa", "0x12", "0x12", "0x48", "[0x12]", - "[0x12]", "", "", "" } + uint_impl! { + Self = u8, + ActualT = u8, + SignedT = i8, + NonZeroT = NonZeroU8, + BITS = 8, + MAX = 255, + rot = 2, + rot_op = "0x82", + rot_result = "0xa", + swap_op = "0x12", + swapped = "0x12", + reversed = "0x48", + le_bytes = "[0x12]", + be_bytes = "[0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u8, u16, 8, unsigned } /// Checks if the value is within the ASCII range. @@ -887,8 +1020,25 @@ impl u8 { } impl u16 { - uint_impl! { u16, u16, i16, NonZeroU16, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48", - "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" } + uint_impl! { + Self = u16, + ActualT = u16, + SignedT = i16, + NonZeroT = NonZeroU16, + BITS = 16, + MAX = 65535, + rot = 4, + rot_op = "0xa003", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u16, u32, 16, unsigned } /// Checks if the value is a Unicode surrogate code point, which are disallowed values for [`char`]. @@ -918,56 +1068,144 @@ impl u16 { } impl u32 { - uint_impl! { u32, u32, i32, NonZeroU32, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678", - "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", "", "", "" } + uint_impl! { + Self = u32, + ActualT = u32, + SignedT = i32, + NonZeroT = NonZeroU32, + BITS = 32, + MAX = 4294967295, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u32, u64, 32, unsigned } } impl u64 { - uint_impl! { u64, u64, i64, NonZeroU64, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa", - "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48", - "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - "", "", ""} + uint_impl! { + Self = u64, + ActualT = u64, + SignedT = i64, + NonZeroT = NonZeroU64, + BITS = 64, + MAX = 18446744073709551615, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! 
{ u64, u128, 64, unsigned } } impl u128 { - uint_impl! { u128, u128, i128, NonZeroU128, 128, 340282366920938463463374607431768211455, 16, - "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", - "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48", - "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ - 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ - 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", - "", "", ""} + uint_impl! { + Self = u128, + ActualT = u128, + SignedT = i128, + NonZeroT = NonZeroU128, + BITS = 128, + MAX = 340282366920938463463374607431768211455, + rot = 16, + rot_op = "0x13f40000000000000000000000004f76", + rot_result = "0x4f7613f4", + swap_op = "0x12345678901234567890123456789012", + swapped = "0x12907856341290785634129078563412", + reversed = "0x48091e6a2c48091e6a2c48091e6a2c48", + le_bytes = "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ + 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ + 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } #[cfg(target_pointer_width = "16")] impl usize { - uint_impl! { usize, u16, isize, NonZeroUsize, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48", - "[0x34, 0x12]", "[0x12, 0x34]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 16-bit targets" } + uint_impl! { + Self = usize, + ActualT = u16, + SignedT = isize, + NonZeroT = NonZeroUsize, + BITS = 16, + MAX = 65535, + rot = 4, + rot_op = "0xa003", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 16-bit targets", + } widening_impl! { usize, u32, 16, unsigned } } + #[cfg(target_pointer_width = "32")] impl usize { - uint_impl! { usize, u32, isize, NonZeroUsize, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678", - "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 32-bit targets" } + uint_impl! { + Self = usize, + ActualT = u32, + SignedT = isize, + NonZeroT = NonZeroUsize, + BITS = 32, + MAX = 4294967295, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 32-bit targets", + } widening_impl! { usize, u64, 32, unsigned } } #[cfg(target_pointer_width = "64")] impl usize { - uint_impl! { usize, u64, isize, NonZeroUsize, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa", - "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48", - "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 64-bit targets" } + uint_impl! 
{ + Self = usize, + ActualT = u64, + SignedT = isize, + NonZeroT = NonZeroUsize, + BITS = 64, + MAX = 18446744073709551615, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 64-bit targets", + } widening_impl! { usize, u128, 64, unsigned } } diff --git a/library/core/src/num/shells/i128.rs b/library/core/src/num/shells/i128.rs index 7b048dc52..b3b3d3b48 100644 --- a/library/core/src/num/shells/i128.rs +++ b/library/core/src/num/shells/i128.rs @@ -1,6 +1,4 @@ -//! Constants for the 128-bit signed integer type. -//! -//! *[See also the `i128` primitive type][i128].* +//! Redundant constants module for the [`i128` primitive type][i128]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i16.rs b/library/core/src/num/shells/i16.rs index 5c5812d5c..70a452e19 100644 --- a/library/core/src/num/shells/i16.rs +++ b/library/core/src/num/shells/i16.rs @@ -1,6 +1,4 @@ -//! Constants for the 16-bit signed integer type. -//! -//! *[See also the `i16` primitive type][i16].* +//! Redundant constants module for the [`i16` primitive type][i16]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i32.rs b/library/core/src/num/shells/i32.rs index b283ac644..c30849e25 100644 --- a/library/core/src/num/shells/i32.rs +++ b/library/core/src/num/shells/i32.rs @@ -1,6 +1,4 @@ -//! Constants for the 32-bit signed integer type. -//! -//! *[See also the `i32` primitive type][i32].* +//! Redundant constants module for the [`i32` primitive type][i32]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i64.rs b/library/core/src/num/shells/i64.rs index a416fa7e9..77d95d712 100644 --- a/library/core/src/num/shells/i64.rs +++ b/library/core/src/num/shells/i64.rs @@ -1,6 +1,4 @@ -//! Constants for the 64-bit signed integer type. -//! -//! *[See also the `i64` primitive type][i64].* +//! Redundant constants module for the [`i64` primitive type][i64]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i8.rs b/library/core/src/num/shells/i8.rs index 02465013a..516ba8cde 100644 --- a/library/core/src/num/shells/i8.rs +++ b/library/core/src/num/shells/i8.rs @@ -1,6 +1,4 @@ -//! Constants for the 8-bit signed integer type. -//! -//! *[See also the `i8` primitive type][i8].* +//! Redundant constants module for the [`i8` primitive type][i8]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/isize.rs b/library/core/src/num/shells/isize.rs index 1579fbab6..828f7345b 100644 --- a/library/core/src/num/shells/isize.rs +++ b/library/core/src/num/shells/isize.rs @@ -1,6 +1,4 @@ -//! Constants for the pointer-sized signed integer type. -//! -//! *[See also the `isize` primitive type][isize].* +//! Redundant constants module for the [`isize` primitive type][isize]. //! //! New code should use the associated constants directly on the primitive type. 
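// Reviewer's sketch, not part of the upstream patch: the `Self = ...`, `BITS = ...`
// call style used by `int_impl!`/`uint_impl!` above is ordinary `macro_rules!`
// matching on named `$name:literal` arguments, which keeps the long invocations
// readable and lets the literals be spliced into doc comments. A minimal standalone
// analogue (names here are invented for illustration):
macro_rules! demo_impl {
    (
        Self = $SelfT:ty,
        BITS = $BITS:literal,
        MAX = $Max:literal,
    ) => {
        impl DemoBits for $SelfT {
            #[doc = concat!("Bit width of `", stringify!($SelfT), "` (", $BITS, ").")]
            const BITS: u32 = <$SelfT>::BITS;
            const MAX_TEXT: &'static str = stringify!($Max);
        }
    };
}

trait DemoBits {
    const BITS: u32;
    const MAX_TEXT: &'static str;
}

demo_impl! {
    Self = u8,
    BITS = 8,
    MAX = 255,
}

fn main() {
    assert_eq!(<u8 as DemoBits>::BITS, 8);
    assert_eq!(<u8 as DemoBits>::MAX_TEXT, "255");
}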
diff --git a/library/core/src/num/shells/u128.rs b/library/core/src/num/shells/u128.rs index fe08cee58..b1e30e384 100644 --- a/library/core/src/num/shells/u128.rs +++ b/library/core/src/num/shells/u128.rs @@ -1,6 +1,4 @@ -//! Constants for the 128-bit unsigned integer type. -//! -//! *[See also the `u128` primitive type][u128].* +//! Redundant constants module for the [`u128` primitive type][u128]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u16.rs b/library/core/src/num/shells/u16.rs index 36f8c6978..b203806f4 100644 --- a/library/core/src/num/shells/u16.rs +++ b/library/core/src/num/shells/u16.rs @@ -1,6 +1,4 @@ -//! Constants for the 16-bit unsigned integer type. -//! -//! *[See also the `u16` primitive type][u16].* +//! Redundant constants module for the [`i16` primitive type][i16]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u32.rs b/library/core/src/num/shells/u32.rs index 1c369097d..4c84274e7 100644 --- a/library/core/src/num/shells/u32.rs +++ b/library/core/src/num/shells/u32.rs @@ -1,6 +1,4 @@ -//! Constants for the 32-bit unsigned integer type. -//! -//! *[See also the `u32` primitive type][u32].* +//! Redundant constants module for the [`u32` primitive type][u32]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u64.rs b/library/core/src/num/shells/u64.rs index e8b691d15..47a95c682 100644 --- a/library/core/src/num/shells/u64.rs +++ b/library/core/src/num/shells/u64.rs @@ -1,6 +1,4 @@ -//! Constants for the 64-bit unsigned integer type. -//! -//! *[See also the `u64` primitive type][u64].* +//! Redundant constants module for the [`u64` primitive type][u64]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u8.rs b/library/core/src/num/shells/u8.rs index 817c6a18a..360baef72 100644 --- a/library/core/src/num/shells/u8.rs +++ b/library/core/src/num/shells/u8.rs @@ -1,6 +1,4 @@ -//! Constants for the 8-bit unsigned integer type. -//! -//! *[See also the `u8` primitive type][u8].* +//! Redundant constants module for the [`u8` primitive type][u8]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/usize.rs b/library/core/src/num/shells/usize.rs index 3e1bec5ec..44c24dfc2 100644 --- a/library/core/src/num/shells/usize.rs +++ b/library/core/src/num/shells/usize.rs @@ -1,6 +1,4 @@ -//! Constants for the pointer-sized unsigned integer type. -//! -//! *[See also the `usize` primitive type][usize].* +//! Redundant constants module for the [`usize` primitive type][usize]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 1c97c4686..932038a0b 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -1,10 +1,28 @@ macro_rules! 
uint_impl { - ($SelfT:ty, $ActualT:ident, $SignedT:ident, $NonZeroT:ident, - $BITS:expr, $MaxV:expr, - $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr, - $reversed:expr, $le_bytes:expr, $be_bytes:expr, - $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr, - $bound_condition:expr) => { + ( + Self = $SelfT:ty, + ActualT = $ActualT:ident, + SignedT = $SignedT:ident, + NonZeroT = $NonZeroT:ident, + + // There are all for use *only* in doc comments. + // As such, they're all passed as literals -- passing them as a string + // literal is fine if they need to be multiple code tokens. + // In non-comments, use the associated constants rather than these. + BITS = $BITS:literal, + MAX = $MaxV:literal, + rot = $rot:literal, + rot_op = $rot_op:literal, + rot_result = $rot_result:literal, + swap_op = $swap_op:literal, + swapped = $swapped:literal, + reversed = $reversed:literal, + le_bytes = $le_bytes:literal, + be_bytes = $be_bytes:literal, + to_xe_bytes_doc = $to_xe_bytes_doc:expr, + from_xe_bytes_doc = $from_xe_bytes_doc:expr, + bound_condition = $bound_condition:literal, + ) => { /// The smallest value that can be represented by this integer type. /// /// # Examples @@ -18,7 +36,7 @@ macro_rules! uint_impl { pub const MIN: Self = 0; /// The largest value that can be represented by this integer type - #[doc = concat!("(2", $BITS, " − 1", $bound_condition, ")")] + #[doc = concat!("(2", $BITS, " − 1", $bound_condition, ").")] /// /// # Examples /// @@ -38,7 +56,7 @@ macro_rules! uint_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");")] /// ``` #[stable(feature = "int_bits_const", since = "1.53.0")] - pub const BITS: u32 = $BITS; + pub const BITS: u32 = Self::MAX.count_ones(); /// Converts a string slice in a given base to an integer. /// @@ -705,14 +723,17 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog(self, base: Self) -> u32 { assert!(base >= 2, "base of integer logarithm must be at least 2"); - self.checked_ilog(base).expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog(base) { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 2 logarithm of the number, rounded down. @@ -728,13 +749,16 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog2(self) -> u32 { - self.checked_ilog2().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog2() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 10 logarithm of the number, rounded down. @@ -750,13 +774,16 @@ macro_rules! 
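// Illustrative note, not part of the upstream patch: routing every `ilog*` failure
// path through the single `#[cold]` `panic_for_nonpositive_argument` helper (as above)
// keeps the panic machinery out of each monomorphized method while preserving the
// observable behaviour of the old `.expect(...)` form:
//
//     assert_eq!(100u32.ilog2(), 6);               // 2^6 = 64 <= 100 < 128
//     assert_eq!(1000u32.ilog10(), 3);
//     assert_eq!(0u32.checked_ilog2(), None);      // non-panicking variant
//     // `0u32.ilog2()` panics: "argument of integer logarithm must be positive"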
uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog10(self) -> u32 { - self.checked_ilog10().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog10() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the logarithm of the number with respect to an arbitrary base, @@ -1381,7 +1408,7 @@ macro_rules! uint_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shl(rhs & ($BITS - 1)) + self.unchecked_shl(rhs & (Self::BITS - 1)) } } @@ -1414,7 +1441,7 @@ macro_rules! uint_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shr(rhs & ($BITS - 1)) + self.unchecked_shr(rhs & (Self::BITS - 1)) } } @@ -1838,7 +1865,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shl(rhs), rhs >= Self::BITS) } /// Shifts self right by `rhs` bits. @@ -1863,7 +1890,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shr(rhs), rhs >= Self::BITS) } /// Raises self to the power of `exp`, using exponentiation by squaring. diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs index 75c52d3ec..0c7ee9630 100644 --- a/library/core/src/ops/arith.rs +++ b/library/core/src/ops/arith.rs @@ -86,7 +86,8 @@ pub trait Add { /// ``` /// assert_eq!(12 + 1, 13); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "add"] #[stable(feature = "rust1", since = "1.0.0")] fn add(self, rhs: Rhs) -> Self::Output; } @@ -195,7 +196,8 @@ pub trait Sub { /// ``` /// assert_eq!(12 - 1, 11); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "sub"] #[stable(feature = "rust1", since = "1.0.0")] fn sub(self, rhs: Rhs) -> Self::Output; } @@ -325,7 +327,8 @@ pub trait Mul { /// ``` /// assert_eq!(12 * 2, 24); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "mul"] #[stable(feature = "rust1", since = "1.0.0")] fn mul(self, rhs: Rhs) -> Self::Output; } @@ -459,7 +462,8 @@ pub trait Div { /// ``` /// assert_eq!(12 / 2, 6); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "div"] #[stable(feature = "rust1", since = "1.0.0")] fn div(self, rhs: Rhs) -> Self::Output; } @@ -545,7 +549,7 @@ div_impl_float! 
{ f32 f64 } #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( - message = "cannot mod `{Self}` by `{Rhs}`", + message = "cannot calculate the remainder of `{Self}` divided by `{Rhs}`", label = "no implementation for `{Self} % {Rhs}`" )] #[doc(alias = "%")] @@ -562,7 +566,8 @@ pub trait Rem { /// ``` /// assert_eq!(12 % 10, 2); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "rem"] #[stable(feature = "rust1", since = "1.0.0")] fn rem(self, rhs: Rhs) -> Self::Output; } @@ -678,7 +683,8 @@ pub trait Neg { /// let x: i32 = 12; /// assert_eq!(-x, -12); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "neg"] #[stable(feature = "rust1", since = "1.0.0")] fn neg(self) -> Self::Output; } @@ -981,7 +987,7 @@ div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } #[lang = "rem_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] #[rustc_on_unimplemented( - message = "cannot mod-assign `{Self}` by `{Rhs}``", + message = "cannot calculate and assign the remainder of `{Self}` divided by `{Rhs}`", label = "no implementation for `{Self} %= {Rhs}`" )] #[doc(alias = "%")] diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs index cd183540c..117706fb4 100644 --- a/library/core/src/ops/control_flow.rs +++ b/library/core/src/ops/control_flow.rs @@ -259,46 +259,3 @@ impl ControlFlow { } } } - -impl ControlFlow { - /// It's frequently the case that there's no value needed with `Continue`, - /// so this provides a way to avoid typing `(())`, if you prefer it. - /// - /// # Examples - /// - /// ``` - /// #![feature(control_flow_enum)] - /// use std::ops::ControlFlow; - /// - /// let mut partial_sum = 0; - /// let last_used = (1..10).chain(20..25).try_for_each(|x| { - /// partial_sum += x; - /// if partial_sum > 100 { ControlFlow::Break(x) } - /// else { ControlFlow::CONTINUE } - /// }); - /// assert_eq!(last_used.break_value(), Some(22)); - /// ``` - #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")] - pub const CONTINUE: Self = ControlFlow::Continue(()); -} - -impl ControlFlow<(), C> { - /// APIs like `try_for_each` don't need values with `Break`, - /// so this provides a way to avoid typing `(())`, if you prefer it. - /// - /// # Examples - /// - /// ``` - /// #![feature(control_flow_enum)] - /// use std::ops::ControlFlow; - /// - /// let mut partial_sum = 0; - /// (1..10).chain(20..25).try_for_each(|x| { - /// if partial_sum > 100 { ControlFlow::BREAK } - /// else { partial_sum += x; ControlFlow::CONTINUE } - /// }); - /// assert_eq!(partial_sum, 108); - /// ``` - #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")] - pub const BREAK: Self = ControlFlow::Break(()); -} diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs index d29ae3561..b8ab26564 100644 --- a/library/core/src/ops/range.rs +++ b/library/core/src/ops/range.rs @@ -96,7 +96,7 @@ impl fmt::Debug for Range { } } -impl> Range { +impl> Range { /// Returns `true` if `item` is contained in the range. 
/// /// # Examples @@ -116,10 +116,11 @@ impl> Range { /// assert!(!(f32::NAN..1.0).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains(&self, item: &U) -> bool where - Idx: PartialOrd, - U: ?Sized + PartialOrd, + Idx: ~const PartialOrd, + U: ?Sized + ~const PartialOrd, { >::contains(self, item) } @@ -142,7 +143,8 @@ impl> Range { /// assert!( (f32::NAN..5.0).is_empty()); /// ``` #[stable(feature = "range_is_empty", since = "1.47.0")] - pub fn is_empty(&self) -> bool { + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn is_empty(&self) -> bool { !(self.start < self.end) } } @@ -199,7 +201,7 @@ impl fmt::Debug for RangeFrom { } } -impl> RangeFrom { +impl> RangeFrom { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -214,10 +216,11 @@ impl> RangeFrom { /// assert!(!(f32::NAN..).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains(&self, item: &U) -> bool where - Idx: PartialOrd, - U: ?Sized + PartialOrd, + Idx: ~const PartialOrd, + U: ?Sized + ~const PartialOrd, { >::contains(self, item) } @@ -280,7 +283,7 @@ impl fmt::Debug for RangeTo { } } -impl> RangeTo { +impl> RangeTo { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -295,10 +298,11 @@ impl> RangeTo { /// assert!(!(..f32::NAN).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains(&self, item: &U) -> bool where - Idx: PartialOrd, - U: ?Sized + PartialOrd, + Idx: ~const PartialOrd, + U: ?Sized + ~const PartialOrd, { >::contains(self, item) } @@ -437,7 +441,8 @@ impl RangeInclusive { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - pub fn into_inner(self) -> (Idx, Idx) { + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn into_inner(self) -> (Idx, Idx) { (self.start, self.end) } } @@ -469,7 +474,7 @@ impl fmt::Debug for RangeInclusive { } } -impl> RangeInclusive { +impl> RangeInclusive { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -500,10 +505,11 @@ impl> RangeInclusive { /// assert!(!r.contains(&3) && !r.contains(&5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains(&self, item: &U) -> bool where - Idx: PartialOrd, - U: ?Sized + PartialOrd, + Idx: ~const PartialOrd, + U: ?Sized + ~const PartialOrd, { >::contains(self, item) } @@ -535,8 +541,9 @@ impl> RangeInclusive { /// assert!(r.is_empty()); /// ``` #[stable(feature = "range_is_empty", since = "1.47.0")] + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] #[inline] - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.exhausted || !(self.start <= self.end) } } @@ -598,7 +605,7 @@ impl fmt::Debug for RangeToInclusive { } } -impl> RangeToInclusive { +impl> RangeToInclusive { /// Returns `true` if `item` is contained in the range. 
/// /// # Examples @@ -613,10 +620,11 @@ impl> RangeToInclusive { /// assert!(!(..=f32::NAN).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains(&self, item: &U) -> bool where - Idx: PartialOrd, - U: ?Sized + PartialOrd, + Idx: ~const PartialOrd, + U: ?Sized + ~const PartialOrd, { >::contains(self, item) } @@ -757,6 +765,7 @@ impl Bound<&T> { /// `RangeBounds` is implemented by Rust's built-in range types, produced /// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`. #[stable(feature = "collections_range", since = "1.28.0")] +#[const_trait] pub trait RangeBounds { /// Start index bound. /// @@ -809,8 +818,8 @@ pub trait RangeBounds { #[stable(feature = "range_contains", since = "1.35.0")] fn contains(&self, item: &U) -> bool where - T: PartialOrd, - U: ?Sized + PartialOrd, + T: ~const PartialOrd, + U: ?Sized + ~const PartialOrd, { (match self.start_bound() { Included(start) => start <= item, @@ -827,7 +836,8 @@ pub trait RangeBounds { use self::Bound::{Excluded, Included, Unbounded}; #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeFull { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeFull { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -837,7 +847,8 @@ impl RangeBounds for RangeFull { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeFrom { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeFrom { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -847,7 +858,8 @@ impl RangeBounds for RangeFrom { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeTo { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeTo { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -857,7 +869,8 @@ impl RangeBounds for RangeTo { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for Range { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for Range { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -867,7 +880,8 @@ impl RangeBounds for Range { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeInclusive { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeInclusive { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -883,7 +897,8 @@ impl RangeBounds for RangeInclusive { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeToInclusive { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeToInclusive { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -893,7 +908,8 @@ impl RangeBounds for RangeToInclusive { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for (Bound, Bound) { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for (Bound, Bound) { fn start_bound(&self) -> Bound<&T> { match *self { (Included(ref start), _) => Included(start), @@ -912,7 +928,8 @@ impl RangeBounds for (Bound, Bound) { } #[stable(feature = "collections_range", since = "1.28.0")] 
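// Illustrative note, not part of the upstream patch: the default `contains` above,
// built from `start_bound`/`end_bound`, is what gives every `RangeBounds` implementor,
// including the `(Bound, Bound)` tuples made `const` here, a consistent membership
// test:
//
//     use std::ops::Bound::{Excluded, Included, Unbounded};
//     use std::ops::RangeBounds;
//     assert!((Included(3), Excluded(7)).contains(&3));
//     assert!(!(Included(3), Excluded(7)).contains(&7));
//     assert!((Unbounded, Included(5)).contains(&-100));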
-impl<'a, T: ?Sized + 'a> RangeBounds for (Bound<&'a T>, Bound<&'a T>) { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<'a, T: ?Sized + 'a> const RangeBounds for (Bound<&'a T>, Bound<&'a T>) { fn start_bound(&self) -> Bound<&T> { self.0 } @@ -923,7 +940,8 @@ impl<'a, T: ?Sized + 'a> RangeBounds for (Bound<&'a T>, Bound<&'a T>) { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeFrom<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeFrom<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -933,7 +951,8 @@ impl RangeBounds for RangeFrom<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeTo<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeTo<&T> { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -943,7 +962,8 @@ impl RangeBounds for RangeTo<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for Range<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for Range<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -953,7 +973,8 @@ impl RangeBounds for Range<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeInclusive<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeInclusive<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -963,7 +984,8 @@ impl RangeBounds for RangeInclusive<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl RangeBounds for RangeToInclusive<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl const RangeBounds for RangeToInclusive<&T> { fn start_bound(&self) -> Bound<&T> { Unbounded } diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs index 84a690468..86aa1e4fd 100644 --- a/library/core/src/ops/try_trait.rs +++ b/library/core/src/ops/try_trait.rs @@ -379,13 +379,27 @@ pub(crate) type ChangeOutputType = <::Residual as Residual>:: pub(crate) struct NeverShortCircuit(pub T); impl NeverShortCircuit { - /// Implementation for building `ConstFnMutClosure` for wrapping the output of a ~const FnMut in a `NeverShortCircuit`. + /// Wraps a unary function to produce one that wraps the output into a `NeverShortCircuit`. + /// + /// This is useful for implementing infallible functions in terms of the `try_` ones, + /// without accidentally capturing extra generic parameters in a closure. + #[inline] + pub fn wrap_mut_1(mut f: impl FnMut(A) -> T) -> impl FnMut(A) -> NeverShortCircuit { + move |a| NeverShortCircuit(f(a)) + } + #[inline] - pub const fn wrap_mut_2_imp T>( - f: &mut F, - (a, b): (A, B), - ) -> NeverShortCircuit { - NeverShortCircuit(f(a, b)) + pub fn wrap_mut_2( + mut f: impl ~const FnMut(A, B) -> T, + ) -> impl ~const FnMut(A, B) -> Self { + cfg_if! 
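// Illustrative note, not part of the upstream patch: `NeverShortCircuit` is the usual
// "infallible residual" trick that lets plain closures reuse the `try_*` plumbing; a
// standalone analogue of `wrap_mut_1` (names invented for illustration):
//
//     struct Never<T>(T);
//     fn wrap_mut_1<A, T>(mut f: impl FnMut(A) -> T) -> impl FnMut(A) -> Never<T> {
//         move |a| Never(f(a))
//     }
//     fn main() {
//         let mut doubled = wrap_mut_1(|x: i32| x * 2);
//         assert_eq!(doubled(21).0, 42);
//     }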
{ + if #[cfg(bootstrap)] { + #[allow(unused_parens)] + (const move |a, b| NeverShortCircuit(f(a, b))) + } else { + const move |a, b| NeverShortCircuit(f(a, b)) + } + } } } diff --git a/library/core/src/option.rs b/library/core/src/option.rs index 7cc00e3f8..994c08d1f 100644 --- a/library/core/src/option.rs +++ b/library/core/src/option.rs @@ -551,8 +551,9 @@ use crate::marker::Destruct; use crate::panicking::{panic, panic_str}; use crate::pin::Pin; use crate::{ - convert, hint, mem, + cmp, convert, hint, mem, ops::{self, ControlFlow, Deref, DerefMut}, + slice, }; /// The `Option` type. See [the module level documentation](self) for more. @@ -734,6 +735,124 @@ impl Option { } } + const fn get_some_offset() -> isize { + if mem::size_of::>() == mem::size_of::() { + // niche optimization means the `T` is always stored at the same position as the Option. + 0 + } else { + assert!(mem::size_of::>() == mem::size_of::>>()); + let some_uninit = Some(mem::MaybeUninit::::uninit()); + // SAFETY: This gets the byte offset of the `Some(_)` value following the fact that + // niche optimization is not active, and thus Option and Option> share + // the same layout. + unsafe { + (some_uninit.as_ref().unwrap() as *const mem::MaybeUninit) + .byte_offset_from(&some_uninit as *const Option>) + } + } + } + + /// Returns a slice of the contained value, if any. If this is `None`, an + /// empty slice is returned. This can be useful to have a single type of + /// iterator over an `Option` or slice. + /// + /// Note: Should you have an `Option<&T>` and wish to get a slice of `T`, + /// you can unpack it via `opt.map_or(&[], std::slice::from_ref)`. + /// + /// # Examples + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// assert_eq!( + /// [Some(1234).as_slice(), None.as_slice()], + /// [&[1234][..], &[][..]], + /// ); + /// ``` + /// + /// The inverse of this function is (discounting + /// borrowing) [`[_]::first`](slice::first): + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// for i in [Some(1234_u16), None] { + /// assert_eq!(i.as_ref(), i.as_slice().first()); + /// } + /// ``` + #[inline] + #[must_use] + #[unstable(feature = "option_as_slice", issue = "108545")] + pub fn as_slice(&self) -> &[T] { + // SAFETY: This is sound as long as `get_some_offset` returns the + // correct offset. Though in the `None` case, the slice may be located + // at a pointer pointing into padding, the fact that the slice is + // empty, and the padding is at a properly aligned position for a + // value of that type makes it sound. + unsafe { + slice::from_raw_parts( + (self as *const Option).wrapping_byte_offset(Self::get_some_offset()) + as *const T, + self.is_some() as usize, + ) + } + } + + /// Returns a mutable slice of the contained value, if any. If this is + /// `None`, an empty slice is returned. This can be useful to have a + /// single type of iterator over an `Option` or slice. + /// + /// Note: Should you have an `Option<&mut T>` instead of a + /// `&mut Option`, which this method takes, you can obtain a mutable + /// slice via `opt.map_or(&mut [], std::slice::from_mut)`. 
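// Illustrative note, not part of the upstream patch: `get_some_offset` above works
// because with niche optimization `Option<T>` has the same size as `T` (payload at
// offset 0), while otherwise the payload sits after the tag; the new slice views then
// cover zero or one element:
//
//     #![feature(option_as_slice)]        // `as_slice` is unstable, per the diff
//     use core::mem::size_of;
//     fn main() {
//         assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());     // niche case
//         assert_eq!(size_of::<Option<u64>>(), 2 * size_of::<u64>()); // tag + payload
//         assert_eq!(Some(5u64).as_slice(), &[5u64][..]);
//         assert!(None::<u64>.as_slice().is_empty());
//     }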
+ /// + /// # Examples + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// assert_eq!( + /// [Some(1234).as_mut_slice(), None.as_mut_slice()], + /// [&mut [1234][..], &mut [][..]], + /// ); + /// ``` + /// + /// The result is a mutable slice of zero or one items that points into + /// our original `Option`: + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// let mut x = Some(1234); + /// x.as_mut_slice()[0] += 1; + /// assert_eq!(x, Some(1235)); + /// ``` + /// + /// The inverse of this method (discounting borrowing) + /// is [`[_]::first_mut`](slice::first_mut): + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// assert_eq!(Some(123).as_mut_slice().first_mut(), Some(&mut 123)) + /// ``` + #[inline] + #[must_use] + #[unstable(feature = "option_as_slice", issue = "108545")] + pub fn as_mut_slice(&mut self) -> &mut [T] { + // SAFETY: This is sound as long as `get_some_offset` returns the + // correct offset. Though in the `None` case, the slice may be located + // at a pointer pointing into padding, the fact that the slice is + // empty, and the padding is at a properly aligned position for a + // value of that type makes it sound. + unsafe { + slice::from_raw_parts_mut( + (self as *mut Option).wrapping_byte_offset(Self::get_some_offset()) as *mut T, + self.is_some() as usize, + ) + } + } + ///////////////////////////////////////////////////////////////////////// // Getting to contained values ///////////////////////////////////////////////////////////////////////// @@ -943,7 +1062,7 @@ impl Option { // Transforming contained values ///////////////////////////////////////////////////////////////////////// - /// Maps an `Option` to `Option` by applying a function to a contained value. + /// Maps an `Option` to `Option` by applying a function to a contained value (if `Some`) or returns `None` (if `None`). /// /// # Examples /// @@ -955,8 +1074,10 @@ impl Option { /// let maybe_some_string = Some(String::from("Hello, World!")); /// // `Option::map` takes self *by value*, consuming `maybe_some_string` /// let maybe_some_len = maybe_some_string.map(|s| s.len()); - /// /// assert_eq!(maybe_some_len, Some(13)); + /// + /// let x: Option<&str> = None; + /// assert_eq!(x.map(|s| s.len()), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -2090,6 +2211,12 @@ impl PartialEq for Option { } } +/// This specialization trait is a workaround for LLVM not currently (2023-01) +/// being able to optimize this itself, even though Alive confirms that it would +/// be legal to do so: +/// +/// Once that's fixed, `Option` should go back to deriving `PartialEq`, as +/// it used to do before . #[unstable(feature = "spec_option_partial_eq", issue = "none", reason = "exposed only for rustc")] #[doc(hidden)] pub trait SpecOptionPartialEq: Sized { @@ -2146,6 +2273,14 @@ impl SpecOptionPartialEq for crate::ptr::NonNull { } } +#[stable(feature = "rust1", since = "1.0.0")] +impl SpecOptionPartialEq for cmp::Ordering { + #[inline] + fn eq(l: &Option, r: &Option) -> bool { + l.map_or(2, |x| x as i8) == r.map_or(2, |x| x as i8) + } +} + ///////////////////////////////////////////////////////////////////////////// // The Option Iterators ///////////////////////////////////////////////////////////////////////////// diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs index 48e90e6d7..805a1e51a 100644 --- a/library/core/src/panicking.rs +++ b/library/core/src/panicking.rs @@ -117,7 +117,7 @@ pub const fn panic(expr: &'static str) -> ! 
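// Illustrative note, not part of the upstream patch: the new `Option<Ordering>`
// specialization above works because `Ordering as i8` is -1/0/1, leaving 2 free as a
// sentinel for `None`, so a single integer compare decides equality:
//
//     use std::cmp::Ordering;
//     let enc = |o: Option<Ordering>| o.map_or(2i8, |x| x as i8);
//     assert_eq!(enc(Some(Ordering::Less)), -1);
//     assert_eq!(enc(None), 2);
//     assert!(enc(Some(Ordering::Equal)) != enc(None));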
{ /// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize. #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] #[cfg_attr(feature = "panic_immediate_abort", inline)] -#[cfg_attr(not(bootstrap), lang = "panic_nounwind")] // needed by codegen for non-unwinding panics +#[lang = "panic_nounwind"] // needed by codegen for non-unwinding panics #[rustc_nounwind] pub fn panic_nounwind(expr: &'static str) -> ! { panic_nounwind_fmt(fmt::Arguments::new_v1(&[expr], &[])); @@ -165,8 +165,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { /// any extra arguments (including those synthesized by track_caller). #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] #[cfg_attr(feature = "panic_immediate_abort", inline)] -#[cfg_attr(bootstrap, lang = "panic_no_unwind")] // needed by codegen for panic in nounwind function -#[cfg_attr(not(bootstrap), lang = "panic_cannot_unwind")] // needed by codegen for panic in nounwind function +#[lang = "panic_cannot_unwind"] // needed by codegen for panic in nounwind function #[rustc_nounwind] fn panic_cannot_unwind() -> ! { panic_nounwind("panic in a function that cannot unwind") diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs index d6e9da187..6f78811a1 100644 --- a/library/core/src/primitive_docs.rs +++ b/library/core/src/primitive_docs.rs @@ -587,8 +587,10 @@ mod prim_pointer {} /// There are two syntactic forms for creating an array: /// /// * A list with each element, i.e., `[x, y, z]`. -/// * A repeat expression `[x; N]`, which produces an array with `N` copies of `x`. -/// The type of `x` must be [`Copy`]. +/// * A repeat expression `[expr; N]` where `N` is how many times to repeat `expr` in the array. `expr` must either be: +/// +/// * A value of a type implementing the [`Copy`] trait +/// * A `const` value /// /// Note that `[expr; 0]` is allowed, and produces an empty array. /// This will still evaluate `expr`, however, and immediately drop the resulting value, so diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs index 2123147c7..efe6d4183 100644 --- a/library/core/src/ptr/alignment.rs +++ b/library/core/src/ptr/alignment.rs @@ -41,7 +41,7 @@ impl Alignment { /// Returns the alignment for a type. /// /// This provides the same numerical value as [`mem::align_of`], - /// but in an `Alignment` instead of a `usize. + /// but in an `Alignment` instead of a `usize`. 
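// Illustrative note, not part of the upstream patch: the reworded array docs above
// cover the case where the repeated operand is a `const` item rather than a `Copy`
// value, which is what makes non-Copy element types usable in `[expr; N]`:
//
//     const EMPTY: Vec<i32> = Vec::new();      // const value; Vec<i32> is not Copy
//     let rows: [Vec<i32>; 3] = [EMPTY; 3];
//     let zeros = [0u8; 4];                    // plain Copy value
//     assert!(rows.iter().all(|r| r.is_empty()) && zeros == [0; 4]);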
#[unstable(feature = "ptr_alignment_type", issue = "102070")] #[inline] pub const fn of() -> Self { diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 7b1cb5488..57e2ffe5d 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -23,8 +23,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "Follow the rabbit"; /// let ptr: *const u8 = s.as_ptr(); @@ -323,8 +321,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let ptr: *const u8 = &10u8 as *const u8; /// @@ -384,8 +380,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// #![feature(ptr_as_uninit)] /// @@ -449,8 +443,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -526,8 +518,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -731,7 +721,7 @@ impl *const T { /// This computes the same value that [`offset_from`](#method.offset_from) /// would compute, but with the added precondition that the offset is /// guaranteed to be non-negative. This method is equivalent to - /// `usize::from(self.offset_from(origin)).unwrap_unchecked()`, + /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`, /// but it provides slightly more information to the optimizer, which can /// sometimes allow it to optimize slightly better with some backends. /// @@ -908,8 +898,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -993,8 +981,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// @@ -1072,8 +1058,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -1152,8 +1136,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; @@ -1359,7 +1341,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] @@ -1482,7 +1463,6 @@ impl *const T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index ed1e3bd48..422d0f2b8 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -22,8 +22,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -332,8 +330,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// @@ -396,8 +392,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// #![feature(ptr_as_uninit)] /// @@ -461,8 +455,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -539,8 +531,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let mut data = [1u8, 2, 3, 
4, 5]; @@ -660,8 +650,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -904,7 +892,7 @@ impl *mut T { /// This computes the same value that [`offset_from`](#method.offset_from) /// would compute, but with the added precondition that the offset is /// guaranteed to be non-negative. This method is equivalent to - /// `usize::from(self.offset_from(origin)).unwrap_unchecked()`, + /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`, /// but it provides slightly more information to the optimizer, which can /// sometimes allow it to optimize slightly better with some backends. /// @@ -1010,8 +998,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -1095,8 +1081,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// @@ -1174,8 +1158,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -1254,8 +1236,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; @@ -1627,7 +1607,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] @@ -1752,7 +1731,6 @@ impl *mut T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] diff --git a/library/core/src/result.rs b/library/core/src/result.rs index f00c40f35..208b220c2 100644 --- a/library/core/src/result.rs +++ b/library/core/src/result.rs @@ -458,7 +458,7 @@ //! [`Result`] of a collection of each contained value of the original //! [`Result`] values, or [`Err`] if any of the elements was [`Err`]. //! -//! [impl-FromIterator]: Result#impl-FromIterator%3CResult%3CA%2C%20E%3E%3E-for-Result%3CV%2C%20E%3E +//! [impl-FromIterator]: Result#impl-FromIterator%3CResult%3CA,+E%3E%3E-for-Result%3CV,+E%3E //! //! ``` //! let v = [Ok(2), Ok(4), Err("err!"), Ok(8)]; @@ -474,8 +474,8 @@ //! to provide the [`product`][Iterator::product] and //! [`sum`][Iterator::sum] methods. //! -//! [impl-Product]: Result#impl-Product%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E -//! [impl-Sum]: Result#impl-Sum%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E +//! [impl-Product]: Result#impl-Product%3CResult%3CU,+E%3E%3E-for-Result%3CT,+E%3E +//! [impl-Sum]: Result#impl-Sum%3CResult%3CU,+E%3E%3E-for-Result%3CT,+E%3E //! //! ``` //! 
let v = [Err("error!"), Ok(1), Ok(2), Ok(3), Err("foo")]; @@ -525,8 +525,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(-3); /// assert_eq!(x.is_ok(), true); @@ -572,8 +570,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(-3); /// assert_eq!(x.is_err(), false); @@ -627,8 +623,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(2); /// assert_eq!(x.ok(), Some(2)); @@ -658,8 +652,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(2); /// assert_eq!(x.err(), None); @@ -693,8 +685,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(2); /// assert_eq!(x.as_ref(), Ok(&2)); @@ -716,8 +706,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn mutate(r: &mut Result) { /// match r.as_mut() { @@ -812,8 +800,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let k = 21; /// @@ -841,8 +827,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn stringify(x: u32) -> String { format!("error code: {x}") } /// @@ -968,8 +952,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(7); /// assert_eq!(x.iter().next(), Some(&7)); @@ -989,8 +971,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut x: Result = Ok(7); /// match x.iter_mut().next() { @@ -1031,8 +1011,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ```should_panic /// let x: Result = Err("emergency failure"); /// x.expect("Testing expect"); // panics with `Testing expect: emergency failure` @@ -1160,8 +1138,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ```should_panic /// let x: Result = Ok(10); /// x.expect_err("Testing expect_err"); // panics with `Testing expect_err: 10` @@ -1222,8 +1198,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// # #![feature(never_type)] /// # #![feature(unwrap_infallible)] @@ -1259,8 +1233,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// # #![feature(never_type)] /// # #![feature(unwrap_infallible)] @@ -1298,8 +1270,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(2); /// let y: Result<&str, &str> = Err("late error"); @@ -1383,8 +1353,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(2); /// let y: Result = Err("late error"); @@ -1426,8 +1394,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn sq(x: u32) -> Result { Ok(x * x) } /// fn err(x: u32) -> Result { Err(x) } @@ -1456,8 +1422,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let default = 2; /// let x: Result = Ok(9); @@ -1487,8 +1451,6 @@ impl Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn count(x: &str) -> usize { x.len() } /// @@ -1752,8 +1714,6 @@ impl Result, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// #![feature(result_flattening)] /// let x: Result, u32> = Ok(Ok("hello")); @@ -1842,8 +1802,6 @@ impl IntoIterator for Result { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result = Ok(5); /// let v: Vec = x.into_iter().collect(); diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs index 5e1b218e5..7601dd3c7 100644 --- 
a/library/core/src/slice/cmp.rs +++ b/library/core/src/slice/cmp.rs @@ -1,6 +1,6 @@ //! Comparison traits for `[T]`. -use crate::cmp::{self, Ordering}; +use crate::cmp::{self, BytewiseEq, Ordering}; use crate::ffi; use crate::mem; @@ -77,7 +77,7 @@ where // Use memcmp for bytewise equality when the types allow impl SlicePartialEq for [A] where - A: BytewiseEquality, + A: BytewiseEq, { fn equal(&self, other: &[B]) -> bool { if self.len() != other.len() { @@ -203,29 +203,6 @@ impl SliceOrd for u8 { } } -// Hack to allow specializing on `Eq` even though `Eq` has a method. -#[rustc_unsafe_specialization_marker] -trait MarkerEq: PartialEq {} - -impl MarkerEq for T {} - -#[doc(hidden)] -/// Trait implemented for types that can be compared for equality using -/// their bytewise representation -#[rustc_specialization_trait] -trait BytewiseEquality: MarkerEq + Copy {} - -macro_rules! impl_marker_for { - ($traitname:ident, $($ty:ty)*) => { - $( - impl $traitname<$ty> for $ty { } - )* - } -} - -impl_marker_for!(BytewiseEquality, - u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool); - pub(super) trait SliceContains: Sized { fn slice_contains(&self, x: &[Self]) -> bool; } diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs index 90ab43d12..c4317799b 100644 --- a/library/core/src/slice/iter.rs +++ b/library/core/src/slice/iter.rs @@ -7,7 +7,9 @@ use crate::cmp; use crate::cmp::Ordering; use crate::fmt; use crate::intrinsics::assume; -use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; +use crate::iter::{ + FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, UncheckedIterator, +}; use crate::marker::{PhantomData, Send, Sized, Sync}; use crate::mem::{self, SizedTypeProperties}; use crate::num::NonZeroUsize; diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs index 0fd57b197..89b92a7d5 100644 --- a/library/core/src/slice/iter/macros.rs +++ b/library/core/src/slice/iter/macros.rs @@ -384,6 +384,15 @@ macro_rules! iterator { #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl TrustedLen for $name<'_, T> {} + + impl<'a, T> UncheckedIterator for $name<'a, T> { + unsafe fn next_unchecked(&mut self) -> $elem { + // SAFETY: The caller promised there's at least one more item. + unsafe { + next_unchecked!(self) + } + } + } } } diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index c848c2e18..98c8349eb 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -16,25 +16,29 @@ const USIZE_BYTES: usize = mem::size_of::(); /// bytes where the borrow propagated all the way to the most significant /// bit." #[inline] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn contains_zero_byte(x: usize) -> bool { x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0 } -#[cfg(target_pointer_width = "16")] #[inline] +#[cfg(target_pointer_width = "16")] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn repeat_byte(b: u8) -> usize { (b as usize) << 8 | b as usize } -#[cfg(not(target_pointer_width = "16"))] #[inline] +#[cfg(not(target_pointer_width = "16"))] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn repeat_byte(b: u8) -> usize { (b as usize) * (usize::MAX / 255) } /// Returns the first index matching the byte `x` in `text`. 
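Note: the memchr helpers being const-stabilized in this hunk (`contains_zero_byte`, `repeat_byte`) rely on the classic SWAR zero-byte test. Below is a minimal standalone sketch of that trick, not taken from the patch; it is written for `u64` rather than `usize`, so the constants are spelled out.

```rust
// SWAR "does this word contain a zero byte?" test, written out for u64.
const LO: u64 = 0x0101_0101_0101_0101; // u64::MAX / 255; the library uses the usize equivalent
const HI: u64 = 0x8080_8080_8080_8080; // LO * 128

fn contains_zero_byte(x: u64) -> bool {
    // A byte of `x` is zero iff the borrow in `x - LO` propagates into that
    // byte's high bit while the byte's own high bit was clear.
    x.wrapping_sub(LO) & !x & HI != 0
}

fn repeat_byte(b: u8) -> u64 {
    (b as u64) * (u64::MAX / 255)
}

fn main() {
    let word = u64::from_ne_bytes(*b"abc\0defg");
    assert!(contains_zero_byte(word));
    assert!(!contains_zero_byte(u64::from_ne_bytes(*b"abcdefgh")));

    // Searching for a byte: XOR with a word of repeated copies turns matching
    // bytes into zero bytes, then reuse the zero-byte test.
    assert!(contains_zero_byte(word ^ repeat_byte(b'd')));
}
```

The `& !x` variant of the test is exact (no false positives), so a word that tests positive is guaranteed to contain the sought byte.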
-#[must_use] #[inline] +#[must_use] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] pub const fn memchr(x: u8, text: &[u8]) -> Option { // Fast path for small slices. if text.len() < 2 * USIZE_BYTES { @@ -45,6 +49,7 @@ pub const fn memchr(x: u8, text: &[u8]) -> Option { } #[inline] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn memchr_naive(x: u8, text: &[u8]) -> Option { let mut i = 0; @@ -60,6 +65,10 @@ const fn memchr_naive(x: u8, text: &[u8]) -> Option { None } +#[rustc_allow_const_fn_unstable(const_cmp)] +#[rustc_allow_const_fn_unstable(const_slice_index)] +#[rustc_allow_const_fn_unstable(const_align_offset)] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn memchr_aligned(x: u8, text: &[u8]) -> Option { // Scan for a single byte value by reading two `usize` words at a time. // diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index d93a3a57e..1cd86b445 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -805,8 +805,9 @@ impl [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn windows(&self, size: usize) -> Windows<'_, T> { - let size = NonZeroUsize::new(size).expect("size is zero"); + let size = NonZeroUsize::new(size).expect("window size must be non-zero"); Windows::new(self, size) } @@ -839,8 +840,9 @@ impl [T] { /// [`rchunks`]: slice::rchunks #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); Chunks::new(self, chunk_size) } @@ -877,8 +879,9 @@ impl [T] { /// [`rchunks_mut`]: slice::rchunks_mut #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksMut::new(self, chunk_size) } @@ -914,8 +917,9 @@ impl [T] { /// [`rchunks_exact`]: slice::rchunks_exact #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] + #[track_caller] pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksExact::new(self, chunk_size) } @@ -956,8 +960,9 @@ impl [T] { /// [`rchunks_exact_mut`]: slice::rchunks_exact_mut #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] + #[track_caller] pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksExactMut::new(self, chunk_size) } @@ -1037,9 +1042,10 @@ impl [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_chunks(&self) -> (&[[T; N]], &[T]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (multiple_of_n, remainder) = self.split_at(len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1068,9 +1074,10 @@ impl [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_rchunks(&self) -> (&[T], &[[T; N]]) { - 
assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (remainder, multiple_of_n) = self.split_at(self.len() - len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1108,8 +1115,9 @@ impl [T] { /// [`chunks_exact`]: slice::chunks_exact #[unstable(feature = "array_chunks", issue = "74985")] #[inline] + #[track_caller] pub fn array_chunks(&self) -> ArrayChunks<'_, T, N> { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); ArrayChunks::new(self) } @@ -1186,9 +1194,10 @@ impl [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_chunks_mut(&mut self) -> (&mut [[T; N]], &mut [T]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (multiple_of_n, remainder) = self.split_at_mut(len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1223,9 +1232,10 @@ impl [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_rchunks_mut(&mut self) -> (&mut [T], &mut [[T; N]]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1265,8 +1275,9 @@ impl [T] { /// [`chunks_exact_mut`]: slice::chunks_exact_mut #[unstable(feature = "array_chunks", issue = "74985")] #[inline] + #[track_caller] pub fn array_chunks_mut(&mut self) -> ArrayChunksMut<'_, T, N> { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); ArrayChunksMut::new(self) } @@ -1297,8 +1308,9 @@ impl [T] { /// [`windows`]: slice::windows #[unstable(feature = "array_windows", issue = "75027")] #[inline] + #[track_caller] pub fn array_windows(&self) -> ArrayWindows<'_, T, N> { - assert_ne!(N, 0, "windows cannot have a size of zero"); + assert!(N != 0, "window size must be non-zero"); ArrayWindows::new(self) } @@ -1331,8 +1343,9 @@ impl [T] { /// [`chunks`]: slice::chunks #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunks::new(self, chunk_size) } @@ -1369,8 +1382,9 @@ impl [T] { /// [`chunks_mut`]: slice::chunks_mut #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksMut::new(self, chunk_size) } @@ -1408,8 +1422,9 @@ impl [T] { /// [`chunks_exact`]: slice::chunks_exact #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksExact::new(self, chunk_size) } @@ -1451,8 +1466,9 @@ impl [T] { /// [`chunks_exact_mut`]: slice::chunks_exact_mut #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> { - assert!(chunk_size != 0); + 
assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksExactMut::new(self, chunk_size) } @@ -2714,8 +2730,10 @@ impl [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index`. Additionally, this reordering is /// unstable (i.e. any number of equal elements may end up at position `index`), in-place - /// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also/ known as "kth - /// element" in other libraries. It returns a triplet of the following from the reordered slice: + /// (i.e. does not allocate), and *O*(*n*) on average. The worst-case performance is *O*(*n* log *n*). + /// This function is also known as "kth element" in other libraries. + /// + /// It returns a triplet of the following from the reordered slice: /// the subslice prior to `index`, the element at `index`, and the subslice after `index`; /// accordingly, the values in those two subslices will respectively all be less-than-or-equal-to /// and greater-than-or-equal-to the value of the element at `index`. @@ -2761,8 +2779,11 @@ impl [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index` using the comparator function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at - /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. It returns a triplet of the following from + /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average. + /// The worst-case performance is *O*(*n* log *n*). This function is also known as + /// "kth element" in other libraries. + /// + /// It returns a triplet of the following from /// the slice reordered according to the provided comparator function: the subslice prior to /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to @@ -2813,8 +2834,11 @@ impl [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index` using the key extraction function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at - /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. It returns a triplet of the following from + /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average. + /// The worst-case performance is *O*(*n* log *n*). + /// This function is also known as "kth element" in other libraries. + /// + /// It returns a triplet of the following from /// the slice reordered according to the provided key extraction function: the subslice prior to /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to @@ -2931,7 +2955,7 @@ impl [T] { // This operation is still `O(n)`. // // Example: We start in this state, where `r` represents "next - // read" and `w` represents "next_write`. + // read" and `w` represents "next_write". 
// // r // +---+---+---+---+---+---+ diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs index 2181f9a81..2333f60a8 100644 --- a/library/core/src/slice/sort.rs +++ b/library/core/src/slice/sort.rs @@ -13,115 +13,184 @@ use crate::cmp; use crate::mem::{self, MaybeUninit, SizedTypeProperties}; use crate::ptr; -/// When dropped, copies from `src` into `dest`. -struct CopyOnDrop { +// When dropped, copies from `src` into `dest`. +struct InsertionHole { src: *const T, dest: *mut T, } -impl Drop for CopyOnDrop { +impl Drop for InsertionHole { fn drop(&mut self) { - // SAFETY: This is a helper class. - // Please refer to its usage for correctness. - // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`. + // SAFETY: This is a helper class. Please refer to its usage for correctness. Namely, one + // must be sure that `src` and `dst` does not overlap as required by + // `ptr::copy_nonoverlapping` and are both valid for writes. unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); } } } -/// Shifts the first element to the right until it encounters a greater or equal element. -fn shift_head(v: &mut [T], is_less: &mut F) +/// Inserts `v[v.len() - 1]` into pre-sorted sequence `v[..v.len() - 1]` so that whole `v[..]` +/// becomes sorted. +unsafe fn insert_tail(v: &mut [T], is_less: &mut F) where F: FnMut(&T, &T) -> bool, { - let len = v.len(); - // SAFETY: The unsafe operations below involves indexing without a bounds check (by offsetting a - // pointer) and copying memory (`ptr::copy_nonoverlapping`). - // - // a. Indexing: - // 1. We checked the size of the array to >=2. - // 2. All the indexing that we will do is always between {0 <= index < len} at most. - // - // b. Memory copying - // 1. We are obtaining pointers to references which are guaranteed to be valid. - // 2. They cannot overlap because we obtain pointers to difference indices of the slice. - // Namely, `i` and `i-1`. - // 3. If the slice is properly aligned, the elements are properly aligned. - // It is the caller's responsibility to make sure the slice is properly aligned. - // - // See comments below for further detail. + debug_assert!(v.len() >= 2); + + let arr_ptr = v.as_mut_ptr(); + let i = v.len() - 1; + + // SAFETY: caller must ensure v is at least len 2. unsafe { - // If the first two elements are out-of-order... - if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) { - // Read the first element into a stack-allocated variable. If a following comparison - // operation panics, `hole` will get dropped and automatically write the element back - // into the slice. - let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0))); - let v = v.as_mut_ptr(); - let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(1) }; - ptr::copy_nonoverlapping(v.add(1), v.add(0), 1); - - for i in 2..len { - if !is_less(&*v.add(i), &*tmp) { + // See insert_head which talks about why this approach is beneficial. + let i_ptr = arr_ptr.add(i); + + // It's important that we use i_ptr here. If this check is positive and we continue, + // We want to make sure that no other copy of the value was seen by is_less. + // Otherwise we would have to copy it back. + if is_less(&*i_ptr, &*i_ptr.sub(1)) { + // It's important, that we use tmp for comparison from now on. As it is the value that + // will be copied back. And notionally we could have created a divergence if we copy + // back the wrong value. 
+ let tmp = mem::ManuallyDrop::new(ptr::read(i_ptr)); + // Intermediate state of the insertion process is always tracked by `hole`, which + // serves two purposes: + // 1. Protects integrity of `v` from panics in `is_less`. + // 2. Fills the remaining hole in `v` in the end. + // + // Panic safety: + // + // If `is_less` panics at any point during the process, `hole` will get dropped and + // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it + // initially held exactly once. + let mut hole = InsertionHole { src: &*tmp, dest: i_ptr.sub(1) }; + ptr::copy_nonoverlapping(hole.dest, i_ptr, 1); + + // SAFETY: We know i is at least 1. + for j in (0..(i - 1)).rev() { + let j_ptr = arr_ptr.add(j); + if !is_less(&*tmp, &*j_ptr) { break; } - // Move `i`-th element one place to the left, thus shifting the hole to the right. - ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1); - hole.dest = v.add(i); + ptr::copy_nonoverlapping(j_ptr, hole.dest, 1); + hole.dest = j_ptr; } // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. } } } -/// Shifts the last element to the left until it encounters a smaller or equal element. -fn shift_tail(v: &mut [T], is_less: &mut F) +/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. +/// +/// This is the integral subroutine of insertion sort. +unsafe fn insert_head(v: &mut [T], is_less: &mut F) where F: FnMut(&T, &T) -> bool, { - let len = v.len(); - // SAFETY: The unsafe operations below involves indexing without a bound check (by offsetting a - // pointer) and copying memory (`ptr::copy_nonoverlapping`). - // - // a. Indexing: - // 1. We checked the size of the array to >= 2. - // 2. All the indexing that we will do is always between `0 <= index < len-1` at most. - // - // b. Memory copying - // 1. We are obtaining pointers to references which are guaranteed to be valid. - // 2. They cannot overlap because we obtain pointers to difference indices of the slice. - // Namely, `i` and `i+1`. - // 3. If the slice is properly aligned, the elements are properly aligned. - // It is the caller's responsibility to make sure the slice is properly aligned. - // - // See comments below for further detail. + debug_assert!(v.len() >= 2); + + // SAFETY: caller must ensure v is at least len 2. unsafe { - // If the last two elements are out-of-order... - if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) { - // Read the last element into a stack-allocated variable. If a following comparison - // operation panics, `hole` will get dropped and automatically write the element back - // into the slice. - let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1))); - let v = v.as_mut_ptr(); - let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(len - 2) }; - ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1); - - for i in (0..len - 2).rev() { - if !is_less(&*tmp, &*v.add(i)) { + if is_less(v.get_unchecked(1), v.get_unchecked(0)) { + let arr_ptr = v.as_mut_ptr(); + + // There are three ways to implement insertion here: + // + // 1. Swap adjacent elements until the first one gets to its final destination. + // However, this way we copy data around more than is necessary. If elements are big + // structures (costly to copy), this method will be slow. + // + // 2. Iterate until the right place for the first element is found. Then shift the + // elements succeeding it to make room for it and finally place it into the + // remaining hole. This is a good method. 
+ // + // 3. Copy the first element into a temporary variable. Iterate until the right place + // for it is found. As we go along, copy every traversed element into the slot + // preceding it. Finally, copy data from the temporary variable into the remaining + // hole. This method is very good. Benchmarks demonstrated slightly better + // performance than with the 2nd method. + // + // All methods were benchmarked, and the 3rd showed best results. So we chose that one. + let tmp = mem::ManuallyDrop::new(ptr::read(arr_ptr)); + + // Intermediate state of the insertion process is always tracked by `hole`, which + // serves two purposes: + // 1. Protects integrity of `v` from panics in `is_less`. + // 2. Fills the remaining hole in `v` in the end. + // + // Panic safety: + // + // If `is_less` panics at any point during the process, `hole` will get dropped and + // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it + // initially held exactly once. + let mut hole = InsertionHole { src: &*tmp, dest: arr_ptr.add(1) }; + ptr::copy_nonoverlapping(arr_ptr.add(1), arr_ptr.add(0), 1); + + for i in 2..v.len() { + if !is_less(&v.get_unchecked(i), &*tmp) { break; } - - // Move `i`-th element one place to the right, thus shifting the hole to the left. - ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1); - hole.dest = v.add(i); + ptr::copy_nonoverlapping(arr_ptr.add(i), arr_ptr.add(i - 1), 1); + hole.dest = arr_ptr.add(i); } // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. } } } +/// Sort `v` assuming `v[..offset]` is already sorted. +/// +/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically no +/// performance impact. Even improving performance in some cases. +#[inline(never)] +fn insertion_sort_shift_left(v: &mut [T], offset: usize, is_less: &mut F) +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + + // Using assert here improves performance. + assert!(offset != 0 && offset <= len); + + // Shift each element of the unsorted region v[i..] as far left as is needed to make v sorted. + for i in offset..len { + // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if len + // >= 2. The range is exclusive and we know `i` must be at least 1 so this slice has at + // >least len 2. + unsafe { + insert_tail(&mut v[..=i], is_less); + } + } +} + +/// Sort `v` assuming `v[offset..]` is already sorted. +/// +/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically no +/// performance impact. Even improving performance in some cases. +#[inline(never)] +fn insertion_sort_shift_right(v: &mut [T], offset: usize, is_less: &mut F) +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + + // Using assert here improves performance. + assert!(offset != 0 && offset <= len && len >= 2); + + // Shift each element of the unsorted region v[..i] as far left as is needed to make v sorted. + for i in (0..offset).rev() { + // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if len + // >= 2.We ensured that the slice length is always at least 2 long. We know that start_found + // will be at least one less than end, and the range is exclusive. Which gives us i always + // <= (end - 2). + unsafe { + insert_head(&mut v[i..len], is_less); + } + } +} + /// Partially sorts a slice by shifting several out-of-order elements around. /// /// Returns `true` if the slice is sorted at the end. 
This function is *O*(*n*) worst-case. @@ -161,26 +230,19 @@ where // Swap the found pair of elements. This puts them in correct order. v.swap(i - 1, i); - // Shift the smaller element to the left. - shift_tail(&mut v[..i], is_less); - // Shift the greater element to the right. - shift_head(&mut v[i..], is_less); + if i >= 2 { + // Shift the smaller element to the left. + insertion_sort_shift_left(&mut v[..i], i - 1, is_less); + + // Shift the greater element to the right. + insertion_sort_shift_right(&mut v[..i], 1, is_less); + } } // Didn't manage to sort the slice in the limited number of steps. false } -/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case. -fn insertion_sort(v: &mut [T], is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - for i in 1..v.len() { - shift_tail(&mut v[..i + 1], is_less); - } -} - /// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case. #[cold] #[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")] @@ -198,8 +260,11 @@ where } // Choose the greater child. - if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) { - child += 1; + if child + 1 < v.len() { + // We need a branch to be sure not to out-of-bounds index, + // but it's highly predictable. The comparison, however, + // is better done branchless, especially for primitives. + child += is_less(&v[child], &v[child + 1]) as usize; } // Stop if the invariant holds at `node`. @@ -252,7 +317,7 @@ where // 1. `block` - Number of elements in the block. // 2. `start` - Start pointer into the `offsets` array. // 3. `end` - End pointer into the `offsets` array. - // 4. `offsets - Indices of out-of-order elements within the block. + // 4. `offsets` - Indices of out-of-order elements within the block. // The current block on the left side (from `l` to `l.add(block_l)`). let mut l = v.as_mut_ptr(); @@ -262,7 +327,7 @@ where let mut offsets_l = [MaybeUninit::::uninit(); BLOCK]; // The current block on the right side (from `r.sub(block_r)` to `r`). - // SAFETY: The documentation for .add() specifically mention that `vec.as_ptr().add(vec.len())` is always safe` + // SAFETY: The documentation for .add() specifically mention that `vec.as_ptr().add(vec.len())` is always safe let mut r = unsafe { l.add(v.len()) }; let mut block_r = BLOCK; let mut start_r = ptr::null_mut(); @@ -507,7 +572,7 @@ where // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe. let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); - let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot }; + let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; let pivot = &*tmp; // Find the first pair of out-of-order elements. @@ -560,7 +625,7 @@ where // operation panics, the pivot will be automatically written back into the slice. // SAFETY: The pointer here is valid because it is obtained from a reference to a slice. let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); - let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot }; + let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; let pivot = &*tmp; // Now partition the slice. @@ -608,19 +673,23 @@ where fn break_patterns(v: &mut [T]) { let len = v.len(); if len >= 8 { - // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. 
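Note: the heapsort hunk above replaces the branch that picks the greater child with `child += is_less(&v[child], &v[child + 1]) as usize`. The following is a small self-contained sketch of that pattern, not the patch's code; `sift_down` and the `i32` element type are illustrative assumptions.

```rust
fn sift_down(v: &mut [i32], mut node: usize) {
    loop {
        let mut child = 2 * node + 1;
        if child >= v.len() {
            break;
        }
        if child + 1 < v.len() {
            // `true as usize` is 1, so this selects the greater child
            // without a data-dependent branch.
            child += (v[child] < v[child + 1]) as usize;
        }
        if v[node] >= v[child] {
            break;
        }
        v.swap(node, child);
        node = child;
    }
}

fn main() {
    // Build a max-heap, then check that the maximum ended up at the root.
    let mut v = vec![3, 9, 2, 7, 5, 1];
    for node in (0..v.len() / 2).rev() {
        sift_down(&mut v, node);
    }
    assert_eq!(v[0], 9);
}
```

The bounds check stays a branch because it is highly predictable; only the data-dependent comparison is made branchless, as the added comment in the hunk explains.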
- let mut random = len as u32; - let mut gen_u32 = || { - random ^= random << 13; - random ^= random >> 17; - random ^= random << 5; - random - }; + let mut seed = len; let mut gen_usize = || { + // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. if usize::BITS <= 32 { - gen_u32() as usize + let mut r = seed as u32; + r ^= r << 13; + r ^= r >> 17; + r ^= r << 5; + seed = r as usize; + seed } else { - (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize + let mut r = seed as u64; + r ^= r << 13; + r ^= r >> 7; + r ^= r << 17; + seed = r as usize; + seed } }; @@ -742,7 +811,9 @@ where // Very short slices get sorted using insertion sort. if len <= MAX_INSERTION { - insertion_sort(v, is_less); + if len >= 2 { + insertion_sort_shift_left(v, 1, is_less); + } return; } @@ -844,10 +915,14 @@ fn partition_at_index_loop<'a, T, F>( let mut was_balanced = true; loop { + let len = v.len(); + // For slices of up to this length it's probably faster to simply sort them. const MAX_INSERTION: usize = 10; - if v.len() <= MAX_INSERTION { - insertion_sort(v, is_less); + if len <= MAX_INSERTION { + if len >= 2 { + insertion_sort_shift_left(v, 1, is_less); + } return; } @@ -887,7 +962,7 @@ fn partition_at_index_loop<'a, T, F>( } let (mid, _) = partition(v, pivot, is_less); - was_balanced = cmp::min(mid, v.len() - mid) >= v.len() / 8; + was_balanced = cmp::min(mid, len - mid) >= len / 8; // Split the slice into `left`, `pivot`, and `right`. let (left, right) = v.split_at_mut(mid); @@ -954,75 +1029,6 @@ where (left, pivot, right) } -/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. -/// -/// This is the integral subroutine of insertion sort. -fn insert_head(v: &mut [T], is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - if v.len() >= 2 && is_less(&v[1], &v[0]) { - // SAFETY: Copy tmp back even if panic, and ensure unique observation. - unsafe { - // There are three ways to implement insertion here: - // - // 1. Swap adjacent elements until the first one gets to its final destination. - // However, this way we copy data around more than is necessary. If elements are big - // structures (costly to copy), this method will be slow. - // - // 2. Iterate until the right place for the first element is found. Then shift the - // elements succeeding it to make room for it and finally place it into the - // remaining hole. This is a good method. - // - // 3. Copy the first element into a temporary variable. Iterate until the right place - // for it is found. As we go along, copy every traversed element into the slot - // preceding it. Finally, copy data from the temporary variable into the remaining - // hole. This method is very good. Benchmarks demonstrated slightly better - // performance than with the 2nd method. - // - // All methods were benchmarked, and the 3rd showed best results. So we chose that one. - let tmp = mem::ManuallyDrop::new(ptr::read(&v[0])); - - // Intermediate state of the insertion process is always tracked by `hole`, which - // serves two purposes: - // 1. Protects integrity of `v` from panics in `is_less`. - // 2. Fills the remaining hole in `v` in the end. - // - // Panic safety: - // - // If `is_less` panics at any point during the process, `hole` will get dropped and - // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it - // initially held exactly once. 
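Note: the `break_patterns` change keeps Marsaglia's xorshift generator but seeds it with the slice length and picks the shift triple by word size (13/17/5 for 32-bit words, 13/7/17 for 64-bit words). A minimal sketch of the 64-bit step, with an arbitrary seed chosen purely for illustration:

```rust
// One xorshift64 step, matching the 64-bit branch of the rewritten generator.
fn xorshift64(mut x: u64) -> u64 {
    x ^= x << 13;
    x ^= x >> 7;
    x ^= x << 17;
    x
}

fn main() {
    let mut seed = 42u64; // the patch seeds with the slice length instead
    for _ in 0..3 {
        seed = xorshift64(seed);
        println!("{seed}");
    }
}
```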
- let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] }; - ptr::copy_nonoverlapping(&v[1], &mut v[0], 1); - - for i in 2..v.len() { - if !is_less(&v[i], &*tmp) { - break; - } - ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1); - hole.dest = &mut v[i]; - } - // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. - } - } - - // When dropped, copies from `src` into `dest`. - struct InsertionHole { - src: *const T, - dest: *mut T, - } - - impl Drop for InsertionHole { - fn drop(&mut self) { - // SAFETY: The caller must ensure that src and dest are correctly set. - unsafe { - ptr::copy_nonoverlapping(self.src, self.dest, 1); - } - } - } -} - /// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and /// stores the result into `v[..]`. /// @@ -1180,8 +1186,6 @@ pub fn merge_sort( { // Slices of up to this length get sorted using insertion sort. const MAX_INSERTION: usize = 20; - // Very short runs are extended using insertion sort to span at least this many elements. - const MIN_RUN: usize = 10; // The caller should have already checked that. debug_assert!(!T::IS_ZST); @@ -1191,9 +1195,7 @@ pub fn merge_sort( // Short arrays get sorted in-place via insertion sort to avoid allocations. if len <= MAX_INSERTION { if len >= 2 { - for i in (0..len - 1).rev() { - insert_head(&mut v[i..], is_less); - } + insertion_sort_shift_left(v, 1, is_less); } return; } @@ -1203,59 +1205,43 @@ pub fn merge_sort( // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run, // which will always have length at most `len / 2`. let buf = BufGuard::new(len / 2, elem_alloc_fn, elem_dealloc_fn); - let buf_ptr = buf.buf_ptr; + let buf_ptr = buf.buf_ptr.as_ptr(); let mut runs = RunVec::new(run_alloc_fn, run_dealloc_fn); - // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a - // strange decision, but consider the fact that merges more often go in the opposite direction - // (forwards). According to benchmarks, merging forwards is slightly faster than merging - // backwards. To conclude, identifying runs by traversing backwards improves performance. - let mut end = len; - while end > 0 { - // Find the next natural run, and reverse it if it's strictly descending. - let mut start = end - 1; - if start > 0 { - start -= 1; - - // SAFETY: The v.get_unchecked must be fed with correct inbound indicies. - unsafe { - if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) { - while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) { - start -= 1; - } - v[start..end].reverse(); - } else { - while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) - { - start -= 1; - } - } - } + let mut end = 0; + let mut start = 0; + + // Scan forward. Memory pre-fetching prefers forward scanning vs backwards scanning, and the + // code-gen is usually better. For the most sensitive types such as integers, these are merged + // bidirectionally at once. So there is no benefit in scanning backwards. + while end < len { + let (streak_end, was_reversed) = find_streak(&v[start..], is_less); + end += streak_end; + if was_reversed { + v[start..end].reverse(); } // Insert some more elements into the run if it's too short. Insertion sort is faster than // merge sort on short sequences, so this significantly improves performance. 
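Note: `insertion_sort_shift_left`, introduced earlier in this sort.rs hunk, assumes `v[..offset]` is already sorted and inserts each following element into that prefix. Below is a safe-Rust sketch of the same observable behaviour using swaps instead of the raw-pointer hole; the function name reuse and the `i32` element type are illustrative only.

```rust
fn insertion_sort_shift_left(v: &mut [i32], offset: usize) {
    assert!(offset != 0 && offset <= v.len());
    for i in offset..v.len() {
        // Move v[i] left until it sits in its sorted position.
        let mut j = i;
        while j > 0 && v[j] < v[j - 1] {
            v.swap(j, j - 1);
            j -= 1;
        }
    }
}

fn main() {
    let mut v = vec![1, 4, 9, 3, 2, 8];
    // The first three elements are already sorted.
    insertion_sort_shift_left(&mut v, 3);
    assert_eq!(v, [1, 2, 3, 4, 8, 9]);
}
```

The real implementation avoids the repeated swaps by routing the moving element through a temporary plus an `InsertionHole` guard, which also keeps the slice intact if the comparison closure panics.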
- while start > 0 && end - start < MIN_RUN { - start -= 1; - insert_head(&mut v[start..end], is_less); - } + end = provide_sorted_batch(v, start, end, is_less); // Push this run onto the stack. runs.push(TimSortRun { start, len: end - start }); - end = start; + start = end; // Merge some pairs of adjacent runs to satisfy the invariants. - while let Some(r) = collapse(runs.as_slice()) { - let left = runs[r + 1]; - let right = runs[r]; + while let Some(r) = collapse(runs.as_slice(), len) { + let left = runs[r]; + let right = runs[r + 1]; + let merge_slice = &mut v[left.start..right.start + right.len]; // SAFETY: `buf_ptr` must hold enough capacity for the shorter of the two sides, and // neither side may be on length 0. unsafe { - merge(&mut v[left.start..right.start + right.len], left.len, buf_ptr, is_less); + merge(merge_slice, left.len, buf_ptr, is_less); } - runs[r] = TimSortRun { start: left.start, len: left.len + right.len }; - runs.remove(r + 1); + runs[r + 1] = TimSortRun { start: left.start, len: left.len + right.len }; + runs.remove(r); } } @@ -1277,10 +1263,10 @@ pub fn merge_sort( // run starts at index 0, it will always demand a merge operation until the stack is fully // collapsed, in order to complete the sort. #[inline] - fn collapse(runs: &[TimSortRun]) -> Option { + fn collapse(runs: &[TimSortRun], stop: usize) -> Option { let n = runs.len(); if n >= 2 - && (runs[n - 1].start == 0 + && (runs[n - 1].start + runs[n - 1].len == stop || runs[n - 2].len <= runs[n - 1].len || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) @@ -1298,7 +1284,7 @@ pub fn merge_sort( where ElemDeallocF: Fn(*mut T, usize), { - buf_ptr: *mut T, + buf_ptr: ptr::NonNull, capacity: usize, elem_dealloc_fn: ElemDeallocF, } @@ -1315,7 +1301,11 @@ pub fn merge_sort( where ElemAllocF: Fn(usize) -> *mut T, { - Self { buf_ptr: elem_alloc_fn(len), capacity: len, elem_dealloc_fn } + Self { + buf_ptr: ptr::NonNull::new(elem_alloc_fn(len)).unwrap(), + capacity: len, + elem_dealloc_fn, + } } } @@ -1324,7 +1314,7 @@ pub fn merge_sort( ElemDeallocF: Fn(*mut T, usize), { fn drop(&mut self) { - (self.elem_dealloc_fn)(self.buf_ptr, self.capacity); + (self.elem_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity); } } @@ -1333,7 +1323,7 @@ pub fn merge_sort( RunAllocF: Fn(usize) -> *mut TimSortRun, RunDeallocF: Fn(*mut TimSortRun, usize), { - buf_ptr: *mut TimSortRun, + buf_ptr: ptr::NonNull, capacity: usize, len: usize, run_alloc_fn: RunAllocF, @@ -1350,7 +1340,7 @@ pub fn merge_sort( const START_RUN_CAPACITY: usize = 16; Self { - buf_ptr: run_alloc_fn(START_RUN_CAPACITY), + buf_ptr: ptr::NonNull::new(run_alloc_fn(START_RUN_CAPACITY)).unwrap(), capacity: START_RUN_CAPACITY, len: 0, run_alloc_fn, @@ -1361,15 +1351,15 @@ pub fn merge_sort( fn push(&mut self, val: TimSortRun) { if self.len == self.capacity { let old_capacity = self.capacity; - let old_buf_ptr = self.buf_ptr; + let old_buf_ptr = self.buf_ptr.as_ptr(); self.capacity = self.capacity * 2; - self.buf_ptr = (self.run_alloc_fn)(self.capacity); + self.buf_ptr = ptr::NonNull::new((self.run_alloc_fn)(self.capacity)).unwrap(); // SAFETY: buf_ptr new and old were correctly allocated and old_buf_ptr has // old_capacity valid elements. 
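Note: the rewritten run detection in `merge_sort` calls the `find_streak` helper (added at the end of this sort.rs hunk) on the unscanned suffix and reverses the run only when it was strictly decreasing. A safe sketch of that helper's contract, simplified to `i32` and `<` in place of the `is_less` closure:

```rust
// Returns the length of the leading non-decreasing (or strictly decreasing)
// run and whether it was decreasing and therefore needs reversing.
fn find_streak(v: &[i32]) -> (usize, bool) {
    if v.len() < 2 {
        return (v.len(), false);
    }
    let decreasing = v[1] < v[0];
    let mut end = 2;
    while end < v.len() && (v[end] < v[end - 1]) == decreasing {
        end += 1;
    }
    (end, decreasing)
}

fn main() {
    assert_eq!(find_streak(&[1, 2, 2, 5, 3, 4]), (4, false));
    assert_eq!(find_streak(&[9, 7, 4, 8]), (3, true));
}
```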
unsafe { - ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr, old_capacity); + ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr.as_ptr(), old_capacity); } (self.run_dealloc_fn)(old_buf_ptr, old_capacity); @@ -1377,7 +1367,7 @@ pub fn merge_sort( // SAFETY: The invariant was just checked. unsafe { - self.buf_ptr.add(self.len).write(val); + self.buf_ptr.as_ptr().add(self.len).write(val); } self.len += 1; } @@ -1390,7 +1380,7 @@ pub fn merge_sort( // SAFETY: buf_ptr needs to be valid and len invariant upheld. unsafe { // the place we are taking from. - let ptr = self.buf_ptr.add(index); + let ptr = self.buf_ptr.as_ptr().add(index); // Shift everything down to fill in that spot. ptr::copy(ptr.add(1), ptr, self.len - index - 1); @@ -1400,7 +1390,7 @@ pub fn merge_sort( fn as_slice(&self) -> &[TimSortRun] { // SAFETY: Safe as long as buf_ptr is valid and len invariant was upheld. - unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr, self.len) } + unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr.as_ptr(), self.len) } } fn len(&self) -> usize { @@ -1419,7 +1409,7 @@ pub fn merge_sort( if index < self.len { // SAFETY: buf_ptr and len invariant must be upheld. unsafe { - return &*(self.buf_ptr.add(index)); + return &*(self.buf_ptr.as_ptr().add(index)); } } @@ -1436,7 +1426,7 @@ pub fn merge_sort( if index < self.len { // SAFETY: buf_ptr and len invariant must be upheld. unsafe { - return &mut *(self.buf_ptr.add(index)); + return &mut *(self.buf_ptr.as_ptr().add(index)); } } @@ -1452,7 +1442,7 @@ pub fn merge_sort( fn drop(&mut self) { // As long as TimSortRun is Copy we don't need to drop them individually but just the // whole allocation. - (self.run_dealloc_fn)(self.buf_ptr, self.capacity); + (self.run_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity); } } } @@ -1463,3 +1453,71 @@ pub struct TimSortRun { len: usize, start: usize, } + +/// Takes a range as denoted by start and end, that is already sorted and extends it to the right if +/// necessary with sorts optimized for smaller ranges such as insertion sort. +#[cfg(not(no_global_oom_handling))] +fn provide_sorted_batch(v: &mut [T], start: usize, mut end: usize, is_less: &mut F) -> usize +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + assert!(end >= start && end <= len); + + // This value is a balance between least comparisons and best performance, as + // influenced by for example cache locality. + const MIN_INSERTION_RUN: usize = 10; + + // Insert some more elements into the run if it's too short. Insertion sort is faster than + // merge sort on short sequences, so this significantly improves performance. + let start_end_diff = end - start; + + if start_end_diff < MIN_INSERTION_RUN && end < len { + // v[start_found..end] are elements that are already sorted in the input. We want to extend + // the sorted region to the left, so we push up MIN_INSERTION_RUN - 1 to the right. Which is + // more efficient that trying to push those already sorted elements to the left. + end = cmp::min(start + MIN_INSERTION_RUN, len); + let presorted_start = cmp::max(start_end_diff, 1); + + insertion_sort_shift_left(&mut v[start..end], presorted_start, is_less); + } + + end +} + +/// Finds a streak of presorted elements starting at the beginning of the slice. Returns the first +/// value that is not part of said streak, and a bool denoting wether the streak was reversed. +/// Streaks can be increasing or decreasing. 
+fn find_streak(v: &[T], is_less: &mut F) -> (usize, bool) +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + + if len < 2 { + return (len, false); + } + + let mut end = 2; + + // SAFETY: See below specific. + unsafe { + // SAFETY: We checked that len >= 2, so 0 and 1 are valid indices. + let assume_reverse = is_less(v.get_unchecked(1), v.get_unchecked(0)); + + // SAFETY: We know end >= 2 and check end < len. + // From that follows that accessing v at end and end - 1 is safe. + if assume_reverse { + while end < len && is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { + end += 1; + } + + (end, true) + } else { + while end < len && !is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { + end += 1; + } + (end, false) + } + } +} diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index d969475aa..95c682f42 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -1,6 +1,6 @@ //! Iterators for `str` methods. -use crate::char; +use crate::char as char_mod; use crate::fmt::{self, Write}; use crate::iter::{Chain, FlatMap, Flatten}; use crate::iter::{Copied, Filter, FusedIterator, Map, TrustedLen}; @@ -1455,8 +1455,8 @@ impl FusedIterator for EncodeUtf16<'_> {} #[derive(Clone, Debug)] pub struct EscapeDebug<'a> { pub(super) inner: Chain< - Flatten>, - FlatMap, char::EscapeDebug, CharEscapeDebugContinue>, + Flatten>, + FlatMap, char_mod::EscapeDebug, CharEscapeDebugContinue>, >, } @@ -1464,14 +1464,14 @@ pub struct EscapeDebug<'a> { #[stable(feature = "str_escape", since = "1.34.0")] #[derive(Clone, Debug)] pub struct EscapeDefault<'a> { - pub(super) inner: FlatMap, char::EscapeDefault, CharEscapeDefault>, + pub(super) inner: FlatMap, char_mod::EscapeDefault, CharEscapeDefault>, } /// The return type of [`str::escape_unicode`]. #[stable(feature = "str_escape", since = "1.34.0")] #[derive(Clone, Debug)] pub struct EscapeUnicode<'a> { - pub(super) inner: FlatMap, char::EscapeUnicode, CharEscapeUnicode>, + pub(super) inner: FlatMap, char_mod::EscapeUnicode, CharEscapeUnicode>, } macro_rules! escape_types_impls { diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs index d3ed811b1..68f62ce8b 100644 --- a/library/core/src/str/traits.rs +++ b/library/core/src/str/traits.rs @@ -28,10 +28,6 @@ impl PartialEq for str { fn eq(&self, other: &str) -> bool { self.as_bytes() == other.as_bytes() } - #[inline] - fn ne(&self, other: &str) -> bool { - !(*self).eq(other) - } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 14367eb09..040a59184 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -305,6 +305,50 @@ impl AtomicBool { AtomicBool { v: UnsafeCell::new(v as u8) } } + /// Creates a new `AtomicBool` from a pointer. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_from_ptr, pointer_is_aligned)] + /// use std::sync::atomic::{self, AtomicBool}; + /// use std::mem::align_of; + /// + /// // Get a pointer to an allocated value + /// let ptr: *mut bool = Box::into_raw(Box::new(false)); + /// + /// assert!(ptr.is_aligned_to(align_of::())); + /// + /// { + /// // Create an atomic view of the allocated value + /// let atomic = unsafe { AtomicBool::from_ptr(ptr) }; + /// + /// // Use `atomic` for atomic operations, possibly share it with other threads + /// atomic.store(true, atomic::Ordering::Relaxed); + /// } + /// + /// // It's ok to non-atomically access the value behind `ptr`, + /// // since the reference to the atomic ended its lifetime in the block above + /// assert_eq!(unsafe { *ptr }, true); + /// + /// // Deallocate the value + /// unsafe { drop(Box::from_raw(ptr)) } + /// ``` + /// + /// # Safety + /// + /// * `ptr` must be aligned to `align_of::()` (note that on some platforms this can be bigger than `align_of::()`). + /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. + /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`. + /// + /// [valid]: crate::ptr#safety + #[unstable(feature = "atomic_from_ptr", issue = "108652")] + #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")] + pub const unsafe fn from_ptr<'a>(ptr: *mut bool) -> &'a AtomicBool { + // SAFETY: guaranteed by the caller + unsafe { &*ptr.cast() } + } + /// Returns a mutable reference to the underlying [`bool`]. /// /// This is safe because the mutable reference guarantees that no other threads are @@ -922,14 +966,14 @@ impl AtomicBool { /// /// let mut atomic = AtomicBool::new(true); /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// # } /// ``` #[inline] #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut bool { - self.v.get() as *mut bool + pub const fn as_ptr(&self) -> *mut bool { + self.v.get().cast() } /// Fetches the value, and applies a function to it that returns an optional @@ -1017,6 +1061,50 @@ impl AtomicPtr { AtomicPtr { p: UnsafeCell::new(p) } } + /// Creates a new `AtomicPtr` from a pointer. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_from_ptr, pointer_is_aligned)] + /// use std::sync::atomic::{self, AtomicPtr}; + /// use std::mem::align_of; + /// + /// // Get a pointer to an allocated value + /// let ptr: *mut *mut u8 = Box::into_raw(Box::new(std::ptr::null_mut())); + /// + /// assert!(ptr.is_aligned_to(align_of::>())); + /// + /// { + /// // Create an atomic view of the allocated value + /// let atomic = unsafe { AtomicPtr::from_ptr(ptr) }; + /// + /// // Use `atomic` for atomic operations, possibly share it with other threads + /// atomic.store(std::ptr::NonNull::dangling().as_ptr(), atomic::Ordering::Relaxed); + /// } + /// + /// // It's ok to non-atomically access the value behind `ptr`, + /// // since the reference to the atomic ended its lifetime in the block above + /// assert!(!unsafe { *ptr }.is_null()); + /// + /// // Deallocate the value + /// unsafe { drop(Box::from_raw(ptr)) } + /// ``` + /// + /// # Safety + /// + /// * `ptr` must be aligned to `align_of::>()` (note that on some platforms this can be bigger than `align_of::<*mut T>()`). + /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. 
+ /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`. + /// + /// [valid]: crate::ptr#safety + #[unstable(feature = "atomic_from_ptr", issue = "108652")] + #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")] + pub const unsafe fn from_ptr<'a>(ptr: *mut *mut T) -> &'a AtomicPtr { + // SAFETY: guaranteed by the caller + unsafe { &*ptr.cast() } + } + /// Returns a mutable reference to the underlying pointer. /// /// This is safe because the mutable reference guarantees that no other threads are @@ -1803,7 +1891,7 @@ impl AtomicPtr { /// /// ```ignore (extern-declaration) /// #![feature(atomic_mut_ptr)] - //// use std::sync::atomic::AtomicPtr; + /// use std::sync::atomic::AtomicPtr; /// /// extern "C" { /// fn my_atomic_op(arg: *mut *mut u32); @@ -1814,12 +1902,12 @@ impl AtomicPtr { /// /// // SAFETY: Safe as long as `my_atomic_op` is atomic. /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// ``` #[inline] #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut *mut T { + pub const fn as_ptr(&self) -> *mut *mut T { self.p.get() } } @@ -1861,7 +1949,8 @@ macro_rules! if_not_8_bit { ($_:ident, $($tt:tt)*) => { $($tt)* }; } -#[cfg(target_has_atomic_load_store = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic_load_store))] +#[cfg_attr(bootstrap, cfg(target_has_atomic_load_store = "8"))] macro_rules! atomic_int { ($cfg_cas:meta, $cfg_align:meta, @@ -1957,6 +2046,53 @@ macro_rules! atomic_int { Self {v: UnsafeCell::new(v)} } + /// Creates a new reference to an atomic integer from a pointer. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_from_ptr, pointer_is_aligned)] + #[doc = concat!($extra_feature, "use std::sync::atomic::{self, ", stringify!($atomic_type), "};")] + /// use std::mem::align_of; + /// + /// // Get a pointer to an allocated value + #[doc = concat!("let ptr: *mut ", stringify!($int_type), " = Box::into_raw(Box::new(0));")] + /// + #[doc = concat!("assert!(ptr.is_aligned_to(align_of::<", stringify!($atomic_type), ">()));")] + /// + /// { + /// // Create an atomic view of the allocated value + // SAFETY: this is a doc comment, tidy, it can't hurt you (also guaranteed by the construction of `ptr` and the assert above) + #[doc = concat!(" let atomic = unsafe {", stringify!($atomic_type), "::from_ptr(ptr) };")] + /// + /// // Use `atomic` for atomic operations, possibly share it with other threads + /// atomic.store(1, atomic::Ordering::Relaxed); + /// } + /// + /// // It's ok to non-atomically access the value behind `ptr`, + /// // since the reference to the atomic ended its lifetime in the block above + /// assert_eq!(unsafe { *ptr }, 1); + /// + /// // Deallocate the value + /// unsafe { drop(Box::from_raw(ptr)) } + /// ``` + /// + /// # Safety + /// + /// * `ptr` must be aligned to `align_of::()` (note that on some platforms this can be bigger than `align_of::()`). + #[doc = concat!(" * `ptr` must be aligned to `align_of::<", stringify!($atomic_type), ">()` (note that on some platforms this can be bigger than `align_of::<", stringify!($int_type), ">()`).")] + /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. + /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`. 
+ /// + /// [valid]: crate::ptr#safety + #[unstable(feature = "atomic_from_ptr", issue = "108652")] + #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")] + pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type { + // SAFETY: guaranteed by the caller + unsafe { &*ptr.cast() } + } + + /// Returns a mutable reference to the underlying integer. /// /// This is safe because the mutable reference guarantees that no other threads are @@ -2718,7 +2854,7 @@ macro_rules! atomic_int { /// /// // SAFETY: Safe as long as `my_atomic_op` is atomic. /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// # } /// ``` @@ -2726,7 +2862,7 @@ macro_rules! atomic_int { #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut $int_type { + pub const fn as_ptr(&self) -> *mut $int_type { self.v.get() } } @@ -2988,7 +3124,8 @@ atomic_int_ptr_sized! { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] fn strongest_failure_ordering(order: Ordering) -> Ordering { match order { Release => Relaxed, @@ -3030,7 +3167,8 @@ unsafe fn atomic_load(dst: *const T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_swap`. @@ -3047,7 +3185,8 @@ unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { /// Returns the previous value (like __sync_fetch_and_add). #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_add`. @@ -3064,7 +3203,8 @@ unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { /// Returns the previous value (like __sync_fetch_and_sub). #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_sub`. 
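Note: `strongest_failure_ordering`, made conditional on `target_has_atomic` above, maps a success ordering to the strongest ordering that is still legal for the failure path, which only performs a load. With the two-ordering `compare_exchange` API the caller states both orderings explicitly; the small stable example below (not from the patch) shows the same kind of weakening chosen by hand.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let a = AtomicUsize::new(5);
    // Success is a read-modify-write and may use AcqRel; failure is only a
    // load, so Acquire suffices on that path.
    match a.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire) {
        Ok(prev) => assert_eq!(prev, 5),
        Err(prev) => panic!("unexpected value {prev}"),
    }
    assert_eq!(a.load(Ordering::Relaxed), 10);
}
```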
@@ -3080,7 +3220,8 @@ unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_compare_exchange( dst: *mut T, @@ -3115,7 +3256,8 @@ unsafe fn atomic_compare_exchange( } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_compare_exchange_weak( dst: *mut T, @@ -3150,7 +3292,8 @@ unsafe fn atomic_compare_exchange_weak( } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_and` @@ -3166,7 +3309,8 @@ unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_nand` @@ -3182,7 +3326,8 @@ unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_or` @@ -3198,7 +3343,8 @@ unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_xor` @@ -3215,7 +3361,8 @@ unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { /// returns the max value (signed comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_max(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_max` @@ -3232,7 +3379,8 @@ unsafe fn atomic_max(dst: *mut T, val: T, order: Ordering) -> T { /// returns the min value (signed comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_min(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract 
for `atomic_min` @@ -3249,7 +3397,8 @@ unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// returns the max value (unsigned comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_umax` @@ -3266,7 +3415,8 @@ unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// returns the min value (unsigned comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_umin` diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs index 25b61c0e6..af5bf441b 100644 --- a/library/core/src/task/poll.rs +++ b/library/core/src/task/poll.rs @@ -45,6 +45,7 @@ impl<T> Poll<T> { /// assert_eq!(poll_some_len, Poll::Ready(13)); /// ``` #[stable(feature = "futures_api", since = "1.36.0")] + #[inline] pub fn map<U, F>(self, f: F) -> Poll<U> where F: FnOnce(T) -> U, @@ -144,6 +145,7 @@ impl<T, E> Poll<Result<T, E>> { /// assert_eq!(squared, Poll::Ready(Ok(144))); /// ``` #[stable(feature = "futures_api", since = "1.36.0")] + #[inline] pub fn map_ok<U, F>(self, f: F) -> Poll<Result<U, E>> where F: FnOnce(T) -> U, @@ -171,6 +173,7 @@ impl<T, E> Poll<Result<T, E>> { /// assert_eq!(res, Poll::Ready(Err(0))); /// ``` #[stable(feature = "futures_api", since = "1.36.0")] + #[inline] pub fn map_err<U, F>(self, f: F) -> Poll<Result<T, U>> where F: FnOnce(E) -> U, @@ -199,6 +202,7 @@ impl<T, E> Poll<Option<Result<T, E>>> { /// assert_eq!(squared, Poll::Ready(Some(Ok(144)))); /// ``` #[stable(feature = "poll_map", since = "1.51.0")] + #[inline] pub fn map_ok<U, F>(self, f: F) -> Poll<Option<Result<U, E>>> where F: FnOnce(T) -> U, @@ -228,6 +232,7 @@ impl<T, E> Poll<Option<Result<T, E>>> { /// assert_eq!(res, Poll::Ready(Some(Err(0)))); /// ``` #[stable(feature = "poll_map", since = "1.51.0")] + #[inline] pub fn map_err<U, F>(self, f: F) -> Poll<Option<Result<T, U>>> where F: FnOnce(E) -> U, diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs index 89adfccd9..808825326 100644 --- a/library/core/src/task/wake.rs +++ b/library/core/src/task/wake.rs @@ -174,7 +174,7 @@ impl RawWakerVTable { /// Currently, `Context` only serves to provide access to a [`&Waker`](Waker) /// which can be used to wake the current task.
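The poll.rs hunks above only add `#[inline]`, and the wake.rs hunk makes `Context` a lang item; the quoted doc comment states the key fact about `Context` itself. A small sketch (not part of the patch; `YieldOnce` is a made-up example type) of how a hand-written future reaches the task's `Waker` through `Context`:

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Pends once, wakes itself, then completes on the next poll.
struct YieldOnce {
    polled: bool,
}

impl Future for YieldOnce {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.polled {
            Poll::Ready(())
        } else {
            self.polled = true;
            // All `Context` currently offers is access to the task's `Waker`;
            // use it so the executor knows to poll this future again.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}
```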
#[stable(feature = "futures_api", since = "1.36.0")] -#[cfg_attr(not(bootstrap), lang = "Context")] +#[lang = "Context"] pub struct Context<'a> { waker: &'a Waker, // Ensure we future-proof against variance changes by forcing diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs index f268fe3ae..5327e4f81 100644 --- a/library/core/tests/array.rs +++ b/library/core/tests/array.rs @@ -700,3 +700,28 @@ fn array_into_iter_rfold() { let s = it.rfold(10, |a, b| 10 * a + b); assert_eq!(s, 10432); } + +#[cfg(not(panic = "abort"))] +#[test] +fn array_map_drops_unmapped_elements_on_panic() { + struct DropCounter<'a>(usize, &'a AtomicUsize); + impl Drop for DropCounter<'_> { + fn drop(&mut self) { + self.1.fetch_add(1, Ordering::SeqCst); + } + } + + const MAX: usize = 11; + for panic_after in 0..MAX { + let counter = AtomicUsize::new(0); + let a = array::from_fn::<_, 11, _>(|i| DropCounter(i, &counter)); + let success = std::panic::catch_unwind(|| { + let _ = a.map(|x| { + assert!(x.0 < panic_after); + assert_eq!(counter.load(Ordering::SeqCst), x.0); + }); + }); + assert!(success.is_err()); + assert_eq!(counter.load(Ordering::SeqCst), MAX); + } +} diff --git a/library/core/tests/iter/adapters/mod.rs b/library/core/tests/iter/adapters/mod.rs index ffd5f3857..ca3463aa7 100644 --- a/library/core/tests/iter/adapters/mod.rs +++ b/library/core/tests/iter/adapters/mod.rs @@ -24,7 +24,7 @@ mod zip; use core::cell::Cell; -/// An iterator that panics whenever `next` or next_back` is called +/// An iterator that panics whenever `next` or `next_back` is called /// after `None` has already been returned. This does not violate /// `Iterator`'s contract. Used to test that iterator adapters don't /// poll their inner iterators after exhausting them. diff --git a/library/core/tests/iter/range.rs b/library/core/tests/iter/range.rs index 84498a8ea..0f91ffe2d 100644 --- a/library/core/tests/iter/range.rs +++ b/library/core/tests/iter/range.rs @@ -26,7 +26,6 @@ fn test_range() { #[test] fn test_char_range() { - use std::char; // Miri is too slow let from = if cfg!(miri) { char::from_u32(0xD800 - 10).unwrap() } else { '\0' }; let to = if cfg!(miri) { char::from_u32(0xDFFF + 10).unwrap() } else { char::MAX }; diff --git a/library/core/tests/iter/traits/iterator.rs b/library/core/tests/iter/traits/iterator.rs index 37345c1d3..62566a950 100644 --- a/library/core/tests/iter/traits/iterator.rs +++ b/library/core/tests/iter/traits/iterator.rs @@ -582,6 +582,9 @@ fn test_next_chunk() { assert_eq!(it.next_chunk().unwrap(), []); assert_eq!(it.next_chunk().unwrap(), [4, 5, 6, 7, 8, 9]); assert_eq!(it.next_chunk::<4>().unwrap_err().as_slice(), &[10, 11]); + + let mut it = std::iter::repeat_with(|| panic!()); + assert_eq!(it.next_chunk::<0>().unwrap(), []); } // just tests by whether or not this compiles diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs index 42a26ae16..ccb7be68e 100644 --- a/library/core/tests/lib.rs +++ b/library/core/tests/lib.rs @@ -66,6 +66,8 @@ #![feature(try_trait_v2)] #![feature(slice_internals)] #![feature(slice_partition_dedup)] +#![feature(ip)] +#![feature(ip_in_core)] #![feature(iter_advance_by)] #![feature(iter_array_chunks)] #![feature(iter_collect_into)] @@ -77,6 +79,9 @@ #![feature(iter_repeat_n)] #![feature(iterator_try_collect)] #![feature(iterator_try_reduce)] +#![feature(const_ip)] +#![feature(const_ipv4)] +#![feature(const_ipv6)] #![feature(const_mut_refs)] #![feature(const_pin)] #![feature(const_waker)] @@ -135,6 +140,7 @@ mod lazy; mod macros; mod 
manually_drop; mod mem; +mod net; mod nonzero; mod num; mod ops; diff --git a/library/core/tests/net/ip_addr.rs b/library/core/tests/net/ip_addr.rs new file mode 100644 index 000000000..5a6ac08c0 --- /dev/null +++ b/library/core/tests/net/ip_addr.rs @@ -0,0 +1,1035 @@ +use super::{sa4, sa6}; +use core::net::{ + IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope, SocketAddr, SocketAddrV4, SocketAddrV6, +}; +use core::str::FromStr; + +#[test] +fn test_from_str_ipv4() { + assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse()); + assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse()); + assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse()); + + // out of range + let none: Option = "256.0.0.1".parse().ok(); + assert_eq!(None, none); + // too short + let none: Option = "255.0.0".parse().ok(); + assert_eq!(None, none); + // too long + let none: Option = "255.0.0.1.2".parse().ok(); + assert_eq!(None, none); + // no number between dots + let none: Option = "255.0..1".parse().ok(); + assert_eq!(None, none); + // octal + let none: Option = "255.0.0.01".parse().ok(); + assert_eq!(None, none); + // octal zero + let none: Option = "255.0.0.00".parse().ok(); + assert_eq!(None, none); + let none: Option = "255.0.00.0".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn test_from_str_ipv6() { + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse()); + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse()); + + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse()); + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse()); + + assert_eq!(Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)), "2a02:6b8::11:11".parse()); + + // too long group + let none: Option = "::00000".parse().ok(); + assert_eq!(None, none); + // too short + let none: Option = "1:2:3:4:5:6:7".parse().ok(); + assert_eq!(None, none); + // too long + let none: Option = "1:2:3:4:5:6:7:8:9".parse().ok(); + assert_eq!(None, none); + // triple colon + let none: Option = "1:2:::6:7:8".parse().ok(); + assert_eq!(None, none); + // two double colons + let none: Option = "1:2::6::8".parse().ok(); + assert_eq!(None, none); + // `::` indicating zero groups of zeros + let none: Option = "1:2:3:4::5:6:7:8".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn test_from_str_ipv4_in_ipv6() { + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse()); + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)), "::FFFF:192.0.2.33".parse()); + assert_eq!( + Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)), + "64:ff9b::192.0.2.33".parse() + ); + assert_eq!( + Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)), + "2001:db8:122:c000:2:2100:192.0.2.33".parse() + ); + + // colon after v4 + let none: Option = "::127.0.0.1:".parse().ok(); + assert_eq!(None, none); + // not enough groups + let none: Option = "1:2:3:4:5:127.0.0.1".parse().ok(); + assert_eq!(None, none); + // too many groups + let none: Option = "1:2:3:4:5:6:7:127.0.0.1".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn test_from_str_socket_addr() { + assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse()); + assert_eq!(Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse()); + assert_eq!( + Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)), + "[2a02:6b8:0:1::1]:53".parse() + ); + assert_eq!( + Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 
1, 0, 0, 0, 1), 53, 0, 0)), + "[2a02:6b8:0:1::1]:53".parse() + ); + assert_eq!(Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)), "[::127.0.0.1]:22".parse()); + assert_eq!( + Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)), + "[::127.0.0.1]:22".parse() + ); + + // without port + let none: Option = "127.0.0.1".parse().ok(); + assert_eq!(None, none); + // without port + let none: Option = "127.0.0.1:".parse().ok(); + assert_eq!(None, none); + // wrong brackets around v4 + let none: Option = "[127.0.0.1]:22".parse().ok(); + assert_eq!(None, none); + // port out of range + let none: Option = "127.0.0.1:123456".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn ipv4_addr_to_string() { + assert_eq!(Ipv4Addr::new(127, 0, 0, 1).to_string(), "127.0.0.1"); + // Short address + assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1"); + // Long address + assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127"); + + // Test padding + assert_eq!(format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1 "); + assert_eq!(format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), " 1.1.1.1"); +} + +#[test] +fn ipv6_addr_to_string() { + // ipv4-mapped address + let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280); + assert_eq!(a1.to_string(), "::ffff:192.0.2.128"); + + // ipv4-compatible address + let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280); + assert_eq!(a1.to_string(), "::192.0.2.128"); + + // v6 address with no zero segments + assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f"); + + // longest possible IPv6 length + assert_eq!( + Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888).to_string(), + "1111:2222:3333:4444:5555:6666:7777:8888" + ); + // padding + assert_eq!(format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), "1:2:3:4:5:6:7:8 "); + assert_eq!(format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), " 1:2:3:4:5:6:7:8"); + + // reduce a single run of zeros + assert_eq!( + "ae::ffff:102:304", + Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string() + ); + + // don't reduce just a single zero segment + assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string()); + + // 'any' address + assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string()); + + // loopback address + assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string()); + + // ends in zeros + assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string()); + + // two runs of zeros, second one is longer + assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string()); + + // two runs of zeros, equal length + assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string()); + + // don't prefix `0x` to each segment in `dbg!`. 
+ assert_eq!("1::4:5:0:0:8", &format!("{:#?}", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8))); +} + +#[test] +fn ipv4_to_ipv6() { + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678), + Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped() + ); + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678), + Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible() + ); +} + +#[test] +fn ipv6_to_ipv4_mapped() { + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4_mapped(), + Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) + ); + assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4_mapped(), None); +} + +#[test] +fn ipv6_to_ipv4() { + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(), + Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) + ); + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), + Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) + ); + assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None); +} + +#[test] +fn ip_properties() { + macro_rules! ip { + ($s:expr) => { + IpAddr::from_str($s).unwrap() + }; + } + + macro_rules! check { + ($s:expr) => { + check!($s, 0); + }; + + ($s:expr, $mask:expr) => {{ + let unspec: u8 = 1 << 0; + let loopback: u8 = 1 << 1; + let global: u8 = 1 << 2; + let multicast: u8 = 1 << 3; + let doc: u8 = 1 << 4; + let benchmarking: u8 = 1 << 5; + + if ($mask & unspec) == unspec { + assert!(ip!($s).is_unspecified()); + } else { + assert!(!ip!($s).is_unspecified()); + } + + if ($mask & loopback) == loopback { + assert!(ip!($s).is_loopback()); + } else { + assert!(!ip!($s).is_loopback()); + } + + if ($mask & global) == global { + assert!(ip!($s).is_global()); + } else { + assert!(!ip!($s).is_global()); + } + + if ($mask & multicast) == multicast { + assert!(ip!($s).is_multicast()); + } else { + assert!(!ip!($s).is_multicast()); + } + + if ($mask & doc) == doc { + assert!(ip!($s).is_documentation()); + } else { + assert!(!ip!($s).is_documentation()); + } + + if ($mask & benchmarking) == benchmarking { + assert!(ip!($s).is_benchmarking()); + } else { + assert!(!ip!($s).is_benchmarking()); + } + }}; + } + + let unspec: u8 = 1 << 0; + let loopback: u8 = 1 << 1; + let global: u8 = 1 << 2; + let multicast: u8 = 1 << 3; + let doc: u8 = 1 << 4; + let benchmarking: u8 = 1 << 5; + + check!("0.0.0.0", unspec); + check!("0.0.0.1"); + check!("0.1.0.0"); + check!("10.9.8.7"); + check!("127.1.2.3", loopback); + check!("172.31.254.253"); + check!("169.254.253.242"); + check!("192.0.2.183", doc); + check!("192.1.2.183", global); + check!("192.168.254.253"); + check!("198.51.100.0", doc); + check!("203.0.113.0", doc); + check!("203.2.113.0", global); + check!("224.0.0.0", global | multicast); + check!("239.255.255.255", global | multicast); + check!("255.255.255.255"); + // make sure benchmarking addresses are not global + check!("198.18.0.0", benchmarking); + check!("198.18.54.2", benchmarking); + check!("198.19.255.255", benchmarking); + // make sure addresses reserved for protocol assignment are not global + check!("192.0.0.0"); + check!("192.0.0.255"); + check!("192.0.0.100"); + // make sure reserved addresses are not global + check!("240.0.0.0"); + check!("251.54.1.76"); + check!("254.255.255.255"); + // make sure shared addresses are not global + check!("100.64.0.0"); + check!("100.127.255.255"); + check!("100.100.100.0"); + + check!("::", unspec); + check!("::1", loopback); + check!("::0.0.0.2", global); + check!("1::", global); + check!("fc00::"); + 
check!("fdff:ffff::"); + check!("fe80:ffff::"); + check!("febf:ffff::"); + check!("fec0::", global); + check!("ff01::", global | multicast); + check!("ff02::", global | multicast); + check!("ff03::", global | multicast); + check!("ff04::", global | multicast); + check!("ff05::", global | multicast); + check!("ff08::", global | multicast); + check!("ff0e::", global | multicast); + check!("2001:db8:85a3::8a2e:370:7334", doc); + check!("2001:2::ac32:23ff:21", benchmarking); + check!("102:304:506:708:90a:b0c:d0e:f10", global); +} + +#[test] +fn ipv4_properties() { + macro_rules! ip { + ($s:expr) => { + Ipv4Addr::from_str($s).unwrap() + }; + } + + macro_rules! check { + ($s:expr) => { + check!($s, 0); + }; + + ($s:expr, $mask:expr) => {{ + let unspec: u16 = 1 << 0; + let loopback: u16 = 1 << 1; + let private: u16 = 1 << 2; + let link_local: u16 = 1 << 3; + let global: u16 = 1 << 4; + let multicast: u16 = 1 << 5; + let broadcast: u16 = 1 << 6; + let documentation: u16 = 1 << 7; + let benchmarking: u16 = 1 << 8; + let reserved: u16 = 1 << 10; + let shared: u16 = 1 << 11; + + if ($mask & unspec) == unspec { + assert!(ip!($s).is_unspecified()); + } else { + assert!(!ip!($s).is_unspecified()); + } + + if ($mask & loopback) == loopback { + assert!(ip!($s).is_loopback()); + } else { + assert!(!ip!($s).is_loopback()); + } + + if ($mask & private) == private { + assert!(ip!($s).is_private()); + } else { + assert!(!ip!($s).is_private()); + } + + if ($mask & link_local) == link_local { + assert!(ip!($s).is_link_local()); + } else { + assert!(!ip!($s).is_link_local()); + } + + if ($mask & global) == global { + assert!(ip!($s).is_global()); + } else { + assert!(!ip!($s).is_global()); + } + + if ($mask & multicast) == multicast { + assert!(ip!($s).is_multicast()); + } else { + assert!(!ip!($s).is_multicast()); + } + + if ($mask & broadcast) == broadcast { + assert!(ip!($s).is_broadcast()); + } else { + assert!(!ip!($s).is_broadcast()); + } + + if ($mask & documentation) == documentation { + assert!(ip!($s).is_documentation()); + } else { + assert!(!ip!($s).is_documentation()); + } + + if ($mask & benchmarking) == benchmarking { + assert!(ip!($s).is_benchmarking()); + } else { + assert!(!ip!($s).is_benchmarking()); + } + + if ($mask & reserved) == reserved { + assert!(ip!($s).is_reserved()); + } else { + assert!(!ip!($s).is_reserved()); + } + + if ($mask & shared) == shared { + assert!(ip!($s).is_shared()); + } else { + assert!(!ip!($s).is_shared()); + } + }}; + } + + let unspec: u16 = 1 << 0; + let loopback: u16 = 1 << 1; + let private: u16 = 1 << 2; + let link_local: u16 = 1 << 3; + let global: u16 = 1 << 4; + let multicast: u16 = 1 << 5; + let broadcast: u16 = 1 << 6; + let documentation: u16 = 1 << 7; + let benchmarking: u16 = 1 << 8; + let reserved: u16 = 1 << 10; + let shared: u16 = 1 << 11; + + check!("0.0.0.0", unspec); + check!("0.0.0.1"); + check!("0.1.0.0"); + check!("10.9.8.7", private); + check!("127.1.2.3", loopback); + check!("172.31.254.253", private); + check!("169.254.253.242", link_local); + check!("192.0.2.183", documentation); + check!("192.1.2.183", global); + check!("192.168.254.253", private); + check!("198.51.100.0", documentation); + check!("203.0.113.0", documentation); + check!("203.2.113.0", global); + check!("224.0.0.0", global | multicast); + check!("239.255.255.255", global | multicast); + check!("255.255.255.255", broadcast); + check!("198.18.0.0", benchmarking); + check!("198.18.54.2", benchmarking); + check!("198.19.255.255", benchmarking); + check!("192.0.0.0"); + 
check!("192.0.0.255"); + check!("192.0.0.100"); + check!("240.0.0.0", reserved); + check!("251.54.1.76", reserved); + check!("254.255.255.255", reserved); + check!("100.64.0.0", shared); + check!("100.127.255.255", shared); + check!("100.100.100.0", shared); +} + +#[test] +fn ipv6_properties() { + macro_rules! ip { + ($s:expr) => { + Ipv6Addr::from_str($s).unwrap() + }; + } + + macro_rules! check { + ($s:expr, &[$($octet:expr),*], $mask:expr) => { + assert_eq!($s, ip!($s).to_string()); + let octets = &[$($octet),*]; + assert_eq!(&ip!($s).octets(), octets); + assert_eq!(Ipv6Addr::from(*octets), ip!($s)); + + let unspecified: u32 = 1 << 0; + let loopback: u32 = 1 << 1; + let unique_local: u32 = 1 << 2; + let global: u32 = 1 << 3; + let unicast_link_local: u32 = 1 << 4; + let unicast_global: u32 = 1 << 7; + let documentation: u32 = 1 << 8; + let benchmarking: u32 = 1 << 16; + let multicast_interface_local: u32 = 1 << 9; + let multicast_link_local: u32 = 1 << 10; + let multicast_realm_local: u32 = 1 << 11; + let multicast_admin_local: u32 = 1 << 12; + let multicast_site_local: u32 = 1 << 13; + let multicast_organization_local: u32 = 1 << 14; + let multicast_global: u32 = 1 << 15; + let multicast: u32 = multicast_interface_local + | multicast_admin_local + | multicast_global + | multicast_link_local + | multicast_realm_local + | multicast_site_local + | multicast_organization_local; + + if ($mask & unspecified) == unspecified { + assert!(ip!($s).is_unspecified()); + } else { + assert!(!ip!($s).is_unspecified()); + } + if ($mask & loopback) == loopback { + assert!(ip!($s).is_loopback()); + } else { + assert!(!ip!($s).is_loopback()); + } + if ($mask & unique_local) == unique_local { + assert!(ip!($s).is_unique_local()); + } else { + assert!(!ip!($s).is_unique_local()); + } + if ($mask & global) == global { + assert!(ip!($s).is_global()); + } else { + assert!(!ip!($s).is_global()); + } + if ($mask & unicast_link_local) == unicast_link_local { + assert!(ip!($s).is_unicast_link_local()); + } else { + assert!(!ip!($s).is_unicast_link_local()); + } + if ($mask & unicast_global) == unicast_global { + assert!(ip!($s).is_unicast_global()); + } else { + assert!(!ip!($s).is_unicast_global()); + } + if ($mask & documentation) == documentation { + assert!(ip!($s).is_documentation()); + } else { + assert!(!ip!($s).is_documentation()); + } + if ($mask & benchmarking) == benchmarking { + assert!(ip!($s).is_benchmarking()); + } else { + assert!(!ip!($s).is_benchmarking()); + } + if ($mask & multicast) != 0 { + assert!(ip!($s).multicast_scope().is_some()); + assert!(ip!($s).is_multicast()); + } else { + assert!(ip!($s).multicast_scope().is_none()); + assert!(!ip!($s).is_multicast()); + } + if ($mask & multicast_interface_local) == multicast_interface_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::InterfaceLocal); + } + if ($mask & multicast_link_local) == multicast_link_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::LinkLocal); + } + if ($mask & multicast_realm_local) == multicast_realm_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::RealmLocal); + } + if ($mask & multicast_admin_local) == multicast_admin_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::AdminLocal); + } + if ($mask & multicast_site_local) == multicast_site_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::SiteLocal); + } + if ($mask & multicast_organization_local) == multicast_organization_local 
{ + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::OrganizationLocal); + } + if ($mask & multicast_global) == multicast_global { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::Global); + } + } + } + + let unspecified: u32 = 1 << 0; + let loopback: u32 = 1 << 1; + let unique_local: u32 = 1 << 2; + let global: u32 = 1 << 3; + let unicast_link_local: u32 = 1 << 4; + let unicast_global: u32 = 1 << 7; + let documentation: u32 = 1 << 8; + let benchmarking: u32 = 1 << 16; + let multicast_interface_local: u32 = 1 << 9; + let multicast_link_local: u32 = 1 << 10; + let multicast_realm_local: u32 = 1 << 11; + let multicast_admin_local: u32 = 1 << 12; + let multicast_site_local: u32 = 1 << 13; + let multicast_organization_local: u32 = 1 << 14; + let multicast_global: u32 = 1 << 15; + + check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified); + + check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback); + + check!("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global); + + check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global); + + check!( + "::ffff:127.0.0.1", + &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x7f, 0, 0, 1], + unicast_global + ); + + check!( + "64:ff9b:1::", + &[0, 0x64, 0xff, 0x9b, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_global + ); + + check!("100::", &[0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!("2001::", &[0x20, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!( + "2001:1::1", + &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + global | unicast_global + ); + + check!( + "2001:1::2", + &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], + global | unicast_global + ); + + check!( + "2001:3::", + &[0x20, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!( + "2001:4:112::", + &[0x20, 1, 0, 4, 1, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!( + "2001:20::", + &[0x20, 1, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!("2001:30::", &[0x20, 1, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!( + "2001:200::", + &[0x20, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local); + + check!( + "fdff:ffff::", + &[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unique_local + ); + + check!( + "fe80:ffff::", + &[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_link_local + ); + + check!("fe80::", &[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local); + + check!( + "febf:ffff::", + &[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_link_local + ); + + check!("febf::", &[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local); + + check!( + "febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + &[ + 0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff + ], + unicast_link_local + ); + + check!( + "fe80::ffff:ffff:ffff:ffff", + &[ + 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff + ], + unicast_link_local + ); + + check!( + "fe80:0:0:1::", + &[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_link_local + ); + + check!( + "fec0::", + &[0xfe, 
0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_global | global + ); + + check!( + "ff01::", + &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_interface_local | global + ); + + check!( + "ff02::", + &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_link_local | global + ); + + check!( + "ff03::", + &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_realm_local | global + ); + + check!( + "ff04::", + &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_admin_local | global + ); + + check!( + "ff05::", + &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_site_local | global + ); + + check!( + "ff08::", + &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_organization_local | global + ); + + check!( + "ff0e::", + &[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_global | global + ); + + check!( + "2001:db8:85a3::8a2e:370:7334", + &[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34], + documentation + ); + + check!( + "2001:2::ac32:23ff:21", + &[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21], + benchmarking + ); + + check!( + "102:304:506:708:90a:b0c:d0e:f10", + &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + global | unicast_global + ); +} + +#[test] +fn test_ipv4_to_int() { + let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44); + assert_eq!(u32::from(a), 0x11223344); +} + +#[test] +fn test_int_to_ipv4() { + let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44); + assert_eq!(Ipv4Addr::from(0x11223344), a); +} + +#[test] +fn test_ipv6_to_int() { + let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11); + assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128); +} + +#[test] +fn test_int_to_ipv6() { + let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11); + assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a); +} + +#[test] +fn ipv4_from_constructors() { + assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1)); + assert!(Ipv4Addr::LOCALHOST.is_loopback()); + assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0)); + assert!(Ipv4Addr::UNSPECIFIED.is_unspecified()); + assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255)); + assert!(Ipv4Addr::BROADCAST.is_broadcast()); +} + +#[test] +fn ipv6_from_constructors() { + assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + assert!(Ipv6Addr::LOCALHOST.is_loopback()); + assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)); + assert!(Ipv6Addr::UNSPECIFIED.is_unspecified()); +} + +#[test] +fn ipv4_from_octets() { + assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1)) +} + +#[test] +fn ipv6_from_segments() { + let from_u16s = + Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]); + let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff); + assert_eq!(new, from_u16s); +} + +#[test] +fn ipv6_from_octets() { + let from_u16s = + Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]); + let from_u8s = Ipv6Addr::from([ + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, + ]); + assert_eq!(from_u16s, from_u8s); +} + +#[test] +fn cmp() { + let v41 = Ipv4Addr::new(100, 64, 3, 3); + let v42 = Ipv4Addr::new(192, 0, 2, 2); + let v61 = "2001:db8:f00::1002".parse::().unwrap(); + let v62 = 
"2001:db8:f00::2001".parse::().unwrap(); + assert!(v41 < v42); + assert!(v61 < v62); + + assert_eq!(v41, IpAddr::V4(v41)); + assert_eq!(v61, IpAddr::V6(v61)); + assert!(v41 != IpAddr::V4(v42)); + assert!(v61 != IpAddr::V6(v62)); + + assert!(v41 < IpAddr::V4(v42)); + assert!(v61 < IpAddr::V6(v62)); + assert!(IpAddr::V4(v41) < v42); + assert!(IpAddr::V6(v61) < v62); + + assert!(v41 < IpAddr::V6(v61)); + assert!(IpAddr::V4(v41) < v61); +} + +#[test] +fn is_v4() { + let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3)); + assert!(ip.is_ipv4()); + assert!(!ip.is_ipv6()); +} + +#[test] +fn is_v6() { + let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678)); + assert!(!ip.is_ipv4()); + assert!(ip.is_ipv6()); +} + +#[test] +fn ipv4_const() { + // test that the methods of `Ipv4Addr` are usable in a const context + + const IP_ADDRESS: Ipv4Addr = Ipv4Addr::new(127, 0, 0, 1); + assert_eq!(IP_ADDRESS, Ipv4Addr::LOCALHOST); + + const OCTETS: [u8; 4] = IP_ADDRESS.octets(); + assert_eq!(OCTETS, [127, 0, 0, 1]); + + const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); + assert!(!IS_UNSPECIFIED); + + const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); + assert!(IS_LOOPBACK); + + const IS_PRIVATE: bool = IP_ADDRESS.is_private(); + assert!(!IS_PRIVATE); + + const IS_LINK_LOCAL: bool = IP_ADDRESS.is_link_local(); + assert!(!IS_LINK_LOCAL); + + const IS_GLOBAL: bool = IP_ADDRESS.is_global(); + assert!(!IS_GLOBAL); + + const IS_SHARED: bool = IP_ADDRESS.is_shared(); + assert!(!IS_SHARED); + + const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking(); + assert!(!IS_BENCHMARKING); + + const IS_RESERVED: bool = IP_ADDRESS.is_reserved(); + assert!(!IS_RESERVED); + + const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); + assert!(!IS_MULTICAST); + + const IS_BROADCAST: bool = IP_ADDRESS.is_broadcast(); + assert!(!IS_BROADCAST); + + const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation(); + assert!(!IS_DOCUMENTATION); + + const IP_V6_COMPATIBLE: Ipv6Addr = IP_ADDRESS.to_ipv6_compatible(); + assert_eq!( + IP_V6_COMPATIBLE, + Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 1]) + ); + + const IP_V6_MAPPED: Ipv6Addr = IP_ADDRESS.to_ipv6_mapped(); + assert_eq!( + IP_V6_MAPPED, + Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 1]) + ); +} + +#[test] +fn ipv6_const() { + // test that the methods of `Ipv6Addr` are usable in a const context + + const IP_ADDRESS: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + assert_eq!(IP_ADDRESS, Ipv6Addr::LOCALHOST); + + const SEGMENTS: [u16; 8] = IP_ADDRESS.segments(); + assert_eq!(SEGMENTS, [0, 0, 0, 0, 0, 0, 0, 1]); + + const OCTETS: [u8; 16] = IP_ADDRESS.octets(); + assert_eq!(OCTETS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); + + const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); + assert!(!IS_UNSPECIFIED); + + const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); + assert!(IS_LOOPBACK); + + const IS_GLOBAL: bool = IP_ADDRESS.is_global(); + assert!(!IS_GLOBAL); + + const IS_UNIQUE_LOCAL: bool = IP_ADDRESS.is_unique_local(); + assert!(!IS_UNIQUE_LOCAL); + + const IS_UNICAST_LINK_LOCAL: bool = IP_ADDRESS.is_unicast_link_local(); + assert!(!IS_UNICAST_LINK_LOCAL); + + const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation(); + assert!(!IS_DOCUMENTATION); + + const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking(); + assert!(!IS_BENCHMARKING); + + const IS_UNICAST_GLOBAL: bool = IP_ADDRESS.is_unicast_global(); + assert!(!IS_UNICAST_GLOBAL); + + const MULTICAST_SCOPE: Option = IP_ADDRESS.multicast_scope(); + 
assert_eq!(MULTICAST_SCOPE, None); + + const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); + assert!(!IS_MULTICAST); + + const IP_V4: Option = IP_ADDRESS.to_ipv4(); + assert_eq!(IP_V4.unwrap(), Ipv4Addr::new(0, 0, 0, 1)); +} + +#[test] +fn ip_const() { + // test that the methods of `IpAddr` are usable in a const context + + const IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); + + const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); + assert!(!IS_UNSPECIFIED); + + const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); + assert!(IS_LOOPBACK); + + const IS_GLOBAL: bool = IP_ADDRESS.is_global(); + assert!(!IS_GLOBAL); + + const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); + assert!(!IS_MULTICAST); + + const IS_IP_V4: bool = IP_ADDRESS.is_ipv4(); + assert!(IS_IP_V4); + + const IS_IP_V6: bool = IP_ADDRESS.is_ipv6(); + assert!(!IS_IP_V6); +} + +#[test] +fn structural_match() { + // test that all IP types can be structurally matched upon + + const IPV4: Ipv4Addr = Ipv4Addr::LOCALHOST; + match IPV4 { + Ipv4Addr::LOCALHOST => {} + _ => unreachable!(), + } + + const IPV6: Ipv6Addr = Ipv6Addr::LOCALHOST; + match IPV6 { + Ipv6Addr::LOCALHOST => {} + _ => unreachable!(), + } + + const IP: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); + match IP { + IpAddr::V4(Ipv4Addr::LOCALHOST) => {} + _ => unreachable!(), + } +} diff --git a/library/core/tests/net/mod.rs b/library/core/tests/net/mod.rs new file mode 100644 index 000000000..8f17bbe55 --- /dev/null +++ b/library/core/tests/net/mod.rs @@ -0,0 +1,13 @@ +use core::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; + +mod ip_addr; +mod parser; +mod socket_addr; + +pub fn sa4(a: Ipv4Addr, p: u16) -> SocketAddr { + SocketAddr::V4(SocketAddrV4::new(a, p)) +} + +pub fn sa6(a: Ipv6Addr, p: u16) -> SocketAddr { + SocketAddr::V6(SocketAddrV6::new(a, p, 0, 0)) +} diff --git a/library/core/tests/net/parser.rs b/library/core/tests/net/parser.rs new file mode 100644 index 000000000..36b87d7c1 --- /dev/null +++ b/library/core/tests/net/parser.rs @@ -0,0 +1,149 @@ +// FIXME: These tests are all excellent candidates for AFL fuzz testing +use core::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use core::str::FromStr; + +const PORT: u16 = 8080; +const SCOPE_ID: u32 = 1337; + +const IPV4: Ipv4Addr = Ipv4Addr::new(192, 168, 0, 1); +const IPV4_STR: &str = "192.168.0.1"; +const IPV4_STR_PORT: &str = "192.168.0.1:8080"; +const IPV4_STR_WITH_OCTAL: &str = "0127.0.0.1"; +const IPV4_STR_WITH_HEX: &str = "0x10.0.0.1"; + +const IPV6: Ipv6Addr = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0xc0a8, 0x1); +const IPV6_STR_FULL: &str = "2001:db8:0:0:0:0:c0a8:1"; +const IPV6_STR_COMPRESS: &str = "2001:db8::c0a8:1"; +const IPV6_STR_V4: &str = "2001:db8::192.168.0.1"; +const IPV6_STR_V4_WITH_OCTAL: &str = "2001:db8::0127.0.0.1"; +const IPV6_STR_V4_WITH_HEX: &str = "2001:db8::0x10.0.0.1"; +const IPV6_STR_PORT: &str = "[2001:db8::c0a8:1]:8080"; +const IPV6_STR_PORT_SCOPE_ID: &str = "[2001:db8::c0a8:1%1337]:8080"; + +#[test] +fn parse_ipv4() { + let result: Ipv4Addr = IPV4_STR.parse().unwrap(); + assert_eq!(result, IPV4); + + assert!(Ipv4Addr::from_str(IPV4_STR_PORT).is_err()); + assert!(Ipv4Addr::from_str(IPV4_STR_WITH_OCTAL).is_err()); + assert!(Ipv4Addr::from_str(IPV4_STR_WITH_HEX).is_err()); + assert!(Ipv4Addr::from_str(IPV6_STR_FULL).is_err()); + assert!(Ipv4Addr::from_str(IPV6_STR_COMPRESS).is_err()); + assert!(Ipv4Addr::from_str(IPV6_STR_V4).is_err()); + assert!(Ipv4Addr::from_str(IPV6_STR_PORT).is_err()); +} + +#[test] +fn 
parse_ipv6() { + let result: Ipv6Addr = IPV6_STR_FULL.parse().unwrap(); + assert_eq!(result, IPV6); + + let result: Ipv6Addr = IPV6_STR_COMPRESS.parse().unwrap(); + assert_eq!(result, IPV6); + + let result: Ipv6Addr = IPV6_STR_V4.parse().unwrap(); + assert_eq!(result, IPV6); + + assert!(Ipv6Addr::from_str(IPV6_STR_V4_WITH_OCTAL).is_err()); + assert!(Ipv6Addr::from_str(IPV6_STR_V4_WITH_HEX).is_err()); + assert!(Ipv6Addr::from_str(IPV4_STR).is_err()); + assert!(Ipv6Addr::from_str(IPV4_STR_PORT).is_err()); + assert!(Ipv6Addr::from_str(IPV6_STR_PORT).is_err()); +} + +#[test] +fn parse_ip() { + let result: IpAddr = IPV4_STR.parse().unwrap(); + assert_eq!(result, IpAddr::from(IPV4)); + + let result: IpAddr = IPV6_STR_FULL.parse().unwrap(); + assert_eq!(result, IpAddr::from(IPV6)); + + let result: IpAddr = IPV6_STR_COMPRESS.parse().unwrap(); + assert_eq!(result, IpAddr::from(IPV6)); + + let result: IpAddr = IPV6_STR_V4.parse().unwrap(); + assert_eq!(result, IpAddr::from(IPV6)); + + assert!(IpAddr::from_str(IPV4_STR_PORT).is_err()); + assert!(IpAddr::from_str(IPV6_STR_PORT).is_err()); +} + +#[test] +fn parse_socket_v4() { + let result: SocketAddrV4 = IPV4_STR_PORT.parse().unwrap(); + assert_eq!(result, SocketAddrV4::new(IPV4, PORT)); + + assert!(SocketAddrV4::from_str(IPV4_STR).is_err()); + assert!(SocketAddrV4::from_str(IPV6_STR_FULL).is_err()); + assert!(SocketAddrV4::from_str(IPV6_STR_COMPRESS).is_err()); + assert!(SocketAddrV4::from_str(IPV6_STR_V4).is_err()); + assert!(SocketAddrV4::from_str(IPV6_STR_PORT).is_err()); +} + +#[test] +fn parse_socket_v6() { + assert_eq!(IPV6_STR_PORT.parse(), Ok(SocketAddrV6::new(IPV6, PORT, 0, 0))); + assert_eq!(IPV6_STR_PORT_SCOPE_ID.parse(), Ok(SocketAddrV6::new(IPV6, PORT, 0, SCOPE_ID))); + + assert!(SocketAddrV6::from_str(IPV4_STR).is_err()); + assert!(SocketAddrV6::from_str(IPV4_STR_PORT).is_err()); + assert!(SocketAddrV6::from_str(IPV6_STR_FULL).is_err()); + assert!(SocketAddrV6::from_str(IPV6_STR_COMPRESS).is_err()); + assert!(SocketAddrV6::from_str(IPV6_STR_V4).is_err()); +} + +#[test] +fn parse_socket() { + let result: SocketAddr = IPV4_STR_PORT.parse().unwrap(); + assert_eq!(result, SocketAddr::from((IPV4, PORT))); + + let result: SocketAddr = IPV6_STR_PORT.parse().unwrap(); + assert_eq!(result, SocketAddr::from((IPV6, PORT))); + + assert!(SocketAddr::from_str(IPV4_STR).is_err()); + assert!(SocketAddr::from_str(IPV6_STR_FULL).is_err()); + assert!(SocketAddr::from_str(IPV6_STR_COMPRESS).is_err()); + assert!(SocketAddr::from_str(IPV6_STR_V4).is_err()); +} + +#[test] +fn ipv6_corner_cases() { + let result: Ipv6Addr = "1::".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0)); + + let result: Ipv6Addr = "1:1::".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(1, 1, 0, 0, 0, 0, 0, 0)); + + let result: Ipv6Addr = "::1".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + + let result: Ipv6Addr = "::1:1".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 1, 1)); + + let result: Ipv6Addr = "::".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)); + + let result: Ipv6Addr = "::192.168.0.1".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc0a8, 0x1)); + + let result: Ipv6Addr = "::1:192.168.0.1".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(0, 0, 0, 0, 0, 1, 0xc0a8, 0x1)); + + let result: Ipv6Addr = "1:1:1:1:1:1:192.168.0.1".parse().unwrap(); + assert_eq!(result, Ipv6Addr::new(1, 1, 1, 1, 1, 1, 0xc0a8, 0x1)); +} + +// Things that 
might not seem like failures but are +#[test] +fn ipv6_corner_failures() { + // No IP address before the :: + assert!(Ipv6Addr::from_str("1:192.168.0.1::").is_err()); + + // :: must have at least 1 set of zeroes + assert!(Ipv6Addr::from_str("1:1:1:1::1:1:1:1").is_err()); + + // Need brackets for a port + assert!(SocketAddrV6::from_str("1:1:1:1:1:1:1:1:8080").is_err()); +} diff --git a/library/core/tests/net/socket_addr.rs b/library/core/tests/net/socket_addr.rs new file mode 100644 index 000000000..68c7cd94d --- /dev/null +++ b/library/core/tests/net/socket_addr.rs @@ -0,0 +1,233 @@ +use core::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; + +#[test] +fn ipv4_socket_addr_to_string() { + // Shortest possible IPv4 length. + assert_eq!(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0).to_string(), "0.0.0.0:0"); + + // Longest possible IPv4 length. + assert_eq!( + SocketAddrV4::new(Ipv4Addr::new(255, 255, 255, 255), u16::MAX).to_string(), + "255.255.255.255:65535" + ); + + // Test padding. + assert_eq!( + &format!("{:16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), + "1.1.1.1:53 " + ); + assert_eq!( + &format!("{:>16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), + " 1.1.1.1:53" + ); +} + +#[test] +fn ipv6_socket_addr_to_string() { + // IPv4-mapped address. + assert_eq!( + SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280), 8080, 0, 0) + .to_string(), + "[::ffff:192.0.2.128]:8080" + ); + + // IPv4-compatible address. + assert_eq!( + SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280), 8080, 0, 0).to_string(), + "[::192.0.2.128]:8080" + ); + + // IPv6 address with no zero segments. + assert_eq!( + SocketAddrV6::new(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15), 80, 0, 0).to_string(), + "[8:9:a:b:c:d:e:f]:80" + ); + + // Shortest possible IPv6 length. + assert_eq!(SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, 0, 0, 0).to_string(), "[::]:0"); + + // Longest possible IPv6 length. + assert_eq!( + SocketAddrV6::new( + Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888), + u16::MAX, + u32::MAX, + u32::MAX, + ) + .to_string(), + "[1111:2222:3333:4444:5555:6666:7777:8888%4294967295]:65535" + ); + + // Test padding. 
+ assert_eq!( + &format!("{:22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), + "[1:2:3:4:5:6:7:8]:9 " + ); + assert_eq!( + &format!("{:>22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), + " [1:2:3:4:5:6:7:8]:9" + ); +} + +#[test] +fn set_ip() { + fn ip4(low: u8) -> Ipv4Addr { + Ipv4Addr::new(77, 88, 21, low) + } + fn ip6(low: u16) -> Ipv6Addr { + Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, low) + } + + let mut v4 = SocketAddrV4::new(ip4(11), 80); + assert_eq!(v4.ip(), &ip4(11)); + v4.set_ip(ip4(12)); + assert_eq!(v4.ip(), &ip4(12)); + + let mut addr = SocketAddr::V4(v4); + assert_eq!(addr.ip(), IpAddr::V4(ip4(12))); + addr.set_ip(IpAddr::V4(ip4(13))); + assert_eq!(addr.ip(), IpAddr::V4(ip4(13))); + addr.set_ip(IpAddr::V6(ip6(14))); + assert_eq!(addr.ip(), IpAddr::V6(ip6(14))); + + let mut v6 = SocketAddrV6::new(ip6(1), 80, 0, 0); + assert_eq!(v6.ip(), &ip6(1)); + v6.set_ip(ip6(2)); + assert_eq!(v6.ip(), &ip6(2)); + + let mut addr = SocketAddr::V6(v6); + assert_eq!(addr.ip(), IpAddr::V6(ip6(2))); + addr.set_ip(IpAddr::V6(ip6(3))); + assert_eq!(addr.ip(), IpAddr::V6(ip6(3))); + addr.set_ip(IpAddr::V4(ip4(4))); + assert_eq!(addr.ip(), IpAddr::V4(ip4(4))); +} + +#[test] +fn set_port() { + let mut v4 = SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80); + assert_eq!(v4.port(), 80); + v4.set_port(443); + assert_eq!(v4.port(), 443); + + let mut addr = SocketAddr::V4(v4); + assert_eq!(addr.port(), 443); + addr.set_port(8080); + assert_eq!(addr.port(), 8080); + + let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 0); + assert_eq!(v6.port(), 80); + v6.set_port(443); + assert_eq!(v6.port(), 443); + + let mut addr = SocketAddr::V6(v6); + assert_eq!(addr.port(), 443); + addr.set_port(8080); + assert_eq!(addr.port(), 8080); +} + +#[test] +fn set_flowinfo() { + let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 10, 0); + assert_eq!(v6.flowinfo(), 10); + v6.set_flowinfo(20); + assert_eq!(v6.flowinfo(), 20); +} + +#[test] +fn set_scope_id() { + let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 10); + assert_eq!(v6.scope_id(), 10); + v6.set_scope_id(20); + assert_eq!(v6.scope_id(), 20); +} + +#[test] +fn is_v4() { + let v4 = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)); + assert!(v4.is_ipv4()); + assert!(!v4.is_ipv6()); +} + +#[test] +fn is_v6() { + let v6 = SocketAddr::V6(SocketAddrV6::new( + Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), + 80, + 10, + 0, + )); + assert!(!v6.is_ipv4()); + assert!(v6.is_ipv6()); +} + +#[test] +fn socket_v4_to_str() { + let socket = SocketAddrV4::new(Ipv4Addr::new(192, 168, 0, 1), 8080); + + assert_eq!(format!("{socket}"), "192.168.0.1:8080"); + assert_eq!(format!("{socket:<20}"), "192.168.0.1:8080 "); + assert_eq!(format!("{socket:>20}"), " 192.168.0.1:8080"); + assert_eq!(format!("{socket:^20}"), " 192.168.0.1:8080 "); + assert_eq!(format!("{socket:.10}"), "192.168.0."); +} + +#[test] +fn socket_v6_to_str() { + let mut socket = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0); + + assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1]:53"); + assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1]:53 "); + assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1]:53"); + assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1]:53 "); + assert_eq!(format!("{socket:.15}"), "[2a02:6b8:0:1::"); + + socket.set_scope_id(5); + + assert_eq!(format!("{socket}"), 
"[2a02:6b8:0:1::1%5]:53"); + assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1%5]:53 "); + assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1%5]:53"); + assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1%5]:53 "); + assert_eq!(format!("{socket:.18}"), "[2a02:6b8:0:1::1%5"); +} + +#[test] +fn compare() { + let v4_1 = "224.120.45.1:23456".parse::().unwrap(); + let v4_2 = "224.210.103.5:12345".parse::().unwrap(); + let v4_3 = "224.210.103.5:23456".parse::().unwrap(); + let v6_1 = "[2001:db8:f00::1002]:23456".parse::().unwrap(); + let v6_2 = "[2001:db8:f00::2001]:12345".parse::().unwrap(); + let v6_3 = "[2001:db8:f00::2001]:23456".parse::().unwrap(); + + // equality + assert_eq!(v4_1, v4_1); + assert_eq!(v6_1, v6_1); + assert_eq!(SocketAddr::V4(v4_1), SocketAddr::V4(v4_1)); + assert_eq!(SocketAddr::V6(v6_1), SocketAddr::V6(v6_1)); + assert!(v4_1 != v4_2); + assert!(v6_1 != v6_2); + + // compare different addresses + assert!(v4_1 < v4_2); + assert!(v6_1 < v6_2); + assert!(v4_2 > v4_1); + assert!(v6_2 > v6_1); + + // compare the same address with different ports + assert!(v4_2 < v4_3); + assert!(v6_2 < v6_3); + assert!(v4_3 > v4_2); + assert!(v6_3 > v6_2); + + // compare different addresses with the same port + assert!(v4_1 < v4_3); + assert!(v6_1 < v6_3); + assert!(v4_3 > v4_1); + assert!(v6_3 > v6_1); + + // compare with an inferred right-hand side + assert_eq!(v4_1, "224.120.45.1:23456".parse().unwrap()); + assert_eq!(v6_1, "[2001:db8:f00::1002]:23456".parse().unwrap()); + assert_eq!(SocketAddr::V4(v4_1), "224.120.45.1:23456".parse().unwrap()); +} diff --git a/library/core/tests/num/dec2flt/mod.rs b/library/core/tests/num/dec2flt/mod.rs index c4e105cba..a2b9bb551 100644 --- a/library/core/tests/num/dec2flt/mod.rs +++ b/library/core/tests/num/dec2flt/mod.rs @@ -15,7 +15,7 @@ macro_rules! test_literal { for input in inputs { assert_eq!(input.parse(), Ok(x64)); assert_eq!(input.parse(), Ok(x32)); - let neg_input = &format!("-{input}"); + let neg_input = format!("-{input}"); assert_eq!(neg_input.parse(), Ok(-x64)); assert_eq!(neg_input.parse(), Ok(-x32)); } diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs index 80d30f14c..c02cd99cc 100644 --- a/library/core/tests/ptr.rs +++ b/library/core/tests/ptr.rs @@ -25,7 +25,7 @@ fn test() { snd: isize, } let mut p = Pair { fst: 10, snd: 20 }; - let pptr: *mut Pair = &mut p; + let pptr: *mut Pair = addr_of_mut!(p); let iptr: *mut isize = pptr as *mut isize; assert_eq!(*iptr, 10); *iptr = 30; @@ -1070,8 +1070,8 @@ fn swap_copy_untyped() { let mut x = 5u8; let mut y = 6u8; - let ptr1 = &mut x as *mut u8 as *mut bool; - let ptr2 = &mut y as *mut u8 as *mut bool; + let ptr1 = addr_of_mut!(x).cast::(); + let ptr2 = addr_of_mut!(y).cast::(); unsafe { ptr::swap(ptr1, ptr2); -- cgit v1.2.3