author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:03:36 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:03:36 +0000
commit     17d40c6057c88f4c432b0d7bac88e1b84cb7e67f (patch)
tree       3f66c4a5918660bb8a758ab6cda5ff8ee4f6cdcd /library/alloc/tests
parent     Adding upstream version 1.64.0+dfsg1. (diff)
Adding upstream version 1.65.0+dfsg1. (upstream/1.65.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/alloc/tests')
-rw-r--r--  library/alloc/tests/lib.rs        |   2
-rw-r--r--  library/alloc/tests/str.rs        |  10
-rw-r--r--  library/alloc/tests/string.rs     | 126
-rw-r--r--  library/alloc/tests/thin_box.rs   |   8
-rw-r--r--  library/alloc/tests/vec.rs        | 276
-rw-r--r--  library/alloc/tests/vec_deque.rs  | 161
6 files changed, 263 insertions, 320 deletions
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index d83cd29dd..490c0d8f7 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -38,11 +38,13 @@
 #![feature(const_str_from_utf8)]
 #![feature(nonnull_slice_from_raw_parts)]
 #![feature(panic_update_hook)]
+#![feature(pointer_is_aligned)]
 #![feature(slice_flatten)]
 #![feature(thin_box)]
 #![feature(bench_black_box)]
 #![feature(strict_provenance)]
 #![feature(once_cell)]
+#![feature(drain_keep_rest)]
 
 use std::collections::hash_map::DefaultHasher;
 use std::hash::{Hash, Hasher};
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index 7379569dd..e30329aa1 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
 fn test_as_ptr() {
     let buf = "hello".as_ptr();
     unsafe {
-        assert_eq!(*buf.offset(0), b'h');
-        assert_eq!(*buf.offset(1), b'e');
-        assert_eq!(*buf.offset(2), b'l');
-        assert_eq!(*buf.offset(3), b'l');
-        assert_eq!(*buf.offset(4), b'o');
+        assert_eq!(*buf.add(0), b'h');
+        assert_eq!(*buf.add(1), b'e');
+        assert_eq!(*buf.add(2), b'l');
+        assert_eq!(*buf.add(3), b'l');
+        assert_eq!(*buf.add(4), b'o');
     }
 }
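
The str.rs hunk above is a mechanical move from `ptr::offset` to `ptr::add`: `add(n)` takes a `usize` and is documented as equivalent to `offset(n as isize)`, so the signed argument goes away. A minimal stand-alone sketch of the equivalence on stable Rust (not part of the patch):

    fn main() {
        let buf = "hello".as_ptr();
        // SAFETY: indices 0..5 are in bounds of the 5-byte string literal,
        // so both pointer offsets stay inside the same allocation.
        unsafe {
            assert_eq!(*buf.offset(1), b'e'); // old style: signed offset
            assert_eq!(*buf.add(1), b'e');    // new style: unsigned add
        }
    }
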
diff --git a/library/alloc/tests/string.rs b/library/alloc/tests/string.rs
index b6836fdc8..99d1296a4 100644
--- a/library/alloc/tests/string.rs
+++ b/library/alloc/tests/string.rs
@@ -693,12 +693,6 @@ fn test_try_reserve() {
     const MAX_CAP: usize = isize::MAX as usize;
     const MAX_USIZE: usize = usize::MAX;
 
-    // On 16/32-bit, we check that allocations don't exceed isize::MAX,
-    // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
-    // Any platform that succeeds for these requests is technically broken with
-    // ptr::offset because LLVM is the worst.
-    let guards_against_isize = usize::BITS < 64;
-
     {
         // Note: basic stuff is checked by test_reserve
         let mut empty_string: String = String::new();
@@ -712,35 +706,19 @@ fn test_try_reserve() {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
 
-        if guards_against_isize {
-            // Check isize::MAX + 1 does count as overflow
-            assert_matches!(
-                empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-
-            // Check usize::MAX does count as overflow
-            assert_matches!(
-                empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "usize::MAX should trigger an overflow!"
-            );
-        } else {
-            // Check isize::MAX + 1 is an OOM
-            assert_matches!(
-                empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-
-            // Check usize::MAX is an OOM
-            assert_matches!(
-                empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "usize::MAX should trigger an OOM!"
-            );
-        }
+        // Check isize::MAX + 1 does count as overflow
+        assert_matches!(
+            empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
+        // Check usize::MAX does count as overflow
+        assert_matches!(
+            empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "usize::MAX should trigger an overflow!"
+        );
     }
 
     {
@@ -753,19 +731,13 @@ fn test_try_reserve() {
         if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         // Should always overflow in the add-to-len
         assert_matches!(
             ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -785,8 +757,6 @@ fn test_try_reserve_exact() {
     const MAX_CAP: usize = isize::MAX as usize;
     const MAX_USIZE: usize = usize::MAX;
 
-    let guards_against_isize = usize::BITS < 64;
-
     {
         let mut empty_string: String = String::new();
 
@@ -799,31 +769,17 @@ fn test_try_reserve_exact() {
            panic!("isize::MAX shouldn't trigger an overflow!");
         }
 
-        if guards_against_isize {
-            assert_matches!(
-                empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-
-            assert_matches!(
-                empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "usize::MAX should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-
-            assert_matches!(
-                empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "usize::MAX should trigger an OOM!"
-            );
-        }
+        assert_matches!(
+            empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
+        assert_matches!(
+            empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "usize::MAX should trigger an overflow!"
+        );
     }
 
     {
@@ -839,19 +795,13 @@ fn test_try_reserve_exact() {
         {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         assert_matches!(
             ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
             Err(CapacityOverflow),
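
The string.rs hunks above delete the 64-bit `guards_against_isize` branch: in these tests a `try_reserve`/`try_reserve_exact` request above `isize::MAX` bytes is now expected to report `CapacityOverflow` on every platform instead of being handed to the allocator. A small stand-alone sketch of that behaviour on stable Rust (it only checks that the call fails, since `TryReserveErrorKind` itself is unstable):

    fn main() {
        let mut s = String::new();
        // A request above isize::MAX bytes can never be satisfied, so
        // try_reserve must return an error instead of aborting or allocating.
        let err = s.try_reserve(isize::MAX as usize + 1).unwrap_err();
        println!("reservation failed as expected: {err}");
        assert_eq!(s.capacity(), 0); // the string itself is untouched
    }
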
diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs
index 368aa564f..e008b0cc3 100644
--- a/library/alloc/tests/thin_box.rs
+++ b/library/alloc/tests/thin_box.rs
@@ -48,11 +48,11 @@ fn verify_aligned<T>(ptr: *const T) {
     // practice these checks are mostly just smoke-detectors for an extremely
     // broken `ThinBox` impl, since it's an extremely subtle piece of code.
     let ptr = core::hint::black_box(ptr);
-    let align = core::mem::align_of::<T>();
     assert!(
-        (ptr.addr() & (align - 1)) == 0 && !ptr.is_null(),
-        "misaligned ThinBox data; valid pointers to `{}` should be aligned to {align}: {ptr:p}",
-        core::any::type_name::<T>(),
+        ptr.is_aligned() && !ptr.is_null(),
+        "misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}",
+        ty = core::any::type_name::<T>(),
+        align = core::mem::align_of::<T>(),
     );
 }
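
The thin_box.rs test now calls the unstable `pointer_is_aligned` helper (`<*const T>::is_aligned()`, enabled by the new `#![feature(pointer_is_aligned)]` in lib.rs) instead of masking the address by hand. A rough sketch of the manual check it replaces; this compiles on stable by using a plain pointer-to-`usize` cast, with the nightly call noted in a comment:

    fn is_aligned_by_hand<T>(ptr: *const T) -> bool {
        let align = core::mem::align_of::<T>();
        // Manual form used before this change (with ptr.addr() on nightly).
        ((ptr as usize) & (align - 1)) == 0
        // With #![feature(pointer_is_aligned)] this is simply: ptr.is_aligned()
    }

    fn main() {
        let value = 0u64;
        assert!(is_aligned_by_hand(&value as *const u64));
    }
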
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index b797e2375..f140fc414 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -294,6 +294,22 @@ fn test_retain() {
 }
 
 #[test]
+fn test_retain_predicate_order() {
+    for to_keep in [true, false] {
+        let mut number_of_executions = 0;
+        let mut vec = vec![1, 2, 3, 4];
+        let mut next_expected = 1;
+        vec.retain(|&x| {
+            assert_eq!(next_expected, x);
+            next_expected += 1;
+            number_of_executions += 1;
+            to_keep
+        });
+        assert_eq!(number_of_executions, 4);
+    }
+}
+
+#[test]
 fn test_retain_pred_panic_with_hole() {
     let v = (0..5).map(Rc::new).collect::<Vec<_>>();
     catch_unwind(AssertUnwindSafe(|| {
@@ -355,6 +371,35 @@ fn test_retain_drop_panic() {
 }
 
 #[test]
+fn test_retain_maybeuninits() {
+    // This test aimed to be run under miri.
+    use core::mem::MaybeUninit;
+    let mut vec: Vec<_> = [1i32, 2, 3, 4].map(|v| MaybeUninit::new(vec![v])).into();
+    vec.retain(|x| {
+        // SAFETY: Retain must visit every element of Vec in original order and exactly once.
+        // Our values is initialized at creation of Vec.
+        let v = unsafe { x.assume_init_ref()[0] };
+        if v & 1 == 0 {
+            return true;
+        }
+        // SAFETY: Value is initialized.
+        // Value wouldn't be dropped by `Vec::retain`
+        // because `MaybeUninit` doesn't drop content.
+        drop(unsafe { x.assume_init_read() });
+        false
+    });
+    let vec: Vec<i32> = vec
+        .into_iter()
+        .map(|x| unsafe {
+            // SAFETY: All values dropped in retain predicate must be removed by `Vec::retain`.
+            // Remaining values are initialized.
+            x.assume_init()[0]
+        })
+        .collect();
+    assert_eq!(vec, [2, 4]);
+}
+
+#[test]
 fn test_dedup() {
     fn case(a: Vec<i32>, b: Vec<i32>) {
         let mut v = a;
@@ -795,6 +840,36 @@ fn test_drain_leak() {
 }
 
 #[test]
+fn test_drain_keep_rest() {
+    let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+    let mut drain = v.drain(1..6);
+    assert_eq!(drain.next(), Some(1));
+    assert_eq!(drain.next_back(), Some(5));
+    assert_eq!(drain.next(), Some(2));
+
+    drain.keep_rest();
+    assert_eq!(v, &[0, 3, 4, 6]);
+}
+
+#[test]
+fn test_drain_keep_rest_all() {
+    let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+    v.drain(1..6).keep_rest();
+    assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_keep_rest_none() {
+    let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+    let mut drain = v.drain(1..6);
+
+    drain.by_ref().for_each(drop);
+
+    drain.keep_rest();
+    assert_eq!(v, &[0, 6]);
+}
+
+#[test]
 fn test_splice() {
     let mut v = vec![1, 2, 3, 4, 5];
     let a = [10, 11, 12];
@@ -1030,6 +1105,12 @@ fn test_into_iter_drop_allocator() {
 }
 
 #[test]
+fn test_into_iter_zst() {
+    for _ in vec![[0u64; 0]].into_iter() {}
+    for _ in vec![[0u64; 0]; 5].into_iter().rev() {}
+}
+
+#[test]
 fn test_from_iter_specialization() {
     let src: Vec<usize> = vec![0usize; 1];
     let srcptr = src.as_ptr();
@@ -1489,6 +1570,35 @@ fn drain_filter_unconsumed() {
 }
 
 #[test]
+fn test_drain_filter_keep_rest() {
+    let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+    let mut drain = v.drain_filter(|&mut x| x % 2 == 0);
+    assert_eq!(drain.next(), Some(0));
+    assert_eq!(drain.next(), Some(2));
+
+    drain.keep_rest();
+    assert_eq!(v, &[1, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_filter_keep_rest_all() {
+    let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+    v.drain_filter(|_| true).keep_rest();
+    assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_filter_keep_rest_none() {
+    let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+    let mut drain = v.drain_filter(|_| true);
+
+    drain.by_ref().for_each(drop);
+
+    drain.keep_rest();
+    assert_eq!(v, &[]);
+}
+
+#[test]
 fn test_reserve_exact() {
     // This is all the same as test_reserve
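
The new tests above cover `Drain::keep_rest` and `DrainFilter::keep_rest`, which are nightly-only at this point (the `drain_keep_rest` feature added to lib.rs): after consuming part of a drain iterator, `keep_rest()` puts the not-yet-yielded elements back into the vector instead of dropping them. A sketch mirroring `test_drain_keep_rest`, usable only on a nightly toolchain:

    // Nightly-only sketch: requires #![feature(drain_keep_rest)] at the crate root.
    fn main() {
        let mut v = vec![0, 1, 2, 3, 4, 5, 6];
        let mut drain = v.drain(1..6);

        // Consume three of the five drained elements, from both ends...
        assert_eq!(drain.next(), Some(1));
        assert_eq!(drain.next_back(), Some(5));
        assert_eq!(drain.next(), Some(2));

        // ...then keep the unconsumed ones (3 and 4) in the vector.
        drain.keep_rest();
        assert_eq!(v, &[0, 3, 4, 6]);
    }
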
@@ -1527,12 +1637,6 @@ fn test_try_reserve() {
     const MAX_CAP: usize = isize::MAX as usize;
     const MAX_USIZE: usize = usize::MAX;
 
-    // On 16/32-bit, we check that allocations don't exceed isize::MAX,
-    // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
-    // Any platform that succeeds for these requests is technically broken with
-    // ptr::offset because LLVM is the worst.
-    let guards_against_isize = usize::BITS < 64;
-
     {
         // Note: basic stuff is checked by test_reserve
         let mut empty_bytes: Vec<u8> = Vec::new();
@@ -1546,35 +1650,19 @@ fn test_try_reserve() {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
 
-        if guards_against_isize {
-            // Check isize::MAX + 1 does count as overflow
-            assert_matches!(
-                empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-
-            // Check usize::MAX does count as overflow
-            assert_matches!(
-                empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "usize::MAX should trigger an overflow!"
-            );
-        } else {
-            // Check isize::MAX + 1 is an OOM
-            assert_matches!(
-                empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-
-            // Check usize::MAX is an OOM
-            assert_matches!(
-                empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "usize::MAX should trigger an OOM!"
-            );
-        }
+        // Check isize::MAX + 1 does count as overflow
+        assert_matches!(
+            empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
+        // Check usize::MAX does count as overflow
+        assert_matches!(
+            empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "usize::MAX should trigger an overflow!"
+        );
     }
 
     {
@@ -1587,19 +1675,13 @@ fn test_try_reserve() {
         if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         // Should always overflow in the add-to-len
         assert_matches!(
             ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -1620,19 +1702,13 @@ fn test_try_reserve() {
         {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         // Should fail in the mul-by-size
         assert_matches!(
             ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
@@ -1652,8 +1728,6 @@ fn test_try_reserve_exact() {
     const MAX_CAP: usize = isize::MAX as usize;
     const MAX_USIZE: usize = usize::MAX;
 
-    let guards_against_isize = size_of::<usize>() < 8;
-
     {
         let mut empty_bytes: Vec<u8> = Vec::new();
 
@@ -1666,31 +1740,17 @@ fn test_try_reserve_exact() {
            panic!("isize::MAX shouldn't trigger an overflow!");
         }
 
-        if guards_against_isize {
-            assert_matches!(
-                empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-
-            assert_matches!(
-                empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "usize::MAX should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-
-            assert_matches!(
-                empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "usize::MAX should trigger an OOM!"
-            );
-        }
+        assert_matches!(
+            empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
+        assert_matches!(
+            empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "usize::MAX should trigger an overflow!"
+        );
     }
 
     {
@@ -1706,19 +1766,13 @@ fn test_try_reserve_exact() {
         {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         assert_matches!(
             ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
             Err(CapacityOverflow),
@@ -1739,19 +1793,13 @@ fn test_try_reserve_exact() {
         {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         assert_matches!(
             ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
             Err(CapacityOverflow),
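
Earlier in the vec.rs changes, the new `test_retain_predicate_order` and `test_retain_maybeuninits` tests pin down the documented `Vec::retain` contract: the predicate sees every element exactly once, in the original order. A small stable-Rust illustration of code that relies on that guarantee (standalone, not part of the patch):

    fn main() {
        let mut visited = Vec::new();
        let mut v = vec![10, 11, 12, 13];
        // The predicate runs once per element, front to back.
        v.retain(|&x| {
            visited.push(x);
            x % 2 == 0
        });
        assert_eq!(visited, [10, 11, 12, 13]);
        assert_eq!(v, [10, 12]);
    }
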
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index 89cc7f905..019d73c0b 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -2,7 +2,6 @@
 use std::assert_matches::assert_matches;
 use std::collections::TryReserveErrorKind::*;
 use std::collections::{vec_deque::Drain, VecDeque};
 use std::fmt::Debug;
-use std::mem::size_of;
 use std::ops::Bound::*;
 use std::panic::{catch_unwind, AssertUnwindSafe};
@@ -1161,12 +1160,6 @@ fn test_try_reserve() {
     const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
     const MAX_USIZE: usize = usize::MAX;
 
-    // On 16/32-bit, we check that allocations don't exceed isize::MAX,
-    // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
-    // Any platform that succeeds for these requests is technically broken with
-    // ptr::offset because LLVM is the worst.
-    let guards_against_isize = size_of::<usize>() < 8;
-
     {
         // Note: basic stuff is checked by test_reserve
         let mut empty_bytes: VecDeque<u8> = VecDeque::new();
@@ -1180,31 +1173,19 @@ fn test_try_reserve() {
            panic!("isize::MAX shouldn't trigger an overflow!");
         }
 
-        if guards_against_isize {
-            // Check isize::MAX + 1 does count as overflow
-            assert_matches!(
-                empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-
-            // Check usize::MAX does count as overflow
-            assert_matches!(
-                empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "usize::MAX should trigger an overflow!"
-            );
-        } else {
-            // Check isize::MAX is an OOM
-            // VecDeque starts with capacity 7, always adds 1 to the capacity
-            // and also rounds the number to next power of 2 so this is the
-            // furthest we can go without triggering CapacityOverflow
-            assert_matches!(
-                empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+        // Check isize::MAX + 1 does count as overflow
+        assert_matches!(
+            empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
+        // Check usize::MAX does count as overflow
+        assert_matches!(
+            empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "usize::MAX should trigger an overflow!"
+        );
     }
 
     {
@@ -1217,19 +1198,13 @@ fn test_try_reserve() {
         if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         // Should always overflow in the add-to-len
         assert_matches!(
             ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -1250,19 +1225,13 @@ fn test_try_reserve() {
         {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         // Should fail in the mul-by-size
         assert_matches!(
             ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
@@ -1282,8 +1251,6 @@ fn test_try_reserve_exact() {
     const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
     const MAX_USIZE: usize = usize::MAX;
 
-    let guards_against_isize = size_of::<usize>() < 8;
-
     {
         let mut empty_bytes: VecDeque<u8> = VecDeque::new();
 
@@ -1296,29 +1263,17 @@ fn test_try_reserve_exact() {
            panic!("isize::MAX shouldn't trigger an overflow!");
         }
 
-        if guards_against_isize {
-            assert_matches!(
-                empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-
-            assert_matches!(
-                empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "usize::MAX should trigger an overflow!"
-            );
-        } else {
-            // Check isize::MAX is an OOM
-            // VecDeque starts with capacity 7, always adds 1 to the capacity
-            // and also rounds the number to next power of 2 so this is the
-            // furthest we can go without triggering CapacityOverflow
-            assert_matches!(
-                empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+        assert_matches!(
+            empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
+        assert_matches!(
+            empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "usize::MAX should trigger an overflow!"
+        );
     }
 
     {
@@ -1334,19 +1289,13 @@ fn test_try_reserve_exact() {
         {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         assert_matches!(
             ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
             Err(CapacityOverflow),
@@ -1367,19 +1316,13 @@ fn test_try_reserve_exact() {
         {
             panic!("isize::MAX shouldn't trigger an overflow!");
         }
-        if guards_against_isize {
-            assert_matches!(
-                ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(CapacityOverflow),
-                "isize::MAX + 1 should trigger an overflow!"
-            );
-        } else {
-            assert_matches!(
-                ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
-                Err(AllocError { .. }),
-                "isize::MAX + 1 should trigger an OOM!"
-            );
-        }
+
+        assert_matches!(
+            ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+            Err(CapacityOverflow),
+            "isize::MAX + 1 should trigger an overflow!"
+        );
+
         assert_matches!(
             ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
             Err(CapacityOverflow),