Diffstat (limited to 'third_party/rust/bumpalo/tests')
-rw-r--r--   third_party/rust/bumpalo/tests/alloc_fill.rs         |  32
-rw-r--r--   third_party/rust/bumpalo/tests/alloc_with.rs         |  66
-rw-r--r--   third_party/rust/bumpalo/tests/quickchecks.rs        | 230
-rw-r--r--   third_party/rust/bumpalo/tests/readme_up_to_date.rs  |  22
-rw-r--r--   third_party/rust/bumpalo/tests/string.rs             |  19
-rw-r--r--   third_party/rust/bumpalo/tests/tests.rs              | 195
-rw-r--r--   third_party/rust/bumpalo/tests/try_alloc.rs          | 178
-rw-r--r--   third_party/rust/bumpalo/tests/vec.rs                |  85
8 files changed, 827 insertions, 0 deletions
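The eight files below vendor bumpalo's test suite into the tree. For orientation, here is a minimal sketch of the arena API the suite exercises, using only calls that appear in the diffs themselves (Bump::new, alloc, alloc_str, alloc_slice_fill_copy); it is an illustration, not part of the patch:

use bumpalo::Bump;

fn main() {
    // A Bump is an arena: allocation is a pointer bump in the common
    // case, and everything is freed at once when the arena is dropped.
    let bump = Bump::new();

    // `alloc` moves a value into the arena and hands back a reference
    // tied to the arena's lifetime.
    let x: &mut u64 = bump.alloc(42);
    *x += 1;
    assert_eq!(*x, 43);

    // Strings and slices can be copied into the arena as well.
    let s: &str = bump.alloc_str("hello");
    let zeros: &mut [u8] = bump.alloc_slice_fill_copy(16, 0u8);
    assert_eq!(s.len() + zeros.len(), 21);
}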
diff --git a/third_party/rust/bumpalo/tests/alloc_fill.rs b/third_party/rust/bumpalo/tests/alloc_fill.rs
new file mode 100644
index 0000000000..70bd84f31f
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/alloc_fill.rs
@@ -0,0 +1,32 @@
+use bumpalo::Bump;
+use std::alloc::Layout;
+
+#[test]
+fn alloc_slice_fill_zero() {
+    let b = Bump::new();
+    let layout = Layout::new::<u8>();
+
+    let ptr1 = b.alloc_layout(layout);
+
+    struct MyZeroSizedType;
+
+    b.alloc_slice_copy::<u64>(&[]);
+    b.alloc_slice_clone::<String>(&[]);
+    b.alloc_slice_fill_with::<String, _>(0, |_| panic!("should not happen"));
+    b.alloc_slice_fill_copy(0, 42u64);
+    b.alloc_slice_fill_clone(0, &"hello".to_string());
+    b.alloc_slice_fill_default::<String>(0);
+    let ptr2 = b.alloc(MyZeroSizedType);
+    assert_eq!(ptr1.as_ptr() as usize & !7, ptr2 as *mut _ as usize);
+
+    let ptr3 = b.alloc_layout(layout);
+    assert_eq!(ptr2 as *mut _ as usize, ptr3.as_ptr() as usize + 1);
+}
+
+#[test]
+#[should_panic(expected = "out of memory")]
+fn alloc_slice_overflow() {
+    let b = Bump::new();
+
+    b.alloc_slice_fill_default::<u64>(usize::max_value());
+}
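A note on the bit-twiddling in alloc_slice_fill_zero: `ptr & !7` clears the low three bits, rounding an address down to an 8-byte boundary. The test needs this because the empty u64-slice allocations in between can align the bump pointer down before the zero-sized value is placed. The masking in isolation, with illustrative numbers:

fn main() {
    // !7 is ...11111000 in binary, so the AND clears the low three bits
    // and rounds the address down to a multiple of 8.
    let addr: usize = 0x1003;
    assert_eq!(addr & !7, 0x1000);
}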
diff --git a/third_party/rust/bumpalo/tests/alloc_with.rs b/third_party/rust/bumpalo/tests/alloc_with.rs
new file mode 100644
index 0000000000..2791603664
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/alloc_with.rs
@@ -0,0 +1,66 @@
+#![cfg(not(debug_assertions))]
+
+// All of these alloc_with tests will fail with "fatal runtime error: stack overflow" unless LLVM
+// manages to optimize the stack writes away.
+//
+// We only run them when debug_assertions are not set, as we expect them to fail outside release
+// mode.
+
+use bumpalo::Bump;
+
+#[test]
+fn alloc_with_large_array() {
+    let b = Bump::new();
+
+    b.alloc_with(|| [4u8; 10_000_000]);
+}
+
+#[allow(dead_code)]
+struct LargeStruct {
+    small: usize,
+    big1: [u8; 20_000_000],
+    big2: [u8; 20_000_000],
+    big3: [u8; 20_000_000],
+}
+
+#[test]
+fn alloc_with_large_struct() {
+    let b = Bump::new();
+
+    b.alloc_with(|| LargeStruct {
+        small: 1,
+        big1: [2; 20_000_000],
+        big2: [3; 20_000_000],
+        big3: [4; 20_000_000],
+    });
+}
+
+#[test]
+fn alloc_with_large_tuple() {
+    let b = Bump::new();
+
+    b.alloc_with(|| {
+        (
+            1u32,
+            LargeStruct {
+                small: 2,
+                big1: [3; 20_000_000],
+                big2: [4; 20_000_000],
+                big3: [5; 20_000_000],
+            },
+        )
+    });
+}
+
+enum LargeEnum {
+    Small,
+    #[allow(dead_code)]
+    Large([u8; 10_000_000]),
+}
+
+#[test]
+fn alloc_with_large_enum() {
+    let b = Bump::new();
+
+    b.alloc_with(|| LargeEnum::Small);
+}
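The comment at the top of alloc_with.rs is the point of the whole file: `alloc_with` only avoids a stack-allocated temporary when the optimizer constructs the closure's result directly in arena memory, which is why these tests are compiled out under debug_assertions. A small sketch of the contrast, with the payload shrunk to 1 KiB for illustration (the real tests use multi-megabyte values so that a surviving stack copy overflows the stack):

use bumpalo::Bump;

// Illustrative payload; large enough that copies matter, small enough
// to be safe in any build.
struct Big([u8; 1024]);

fn main() {
    let bump = Bump::new();

    // `alloc` takes the value as an argument, so it exists on the stack
    // before being moved into the arena.
    let a = bump.alloc(Big([1; 1024]));

    // `alloc_with` takes a closure; in optimized builds LLVM can often
    // build the value in place in the arena and elide the copy. That is
    // an optimization, not a guarantee.
    let b = bump.alloc_with(|| Big([2; 1024]));

    assert_eq!(a.0[0] + b.0[0], 3);
}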
diff --git a/third_party/rust/bumpalo/tests/quickchecks.rs b/third_party/rust/bumpalo/tests/quickchecks.rs
new file mode 100644
index 0000000000..6a555b2f0f
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/quickchecks.rs
@@ -0,0 +1,230 @@
+use bumpalo::Bump;
+use quickcheck::{quickcheck, Arbitrary, Gen};
+use std::mem;
+
+#[derive(Clone, Debug, PartialEq)]
+struct BigValue {
+    data: [u64; 32],
+}
+
+impl BigValue {
+    fn new(x: u64) -> BigValue {
+        BigValue {
+            data: [
+                x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+                x, x, x, x,
+            ],
+        }
+    }
+}
+
+impl Arbitrary for BigValue {
+    fn arbitrary<G: Gen>(g: &mut G) -> BigValue {
+        BigValue::new(u64::arbitrary(g))
+    }
+}
+
+#[derive(Clone, Debug)]
+enum Elems<T, U> {
+    OneT(T),
+    TwoT(T, T),
+    FourT(T, T, T, T),
+    OneU(U),
+    TwoU(U, U),
+    FourU(U, U, U, U),
+}
+
+impl<T, U> Arbitrary for Elems<T, U>
+where
+    T: Arbitrary + Clone,
+    U: Arbitrary + Clone,
+{
+    fn arbitrary<G: Gen>(g: &mut G) -> Elems<T, U> {
+        let x: u8 = u8::arbitrary(g);
+        match x % 6 {
+            0 => Elems::OneT(T::arbitrary(g)),
+            1 => Elems::TwoT(T::arbitrary(g), T::arbitrary(g)),
+            2 => Elems::FourT(
+                T::arbitrary(g),
+                T::arbitrary(g),
+                T::arbitrary(g),
+                T::arbitrary(g),
+            ),
+            3 => Elems::OneU(U::arbitrary(g)),
+            4 => Elems::TwoU(U::arbitrary(g), U::arbitrary(g)),
+            5 => Elems::FourU(
+                U::arbitrary(g),
+                U::arbitrary(g),
+                U::arbitrary(g),
+                U::arbitrary(g),
+            ),
+            _ => unreachable!(),
+        }
+    }
+
+    fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+        match self {
+            Elems::OneT(_) => Box::new(vec![].into_iter()),
+            Elems::TwoT(a, b) => {
+                Box::new(vec![Elems::OneT(a.clone()), Elems::OneT(b.clone())].into_iter())
+            }
+            Elems::FourT(a, b, c, d) => Box::new(
+                vec![
+                    Elems::TwoT(a.clone(), b.clone()),
+                    Elems::TwoT(a.clone(), c.clone()),
+                    Elems::TwoT(a.clone(), d.clone()),
+                    Elems::TwoT(b.clone(), c.clone()),
+                    Elems::TwoT(b.clone(), d.clone()),
+                    Elems::TwoT(c.clone(), d.clone()),
+                ]
+                .into_iter(),
+            ),
+            Elems::OneU(_) => Box::new(vec![].into_iter()),
+            Elems::TwoU(a, b) => {
+                Box::new(vec![Elems::OneU(a.clone()), Elems::OneU(b.clone())].into_iter())
+            }
+            Elems::FourU(a, b, c, d) => Box::new(
+                vec![
+                    Elems::TwoU(a.clone(), b.clone()),
+                    Elems::TwoU(a.clone(), c.clone()),
+                    Elems::TwoU(a.clone(), d.clone()),
+                    Elems::TwoU(b.clone(), c.clone()),
+                    Elems::TwoU(b.clone(), d.clone()),
+                    Elems::TwoU(c.clone(), d.clone()),
+                ]
+                .into_iter(),
+            ),
+        }
+    }
+}
+
+fn overlap((a1, a2): (usize, usize), (b1, b2): (usize, usize)) -> bool {
+    assert!(a1 < a2);
+    assert!(b1 < b2);
+    a1 < b2 && b1 < a2
+}
+
+fn range<T>(t: &T) -> (usize, usize) {
+    let start = t as *const _ as usize;
+    let end = start + mem::size_of::<T>();
+    (start, end)
+}
+
+quickcheck! {
+    fn can_allocate_big_values(values: Vec<BigValue>) -> () {
+        let bump = Bump::new();
+        let mut alloced = vec![];
+
+        for vals in values.iter().cloned() {
+            alloced.push(bump.alloc(vals));
+        }
+
+        for (vals, alloc) in values.iter().zip(alloced.into_iter()) {
+            assert_eq!(vals, alloc);
+        }
+    }
+
+    fn big_allocations_never_overlap(values: Vec<BigValue>) -> () {
+        let bump = Bump::new();
+        let mut alloced = vec![];
+
+        for v in values {
+            let a = bump.alloc(v);
+            let start = a as *const _ as usize;
+            let end = unsafe { (a as *const BigValue).offset(1) as usize };
+            let range = (start, end);
+
+            for r in &alloced {
+                assert!(!overlap(*r, range));
+            }
+
+            alloced.push(range);
+        }
+    }
+
+    fn can_allocate_heterogeneous_things_and_they_dont_overlap(things: Vec<Elems<u8, u64>>) -> () {
+        let bump = Bump::new();
+        let mut ranges = vec![];
+
+        for t in things {
+            let r = match t {
+                Elems::OneT(a) => {
+                    range(bump.alloc(a))
+                },
+                Elems::TwoT(a, b) => {
+                    range(bump.alloc([a, b]))
+                },
+                Elems::FourT(a, b, c, d) => {
+                    range(bump.alloc([a, b, c, d]))
+                },
+                Elems::OneU(a) => {
+                    range(bump.alloc(a))
+                },
+                Elems::TwoU(a, b) => {
+                    range(bump.alloc([a, b]))
+                },
+                Elems::FourU(a, b, c, d) => {
+                    range(bump.alloc([a, b, c, d]))
+                },
+            };
+
+            for s in &ranges {
+                assert!(!overlap(r, *s));
+            }
+
+            ranges.push(r);
+        }
+    }
+
+
+    fn test_alignment_chunks(sizes: Vec<usize>) -> () {
+        const SUPPORTED_ALIGNMENTS: &[usize] = &[1, 2, 4, 8, 16];
+        for &alignment in SUPPORTED_ALIGNMENTS {
+            let mut b = Bump::with_capacity(513);
+            let mut sizes = sizes.iter().map(|&size| (size % 10) * alignment).collect::<Vec<_>>();
+
+            for &size in &sizes {
+                let layout = std::alloc::Layout::from_size_align(size, alignment).unwrap();
+                let ptr = b.alloc_layout(layout).as_ptr() as *const u8 as usize;
+                assert_eq!(ptr % alignment, 0);
+            }
+
+            for chunk in b.iter_allocated_chunks() {
+                let mut remaining = chunk.len();
+                while remaining > 0 {
+                    let size = sizes.pop().expect("too many bytes in the chunk output");
+                    assert!(remaining >= size, "returned chunk contained padding");
+                    remaining -= size;
+                }
+            }
+            assert_eq!(sizes.into_iter().sum::<usize>(), 0);
+        }
+    }
+
+    fn alloc_slices(allocs: Vec<(u8, usize)>) -> () {
+        let b = Bump::new();
+        let mut allocated: Vec<(usize, usize)> = vec![];
+        for (val, len) in allocs {
+            let len = len % 100;
+            let s = b.alloc_slice_fill_copy(len, val);
+
+            assert_eq!(s.len(), len);
+            assert!(s.iter().all(|v| v == &val));
+
+            let range = (s.as_ptr() as usize, unsafe { s.as_ptr().add(s.len()) } as usize);
+            for r in &allocated {
+                let no_overlap = range.1 <= r.0 || r.1 <= range.0;
+                assert!(no_overlap);
+            }
+            allocated.push(range);
+        }
+    }
+
+    fn alloc_strs(allocs: Vec<String>) -> () {
+        let b = Bump::new();
+        let allocated: Vec<&str> = allocs.iter().map(|s| b.alloc_str(s) as &_).collect();
+        for (val, alloc) in allocs.into_iter().zip(allocated) {
+            assert_eq!(val, alloc);
+        }
+    }
+}
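The `overlap` helper in quickchecks.rs treats every allocation as a half-open interval [start, end), and two such intervals intersect exactly when each one starts before the other ends. A standalone restatement with a few sanity checks:

fn overlap((a1, a2): (usize, usize), (b1, b2): (usize, usize)) -> bool {
    // Half-open intervals: the shared byte range is non-empty iff each
    // interval begins before the other one ends.
    a1 < b2 && b1 < a2
}

fn main() {
    assert!(overlap((0, 8), (4, 12)));  // partial overlap
    assert!(overlap((0, 8), (2, 4)));   // containment counts too
    assert!(!overlap((0, 8), (8, 16))); // merely touching does not
}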
diff --git a/third_party/rust/bumpalo/tests/readme_up_to_date.rs b/third_party/rust/bumpalo/tests/readme_up_to_date.rs
new file mode 100644
index 0000000000..38af909e31
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/readme_up_to_date.rs
@@ -0,0 +1,22 @@
+use std::fs;
+use std::process::Command;
+
+#[test]
+fn cargo_readme_up_to_date() {
+    println!("Checking that `cargo readme > README.md` is up to date...");
+
+    let expected = Command::new("cargo")
+        .arg("readme")
+        .current_dir(env!("CARGO_MANIFEST_DIR"))
+        .output()
+        .expect("should run `cargo readme` OK")
+        .stdout;
+    let expected = String::from_utf8_lossy(&expected);
+
+    let actual = fs::read_to_string(concat!(env!("CARGO_MANIFEST_DIR"), "/README.md"))
+        .expect("should read README.md OK");
+
+    if actual != expected {
+        panic!("Run `cargo readme > README.md` to update README.md");
+    }
+}
diff --git a/third_party/rust/bumpalo/tests/string.rs b/third_party/rust/bumpalo/tests/string.rs
new file mode 100644
index 0000000000..55237517fb
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/string.rs
@@ -0,0 +1,19 @@
+#![cfg(feature = "collections")]
+use bumpalo::{collections::String, format, Bump};
+use std::fmt::Write;
+
+#[test]
+fn format_a_bunch_of_strings() {
+    let b = Bump::new();
+    let mut s = String::from_str_in("hello", &b);
+    for i in 0..1000 {
+        write!(&mut s, " {}", i).unwrap();
+    }
+}
+
+#[test]
+fn trailing_comma_in_format_macro() {
+    let b = Bump::new();
+    let v = format![in &b, "{}{}", 1, 2, ];
+    assert_eq!(v, "12");
+}
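string.rs works because bumpalo's String implements std::fmt::Write, so the standard write! macro can append formatted text to an arena-backed string. A compact usage sketch in the same style as format_a_bunch_of_strings:

use bumpalo::{collections::String, Bump};
use std::fmt::Write;

fn main() {
    let b = Bump::new();
    let mut s = String::from_str_in("total:", &b);
    // `write!` appends through the std Write trait; the backing buffer
    // grows inside the arena rather than on the global heap.
    write!(&mut s, " {}", 40 + 2).unwrap();
    assert_eq!(s, "total: 42");
}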
diff --git a/third_party/rust/bumpalo/tests/tests.rs b/third_party/rust/bumpalo/tests/tests.rs
new file mode 100644
index 0000000000..0c7cb0cbf1
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/tests.rs
@@ -0,0 +1,195 @@
+use bumpalo::Bump;
+use std::alloc::Layout;
+use std::mem;
+use std::usize;
+
+#[test]
+fn can_iterate_over_allocated_things() {
+    let mut bump = Bump::new();
+
+    const MAX: u64 = 131_072;
+
+    let mut chunk_ends = vec![];
+    let mut last = None;
+
+    for i in 0..MAX {
+        let this = bump.alloc(i);
+        assert_eq!(*this, i);
+        let this = this as *const _ as usize;
+
+        if match last {
+            Some(last) if last - mem::size_of::<u64>() == this => false,
+            _ => true,
+        } {
+            let chunk_end = this + mem::size_of::<u64>();
+            println!("new chunk ending @ 0x{:x}", chunk_end);
+            assert!(
+                !chunk_ends.contains(&chunk_end),
+                "should not have already allocated this chunk"
+            );
+            chunk_ends.push(chunk_end);
+        }
+
+        last = Some(this);
+    }
+
+    let mut seen = vec![false; MAX as usize];
+
+    // Safe because we always allocated objects of the same type in this arena,
+    // and their size >= their align.
+    for ch in bump.iter_allocated_chunks() {
+        let chunk_end = ch.as_ptr() as usize + ch.len();
+        println!("iter chunk ending @ {:#x}", chunk_end);
+        assert_eq!(
+            chunk_ends.pop().unwrap(),
+            chunk_end,
+            "should iterate over each chunk once, in order they were allocated in"
+        );
+
+        let (before, mid, after) = unsafe { ch.align_to::<u64>() };
+        assert!(before.is_empty());
+        assert!(after.is_empty());
+        for i in mid {
+            assert!(*i < MAX, "{} < {} (aka {:x} < {:x})", i, MAX, i, MAX);
+            seen[*i as usize] = true;
+        }
+    }
+
+    assert!(seen.iter().all(|s| *s));
+}
+
+#[test]
+#[should_panic(expected = "out of memory")]
+fn oom_instead_of_bump_pointer_overflow() {
+    let bump = Bump::new();
+    let x = bump.alloc(0_u8);
+    let p = x as *mut u8 as usize;
+
+    // A size guaranteed to overflow the bump pointer.
+    let size = usize::MAX - p + 1;
+    let align = 1;
+    let layout = match Layout::from_size_align(size, align) {
+        Err(e) => {
+            // Return on error so that we don't panic and the test fails.
+            eprintln!("Layout::from_size_align errored: {}", e);
+            return;
+        }
+        Ok(l) => l,
+    };
+
+    // This should panic.
+    bump.alloc_layout(layout);
+}
+
+#[test]
+fn force_new_chunk_fits_well() {
+    let b = Bump::new();
+
+    // Use the first chunk for something
+    b.alloc_layout(Layout::from_size_align(1, 1).unwrap());
+
+    // Next force allocation of some new chunks.
+    b.alloc_layout(Layout::from_size_align(100_001, 1).unwrap());
+    b.alloc_layout(Layout::from_size_align(100_003, 1).unwrap());
+}
+
+#[test]
+fn alloc_with_strong_alignment() {
+    let b = Bump::new();
+
+    // 64 is probably the strongest alignment we'll see in practice
+    // e.g. AVX-512 types, or cache line padding optimizations
+    b.alloc_layout(Layout::from_size_align(4096, 64).unwrap());
+}
+
+#[test]
+fn alloc_slice_copy() {
+    let b = Bump::new();
+
+    let src: &[u16] = &[0xFEED, 0xFACE, 0xA7, 0xCAFE];
+    let dst = b.alloc_slice_copy(src);
+
+    assert_eq!(src, dst);
+}
+
+#[test]
+fn alloc_slice_clone() {
+    let b = Bump::new();
+
+    let src = vec![vec![0], vec![1, 2], vec![3, 4, 5], vec![6, 7, 8, 9]];
+    let dst = b.alloc_slice_clone(&src);
+
+    assert_eq!(src, dst);
+}
+
+#[test]
+fn small_size_and_large_align() {
+    let b = Bump::new();
+    let layout = std::alloc::Layout::from_size_align(1, 0x1000).unwrap();
+    b.alloc_layout(layout);
+}
+
+fn with_capacity_helper<I, T>(iter: I)
+where
+    T: Copy + Eq,
+    I: Clone + Iterator<Item = T> + DoubleEndedIterator,
+{
+    for &initial_size in &[0, 1, 8, 11, 0x1000, 0x12345] {
+        let mut b = Bump::with_capacity(initial_size);
+
+        for v in iter.clone() {
+            b.alloc(v);
+        }
+
+        let pushed_values = b.iter_allocated_chunks().flat_map(|c| {
+            let (before, mid, after) = unsafe { c.align_to::<T>() };
+            assert!(before.is_empty());
+            assert!(after.is_empty());
+            mid.iter().copied()
+        });
+        assert!(pushed_values.eq(iter.clone().rev()));
+    }
+}
+
+#[test]
+fn with_capacity_test() {
+    with_capacity_helper(0u8..255);
+    with_capacity_helper(0u16..10000);
+    with_capacity_helper(0u32..10000);
+    with_capacity_helper(0u64..10000);
+    with_capacity_helper(0u128..10000);
+}
+
+#[test]
+fn test_reset() {
+    let mut b = Bump::new();
+
+    for i in 0u64..10_000 {
+        b.alloc(i);
+    }
+
+    assert!(b.iter_allocated_chunks().count() > 1);
+
+    let last_chunk = b.iter_allocated_chunks().next().unwrap();
+    let start = last_chunk.as_ptr() as usize;
+    let end = start + last_chunk.len();
+    b.reset();
+    assert_eq!(
+        end - mem::size_of::<u64>(),
+        b.alloc(0u64) as *const u64 as usize
+    );
+    assert_eq!(b.iter_allocated_chunks().count(), 1);
+}
+
+#[test]
+fn test_alignment() {
+    for &alignment in &[2, 4, 8, 16, 32, 64] {
+        let b = Bump::with_capacity(513);
+        let layout = std::alloc::Layout::from_size_align(alignment, alignment).unwrap();
+
+        for _ in 0..1024 {
+            let ptr = b.alloc_layout(layout).as_ptr();
+            assert_eq!(ptr as *const u8 as usize % alignment, 0);
+        }
+    }
+}
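The size picked in oom_instead_of_bump_pointer_overflow, usize::MAX - p + 1, is exactly the number of bytes between address p and the top of the address space, so satisfying the request would wrap the bump pointer past zero. The same arithmetic in miniature, with u8 standing in for usize so the numbers are easy to check:

fn main() {
    let p: u8 = 200;             // stand-in for the bump pointer
    let size = u8::MAX - p + 1;  // 56: bytes from p to the top of "memory"
    // p + size does not fit in a u8, just as the real request cannot fit
    // below usize::MAX, so a correct allocator must report failure.
    assert_eq!(p.checked_add(size), None);
}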
diff --git a/third_party/rust/bumpalo/tests/try_alloc.rs b/third_party/rust/bumpalo/tests/try_alloc.rs
new file mode 100644
index 0000000000..573f138f07
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/try_alloc.rs
@@ -0,0 +1,178 @@
+use bumpalo::Bump;
+use rand::Rng;
+use std::alloc::{GlobalAlloc, Layout, System};
+use std::sync::atomic::{AtomicBool, Ordering};
+
+/// A custom allocator that wraps the system allocator, but lets us force
+/// allocation failures for testing.
+struct Allocator(AtomicBool);
+
+impl Allocator {
+    fn is_returning_null(&self) -> bool {
+        self.0.load(Ordering::SeqCst)
+    }
+
+    fn set_returning_null(&self, returning_null: bool) {
+        self.0.store(returning_null, Ordering::SeqCst);
+    }
+
+    fn toggle_returning_null(&self) {
+        self.set_returning_null(!self.is_returning_null());
+    }
+
+    #[allow(dead_code)] // Silence warnings for non-"collections" builds.
+    fn with_successful_allocs<F, T>(&self, callback: F) -> T
+    where
+        F: FnOnce() -> T,
+    {
+        let old_returning_null = self.is_returning_null();
+        self.set_returning_null(false);
+        let result = callback();
+        self.set_returning_null(old_returning_null);
+        result
+    }
+
+    fn with_alloc_failures<F, T>(&self, callback: F) -> T
+    where
+        F: FnOnce() -> T,
+    {
+        let old_returning_null = self.is_returning_null();
+        self.set_returning_null(true);
+        let result = callback();
+        self.set_returning_null(old_returning_null);
+        result
+    }
+}
+
+unsafe impl GlobalAlloc for Allocator {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        if self.is_returning_null() {
+            core::ptr::null_mut()
+        } else {
+            System.alloc(layout)
+        }
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        System.dealloc(ptr, layout);
+    }
+
+    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+        if self.is_returning_null() {
+            core::ptr::null_mut()
+        } else {
+            System.realloc(ptr, layout, new_size)
+        }
+    }
+}
+
+#[global_allocator]
+static GLOBAL_ALLOCATOR: Allocator = Allocator(AtomicBool::new(false));
+
+/// `assert!` may allocate on failure (e.g. for string formatting and boxing
+/// panic info), so we must re-enable allocations during assertions.
+macro_rules! assert {
+    ($cond:expr $(, $args:tt)*) => {
+        if !$cond {
+            GLOBAL_ALLOCATOR.set_returning_null(false);
+            panic!(concat!("Assertion failed: ", stringify!($cond)));
+        }
+    };
+}
+
+/// NB: We provide our own `main` rather than using the default test harness's
+/// so that we can ensure that tests are executed serially, and no background
+/// threads get tripped up by us disabling the global allocator, or anything
+/// like that.
+fn main() {
+    macro_rules! test {
+        ($name:expr, $test:expr $(,)*) => {
+            ($name, $test as fn())
+        };
+    }
+
+    let tests = [
+        test!("Bump::try_new fails when global allocator fails", || {
+            GLOBAL_ALLOCATOR.with_alloc_failures(|| {
+                assert!(Bump::try_new().is_err());
+            });
+        }),
+        test!(
+            "test try_alloc_layout with and without global allocation failures",
+            || {
+                const NUM_TESTS: usize = 5000;
+                const MAX_BYTES_ALLOCATED: usize = 65536;
+
+                let mut bump = Bump::try_new().unwrap();
+                let mut bytes_allocated = bump.chunk_capacity();
+
+                // Bump preallocates space in the initial chunk, so we need to
+                // use up this block prior to the actual test
+                let layout = Layout::from_size_align(bump.chunk_capacity(), 1).unwrap();
+                assert!(bump.try_alloc_layout(layout).is_ok());
+
+                let mut rng = rand::thread_rng();
+
+                for _ in 0..NUM_TESTS {
+                    if rng.gen() {
+                        GLOBAL_ALLOCATOR.toggle_returning_null();
+                    }
+
+                    let layout = Layout::from_size_align(bump.chunk_capacity() + 1, 1).unwrap();
+                    if GLOBAL_ALLOCATOR.is_returning_null() {
+                        assert!(bump.try_alloc_layout(layout).is_err());
+                    } else {
+                        assert!(bump.try_alloc_layout(layout).is_ok());
+                        bytes_allocated += bump.chunk_capacity();
+                    }
+
+                    if bytes_allocated >= MAX_BYTES_ALLOCATED {
+                        bump = GLOBAL_ALLOCATOR.with_successful_allocs(|| Bump::try_new().unwrap());
+                        bytes_allocated = bump.chunk_capacity();
+                    }
+                }
+            },
+        ),
+        #[cfg(feature = "collections")]
+        test!("test Vec::try_reserve and Vec::try_reserve_exact", || {
+            use bumpalo::collections::Vec;
+
+            let bump = Bump::try_new().unwrap();
+
+            GLOBAL_ALLOCATOR.with_alloc_failures(|| {
+                let mut vec = Vec::<u8>::new_in(&bump);
+                let chunk_cap = bump.chunk_capacity();
+
+                // Will always succeed since this size gets pre-allocated in Bump::try_new()
+                assert!(vec.try_reserve(chunk_cap).is_ok());
+                assert!(vec.try_reserve_exact(chunk_cap).is_ok());
+
+                // Fails to allocate further since the allocator returns null
+                assert!(vec.try_reserve(chunk_cap + 1).is_err());
+                assert!(vec.try_reserve_exact(chunk_cap + 1).is_err());
+            });
+
+            GLOBAL_ALLOCATOR.with_successful_allocs(|| {
+                let mut vec = Vec::<u8>::new_in(&bump);
+                let chunk_cap = bump.chunk_capacity();
+
+                // Will always succeed since this size gets pre-allocated in Bump::try_new()
+                assert!(vec.try_reserve(chunk_cap).is_ok());
+                assert!(vec.try_reserve_exact(chunk_cap).is_ok());
+
+                // Succeeds to allocate further
+                assert!(vec.try_reserve(chunk_cap + 1).is_ok());
+                assert!(vec.try_reserve_exact(chunk_cap + 1).is_ok());
+            });
+        }),
+    ];
+
+    for (name, test) in tests.iter() {
+        assert!(!GLOBAL_ALLOCATOR.is_returning_null());
+
+        eprintln!("=== {} ===", name);
+        test();
+
+        GLOBAL_ALLOCATOR.set_returning_null(false);
+    }
+}
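try_alloc.rs exists to cover the fallible entry points: where alloc_layout panics with "out of memory", try_alloc_layout reports failure as a Result, which is what the harness's assertions rely on. A minimal sketch of the fallible path (variable names and messages are illustrative):

use bumpalo::Bump;
use std::alloc::Layout;

fn main() {
    let bump = Bump::new();
    let layout = Layout::from_size_align(64, 8).unwrap();

    // Unlike `alloc_layout`, this returns Err instead of panicking when
    // the backing allocation fails.
    match bump.try_alloc_layout(layout) {
        Ok(p) => println!("allocated {} bytes at {:p}", layout.size(), p),
        Err(_) => println!("allocation failed"),
    }
}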
diff --git a/third_party/rust/bumpalo/tests/vec.rs b/third_party/rust/bumpalo/tests/vec.rs
new file mode 100644
index 0000000000..b8fd77d3a2
--- /dev/null
+++ b/third_party/rust/bumpalo/tests/vec.rs
@@ -0,0 +1,85 @@
+#![cfg(feature = "collections")]
+use bumpalo::{collections::Vec, vec, Bump};
+use std::cell::Cell;
+
+#[test]
+fn push_a_bunch_of_items() {
+    let b = Bump::new();
+    let mut v = Vec::new_in(&b);
+    for x in 0..10_000 {
+        v.push(x);
+    }
+}
+
+#[test]
+fn trailing_comma_in_vec_macro() {
+    let b = Bump::new();
+    let v = vec![in &b; 1, 2, 3,];
+    assert_eq!(v, [1, 2, 3]);
+}
+
+#[test]
+fn recursive_vecs() {
+    // The purpose of this test is to see if the data structures with
+    // self references are allowed without causing a compile error
+    // because of the dropck
+    let b = Bump::new();
+
+    struct Node<'a> {
+        myself: Cell<Option<&'a Node<'a>>>,
+        edges: Cell<Vec<'a, &'a Node<'a>>>,
+    }
+
+    let node1: &Node = b.alloc(Node {
+        myself: Cell::new(None),
+        edges: Cell::new(Vec::new_in(&b)),
+    });
+    let node2: &Node = b.alloc(Node {
+        myself: Cell::new(None),
+        edges: Cell::new(Vec::new_in(&b)),
+    });
+
+    node1.myself.set(Some(node1));
+    node1.edges.set(bumpalo::vec![in &b; node1, node1, node2]);
+
+    node2.myself.set(Some(node2));
+    node2.edges.set(bumpalo::vec![in &b; node1, node2]);
+}
+
+#[test]
+fn test_into_bump_slice_mut() {
+    let b = Bump::new();
+    let v = bumpalo::vec![in &b; 1, 2, 3];
+    let slice = v.into_bump_slice_mut();
+
+    slice[0] = 3;
+    slice[2] = 1;
+
+    assert_eq!(slice, [3, 2, 1]);
+}
+
+quickcheck::quickcheck! {
+    fn vec_resizes_causing_reallocs(sizes: std::vec::Vec<usize>) -> () {
+        // Exercise `realloc` by doing a bunch of `resize`s followed by
+        // `shrink_to_fit`s.
+
+        let b = Bump::new();
+        let mut v = bumpalo::vec![in &b];
+
+        for len in sizes {
+            // We don't want to get too big and OOM.
+            const MAX_SIZE: usize = 1 << 15;
+
+            // But we want allocations to get fairly close to the minimum chunk
+            // size, so that we are exercising both realloc'ing within a chunk
+            // and when we need new chunks.
+            const MIN_SIZE: usize = 1 << 7;
+
+            let len = std::cmp::min(len, MAX_SIZE);
+            let len = std::cmp::max(len, MIN_SIZE);
+
+            v.resize(len, 0);
+            v.shrink_to_fit();
+        }
+    }
+}
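The final quickcheck in vec.rs drives bumpalo's realloc path by repeatedly growing and shrinking a vector. A fixed-size rendition of the same pattern, kept deliberately small:

use bumpalo::Bump;

fn main() {
    let b = Bump::new();
    let mut v = bumpalo::vec![in &b; 1, 2, 3];

    // Growing past capacity forces a realloc against the arena; shrinking
    // afterwards exercises the in-place shrink path.
    v.resize(1 << 10, 0);
    v.shrink_to_fit();
    assert_eq!(v.len(), 1 << 10);
}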