use crate::cfg::{self, CfgPrivate};
use crate::clear::Clear;
use crate::sync::UnsafeCell;
use crate::Pack;

pub(crate) mod slot;
mod stack;
pub(crate) use self::slot::Slot;

use std::{fmt, marker::PhantomData};

/// A page address encodes the location of a slot within a shard (the page
/// number and offset within that page) as a single linear value.
#[repr(transparent)]
pub(crate) struct Addr<C: cfg::Config = cfg::DefaultConfig> {
    addr: usize,
    _cfg: PhantomData<fn(C)>,
}

impl<C: cfg::Config> Addr<C> {
    const NULL: usize = Self::BITS + 1;

    pub(crate) fn index(self) -> usize {
        // Since every page is twice as large as the previous page, and all page sizes
        // are powers of two, we can determine the page index that contains a given
        // address by counting leading zeros, which tells us what power of two
        // the offset fits into.
        //
        // First, we must shift down to the smallest page size, so that the last
        // offset on the first page becomes 0.
        let shifted = (self.addr + C::INITIAL_SZ) >> C::ADDR_INDEX_SHIFT;
        // Now, we can determine the number of twos places by counting the
        // number of leading zeros (unused twos places) in the number's binary
        // representation, and subtracting that count from the total number of
        // bits in a word.
        cfg::WIDTH - shifted.leading_zeros() as usize
    }

    pub(crate) fn offset(self) -> usize {
        self.addr
    }
}
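// Worked example for `Addr::index` (a sketch, assuming a config where
// `INITIAL_SZ` = 32 and `ADDR_INDEX_SHIFT` = 6, so that pages hold 32, 64,
// 128, ... slots and cover addresses 0..32, 32..96, 96..224, ...; `WIDTH` is
// 64 on a 64-bit target):
//
//   addr    = 100                                // falls on page 2
//   shifted = (100 + 32) >> 6 = 132 >> 6 = 2     // 0b10
//   index   = WIDTH - leading_zeros(2) = 64 - 62 = 2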
pub(crate) trait FreeList<C> {
    fn push<T>(&self, new_head: usize, slot: &Slot<T, C>)
    where
        C: cfg::Config;
}

impl<C: cfg::Config> Pack<C> for Addr<C> {
    const LEN: usize = C::MAX_PAGES + C::ADDR_INDEX_SHIFT;

    type Prev = ();

    fn as_usize(&self) -> usize {
        self.addr
    }

    fn from_usize(addr: usize) -> Self {
        debug_assert!(addr <= Self::BITS);
        Self {
            addr,
            _cfg: PhantomData,
        }
    }
}

pub(crate) type Iter<'a, T, C> = std::iter::FilterMap<
    std::slice::Iter<'a, Slot<Option<T>, C>>,
    fn(&'a Slot<Option<T>, C>) -> Option<&'a T>,
>;

pub(crate) struct Local {
    /// Index of the first slot on the local free list
    head: UnsafeCell<usize>,
}

pub(crate) struct Shared<T, C: cfg::Config> {
    /// The remote free list
    ///
    /// Slots freed from a remote thread are pushed onto this list.
    remote: stack::TransferStack<C>,
    // Total size of the page.
    //
    // If the head index of the local or remote free list is greater than the size of the
    // page, then that free list is empty. If the heads of both free lists are greater than
    // `size`, then there are no slots left on this page.
    size: usize,
    prev_sz: usize,
    slab: UnsafeCell<Option<Slots<T, C>>>,
}

type Slots<T, C> = Box<[Slot<T, C>]>;

impl Local {
    pub(crate) fn new() -> Self {
        Self {
            head: UnsafeCell::new(0),
        }
    }

    #[inline(always)]
    fn head(&self) -> usize {
        self.head.with(|head| unsafe { *head })
    }

    #[inline(always)]
    fn set_head(&self, new_head: usize) {
        self.head.with_mut(|head| unsafe {
            *head = new_head;
        })
    }
}

impl<C: cfg::Config> FreeList<C> for Local {
    fn push<T>(&self, new_head: usize, slot: &Slot<T, C>) {
        slot.set_next(self.head());
        self.set_head(new_head);
    }
}

impl<T, C> Shared<T, C>
where
    C: cfg::Config,
{
    const NULL: usize = Addr::<C>::NULL;

    pub(crate) fn new(size: usize, prev_sz: usize) -> Self {
        Self {
            prev_sz,
            size,
            remote: stack::TransferStack::new(),
            slab: UnsafeCell::new(None),
        }
    }

    /// Returns the head of the free list.
    ///
    /// If there is space on the local list, this returns the head of the
    /// local list. Otherwise, it pops all the slots from the remote list and
    /// returns the head of that list.
    ///
    /// *Note*: The local list's head is reset when setting the new state in
    /// the slot pointed to by the `head` returned from this function.
    #[inline]
    fn pop(&self, local: &Local) -> Option<usize> {
        let head = local.head();

        test_println!("-> local head {:?}", head);

        // are there any items on the local free list? (fast path)
        let head = if head < self.size {
            head
        } else {
            // slow path: if the local free list is empty, pop all the items on
            // the remote free list.
            let head = self.remote.pop_all();

            test_println!("-> remote head {:?}", head);
            head?
        };

        // if the head is still null, both the local and remote free lists are
        // empty --- we can't fit any more items on this page.
        if head == Self::NULL {
            test_println!("-> NULL! {:?}", head);
            None
        } else {
            Some(head)
        }
    }

    /// Returns `true` if storage is currently allocated for this page, `false`
    /// otherwise.
    #[inline]
    fn is_unallocated(&self) -> bool {
        self.slab.with(|s| unsafe { (*s).is_none() })
    }

    #[inline]
    pub(crate) fn with_slot<'a, U>(
        &'a self,
        addr: Addr<C>,
        f: impl FnOnce(&'a Slot<T, C>) -> Option<U>,
    ) -> Option<U> {
        let poff = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", poff);

        self.slab.with(|slab| {
            let slot = unsafe { &*slab }.as_ref()?.get(poff)?;
            f(slot)
        })
    }

    #[inline(always)]
    pub(crate) fn free_list(&self) -> &impl FreeList<C> {
        &self.remote
    }
}

impl<'a, T, C> Shared<Option<T>, C>
where
    C: cfg::Config + 'a,
{
    pub(crate) fn take<F>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> Option<T>
    where
        F: FreeList<C>,
    {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> take: offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref()?;
            let slot = slab.get(offset)?;
            slot.remove_value(gen, offset, free_list)
        })
    }

    pub(crate) fn remove<F: FreeList<C>>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> bool {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();
            if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot.try_remove_value(gen, offset, free_list)
            } else {
                false
            }
        })
    }

    // Need this function separately, as we need to pass a function pointer to
    // `filter_map` and `Slot::value` just returns a `&T`, specifically a
    // `&Option<T>` for this impl.
    fn make_ref(slot: &'a Slot<Option<T>, C>) -> Option<&'a T> {
        slot.value().as_ref()
    }

    pub(crate) fn iter(&self) -> Option<Iter<'a, T, C>> {
        let slab = self.slab.with(|slab| unsafe { (&*slab).as_ref() });
        slab.map(|slab| {
            slab.iter()
                .filter_map(Shared::make_ref as fn(&'a Slot<Option<T>, C>) -> Option<&'a T>)
        })
    }
}

impl<T, C> Shared<T, C>
where
    T: Clear + Default,
    C: cfg::Config,
{
    pub(crate) fn init_with<U>(
        &self,
        local: &Local,
        init: impl FnOnce(usize, &Slot<T, C>) -> Option<U>,
    ) -> Option<U> {
        let head = self.pop(local)?;

        // do we need to allocate storage for this page?
        if self.is_unallocated() {
            self.allocate();
        }

        let index = head + self.prev_sz;

        let result = self.slab.with(|slab| {
            let slab = unsafe { &*(slab) }
                .as_ref()
                .expect("page must have been allocated to insert!");
            let slot = &slab[head];
            let result = init(index, slot)?;
            local.set_head(slot.next());
            Some(result)
        })?;

        test_println!("-> init_with: insert at offset: {}", index);
        Some(result)
    }

    /// Allocates storage for the page's slots.
    #[cold]
    fn allocate(&self) {
        test_println!("-> alloc new page ({})", self.size);
        debug_assert!(self.is_unallocated());

        let mut slab = Vec::with_capacity(self.size);
        slab.extend((1..self.size).map(Slot::new));
        slab.push(Slot::new(Self::NULL));
        self.slab.with_mut(|s| {
            // safety: this mut access is safe --- it only occurs to initially
            // allocate the page, which only happens on this thread; if the
            // page has not yet been allocated, other threads will not try to
            // access it yet.
            unsafe {
                *s = Some(slab.into_boxed_slice());
            }
        });
    }
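    // Illustrative sketch (using a hypothetical page with `size = 4`): after
    // `allocate` runs, each slot's `next` pointer links it to its successor,
    // and the final slot is terminated with `NULL`, so the fresh page forms
    // one contiguous local free list starting at head 0:
    //
    //   local head = 0
    //   slots:   [ next: 1 ][ next: 2 ][ next: 3 ][ next: NULL ]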
    pub(crate) fn mark_clear<F: FreeList<C>>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> bool {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();
            if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot.try_clear_storage(gen, offset, free_list)
            } else {
                false
            }
        })
    }

    pub(crate) fn clear<F: FreeList<C>>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> bool {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();
            if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot.clear_storage(gen, offset, free_list)
            } else {
                false
            }
        })
    }
}

impl fmt::Debug for Local {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.head.with(|head| {
            let head = unsafe { *head };
            f.debug_struct("Local")
                .field("head", &format_args!("{:#0x}", head))
                .finish()
        })
    }
}

impl<T, C: cfg::Config> fmt::Debug for Shared<T, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Shared")
            .field("remote", &self.remote)
            .field("prev_sz", &self.prev_sz)
            .field("size", &self.size)
            // .field("slab", &self.slab)
            .finish()
    }
}

impl<C: cfg::Config> fmt::Debug for Addr<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Addr")
            .field("addr", &format_args!("{:#0x}", &self.addr))
            .field("index", &self.index())
            .field("offset", &self.offset())
            .finish()
    }
}

impl<C: cfg::Config> PartialEq for Addr<C> {
    fn eq(&self, other: &Self) -> bool {
        self.addr == other.addr
    }
}

impl<C: cfg::Config> Eq for Addr<C> {}

impl<C: cfg::Config> PartialOrd for Addr<C> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.addr.partial_cmp(&other.addr)
    }
}

impl<C: cfg::Config> Ord for Addr<C> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.addr.cmp(&other.addr)
    }
}

impl<C: cfg::Config> Clone for Addr<C> {
    fn clone(&self) -> Self {
        Self::from_usize(self.addr)
    }
}

impl<C: cfg::Config> Copy for Addr<C> {}

#[inline(always)]
pub(crate) fn indices<C: cfg::Config>(idx: usize) -> (Addr<C>, usize) {
    let addr = C::unpack_addr(idx);
    (addr, addr.index())
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::Pack;
    use proptest::prelude::*;

    proptest! {
        #[test]
        fn addr_roundtrips(pidx in 0usize..Addr::<cfg::DefaultConfig>::BITS) {
            let addr = Addr::<cfg::DefaultConfig>::from_usize(pidx);
            let packed = addr.pack(0);
            assert_eq!(addr, Addr::from_packed(packed));
        }

        #[test]
        fn gen_roundtrips(gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS) {
            let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
            let packed = gen.pack(0);
            assert_eq!(gen, slot::Generation::from_packed(packed));
        }

        #[test]
        fn page_roundtrips(
            gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS,
            addr in 0usize..Addr::<cfg::DefaultConfig>::BITS,
        ) {
            let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
            let addr = Addr::<cfg::DefaultConfig>::from_usize(addr);
            let packed = gen.pack(addr.pack(0));
            assert_eq!(addr, Addr::from_packed(packed));
            assert_eq!(gen, slot::Generation::from_packed(packed));
        }
    }
}
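// A minimal additional sketch (not part of the original proptest suite),
// checking the `Addr::index` arithmetic at page boundaries. Assuming the
// doubling page layout described above, page `k` begins at address
// `INITIAL_SZ * (2^k - 1)`, so `index` should return `k` exactly at each of
// those starting addresses.
#[cfg(test)]
mod index_example {
    use super::*;

    #[test]
    fn index_matches_page_starts() {
        for k in 0usize..4 {
            // first address on page `k`
            let start = cfg::DefaultConfig::INITIAL_SZ * ((1usize << k) - 1);
            let addr = Addr::<cfg::DefaultConfig>::from_usize(start);
            assert_eq!(addr.index(), k);
        }
    }
}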