Diffstat (limited to 'third_party/rust/hashbrown/src/raw')
-rw-r--r--  third_party/rust/hashbrown/src/raw/alloc.rs    |   57
-rw-r--r--  third_party/rust/hashbrown/src/raw/bitmask.rs  |   99
-rw-r--r--  third_party/rust/hashbrown/src/raw/generic.rs  |   59
-rw-r--r--  third_party/rust/hashbrown/src/raw/mod.rs      | 3301
-rw-r--r--  third_party/rust/hashbrown/src/raw/neon.rs     |  124
-rw-r--r--  third_party/rust/hashbrown/src/raw/sse2.rs     |   31
6 files changed, 3091 insertions(+), 580 deletions(-)
diff --git a/third_party/rust/hashbrown/src/raw/alloc.rs b/third_party/rust/hashbrown/src/raw/alloc.rs
index ba09ea9de7..15299e7b09 100644
--- a/third_party/rust/hashbrown/src/raw/alloc.rs
+++ b/third_party/rust/hashbrown/src/raw/alloc.rs
@@ -1,5 +1,9 @@
pub(crate) use self::inner::{do_alloc, Allocator, Global};
+// Nightly case.
+// Uses the unstable `allocator_api` feature.
+// This is compatible with `allocator-api2`, which can be enabled or not.
+// This is used when building for `std`.
#[cfg(feature = "nightly")]
mod inner {
use crate::alloc::alloc::Layout;
@@ -7,28 +11,44 @@ mod inner {
use core::ptr::NonNull;
#[allow(clippy::map_err_ignore)]
- pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
match alloc.allocate(layout) {
Ok(ptr) => Ok(ptr.as_non_null_ptr()),
Err(_) => Err(()),
}
}
+}
- #[cfg(feature = "bumpalo")]
- unsafe impl Allocator for crate::BumpWrapper<'_> {
- #[inline]
- fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
- match self.0.try_alloc_layout(layout) {
- Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())),
- Err(_) => Err(core::alloc::AllocError),
- }
+// Basic non-nightly case.
+// This uses `allocator-api2`, which is enabled by default.
+// If any crate enables "nightly" in `allocator-api2`,
+// this will be equivalent to the nightly case,
+// since `allocator_api2::alloc::Allocator` would be a re-export of
+// `core::alloc::Allocator`.
+#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))]
+mod inner {
+ use crate::alloc::alloc::Layout;
+ pub use allocator_api2::alloc::{Allocator, Global};
+ use core::ptr::NonNull;
+
+ #[allow(clippy::map_err_ignore)]
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ match alloc.allocate(layout) {
+ Ok(ptr) => Ok(ptr.cast()),
+ Err(_) => Err(()),
}
- #[inline]
- unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
}
}
-#[cfg(not(feature = "nightly"))]
+// No-defaults case.
+// When building with default-features turned off and
+// neither `nightly` nor `allocator-api2` is enabled,
+// this will be used.
+// This makes it impossible to use any custom allocator with collections defined
+// in this crate.
+// Any crate in the build tree can enable `allocator-api2`
+// or `nightly` without disturbing users that don't want to use it.
+#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))]
mod inner {
use crate::alloc::alloc::{alloc, dealloc, Layout};
use core::ptr::NonNull;
@@ -41,6 +61,7 @@ mod inner {
#[derive(Copy, Clone)]
pub struct Global;
+
unsafe impl Allocator for Global {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
@@ -51,6 +72,7 @@ mod inner {
dealloc(ptr.as_ptr(), layout);
}
}
+
impl Default for Global {
#[inline]
fn default() -> Self {
@@ -58,16 +80,7 @@ mod inner {
}
}
- pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
alloc.allocate(layout)
}
-
- #[cfg(feature = "bumpalo")]
- unsafe impl Allocator for crate::BumpWrapper<'_> {
- #[allow(clippy::map_err_ignore)]
- fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
- self.0.try_alloc_layout(layout).map_err(|_| ())
- }
- unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
- }
}
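For context, the three `inner` modules above all expose the same tiny allocation shim. A minimal sketch (not part of the vendored code) of how the non-nightly shim behaves, assuming `allocator-api2` is available as a dependency:

    use allocator_api2::alloc::{Allocator, Global};
    use core::alloc::Layout;
    use core::ptr::NonNull;

    // Mirrors the non-nightly `do_alloc` above: flatten the allocator's
    // `Result<NonNull<[u8]>, AllocError>` into `Result<NonNull<u8>, ()>`.
    fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
        match alloc.allocate(layout) {
            Ok(ptr) => Ok(ptr.cast()),
            Err(_) => Err(()),
        }
    }

    fn main() {
        let layout = Layout::array::<u64>(8).unwrap();
        let ptr = do_alloc(&Global, layout).expect("allocation failed");
        // SAFETY: `ptr` was just allocated from `Global` with this exact `layout`.
        unsafe { Global.deallocate(ptr, layout) };
    }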
diff --git a/third_party/rust/hashbrown/src/raw/bitmask.rs b/third_party/rust/hashbrown/src/raw/bitmask.rs
index 7d4f9fc387..6576b3c5c0 100644
--- a/third_party/rust/hashbrown/src/raw/bitmask.rs
+++ b/third_party/rust/hashbrown/src/raw/bitmask.rs
@@ -1,6 +1,6 @@
-use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE};
-#[cfg(feature = "nightly")]
-use core::intrinsics;
+use super::imp::{
+ BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE,
+};
/// A bit mask which contains the result of a `Match` operation on a `Group` and
/// allows iterating through them.
@@ -8,75 +8,55 @@ use core::intrinsics;
/// The bit mask is arranged so that low-order bits represent lower memory
/// addresses for group match results.
///
-/// For implementation reasons, the bits in the set may be sparsely packed, so
-/// that there is only one bit-per-byte used (the high bit, 7). If this is the
+/// For implementation reasons, the bits in the set may be sparsely packed with
+/// groups of 8 bits representing one element. If any of these bits are non-zero
+/// then this element is considered to be true in the mask. If this is the
/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
/// similarly a mask of all the actually-used bits.
+///
+/// To iterate over a bit mask, it must be converted to a form where only 1 bit
+/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the
+/// mask bits.
#[derive(Copy, Clone)]
-pub struct BitMask(pub BitMaskWord);
+pub(crate) struct BitMask(pub(crate) BitMaskWord);
#[allow(clippy::use_self)]
impl BitMask {
/// Returns a new `BitMask` with all bits inverted.
#[inline]
#[must_use]
- pub fn invert(self) -> Self {
+ #[allow(dead_code)]
+ pub(crate) fn invert(self) -> Self {
BitMask(self.0 ^ BITMASK_MASK)
}
- /// Flip the bit in the mask for the entry at the given index.
- ///
- /// Returns the bit's previous state.
- #[inline]
- #[allow(clippy::cast_ptr_alignment)]
- #[cfg(feature = "raw")]
- pub unsafe fn flip(&mut self, index: usize) -> bool {
- // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
- let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
- self.0 ^= mask;
- // The bit was set if the bit is now 0.
- self.0 & mask == 0
- }
-
/// Returns a new `BitMask` with the lowest bit removed.
#[inline]
#[must_use]
- pub fn remove_lowest_bit(self) -> Self {
+ fn remove_lowest_bit(self) -> Self {
BitMask(self.0 & (self.0 - 1))
}
+
/// Returns whether the `BitMask` has at least one set bit.
#[inline]
- pub fn any_bit_set(self) -> bool {
+ pub(crate) fn any_bit_set(self) -> bool {
self.0 != 0
}
/// Returns the first set bit in the `BitMask`, if there is one.
#[inline]
- pub fn lowest_set_bit(self) -> Option<usize> {
- if self.0 == 0 {
- None
+ pub(crate) fn lowest_set_bit(self) -> Option<usize> {
+ if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) {
+ Some(Self::nonzero_trailing_zeros(nonzero))
} else {
- Some(unsafe { self.lowest_set_bit_nonzero() })
+ None
}
}
- /// Returns the first set bit in the `BitMask`, if there is one. The
- /// bitmask must not be empty.
- #[inline]
- #[cfg(feature = "nightly")]
- pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
- intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE
- }
- #[inline]
- #[cfg(not(feature = "nightly"))]
- pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
- self.trailing_zeros()
- }
-
/// Returns the number of trailing zeroes in the `BitMask`.
#[inline]
- pub fn trailing_zeros(self) -> usize {
+ pub(crate) fn trailing_zeros(self) -> usize {
// ARM doesn't have a trailing_zeroes instruction, and instead uses
// reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
// versions (pre-ARMv7) don't have RBIT and need to emulate it
@@ -89,9 +69,21 @@ impl BitMask {
}
}
+ /// Same as above but takes a `NonZeroBitMaskWord`.
+ #[inline]
+ fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize {
+ if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
+ // SAFETY: A byte-swapped non-zero value is still non-zero.
+ let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) };
+ swapped.leading_zeros() as usize / BITMASK_STRIDE
+ } else {
+ nonzero.trailing_zeros() as usize / BITMASK_STRIDE
+ }
+ }
+
/// Returns the number of leading zeroes in the `BitMask`.
#[inline]
- pub fn leading_zeros(self) -> usize {
+ pub(crate) fn leading_zeros(self) -> usize {
self.0.leading_zeros() as usize / BITMASK_STRIDE
}
}
@@ -102,13 +94,32 @@ impl IntoIterator for BitMask {
#[inline]
fn into_iter(self) -> BitMaskIter {
- BitMaskIter(self)
+ // A BitMask only requires each element (group of bits) to be non-zero.
+ // However for iteration we need each element to only contain 1 bit.
+ BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK))
}
}
/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
-pub struct BitMaskIter(BitMask);
+#[derive(Copy, Clone)]
+pub(crate) struct BitMaskIter(pub(crate) BitMask);
+
+impl BitMaskIter {
+ /// Flip the bit in the mask for the entry at the given index.
+ ///
+ /// Returns the bit's previous state.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ #[cfg(feature = "raw")]
+ pub(crate) unsafe fn flip(&mut self, index: usize) -> bool {
+ // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
+ let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
+ self.0 .0 ^= mask;
+ // The bit was set if the bit is now 0.
+ self.0 .0 & mask == 0
+ }
+}
impl Iterator for BitMaskIter {
type Item = usize;
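A minimal sketch (not part of the diff) of the index arithmetic `BitMaskIter` relies on for the generic backend, assuming a byte-per-element mask with `BITMASK_STRIDE == 8` and one set bit (the high bit) per matching element:

    const BITMASK_STRIDE: usize = 8;

    // Collect the element indices encoded in the mask, the way the iterator does:
    // take the lowest set bit's position divided by the stride, then clear that
    // bit as `remove_lowest_bit` does (i.e. `mask & (mask - 1)`).
    fn indices(mut mask: u64) -> Vec<usize> {
        let mut out = Vec::new();
        while mask != 0 {
            out.push(mask.trailing_zeros() as usize / BITMASK_STRIDE);
            mask &= mask - 1;
        }
        out
    }

    fn main() {
        // Elements 1 and 3 matched: their bytes carry the 0x80 high bit.
        let mask = (0x80u64 << 8) | (0x80u64 << 24);
        assert_eq!(indices(mask), vec![1, 3]);
    }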
diff --git a/third_party/rust/hashbrown/src/raw/generic.rs b/third_party/rust/hashbrown/src/raw/generic.rs
index b4d31e62c2..c668b0642a 100644
--- a/third_party/rust/hashbrown/src/raw/generic.rs
+++ b/third_party/rust/hashbrown/src/raw/generic.rs
@@ -5,26 +5,29 @@ use core::{mem, ptr};
// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
-#[cfg(any(
- target_pointer_width = "64",
- target_arch = "aarch64",
- target_arch = "x86_64",
- target_arch = "wasm32",
-))]
-type GroupWord = u64;
-#[cfg(all(
- target_pointer_width = "32",
- not(target_arch = "aarch64"),
- not(target_arch = "x86_64"),
- not(target_arch = "wasm32"),
-))]
-type GroupWord = u32;
-pub type BitMaskWord = GroupWord;
-pub const BITMASK_STRIDE: usize = 8;
+cfg_if! {
+ if #[cfg(any(
+ target_pointer_width = "64",
+ target_arch = "aarch64",
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ ))] {
+ type GroupWord = u64;
+ type NonZeroGroupWord = core::num::NonZeroU64;
+ } else {
+ type GroupWord = u32;
+ type NonZeroGroupWord = core::num::NonZeroU32;
+ }
+}
+
+pub(crate) type BitMaskWord = GroupWord;
+pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord;
+pub(crate) const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
-pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Helper function to replicate a byte across a `GroupWord`.
#[inline]
@@ -37,7 +40,7 @@ fn repeat(byte: u8) -> GroupWord {
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
-pub struct Group(GroupWord);
+pub(crate) struct Group(GroupWord);
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
@@ -46,14 +49,14 @@ pub struct Group(GroupWord);
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
- pub const WIDTH: usize = mem::size_of::<Self>();
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
- pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
@@ -69,7 +72,7 @@ impl Group {
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
- pub unsafe fn load(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
Group(ptr::read_unaligned(ptr.cast()))
}
@@ -77,7 +80,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(ptr::read(ptr.cast()))
@@ -87,7 +90,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn store_aligned(self, ptr: *mut u8) {
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
ptr::write(ptr.cast(), self.0);
@@ -104,7 +107,7 @@ impl Group {
/// - This only happens if there is at least 1 true match.
/// - The chance of this happening is very low (< 1% chance per byte).
#[inline]
- pub fn match_byte(self, byte: u8) -> BitMask {
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
// This algorithm is derived from
// https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
let cmp = self.0 ^ repeat(byte);
@@ -114,7 +117,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
- pub fn match_empty(self) -> BitMask {
+ pub(crate) fn match_empty(self) -> BitMask {
// If the high bit is set, then the byte must be either:
// 1111_1111 (EMPTY) or 1000_0000 (DELETED).
// So we can just check if the top two bits are 1 by ANDing them.
@@ -124,14 +127,14 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
- pub fn match_empty_or_deleted(self) -> BitMask {
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask((self.0 & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
- pub fn match_full(self) -> BitMask {
+ pub(crate) fn match_full(self) -> BitMask {
self.match_empty_or_deleted().invert()
}
@@ -140,7 +143,7 @@ impl Group {
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
- pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
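A minimal sketch (not part of the diff) of the word-sized byte-matching trick that `Group::match_byte` uses on the generic path, written against a plain `u64`:

    // Broadcast a byte across all lanes of the word, as `repeat` does above.
    fn repeat(byte: u8) -> u64 {
        u64::from_ne_bytes([byte; 8])
    }

    // Zero-byte detection from the linked bithacks page: a lane's high bit ends
    // up set where `group` equals `byte` (modulo the documented rare false
    // positives).
    fn match_byte(group: u64, byte: u8) -> u64 {
        let cmp = group ^ repeat(byte);
        cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)
    }

    fn main() {
        let group = u64::from_ne_bytes([0x11, 0x7f, 0x11, 0xff, 0x11, 0x80, 0x11, 0x11]);
        let mask = match_byte(group, 0x7f);
        assert_eq!(mask.trailing_zeros() / 8, 1); // byte 1 holds 0x7f
    }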
diff --git a/third_party/rust/hashbrown/src/raw/mod.rs b/third_party/rust/hashbrown/src/raw/mod.rs
index 211b818a5f..c8e8e29122 100644
--- a/third_party/rust/hashbrown/src/raw/mod.rs
+++ b/third_party/rust/hashbrown/src/raw/mod.rs
@@ -4,7 +4,6 @@ use crate::TryReserveError;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
-use core::mem::ManuallyDrop;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::{hint, ptr};
@@ -21,12 +20,21 @@ cfg_if! {
if #[cfg(all(
target_feature = "sse2",
any(target_arch = "x86", target_arch = "x86_64"),
- not(miri)
+ not(miri),
))] {
mod sse2;
use sse2 as imp;
+ } else if #[cfg(all(
+ target_arch = "aarch64",
+ target_feature = "neon",
+ // NEON intrinsics are currently broken on big-endian targets.
+ // See https://github.com/rust-lang/stdarch/issues/1484.
+ target_endian = "little",
+ not(miri),
+ ))] {
+ mod neon;
+ use neon as imp;
} else {
- #[path = "generic.rs"]
mod generic;
use generic as imp;
}
@@ -37,36 +45,24 @@ pub(crate) use self::alloc::{do_alloc, Allocator, Global};
mod bitmask;
-use self::bitmask::{BitMask, BitMaskIter};
+use self::bitmask::BitMaskIter;
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as likely;
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as unlikely;
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
-// On stable we can use #[cold] to get a equivalent effect: this attributes
-// suggests that the function is unlikely to be called
-#[cfg(not(feature = "nightly"))]
-#[inline]
-#[cold]
-fn cold() {}
-
-#[cfg(not(feature = "nightly"))]
-#[inline]
-fn likely(b: bool) -> bool {
- if !b {
- cold();
- }
- b
-}
-#[cfg(not(feature = "nightly"))]
-#[inline]
-fn unlikely(b: bool) -> bool {
- if b {
- cold();
- }
- b
+// FIXME: use strict provenance functions once they are stable.
+// Implement it with a transmute for now.
+#[inline(always)]
+#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here
+fn invalid_mut<T>(addr: usize) -> *mut T {
+ unsafe { core::mem::transmute(addr) }
}
#[inline]
@@ -101,6 +97,13 @@ impl Fallibility {
}
}
+trait SizedTypeProperties: Sized {
+ const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
+ const NEEDS_DROP: bool = mem::needs_drop::<Self>();
+}
+
+impl<T> SizedTypeProperties for T {}
+
/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;
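A minimal sketch (not part of the diff) of what the `SizedTypeProperties` blanket impl above provides: compile-time constants answering "is this a ZST?" and "does this need drop?" for every sized type:

    use core::mem;

    trait SizedTypeProperties: Sized {
        const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
        const NEEDS_DROP: bool = mem::needs_drop::<Self>();
    }

    impl<T> SizedTypeProperties for T {}

    fn main() {
        assert!(<() as SizedTypeProperties>::IS_ZERO_SIZED);
        assert!(!u64::IS_ZERO_SIZED);
        assert!(String::NEEDS_DROP); // owns heap memory, so it has a destructor
        assert!(!u64::NEEDS_DROP);
    }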
@@ -134,6 +137,13 @@ fn h1(hash: u64) -> usize {
hash as usize
}
+// Constant for the h2 function that grabs the top 7 bits of the hash.
+const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
+ mem::size_of::<usize>()
+} else {
+ mem::size_of::<u64>()
+};
+
/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
@@ -141,8 +151,8 @@ fn h2(hash: u64) -> u8 {
// Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
// value, some hash functions (such as FxHash) produce a usize result
// instead, which means that the top 32 bits are 0 on 32-bit platforms.
- let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
- let top7 = hash >> (hash_len * 8 - 7);
+ // So we use MIN_HASH_LEN constant to handle this.
+ let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
(top7 & 0x7f) as u8 // truncation
}
@@ -230,11 +240,15 @@ struct TableLayout {
impl TableLayout {
#[inline]
- fn new<T>() -> Self {
+ const fn new<T>() -> Self {
let layout = Layout::new::<T>();
Self {
size: layout.size(),
- ctrl_align: usize::max(layout.align(), Group::WIDTH),
+ ctrl_align: if layout.align() > Group::WIDTH {
+ layout.align()
+ } else {
+ Group::WIDTH
+ },
}
}
@@ -248,6 +262,12 @@ impl TableLayout {
size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
+ // We need an additional check to ensure that the allocation doesn't
+ // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
+ if len > isize::MAX as usize - (ctrl_align - 1) {
+ return None;
+ }
+
Some((
unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
ctrl_offset,
@@ -255,14 +275,9 @@ impl TableLayout {
}
}
-/// Returns a Layout which describes the allocation required for a hash table,
-/// and the offset of the control bytes in the allocation.
-/// (the offset is also one past last element of buckets)
-///
-/// Returns `None` if an overflow occurs.
-#[cfg_attr(feature = "inline-more", inline)]
-fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
- TableLayout::new::<T>().calculate_layout_for(buckets)
+/// A reference to an empty bucket into which an element can be inserted.
+pub struct InsertSlot {
+ index: usize,
}
/// A reference to a hash table bucket containing a `T`.
@@ -290,11 +305,79 @@ impl<T> Clone for Bucket<T> {
}
impl<T> Bucket<T> {
+ /// Creates a [`Bucket`] that contains a pointer to the data.
+ /// The pointer calculation is performed by calculating the
+ /// offset from given `base` pointer (convenience for
+ /// `base.as_ptr().sub(index)`).
+ ///
+ /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// If the `T` is a ZST, then we instead track the index of the element
+ /// in the table so that `erase` works properly (return
+ /// `NonNull::new_unchecked((index + 1) as *mut T)`)
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
+ /// rules of [`NonNull::new_unchecked`] function.
+ ///
+ /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
+ /// and [`NonNull::new_unchecked`] function, as well as for the correct
+ /// logic of the work of this crate, the following rules are necessary and
+ /// sufficient:
+ ///
+ /// * the `base` pointer must not be `dangling` and must point to the
+ /// end of the first `value element` from the `data part` of the table, i.e.
+ /// must be the pointer returned by [`RawTable::data_end`] or by
+ /// [`RawTableInner::data_end<T>`];
+ ///
+ /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
+ /// must be no greater than the number returned by the function
+ /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
+ /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
+ /// must be no greater than the number returned by the function
+ /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
+ /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
+ /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
+ /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
+ /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
#[inline]
unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
- let ptr = if mem::size_of::<T>() == 0 {
- // won't overflow because index must be less than length
- (index + 1) as *mut T
+ // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
+ // the data part of the table (we start counting from "0", so that
+ // in the expression T[last], the "last" index is actually one less than the
+ // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
+ //
+ // `from_base_index(base, 1).as_ptr()` returns a pointer that
+ // points here in the data part of the table
+ // (to the start of T1)
+ // |
+ // | `base: NonNull<T>` must point here
+ // | (to the end of T0 or to the start of C0)
+ // v v
+ // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
+ // ^
+ // `from_base_index(base, 1)` returns a pointer
+ // that points here in the data part of the table
+ // (to the end of T1)
+ //
+ // where: T0...Tlast - our stored data; C0...Clast - control bytes
+ // or metadata for data.
+ let ptr = if T::IS_ZERO_SIZED {
+ // won't overflow because index must be less than length (bucket_mask)
+ // and bucket_mask is guaranteed to be less than `isize::MAX`
+ // (see TableLayout::calculate_layout_for method)
+ invalid_mut(index + 1)
} else {
base.as_ptr().sub(index)
};
@@ -302,27 +385,183 @@ impl<T> Bucket<T> {
ptr: NonNull::new_unchecked(ptr),
}
}
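A minimal sketch (not part of the diff) of the reverse bucket layout described in the diagram above, using a plain array as a stand-in for the data part of the table:

    fn main() {
        let data = [10u32, 20, 30, 40]; // stand-ins for T3, T2, T1, T0
        unsafe {
            // Plays the role of `RawTable::data_end`: one past the element that
            // sits next to the control bytes (the "end of T0").
            let data_end = data.as_ptr().add(data.len());
            // `from_base_index(base, index)` computes `base.sub(index)`, and
            // `Bucket::as_ptr` subtracts one more element to reach the start of T_index.
            assert_eq!(*data_end.sub(0).sub(1), 40); // T0: highest address, next to ctrl bytes
            assert_eq!(*data_end.sub(3).sub(1), 10); // T3: lowest address
        }
    }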
+
+ /// Calculates the index of a [`Bucket`] as distance between two pointers
+ /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
+ /// The returned value is in units of T: the distance in bytes divided by
+ /// [`core::mem::size_of::<T>()`].
+ ///
+ /// If the `T` is a ZST, then we return the index of the element in
+ /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
+ ///
+ /// This function is the inverse of [`from_base_index`].
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
+ ///
+ /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
+ /// method, as well as for the correct logic of the work of this crate, the
+ /// following rules are necessary and sufficient:
+ ///
+ /// * `base` contained pointer must not be `dangling` and must point to the
+ /// end of the first `element` from the `data part` of the table, i.e.
+ /// must be a pointer returned by [`RawTable::data_end`] or by
+ /// [`RawTableInner::data_end<T>`];
+ ///
+ /// * `self` also must not contain a dangling pointer;
+ ///
+ /// * both `self` and `base` must be created from the same [`RawTable`]
+ /// (or [`RawTableInner`]).
+ ///
+ /// If `mem::size_of::<T>() == 0`, this function is always safe.
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`from_base_index`]: crate::raw::Bucket::from_base_index
+ /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
+ /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTableInner`]: RawTableInner
+ /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
#[inline]
unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
- if mem::size_of::<T>() == 0 {
+ // If mem::size_of::<T>() != 0 then return an index under which we used to store the
+ // `element` in the data part of the table (we start counting from "0", so
+ // that in the expression T[last], the "last" index actually is one less than the
+ // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
+ // For example for 5th element in table calculation is performed like this:
+ //
+ // mem::size_of::<T>()
+ // |
+ // | `self = from_base_index(base, 5)` that returns pointer
+ // | that points here in tha data part of the table
+ // | (to the end of T5)
+ // | | `base: NonNull<T>` must point here
+ // v | (to the end of T0 or to the start of C0)
+ // /???\ v v
+ // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
+ // \__________ __________/
+ // \/
+ // `bucket.to_base_index(base)` = 5
+ // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
+ //
+ // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
+ if T::IS_ZERO_SIZED {
+ // this can not be UB
self.ptr.as_ptr() as usize - 1
} else {
offset_from(base.as_ptr(), self.ptr.as_ptr())
}
}
+
+ /// Acquires the underlying raw pointer `*mut T` to `data`.
+ ///
+ /// # Note
+ ///
+ /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
+ /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
+ /// for properly dropping the data we also need to clear `data` control bytes. If we
+ /// drop the data but do not clear the `data control byte`, it leads to a double drop when
+ /// [`RawTable`] goes out of scope.
+ ///
+ /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
+ /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
+ /// will not re-evaluate where the new value should go, meaning the value may become
+ /// "lost" if its location does not reflect its state.
+ ///
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value = ("a", 100);
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap();
+ ///
+ /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100));
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub fn as_ptr(&self) -> *mut T {
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZERO_SIZED {
// Just return an arbitrary ZST pointer which is properly aligned
- mem::align_of::<T>() as *mut T
+ // invalid pointer is good enough for ZST
+ invalid_mut(mem::align_of::<T>())
} else {
unsafe { self.ptr.as_ptr().sub(1) }
}
}
+
+ /// Create a new [`Bucket`] that is offset from the `self` by the given
+ /// `offset`. The pointer calculation is performed by calculating the
+ /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
+ /// This function is used for iterators.
+ ///
+ /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
+ /// rules of [`NonNull::new_unchecked`] function.
+ ///
+ /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
+ /// and [`NonNull::new_unchecked`] function, as well as for the correct
+ /// logic of the work of this crate, the following rules are necessary and
+ /// sufficient:
+ ///
+ /// * `self` contained pointer must not be `dangling`;
+ ///
+ /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
+ /// words, `self.to_base_index() + offset + 1` must be no greater than the number returned
+ /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that
+ /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
+ /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
+ /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
+ /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
+ /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
#[inline]
unsafe fn next_n(&self, offset: usize) -> Self {
- let ptr = if mem::size_of::<T>() == 0 {
- (self.ptr.as_ptr() as usize + offset) as *mut T
+ let ptr = if T::IS_ZERO_SIZED {
+ // invalid pointer is good enough for ZST
+ invalid_mut(self.ptr.as_ptr() as usize + offset)
} else {
self.ptr.as_ptr().sub(offset)
};
@@ -330,26 +569,212 @@ impl<T> Bucket<T> {
ptr: NonNull::new_unchecked(ptr),
}
}
+
+ /// Executes the destructor (if any) of the pointed-to `data`.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns.
+ ///
+ /// You should use [`RawTable::erase`] instead of this function,
+ /// or be careful with calling this function directly, because for
+ /// properly dropping the data we also need to clear the `data` control bytes.
+ /// If we drop the data but do not erase the `data control byte`, it leads to
+ /// a double drop when [`RawTable`] goes out of scope.
+ ///
+ /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTable::erase`]: crate::raw::RawTable::erase
#[cfg_attr(feature = "inline-more", inline)]
- pub unsafe fn drop(&self) {
+ pub(crate) unsafe fn drop(&self) {
self.as_ptr().drop_in_place();
}
+
+ /// Reads the `value` from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::read`] for safety concerns.
+ ///
+ /// You should use [`RawTable::remove`] instead of this function,
+ /// or be careful with calling this function directly, because the compiler
+ /// calls its destructor when the read `value` goes out of scope. This
+ /// can cause a double drop when [`RawTable`] goes out of scope,
+ /// because the `data control byte` is not erased.
+ ///
+ /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTable::remove`]: crate::raw::RawTable::remove
#[inline]
- pub unsafe fn read(&self) -> T {
+ pub(crate) unsafe fn read(&self) -> T {
self.as_ptr().read()
}
+
+ /// Overwrites a memory location with the given `value` without reading
+ /// or dropping the old value (like [`ptr::write`] function).
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::write`] for safety concerns.
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if its location
+ /// does not reflect its state.
+ ///
+ /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
#[inline]
- pub unsafe fn write(&self, val: T) {
+ pub(crate) unsafe fn write(&self, val: T) {
self.as_ptr().write(val);
}
+
+ /// Returns a shared immutable reference to the `value`.
+ ///
+ /// # Safety
+ ///
+ /// See [`NonNull::as_ref`] for safety concerns.
+ ///
+ /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
+ ///
+ /// assert_eq!(
+ /// unsafe { bucket.as_ref() },
+ /// &("A pony", "is a small horse".to_owned())
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub unsafe fn as_ref<'a>(&self) -> &'a T {
&*self.as_ptr()
}
+
+ /// Returns a unique mutable reference to the `value`.
+ ///
+ /// # Safety
+ ///
+ /// See [`NonNull::as_mut`] for safety concerns.
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if its location
+ /// does not reflect its state.
+ ///
+ /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
+ ///
+ /// unsafe {
+ /// bucket
+ /// .as_mut()
+ /// .1
+ /// .push_str(" less than 147 cm at the withers")
+ /// };
+ /// assert_eq!(
+ /// unsafe { bucket.as_ref() },
+ /// &(
+ /// "A pony",
+ /// "is a small horse less than 147 cm at the withers".to_owned()
+ /// )
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
&mut *self.as_ptr()
}
+
+ /// Copies `size_of::<T>` bytes from `other` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns.
+ ///
+ /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
+ /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
+ /// in the region beginning at `*self` and the region beginning at `*other` can
+ /// [violate memory safety].
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if its location
+ /// does not reflect its state.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
+ /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
+ /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
#[cfg(feature = "raw")]
#[inline]
pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
@@ -358,15 +783,16 @@ impl<T> Bucket<T> {
}
/// A raw hash table with an unsafe API.
-pub struct RawTable<T, A: Allocator + Clone = Global> {
- table: RawTableInner<A>,
+pub struct RawTable<T, A: Allocator = Global> {
+ table: RawTableInner,
+ alloc: A,
// Tell dropck that we own instances of T.
marker: PhantomData<T>,
}
/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
-struct RawTableInner<A> {
+struct RawTableInner {
// Mask to get an index from a hash value. The value is one less than the
// number of buckets in the table.
bucket_mask: usize,
@@ -380,8 +806,6 @@ struct RawTableInner<A> {
// Number of elements in the table, only really used by len()
items: usize,
-
- alloc: A,
}
impl<T> RawTable<T, Global> {
@@ -393,7 +817,8 @@ impl<T> RawTable<T, Global> {
#[inline]
pub const fn new() -> Self {
Self {
- table: RawTableInner::new_in(Global),
+ table: RawTableInner::NEW,
+ alloc: Global,
marker: PhantomData,
}
}
@@ -412,7 +837,9 @@ impl<T> RawTable<T, Global> {
}
}
-impl<T, A: Allocator + Clone> RawTable<T, A> {
+impl<T, A: Allocator> RawTable<T, A> {
+ const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
+
/// Creates a new empty hash table without allocating any memory, using the
/// given allocator.
///
@@ -420,9 +847,10 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// leave the data pointer dangling since that bucket is never written to
/// due to our load factor forcing us to always have at least 1 free bucket.
#[inline]
- pub fn new_in(alloc: A) -> Self {
+ pub const fn new_in(alloc: A) -> Self {
Self {
- table: RawTableInner::new_in(alloc),
+ table: RawTableInner::NEW,
+ alloc,
marker: PhantomData,
}
}
@@ -440,73 +868,97 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
Ok(Self {
table: RawTableInner::new_uninitialized(
- alloc,
- TableLayout::new::<T>(),
+ &alloc,
+ Self::TABLE_LAYOUT,
buckets,
fallibility,
)?,
+ alloc,
marker: PhantomData,
})
}
- /// Attempts to allocate a new hash table with at least enough capacity
- /// for inserting the given number of elements without reallocating.
- fn fallible_with_capacity(
- alloc: A,
- capacity: usize,
- fallibility: Fallibility,
- ) -> Result<Self, TryReserveError> {
+ /// Attempts to allocate a new hash table using the given allocator, with at least enough
+ /// capacity for inserting the given number of elements without reallocating.
+ #[cfg(feature = "raw")]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Ok(Self {
table: RawTableInner::fallible_with_capacity(
- alloc,
- TableLayout::new::<T>(),
+ &alloc,
+ Self::TABLE_LAYOUT,
capacity,
- fallibility,
+ Fallibility::Fallible,
)?,
+ alloc,
marker: PhantomData,
})
}
- /// Attempts to allocate a new hash table using the given allocator, with at least enough
- /// capacity for inserting the given number of elements without reallocating.
- #[cfg(feature = "raw")]
- pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
- Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
- }
-
/// Allocates a new hash table using the given allocator, with at least enough capacity for
/// inserting the given number of elements without reallocating.
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
- // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
- Ok(capacity) => capacity,
- Err(_) => unsafe { hint::unreachable_unchecked() },
+ Self {
+ table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
+ alloc,
+ marker: PhantomData,
}
}
/// Returns a reference to the underlying allocator.
#[inline]
pub fn allocator(&self) -> &A {
- &self.table.alloc
+ &self.alloc
}
- /// Deallocates the table without dropping any entries.
- #[cfg_attr(feature = "inline-more", inline)]
- unsafe fn free_buckets(&mut self) {
- self.table.free_buckets(TableLayout::new::<T>());
+ /// Returns a pointer to one past the last `data` element in the table, as viewed from
+ /// the start point of the allocation.
+ ///
+ /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline]
+ pub fn data_end(&self) -> NonNull<T> {
+ // `self.table.ctrl.cast()` returns pointer that
+ // points here (to the end of `T0`)
+ // ∨
+ // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
+ // \________ ________/
+ // \/
+ // `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`.
+ // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
+ // with loading `Group` bytes from the heap works properly, even if the result
+ // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
+ // `RawTableInner::set_ctrl` function.
+ //
+ // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ self.table.ctrl.cast()
}
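A minimal sketch (not part of the diff) of the identity mentioned in the P.S. above: masking with `buckets - 1` equals reducing the hash modulo the bucket count, because the bucket count is always a power of two:

    fn main() {
        let buckets: usize = 16; // RawTable always keeps a power-of-two bucket count
        let bucket_mask = buckets - 1;
        for hash in [0usize, 7, 16, 12345, usize::MAX] {
            assert_eq!(hash & bucket_mask, hash % buckets);
        }
    }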
- /// Returns pointer to one past last element of data table.
+ /// Returns pointer to start of data table.
#[inline]
- pub unsafe fn data_end(&self) -> NonNull<T> {
- NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
+ #[cfg(any(feature = "raw", feature = "nightly"))]
+ pub unsafe fn data_start(&self) -> NonNull<T> {
+ NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
}
- /// Returns pointer to start of data table.
+ /// Returns information about the memory allocated by the table.
+ ///
+ /// `RawTable` allocates a single memory block to store both data and metadata.
+ /// This function returns the allocation size and alignment, and the beginning of the area.
+ /// These are the arguments which will be passed to `dealloc` when the table is dropped.
+ ///
+ /// This function might be useful for memory profiling.
#[inline]
- #[cfg(feature = "nightly")]
- pub unsafe fn data_start(&self) -> *mut T {
- self.data_end().as_ptr().wrapping_sub(self.buckets())
+ #[cfg(feature = "raw")]
+ pub fn allocation_info(&self) -> (NonNull<u8>, Layout) {
+ // SAFETY: We use the same `table_layout` that was used to allocate
+ // this table.
+ unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) }
}
/// Returns the index of a bucket from a `Bucket`.
@@ -516,8 +968,55 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
/// Returns a pointer to an element in the table.
+ ///
+ /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
+ /// following safety rules:
+ ///
+ /// * The table must already be allocated;
+ ///
+ /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
+ /// function, i.e. `(index + 1) <= self.buckets()`.
+ ///
+ /// It is safe to call this function with index of zero (`index == 0`) on a table that has
+ /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
+ /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
+ /// `(index + 1) <= self.buckets()`.
+ ///
+ /// [`RawTable::buckets`]: RawTable::buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
+ // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
+ // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
+ // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
+ //
+ // `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
+ // part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
+ // |
+ // | `base = self.data_end()` points here
+ // | (to the start of CT0 or to the end of T0)
+ // v v
+ // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
+ // ^ \__________ __________/
+ // `table.bucket(3)` returns a pointer that points \/
+ // here in the `data` part of the `RawTable` (to additional control bytes
+ // the end of T3) `m = Group::WIDTH - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`;
+ // CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
+ // the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
+ // is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
+ //
+ // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
debug_assert_ne!(self.table.bucket_mask, 0);
debug_assert!(index < self.buckets());
Bucket::from_base_index(self.data_end(), index)
@@ -525,8 +1024,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Erases an element from the table without dropping it.
#[cfg_attr(feature = "inline-more", inline)]
- #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
- pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
+ unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
let index = self.bucket_index(item);
self.table.erase(index);
}
@@ -534,7 +1032,6 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Erases an element from the table, dropping it in place.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
- #[allow(deprecated)]
pub unsafe fn erase(&mut self, item: Bucket<T>) {
// Erase the element from the table first since drop might panic.
self.erase_no_drop(&item);
@@ -558,12 +1055,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
/// Removes an element from the table, returning it.
+ ///
+ /// This also returns an `InsertSlot` pointing to the newly free bucket.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
- #[allow(deprecated)]
- pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
+ pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
self.erase_no_drop(&item);
- item.read()
+ (
+ item.read(),
+ InsertSlot {
+ index: self.bucket_index(&item),
+ },
+ )
}
/// Finds and removes an element from the table, returning it.
@@ -571,7 +1074,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
// Avoid `Option::map` because it bloats LLVM IR.
match self.find(hash, eq) {
- Some(bucket) => Some(unsafe { self.remove(bucket) }),
+ Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
None => None,
}
}
@@ -585,18 +1088,17 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Removes all elements from the table without freeing the backing memory.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear(&mut self) {
+ if self.is_empty() {
+ // Special case empty table to avoid surprising O(capacity) time.
+ return;
+ }
// Ensure that the table is reset even if one of the drops panic
let mut self_ = guard(self, |self_| self_.clear_no_drop());
unsafe {
- self_.drop_elements();
- }
- }
-
- unsafe fn drop_elements(&mut self) {
- if mem::needs_drop::<T>() && !self.is_empty() {
- for item in self.iter() {
- item.drop();
- }
+ // SAFETY: ScopeGuard sets to zero the `items` field of the table
+ // even in case of panic during the dropping of the elements so
+ // that there will be no double drop of the elements.
+ self_.table.drop_elements::<T>();
}
}
@@ -607,7 +1109,16 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
// space for.
let min_size = usize::max(self.table.items, min_size);
if min_size == 0 {
- *self = Self::new_in(self.table.alloc.clone());
+ let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+ // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If any elements' drop function panics, then there will only be a memory leak,
+ // because we have replaced the inner table with a new one.
+ old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
+ }
return;
}
@@ -624,14 +1135,33 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
if min_buckets < self.buckets() {
// Fast path if the table is empty
if self.table.items == 0 {
- *self = Self::with_capacity_in(min_size, self.table.alloc.clone());
+ let new_inner =
+ RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
+ let mut old_inner = mem::replace(&mut self.table, new_inner);
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+ // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If any elements' drop function panics, then there will only be a memory leak,
+ // because we have replaced the inner table with a new one.
+ old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
+ }
} else {
// Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- if self
- .resize(min_size, hasher, Fallibility::Infallible)
- .is_err()
- {
- unsafe { hint::unreachable_unchecked() }
+ unsafe {
+ // SAFETY:
+ // 1. We know for sure that `min_size >= self.table.items`.
+ // 2. The [`RawTableInner`] must already have properly initialized control bytes since
+ // we will never expose RawTable::new_uninitialized in a public API.
+ if self
+ .resize(min_size, hasher, Fallibility::Infallible)
+ .is_err()
+ {
+ // SAFETY: The result of calling the `resize` function cannot be an error
+ // because `fallibility == Fallibility::Infallible`.
+ hint::unreachable_unchecked()
+ }
}
}
}
@@ -641,13 +1171,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
- if additional > self.table.growth_left {
+ if unlikely(additional > self.table.growth_left) {
// Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- if self
- .reserve_rehash(additional, hasher, Fallibility::Infallible)
- .is_err()
- {
- unsafe { hint::unreachable_unchecked() }
+ unsafe {
+ // SAFETY: The [`RawTableInner`] must already have properly initialized control
+ // bytes since we will never expose RawTable::new_uninitialized in a public API.
+ if self
+ .reserve_rehash(additional, hasher, Fallibility::Infallible)
+ .is_err()
+ {
+ // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
+ hint::unreachable_unchecked()
+ }
}
}
}
@@ -661,28 +1196,45 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
hasher: impl Fn(&T) -> u64,
) -> Result<(), TryReserveError> {
if additional > self.table.growth_left {
- self.reserve_rehash(additional, hasher, Fallibility::Fallible)
+ // SAFETY: The [`RawTableInner`] must already have properly initialized control
+ // bytes since we will never expose RawTable::new_uninitialized in a public API.
+ unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
} else {
Ok(())
}
}
/// Out-of-line slow path for `reserve` and `try_reserve`.
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes,
+ /// otherwise calling this function results in [`undefined behavior`]
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[cold]
#[inline(never)]
- fn reserve_rehash(
+ unsafe fn reserve_rehash(
&mut self,
additional: usize,
hasher: impl Fn(&T) -> u64,
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
unsafe {
+ // SAFETY:
+ // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
+ // [`TableLayout`] that were used to allocate this table.
+ // 2. The `drop` function is the actual drop function of the elements stored in
+ // the table.
+ // 3. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
self.table.reserve_rehash_inner(
+ &self.alloc,
additional,
&|table, index| hasher(table.bucket::<T>(index).as_ref()),
fallibility,
- TableLayout::new::<T>(),
- if mem::needs_drop::<T>() {
+ Self::TABLE_LAYOUT,
+ if T::NEEDS_DROP {
Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
} else {
None
@@ -693,20 +1245,50 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Allocates a new table of a different size and moves the contents of the
/// current table into it.
- fn resize(
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes,
+ /// otherwise calling this function results in [`undefined behavior`]
+ ///
+ /// The caller of this function must ensure that `capacity >= self.table.items`
+ /// otherwise:
+ ///
+ /// * If `self.table.items != 0`, calling of this function with `capacity`
+ /// equal to 0 (`capacity == 0`) results in [`undefined behavior`].
+ ///
+ /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
+ /// `self.table.items > capacity_to_buckets(capacity)`
+ /// calling this function results in [`undefined behavior`].
+ ///
+ /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
+    ///   `self.table.items > capacity_to_buckets(capacity)`,
+    ///   calling this function will never return (it will go into
+    ///   an infinite loop).
+ ///
+ /// See [`RawTableInner::find_insert_slot`] for more information.
+ ///
+ /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ unsafe fn resize(
&mut self,
capacity: usize,
hasher: impl Fn(&T) -> u64,
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
- unsafe {
- self.table.resize_inner(
- capacity,
- &|table, index| hasher(table.bucket::<T>(index).as_ref()),
- fallibility,
- TableLayout::new::<T>(),
- )
- }
+ // SAFETY:
+ // 1. The caller of this function guarantees that `capacity >= self.table.items`.
+ // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
+ // [`TableLayout`] that were used to allocate this table.
+ // 3. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
+ self.table.resize_inner(
+ &self.alloc,
+ capacity,
+ &|table, index| hasher(table.bucket::<T>(index).as_ref()),
+ fallibility,
+ Self::TABLE_LAYOUT,
+ )
}
/// Inserts a new element into the table, and returns its raw bucket.
@@ -715,22 +1297,27 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
unsafe {
- let mut index = self.table.find_insert_slot(hash);
+ // SAFETY:
+ // 1. The [`RawTableInner`] must already have properly initialized control bytes since
+ // we will never expose `RawTable::new_uninitialized` in a public API.
+ //
+ // 2. We reserve additional space (if necessary) right after calling this function.
+ let mut slot = self.table.find_insert_slot(hash);
- // We can avoid growing the table once we have reached our load
- // factor if we are replacing a tombstone. This works since the
- // number of EMPTY slots does not change in this case.
- let old_ctrl = *self.table.ctrl(index);
+ // We can avoid growing the table once we have reached our load factor if we are replacing
+ // a tombstone. This works since the number of EMPTY slots does not change in this case.
+ //
+ // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index
+ // in the range `0..=self.buckets()`.
+ let old_ctrl = *self.table.ctrl(slot.index);
if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
self.reserve(1, hasher);
- index = self.table.find_insert_slot(hash);
+ // SAFETY: We know for sure that `RawTableInner` has control bytes
+ // initialized and that there is extra space in the table.
+ slot = self.table.find_insert_slot(hash);
}
- self.table.record_item_insert_at(index, old_ctrl, hash);
-
- let bucket = self.bucket(index);
- bucket.write(value);
- bucket
+ self.insert_in_slot(hash, slot, value)
}
}
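The tombstone check above is worth spelling out: probing only terminates on `EMPTY` slots, so overwriting a `DELETED` slot does not consume the growth budget. A toy model of that condition follows; the function name and boolean simplification are illustrative.

```rust
// needs_grow mirrors `growth_left == 0 && special_is_empty(old_ctrl)`:
// a resize is only needed when we are out of budget AND the target slot is
// EMPTY (i.e. we are not merely recycling a DELETED tombstone).
fn needs_grow(growth_left: usize, target_is_tombstone: bool) -> bool {
    growth_left == 0 && !target_is_tombstone
}

fn main() {
    // Out of budget, but the slot is a tombstone: insert in place, no resize.
    assert!(!needs_grow(0, true));
    // Out of budget and the slot is EMPTY: reserve first.
    assert!(needs_grow(0, false));
    // Budget left: never resize here.
    assert!(!needs_grow(3, false));
}
```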
@@ -796,9 +1383,9 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
{
let index = self.bucket_index(&bucket);
let old_ctrl = *self.table.ctrl(index);
- debug_assert!(is_full(old_ctrl));
+ debug_assert!(self.is_bucket_full(index));
let old_growth_left = self.table.growth_left;
- let item = self.remove(bucket);
+ let item = self.remove(bucket).0;
if let Some(new_item) = f(item) {
self.table.growth_left = old_growth_left;
self.table.set_ctrl(index, old_ctrl);
@@ -810,17 +1397,78 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}
+ /// Searches for an element in the table. If the element is not found,
+ /// returns `Err` with the position of a slot where an element with the
+ /// same hash could be inserted.
+ ///
+ /// This function may resize the table if additional space is required for
+ /// inserting an element.
+ #[inline]
+ pub fn find_or_find_insert_slot(
+ &mut self,
+ hash: u64,
+ mut eq: impl FnMut(&T) -> bool,
+ hasher: impl Fn(&T) -> u64,
+ ) -> Result<Bucket<T>, InsertSlot> {
+ self.reserve(1, hasher);
+
+ unsafe {
+ // SAFETY:
+ // 1. We know for sure that there is at least one empty `bucket` in the table.
+ // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
+ // never expose `RawTable::new_uninitialized` in a public API.
+ // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
+ // which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
+ // the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
+ match self
+ .table
+ .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
+ {
+ // SAFETY: See explanation above.
+ Ok(index) => Ok(self.bucket(index)),
+ Err(slot) => Err(slot),
+ }
+ }
+ }
+
+ /// Inserts a new element into the table in the given slot, and returns its
+ /// raw bucket.
+ ///
+ /// # Safety
+ ///
+ /// `slot` must point to a slot previously returned by
+ /// `find_or_find_insert_slot`, and no mutation of the table must have
+ /// occurred since that call.
+ #[inline]
+ pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
+ let old_ctrl = *self.table.ctrl(slot.index);
+ self.table.record_item_insert_at(slot.index, old_ctrl, hash);
+
+ let bucket = self.bucket(slot.index);
+ bucket.write(value);
+ bucket
+ }
+
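Together, `find_or_find_insert_slot` and `insert_in_slot` support an entry-like upsert without hashing the key twice. A usage sketch, assuming the crate is built with the `raw` feature; the `make_hash`/`upsert` helpers and the key/value types are illustrative, not part of the crate.

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

use hashbrown::raw::RawTable;

fn make_hash<S: BuildHasher, K: Hash>(state: &S, key: &K) -> u64 {
    let mut hasher = state.build_hasher();
    key.hash(&mut hasher);
    hasher.finish()
}

fn upsert(table: &mut RawTable<(u32, u32)>, state: &RandomState, key: u32, value: u32) {
    let hash = make_hash(state, &key);
    match table.find_or_find_insert_slot(
        hash,
        |&(k, _)| k == key,
        |&(k, _)| make_hash(state, &k),
    ) {
        // Key already present: overwrite the value through the bucket.
        Ok(bucket) => unsafe { bucket.as_mut().1 = value },
        // Key absent: the slot is still valid because the table has not been
        // mutated between the two calls.
        Err(slot) => unsafe {
            table.insert_in_slot(hash, slot, (key, value));
        },
    }
}

fn main() {
    let state = RandomState::new();
    let mut table: RawTable<(u32, u32)> = RawTable::new();
    upsert(&mut table, &state, 7, 1);
    upsert(&mut table, &state, 7, 2); // overwrites in place
    assert_eq!(table.len(), 1);
}
```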
/// Searches for an element in the table.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
- let result = self.table.find_inner(hash, &mut |index| unsafe {
- eq(self.bucket(index).as_ref())
- });
-
- // Avoid `Option::map` because it bloats LLVM IR.
- match result {
- Some(index) => Some(unsafe { self.bucket(index) }),
- None => None,
+ unsafe {
+ // SAFETY:
+ // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
+ // will never expose `RawTable::new_uninitialized` in a public API.
+            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
+ // the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
+ // is safe.
+ let result = self
+ .table
+ .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
+
+ // Avoid `Option::map` because it bloats LLVM IR.
+ match result {
+ // SAFETY: See explanation above.
+ Some(index) => Some(self.bucket(index)),
+ None => None,
+ }
}
}
@@ -928,17 +1576,27 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
self.table.bucket_mask + 1
}
+ /// Checks whether the bucket at `index` is full.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure `index` is less than the number of buckets.
+ #[inline]
+ pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
+ self.table.is_bucket_full(index)
+ }
+
/// Returns an iterator over every element in the table. It is up to
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
/// struct, we have to make the `iter` method unsafe.
#[inline]
pub unsafe fn iter(&self) -> RawIter<T> {
- let data = Bucket::from_base_index(self.data_end(), 0);
- RawIter {
- iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
- items: self.table.items,
- }
+ // SAFETY:
+ // 1. The caller must uphold the safety contract for `iter` method.
+ // 2. The [`RawTableInner`] must already have properly initialized control bytes since
+ // we will never expose RawTable::new_uninitialized in a public API.
+ self.table.iter()
}
/// Returns an iterator over occupied buckets that could match a given hash.
@@ -952,7 +1610,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
+ pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
RawIterHash::new(self, hash)
}
@@ -978,8 +1636,8 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
debug_assert_eq!(iter.len(), self.len());
RawDrain {
iter,
- table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))),
- orig_table: NonNull::from(self),
+ table: mem::replace(&mut self.table, RawTableInner::NEW),
+ orig_table: NonNull::from(&mut self.table),
marker: PhantomData,
}
}
@@ -993,31 +1651,31 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
debug_assert_eq!(iter.len(), self.len());
- let alloc = self.table.alloc.clone();
let allocation = self.into_allocation();
RawIntoIter {
iter,
allocation,
marker: PhantomData,
- alloc,
}
}
/// Converts the table into a raw allocation. The contents of the table
/// should be dropped using a `RawIter` before freeing the allocation.
#[cfg_attr(feature = "inline-more", inline)]
- pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
+ pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
let alloc = if self.table.is_empty_singleton() {
None
} else {
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
- let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
- Some(lco) => lco,
- None => unsafe { hint::unreachable_unchecked() },
- };
+ let (layout, ctrl_offset) =
+ match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
+ Some(lco) => lco,
+ None => unsafe { hint::unreachable_unchecked() },
+ };
Some((
unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
layout,
+ unsafe { ptr::read(&self.alloc) },
))
};
mem::forget(self);
@@ -1025,41 +1683,62 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}
-unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A>
+unsafe impl<T, A: Allocator> Send for RawTable<T, A>
where
T: Send,
A: Send,
{
}
-unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A>
+unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
where
T: Sync,
A: Sync,
{
}
-impl<A> RawTableInner<A> {
+impl RawTableInner {
+ const NEW: Self = RawTableInner::new();
+
+ /// Creates a new empty hash table without allocating any memory.
+ ///
+ /// In effect this returns a table with exactly 1 bucket. However we can
+ /// leave the data pointer dangling since that bucket is never accessed
+ /// due to our load factor forcing us to always have at least 1 free bucket.
#[inline]
- const fn new_in(alloc: A) -> Self {
+ const fn new() -> Self {
Self {
// Be careful to cast the entire slice to a raw pointer.
ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
bucket_mask: 0,
items: 0,
growth_left: 0,
- alloc,
}
}
}
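The constructor above never allocates: every empty table points its `ctrl` at one shared, statically allocated group of `EMPTY` bytes. A standalone sketch of the idea; the constants, `GROUP_WIDTH`, and the helper name are illustrative rather than the crate's exact definitions.

```rust
use std::ptr::NonNull;

const EMPTY: u8 = 0b1111_1111;
const GROUP_WIDTH: usize = 16; // illustrative; the real width is target-dependent

// One shared run of EMPTY control bytes serving every unallocated table.
static STATIC_EMPTY: [u8; GROUP_WIDTH] = [EMPTY; GROUP_WIDTH];

fn empty_ctrl_ptr() -> NonNull<u8> {
    // The cast to *mut is tolerable here only because an empty table never
    // writes through this pointer: growth_left == 0 forces an allocation first.
    NonNull::new(STATIC_EMPTY.as_ptr() as *mut u8).expect("static is non-null")
}

fn main() {
    let ctrl = empty_ctrl_ptr();
    // Any probe of an empty table immediately sees a fully EMPTY group.
    let first = unsafe { *ctrl.as_ptr() };
    assert_eq!(first, EMPTY);
}
```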
-impl<A: Allocator + Clone> RawTableInner<A> {
+impl RawTableInner {
+ /// Allocates a new [`RawTableInner`] with the given number of buckets.
+ /// The control bytes and buckets are left uninitialized.
+ ///
+ /// # Safety
+ ///
+    /// The caller of this function must ensure that `buckets` is a power of two,
+    /// and must also initialize all of the `self.bucket_mask + 1 +
+    /// Group::WIDTH` control bytes with the [`EMPTY`] byte.
+ ///
+ /// See also [`Allocator`] API for other safety concerns.
+ ///
+ /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
#[cfg_attr(feature = "inline-more", inline)]
- unsafe fn new_uninitialized(
- alloc: A,
+ unsafe fn new_uninitialized<A>(
+ alloc: &A,
table_layout: TableLayout,
buckets: usize,
fallibility: Fallibility,
- ) -> Result<Self, TryReserveError> {
+ ) -> Result<Self, TryReserveError>
+ where
+ A: Allocator,
+ {
debug_assert!(buckets.is_power_of_two());
// Avoid `Option::ok_or_else` because it bloats LLVM IR.
@@ -1068,45 +1747,48 @@ impl<A: Allocator + Clone> RawTableInner<A> {
None => return Err(fallibility.capacity_overflow()),
};
- // We need an additional check to ensure that the allocation doesn't
- // exceed `isize::MAX`. We can skip this check on 64-bit systems since
- // such allocations will never succeed anyways.
- //
- // This mirrors what Vec does in the standard library.
- if mem::size_of::<usize>() < 8 && layout.size() > isize::MAX as usize {
- return Err(fallibility.capacity_overflow());
- }
-
- let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
+ let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
Ok(block) => block.cast(),
Err(_) => return Err(fallibility.alloc_err(layout)),
};
+        // SAFETY: null pointer will be caught in the check above
let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
Ok(Self {
ctrl,
bucket_mask: buckets - 1,
items: 0,
growth_left: bucket_mask_to_capacity(buckets - 1),
- alloc,
})
}
+ /// Attempts to allocate a new [`RawTableInner`] with at least enough
+ /// capacity for inserting the given number of elements without reallocating.
+ ///
+ /// All the control bytes are initialized with the [`EMPTY`] bytes.
#[inline]
- fn fallible_with_capacity(
- alloc: A,
+ fn fallible_with_capacity<A>(
+ alloc: &A,
table_layout: TableLayout,
capacity: usize,
fallibility: Fallibility,
- ) -> Result<Self, TryReserveError> {
+ ) -> Result<Self, TryReserveError>
+ where
+ A: Allocator,
+ {
if capacity == 0 {
- Ok(Self::new_in(alloc))
+ Ok(Self::NEW)
} else {
+ // SAFETY: We checked that we could successfully allocate the new table, and then
+ // initialized all control bytes with the constant `EMPTY` byte.
unsafe {
let buckets =
capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
+ // SAFETY: We checked that the table is allocated and therefore the table already has
+ // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
+ // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
Ok(result)
@@ -1114,66 +1796,397 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
- /// Searches for an empty or deleted bucket which is suitable for inserting
- /// a new element and sets the hash for that slot.
+ /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
+ /// the given number of elements without reallocating.
+ ///
+    /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the
+    /// program in case of an allocation error. Use [`fallible_with_capacity`] instead
+    /// if you want to handle memory allocation failures.
+ ///
+ /// All the control bytes are initialized with the [`EMPTY`] bytes.
+ ///
+ /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
+ /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
+ fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
+ where
+ A: Allocator,
+ {
+ // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
+ match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
+ Ok(table_inner) => table_inner,
+ // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
+ Err(_) => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
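The capacity math behind `capacity_to_buckets`/`bucket_mask_to_capacity` used by these constructors keeps roughly one eighth of the buckets free so that probing always terminates. A rough, self-contained approximation of that rule; the function name and the small-table cutoff are illustrative, not the crate's exact code.

```rust
fn usable_capacity(buckets: usize) -> usize {
    assert!(buckets.is_power_of_two());
    if buckets < 8 {
        // Very small tables just keep a single slot free.
        buckets - 1
    } else {
        // Larger tables target a 7/8 load factor.
        buckets / 8 * 7
    }
}

fn main() {
    assert_eq!(usable_capacity(4), 3);
    assert_eq!(usable_capacity(8), 7);
    assert_eq!(usable_capacity(16), 14);
    assert_eq!(usable_capacity(64), 56);
}
```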
+ /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
+ ///
+ /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
+ /// bytes outside the range of the table are filled with [`EMPTY`] entries. These will unfortunately
+    /// trigger a match in the [`RawTableInner::find_insert_slot_in_group`] function. This is because
+ /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
+ /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
+ /// We detect this situation here and perform a second scan starting at the beginning of the table.
+ /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
+ /// trailing control bytes (containing [`EMPTY`] bytes).
+ ///
+ /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
+ /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
+ /// `Safety`).
+ ///
+ /// # Warning
+ ///
+ /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
+ /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
+ /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
+ /// index will cause immediate [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
+ /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work
+ /// of this crate, the following rules are necessary and sufficient:
///
- /// There must be at least 1 empty bucket in the table.
+ /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
+ /// function results in [`undefined behavior`].
+ ///
+ /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
+ /// (after the `find_insert_slot_in_group` function, but before insertion into the table).
+ ///
+ /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
+ /// (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
+ ///
+ /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
+ /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
+ /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
+ ///
+ /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
+ /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
- let index = self.find_insert_slot(hash);
+ unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
+ // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
+ if unlikely(self.is_bucket_full(index)) {
+ debug_assert!(self.bucket_mask < Group::WIDTH);
+ // SAFETY:
+ //
+ // * Since the caller of this function ensures that the control bytes are properly
+ // initialized and `ptr = self.ctrl(0)` points to the start of the array of control
+ // bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
+ // and points to the properly initialized control bytes (see also
+ // `TableLayout::calculate_layout_for` and `ptr::read`);
+ //
+ // * Because the caller of this function ensures that the index was provided by the
+        //   `self.find_insert_slot_in_group()` function, so for tables larger than the
+ // group width (self.buckets() >= Group::WIDTH), we will never end up in the given
+ // branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
+ // cannot return a full bucket index. For tables smaller than the group width, calling
+ // the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
+ // the range of the table are filled with EMPTY bytes (and we know for sure that there
+ // is at least one FULL bucket), so this second scan either finds an empty slot (due to
+ // the load factor) or hits the trailing control bytes (containing EMPTY).
+ index = Group::load_aligned(self.ctrl(0))
+ .match_empty_or_deleted()
+ .lowest_set_bit()
+ .unwrap_unchecked();
+ }
+ InsertSlot { index }
+ }
+
+ /// Finds the position to insert something in a group.
+ ///
+ /// **This may have false positives and must be fixed up with `fix_insert_slot`
+ /// before it's used.**
+ ///
+ /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
+ /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
+ #[inline]
+ fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
+ let bit = group.match_empty_or_deleted().lowest_set_bit();
+
+ if likely(bit.is_some()) {
+ // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
+ } else {
+ None
+ }
+ }
+
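The masking step above relies on the standard power-of-two identity: `x & (buckets - 1)` equals `x % buckets`. A minimal check, with arbitrary example values:

```rust
fn main() {
    let buckets = 16usize; // the table's bucket count is always a power of two
    let bucket_mask = buckets - 1;
    for x in [0usize, 5, 15, 16, 23, 100, 255] {
        assert_eq!(x & bucket_mask, x % buckets);
    }
}
```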
+ /// Searches for an element in the table, or a potential slot where that element could
+ /// be inserted (an empty or deleted [`Bucket`] index).
+ ///
+ /// This uses dynamic dispatch to reduce the amount of code generated, but that is
+ /// eliminated by LLVM optimizations.
+ ///
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
+ /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
+ /// will never return (will go into an infinite loop) for tables larger than the group
+ /// width, or return an index outside of the table indices range if the table is less
+ /// than the group width.
+ ///
+ /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
+ /// function with only `FULL` buckets' indices and return the `index` of the found
+ /// element (as `Ok(index)`). If the element is not found and there is at least 1
+ /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
+    /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case,
+ /// if this function returns [`InsertSlot`], it will contain an index in the range
+ /// `0..=self.buckets()`.
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
+ /// this function results in [`undefined behavior`].
+ ///
+ /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is
+ /// less than the group width and if there was not at least one empty or deleted bucket in
+ /// the table will cause immediate [`undefined behavior`]. This is because in this case the
+    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+ /// control bytes outside the table range.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline]
+ unsafe fn find_or_find_insert_slot_inner(
+ &self,
+ hash: u64,
+ eq: &mut dyn FnMut(usize) -> bool,
+ ) -> Result<usize, InsertSlot> {
+ let mut insert_slot = None;
+
+ let h2_hash = h2(hash);
+ let mut probe_seq = self.probe_seq(hash);
+
+ loop {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+            //   of the table due to masking with `self.bucket_mask` and also because the number of
+ // buckets is a power of two (see `self.probe_seq` function).
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+ // bytes, which is safe (see RawTableInner::new).
+ let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
+
+ for bit in group.match_byte(h2_hash) {
+ let index = (probe_seq.pos + bit) & self.bucket_mask;
+
+ if likely(eq(index)) {
+ return Ok(index);
+ }
+ }
+
+ // We didn't find the element we were looking for in the group, try to get an
+ // insertion slot from the group if we don't have one yet.
+ if likely(insert_slot.is_none()) {
+ insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
+ }
+
+ // Only stop the search if the group contains at least one empty element.
+ // Otherwise, the element that we are looking for might be in a following group.
+ if likely(group.match_empty().any_bit_set()) {
+            // We must have found an insert slot by now, since the current group contains at
+ // least one. For tables smaller than the group width, there will still be an
+ // empty element in the current (and only) group due to the load factor.
+ unsafe {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
+ return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
+ }
+ }
+
+ probe_seq.move_next(self.bucket_mask);
+ }
+ }
+
+ /// Searches for an empty or deleted bucket which is suitable for inserting a new
+ /// element and sets the hash for that slot. Returns an index of that slot and the
+ /// old control byte stored in the found index.
+ ///
+ /// This function does not check if the given element exists in the table. Also,
+ /// this function does not check if there is enough space in the table to insert
+    /// a new element. The caller of the function must ensure that the table has at
+ /// least 1 empty or deleted `bucket`, otherwise this function will never return
+ /// (will go into an infinite loop) for tables larger than the group width, or
+ /// return an index outside of the table indices range if the table is less than
+ /// the group width.
+ ///
+ /// If there is at least 1 empty or deleted `bucket` in the table, the function is
+ /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
+ /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for the
+ /// [`RawTableInner::set_ctrl_h2`] and [`RawTableInner::find_insert_slot`] methods.
+    /// Thus, in order to uphold the safety contracts for those methods, as well as for
+ /// the correct logic of the work of this crate, you must observe the following rules
+ /// when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated and has properly initialized
+ /// control bytes otherwise calling this function results in [`undefined behavior`].
+ ///
+ /// * The caller of this function must ensure that the "data" parts of the table
+ /// will have an entry in the returned index (matching the given hash) right
+ /// after calling this function.
+ ///
+ /// Attempt to write data at the `index` returned by this function when the table is
+ /// less than the group width and if there was not at least one empty or deleted bucket in
+ /// the table will cause immediate [`undefined behavior`]. This is because in this case the
+    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+ /// control bytes outside the table range.
+ ///
+ /// The caller must independently increase the `items` field of the table, and also,
+ /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left`
+ /// field, and do not change it if the old control byte was [`DELETED`].
+ ///
+    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
+ /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
+ /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
+ #[inline]
+ unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) {
+ // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
+ let index: usize = self.find_insert_slot(hash).index;
+ // SAFETY:
+ // 1. The `find_insert_slot` function either returns an `index` less than or
+ // equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
+ // returns if it cannot find an empty or deleted slot.
+ // 2. The caller of this function guarantees that the table has already been
+ // allocated
let old_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
(index, old_ctrl)
}
/// Searches for an empty or deleted bucket which is suitable for inserting
- /// a new element.
+ /// a new element, returning the `index` for the new [`Bucket`].
+ ///
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
+ /// will never return (will go into an infinite loop) for tables larger than the group
+ /// width, or return an index outside of the table indices range if the table is less
+ /// than the group width.
+ ///
+ /// If there is at least 1 empty or deleted `bucket` in the table, the function is
+ /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
+ /// but in any case, if this function returns [`InsertSlot`], it will contain an index
+ /// in the range `0..=self.buckets()`.
///
- /// There must be at least 1 empty bucket in the table.
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
+ /// this function results in [`undefined behavior`].
+ ///
+ /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is
+ /// less than the group width and if there was not at least one empty or deleted bucket in
+ /// the table will cause immediate [`undefined behavior`]. This is because in this case the
+    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+ /// control bytes outside the table range.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- fn find_insert_slot(&self, hash: u64) -> usize {
+ unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
let mut probe_seq = self.probe_seq(hash);
loop {
- unsafe {
- let group = Group::load(self.ctrl(probe_seq.pos));
- if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
- let result = (probe_seq.pos + bit) & self.bucket_mask;
-
- // In tables smaller than the group width, trailing control
- // bytes outside the range of the table are filled with
- // EMPTY entries. These will unfortunately trigger a
- // match, but once masked may point to a full bucket that
- // is already occupied. We detect this situation here and
- // perform a second scan starting at the beginning of the
- // table. This second scan is guaranteed to find an empty
- // slot (due to the load factor) before hitting the trailing
- // control bytes (containing EMPTY).
- if unlikely(is_full(*self.ctrl(result))) {
- debug_assert!(self.bucket_mask < Group::WIDTH);
- debug_assert_ne!(probe_seq.pos, 0);
- return Group::load_aligned(self.ctrl(0))
- .match_empty_or_deleted()
- .lowest_set_bit_nonzero();
- }
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+            //   of the table due to masking with `self.bucket_mask` and also because the number of
+ // buckets is a power of two (see `self.probe_seq` function).
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+ // bytes, which is safe (see RawTableInner::new).
+ let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
- return result;
+ let index = self.find_insert_slot_in_group(&group, &probe_seq);
+ if likely(index.is_some()) {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
+ unsafe {
+ return self.fix_insert_slot(index.unwrap_unchecked());
}
}
probe_seq.move_next(self.bucket_mask);
}
}
- /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
- /// code generated, but it is eliminated by LLVM optimizations.
- #[inline]
- fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
+ /// Searches for an element in a table, returning the `index` of the found element.
+ /// This uses dynamic dispatch to reduce the amount of code generated, but it is
+ /// eliminated by LLVM optimizations.
+ ///
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty `bucket`, otherwise, if the
+ /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
+ /// this function will also never return (will go into an infinite loop).
+ ///
+ /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
+ /// function with only `FULL` buckets' indices and return the `index` of the found
+ /// element as `Some(index)`, so the index will always be in the range
+ /// `0..self.buckets()`.
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
+ /// this function results in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline(always)]
+ unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
let h2_hash = h2(hash);
let mut probe_seq = self.probe_seq(hash);
loop {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+ // of the table due to masking with `self.bucket_mask`.
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+            //   bytes, which is safe (see RawTableInner::new).
let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
for bit in group.match_byte(h2_hash) {
+ // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index = (probe_seq.pos + bit) & self.bucket_mask;
if likely(eq(index)) {
@@ -1189,12 +2202,52 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
+ /// Prepares for rehashing data in place (that is, without allocating new memory).
+ /// Converts all full index `control bytes` to `DELETED` and all `DELETED` control
+ /// bytes to `EMPTY`, i.e. performs the following conversion:
+ ///
+ /// - `EMPTY` control bytes -> `EMPTY`;
+ /// - `DELETED` control bytes -> `EMPTY`;
+ /// - `FULL` control bytes -> `DELETED`.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
+ /// bytes when re-inserting them into their ideal position (which was impossible
+ /// to do during the first insert due to tombstones). If the caller does not do
+ /// this, then calling this function may result in a memory leak.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
+ /// calling this function results in [`undefined behavior`].
+ ///
+ /// Calling this function on a table that has not been allocated results in
+ /// [`undefined behavior`].
+ ///
+    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::mut_mut)]
#[inline]
unsafe fn prepare_rehash_in_place(&mut self) {
- // Bulk convert all full control bytes to DELETED, and all DELETED
- // control bytes to EMPTY. This effectively frees up all buckets
- // containing a DELETED entry.
+ // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
+ // This effectively frees up all buckets containing a DELETED entry.
+ //
+ // SAFETY:
+ // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
+ // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
+ // due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
+ // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
+ // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
+ // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
for i in (0..self.buckets()).step_by(Group::WIDTH) {
let group = Group::load_aligned(self.ctrl(i));
let group = group.convert_special_to_empty_and_full_to_deleted();
@@ -1203,15 +2256,245 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// Fix up the trailing control bytes. See the comments in set_ctrl
// for the handling of tables smaller than the group width.
- if self.buckets() < Group::WIDTH {
+ //
+ // SAFETY: The caller of this function guarantees that [`RawTableInner`]
+ // has already been allocated
+ if unlikely(self.buckets() < Group::WIDTH) {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
+ // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
+ // `Group::WIDTH` is safe
self.ctrl(0)
.copy_to(self.ctrl(Group::WIDTH), self.buckets());
} else {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
+ // control bytes,so copying `Group::WIDTH` bytes with offset equal
+ // to `self.buckets() == self.bucket_mask + 1` is safe
self.ctrl(0)
.copy_to(self.ctrl(self.buckets()), Group::WIDTH);
}
}
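The bulk conversion above can be stated per byte: anything with the high bit set (`EMPTY` or `DELETED`) becomes `EMPTY`, and any `FULL` byte (high bit clear, carrying a 7-bit `h2` value) becomes `DELETED`. Here is a scalar sketch of what `convert_special_to_empty_and_full_to_deleted` does group-wide; the byte constants follow the crate's convention, but the helper itself is illustrative.

```rust
const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

fn convert_one(ctrl: u8) -> u8 {
    if ctrl & 0x80 != 0 {
        EMPTY // EMPTY -> EMPTY, DELETED -> EMPTY
    } else {
        DELETED // FULL -> DELETED
    }
}

fn main() {
    assert_eq!(convert_one(EMPTY), EMPTY);
    assert_eq!(convert_one(DELETED), EMPTY);
    assert_eq!(convert_one(0b0101_1010), DELETED); // an arbitrary FULL byte
}
```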
+ /// Returns an iterator over every element in the table.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result
+ /// is [`undefined behavior`]:
+ ///
+ /// * The caller has to ensure that the `RawTableInner` outlives the
+ /// `RawIter`. Because we cannot make the `next` method unsafe on
+ /// the `RawIter` struct, we have to make the `iter` method unsafe.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+ /// The type `T` must be the actual type of the elements stored in the table,
+ /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline]
+ unsafe fn iter<T>(&self) -> RawIter<T> {
+ // SAFETY:
+ // 1. Since the caller of this function ensures that the control bytes
+ // are properly initialized and `self.data_end()` points to the start
+ // of the array of control bytes, therefore: `ctrl` is valid for reads,
+ // properly aligned to `Group::WIDTH` and points to the properly initialized
+ // control bytes.
+ // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
+ // equal to zero).
+ // 3. We pass the exact value of buckets of the table to the function.
+ //
+ // `ctrl` points here (to the start
+ // of the first control byte `CT0`)
+ // ∨
+ // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
+ // \________ ________/
+ // \/
+ // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`.
+ // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
+ // with loading `Group` bytes from the heap works properly, even if the result
+ // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
+ // `RawTableInner::set_ctrl` function.
+ //
+ // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ let data = Bucket::from_base_index(self.data_end(), 0);
+ RawIter {
+ // SAFETY: See explanation above
+ iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
+ items: self.items,
+ }
+ }
+
+ /// Executes the destructors (if any) of the values stored in the table.
+ ///
+ /// # Note
+ ///
+ /// This function does not erase the control bytes of the table and does
+ /// not make any changes to the `items` or `growth_left` fields of the
+ /// table. If necessary, the caller of this function must manually set
+ /// up these table fields, for example using the [`clear_no_drop`] function.
+ ///
+    /// Be careful when calling this function, because the elements' drop
+    /// function can panic, and this can leave the table in an inconsistent
+    /// state.
+ ///
+ /// # Safety
+ ///
+ /// The type `T` must be the actual type of the elements stored in the table,
+ /// otherwise calling this function may result in [`undefined behavior`].
+ ///
+ /// If `T` is a type that should be dropped and **the table is not empty**,
+ /// calling this function more than once results in [`undefined behavior`].
+ ///
+ /// If `T` is not [`Copy`], attempting to use values stored in the table after
+ /// calling this function may result in [`undefined behavior`].
+ ///
+ /// It is safe to call this function on a table that has not been allocated,
+ /// on a table with uninitialized control bytes, and on a table with no actual
+ /// data but with `Full` control bytes if `self.items == 0`.
+ ///
+    /// See also the [`Bucket::drop`] / [`Bucket::as_ptr`] methods for more information
+    /// about properly removing or saving `element` from / into the [`RawTable`] /
+ /// [`RawTableInner`].
+ ///
+ /// [`Bucket::drop`]: Bucket::drop
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`clear_no_drop`]: RawTableInner::clear_no_drop
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ unsafe fn drop_elements<T>(&mut self) {
+ // Check that `self.items != 0`. Protects against the possibility
+        // of creating an iterator on a table with uninitialized control bytes.
+ if T::NEEDS_DROP && self.items != 0 {
+ // SAFETY: We know for sure that RawTableInner will outlive the
+ // returned `RawIter` iterator, and the caller of this function
+ // must uphold the safety contract for `drop_elements` method.
+ for item in self.iter::<T>() {
+ // SAFETY: The caller must uphold the safety contract for
+ // `drop_elements` method.
+ item.drop();
+ }
+ }
+ }
+
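The `T::NEEDS_DROP && self.items != 0` guard above skips the whole iteration for types without destructors. The same idea, using std's `mem::needs_drop` in place of that constant; the helper name is illustrative.

```rust
use std::mem;

fn drop_elements_sketch<T>(mut items: Vec<T>) {
    // Mirrors the guard above: only walk the elements if their type actually
    // has a destructor and there is something to drop.
    if mem::needs_drop::<T>() && !items.is_empty() {
        for item in items.drain(..) {
            drop(item);
        }
    }
    // For types like u64 nothing runs here; the backing storage is still
    // freed when `items` goes out of scope.
}

fn main() {
    assert!(!mem::needs_drop::<u64>());
    assert!(mem::needs_drop::<String>());
    drop_elements_sketch(vec![1u64, 2, 3]);
    drop_elements_sketch(vec![String::from("a"), String::from("b")]);
}
```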
+    /// Executes the destructors (if any) of the values stored in the table and then
+ /// deallocates the table.
+ ///
+ /// # Note
+ ///
+ /// Calling this function automatically makes invalid (dangling) all instances of
+ /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
+ ///
+ /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
+ /// fields of the table. If necessary, the caller of this function must manually set
+ /// up these table fields.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+ ///
+ /// * Calling this function more than once;
+ ///
+ /// * The type `T` must be the actual type of the elements stored in the table.
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
+ /// to allocate this table.
+ ///
+ /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
+ /// was used to allocate this table.
+ ///
+    /// The caller of this function should pay attention to the possibility of the
+    /// elements' drop function panicking, because if it does:
+    ///
+    /// * The table may be left in an inconsistent state;
+    ///
+    /// * Memory is never deallocated, so a memory leak may occur.
+ ///
+ /// Attempt to use the `ctrl` field of the table (dereference) after calling this
+ /// function results in [`undefined behavior`].
+ ///
+ /// It is safe to call this function on a table that has not been allocated,
+ /// on a table with uninitialized control bytes, and on a table with no actual
+ /// data but with `Full` control bytes if `self.items == 0`.
+ ///
+ /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
+ /// for more information.
+ ///
+ /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
+ /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
+ if !self.is_empty_singleton() {
+ unsafe {
+ // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
+ self.drop_elements::<T>();
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+ // 2. The caller must uphold the safety contract for `drop_inner_table` method.
+ self.free_buckets(alloc, table_layout);
+ }
+ }
+ }
+
+ /// Returns a pointer to an element in the table (convenience for
+ /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
+ ///
+ /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
+ /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
+ /// this function, the following safety rules must be observed:
+ ///
+ /// * The table must already be allocated;
+ ///
+ /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
+ /// function, i.e. `(index + 1) <= self.buckets()`.
+ ///
+ /// * The type `T` must be the actual type of the elements stored in the table, otherwise
+ /// using the returned [`Bucket`] may result in [`undefined behavior`].
+ ///
+ /// It is safe to call this function with index of zero (`index == 0`) on a table that has
+ /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
+ /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
+ /// `(index + 1) <= self.buckets()`.
+ ///
+ /// ```none
+ /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
+    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
+ /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
+ ///
+ /// `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
+ /// part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
+ /// |
+ /// | `base = table.data_end::<T>()` points here
+ /// | (to the start of CT0 or to the end of T0)
+ /// v v
+ /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
+ /// ^ \__________ __________/
+ /// `table.bucket(3)` returns a pointer that points \/
+ /// here in the `data` part of the `RawTableInner` additional control bytes
+ /// (to the end of T3) `m = Group::WIDTH - 1`
+ ///
+ /// where: T0...T_n - our stored data;
+ /// CT0...CT_n - control bytes or metadata for `data`;
+ /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
+ /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
+ /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
+ ///
+ /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ /// ```
+ ///
+ /// [`Bucket::from_base_index`]: Bucket::from_base_index
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
debug_assert_ne!(self.bucket_mask, 0);
@@ -1219,6 +2502,52 @@ impl<A: Allocator + Clone> RawTableInner<A> {
Bucket::from_base_index(self.data_end(), index)
}
+ /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
+ /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
+ ///
+ /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+ ///
+ /// * The table must already be allocated;
+ ///
+ /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
+ /// function, i.e. `(index + 1) <= self.buckets()`;
+ ///
+ /// * The `size_of` must be equal to the size of the elements stored in the table;
+ ///
+ /// ```none
+ /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
+    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
+ /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
+ ///
+ /// `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
+ /// `data` part of the `RawTableInner`, i.e. to the start of T3
+ /// |
+ /// | `base = table.data_end::<u8>()` points here
+ /// | (to the start of CT0 or to the end of T0)
+ /// v v
+ /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
+ /// \__________ __________/
+ /// \/
+ /// additional control bytes
+ /// `m = Group::WIDTH - 1`
+ ///
+ /// where: T0...T_n - our stored data;
+ /// CT0...CT_n - control bytes or metadata for `data`;
+ /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
+ /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
+ /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
+ ///
+ /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ /// ```
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
debug_assert_ne!(self.bucket_mask, 0);
@@ -1227,9 +2556,44 @@ impl<A: Allocator + Clone> RawTableInner<A> {
base.sub((index + 1) * size_of)
}
+    /// Returns a pointer to one past the last `data` element in the table, as viewed from
+ /// the start point of the allocation (convenience for `self.ctrl.cast()`).
+ ///
+ /// This function actually returns a pointer to the end of the `data element` at
+ /// index "0" (zero).
+ ///
+ /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Note
+ ///
+ /// The type `T` must be the actual type of the elements stored in the table, otherwise
+ /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
+ ///
+ /// ```none
+ /// `table.data_end::<T>()` returns pointer that points here
+ /// (to the end of `T0`)
+ /// ∨
+ /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
+ /// \________ ________/
+ /// \/
+ /// `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+ ///
+ /// where: T0...T_n - our stored data;
+ /// CT0...CT_n - control bytes or metadata for `data`.
+ /// CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
+ /// with loading `Group` bytes from the heap works properly, even if the result
+ /// of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
+ /// `RawTableInner::set_ctrl` function.
+ ///
+ /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ /// ```
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn data_end<T>(&self) -> NonNull<T> {
- NonNull::new_unchecked(self.ctrl.as_ptr().cast())
+ fn data_end<T>(&self) -> NonNull<T> {
+ self.ctrl.cast()
}
/// Returns an iterator-like object for a probe sequence on the table.
@@ -1240,6 +2604,8 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[inline]
fn probe_seq(&self, hash: u64) -> ProbeSeq {
ProbeSeq {
+ // This is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
pos: h1(hash) & self.bucket_mask,
stride: 0,
}
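The probe sequence started here advances by group-sized triangular strides (1, 2, 3, ... groups), which for a power-of-two group count visits every group exactly once before repeating. A small standalone check of that property; `GROUP_WIDTH` and the starting position are illustrative values, and the update step mirrors `ProbeSeq::move_next`.

```rust
const GROUP_WIDTH: usize = 16;

fn main() {
    let buckets = 64usize; // power of two
    let bucket_mask = buckets - 1;
    let groups = buckets / GROUP_WIDTH;

    let mut pos = 37 & bucket_mask; // stands in for h1(hash) & bucket_mask
    let mut stride = 0;
    let mut visited = Vec::new();
    for _ in 0..groups {
        visited.push(pos / GROUP_WIDTH); // record which group this probe hits
        stride += GROUP_WIDTH; // triangular stride, as in ProbeSeq::move_next
        pos = (pos + stride) & bucket_mask;
    }
    visited.sort_unstable();
    assert_eq!(visited, vec![0, 1, 2, 3]); // every group visited exactly once
}
```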
@@ -1250,7 +2616,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[cfg(feature = "raw")]
#[inline]
unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
- let index = self.find_insert_slot(hash);
+ let index = self.find_insert_slot(hash).index;
let old_ctrl = *self.ctrl(index);
if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
Err(())
@@ -1277,13 +2643,68 @@ impl<A: Allocator + Clone> RawTableInner<A> {
/// Sets a control byte to the hash, and possibly also the replicated control byte at
/// the end of the array.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
+ /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
+ /// following rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
+ unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) {
+        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`] method.
self.set_ctrl(index, h2(hash));
}
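For context on the `h2(hash)` value written above: SwissTable-style maps split the hash into an index part (`h1`) and a 7-bit tag (`h2`) stored in the control byte. The sketch below assumes a 64-bit hash whose top 7 bits become the tag, which mirrors what hashbrown does, but the exact shift is shown only as an illustration.

fn h1(hash: u64) -> usize {
    // The index part: masked with `bucket_mask` when probing.
    hash as usize
}

fn h2(hash: u64) -> u8 {
    // The tag part: the top 7 bits, so the value stays in 0..=0x7f. Full
    // buckets store this byte; EMPTY and DELETED use values with the high
    // bit set, which is how a group search tells them apart.
    (hash >> 57) as u8
}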
+ /// Replaces the hash in the control byte at the given index with the provided one,
+ /// and possibly also replicates the new control byte at the end of the array of control
+ /// bytes, returning the old control byte.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`]
+ /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
+ /// methods, you must observe the following rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information on how to properly remove
+ /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
+ unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 {
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`] method.
let prev_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
prev_ctrl
@@ -1291,10 +2712,35 @@ impl<A: Allocator + Clone> RawTableInner<A> {
/// Sets a control byte, and possibly also the replicated control byte at
/// the end of the array.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information on how to properly remove
+ /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
+ unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) {
// Replicate the first Group::WIDTH control bytes at the end of
- // the array without using a branch:
+ // the array without using a branch. If the table is smaller than
+ // the group width (self.buckets() < Group::WIDTH), then
+ // `index2 = Group::WIDTH + index`; otherwise `index2` is:
+ //
// - If index >= Group::WIDTH then index == index2.
// - Otherwise index2 == self.bucket_mask + 1 + index.
//
@@ -1311,16 +2757,43 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// ---------------------------------------------
// | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
// ---------------------------------------------
+
+ // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
+ // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`] method.
*self.ctrl(index) = ctrl;
*self.ctrl(index2) = ctrl;
}
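The mirrored control-byte index computed above can be checked in isolation. A small sketch with a fixed width of 16 standing in for `Group::WIDTH` (the real width depends on the SIMD backend):

const GROUP_WIDTH: usize = 16; // example value; the real width is target-dependent

// Same arithmetic as `set_ctrl`'s `index2`.
fn replicated_index(index: usize, bucket_mask: usize) -> usize {
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn demo() {
    // 4 buckets (bucket_mask = 3), smaller than the group width:
    // every index is mirrored at `index + GROUP_WIDTH`.
    assert_eq!(replicated_index(2, 3), 2 + GROUP_WIDTH);
    // 64 buckets (bucket_mask = 63): only `index < GROUP_WIDTH` gets a distinct
    // mirror at `bucket_mask + 1 + index`; larger indices map to themselves.
    assert_eq!(replicated_index(3, 63), 63 + 1 + 3);
    assert_eq!(replicated_index(40, 63), 40);
}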
/// Returns a pointer to a control byte.
+ ///
+ /// # Safety
+ ///
+ /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
+ /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
+ /// Even calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
+ /// only returns a pointer one past the end of the allocated table, which is
+ /// useless on its own.
+ ///
+ /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
+ /// table that has not been allocated results in [`Undefined Behavior`].
+ ///
+ /// So to satisfy both requirements you should always follow the rule that
+ /// `index < self.bucket_mask + 1 + Group::WIDTH`.
+ ///
+ /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
+ /// for read-only purposes.
+ ///
+ /// See also the [`Bucket::as_ptr()`] method for more information on how to properly remove
+ /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn ctrl(&self, index: usize) -> *mut u8 {
debug_assert!(index < self.num_ctrl_bytes());
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`] method.
self.ctrl.as_ptr().add(index)
}
@@ -1329,6 +2802,17 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.bucket_mask + 1
}
+ /// Checks whether the bucket at `index` is full.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure `index` is less than the number of buckets.
+ #[inline]
+ unsafe fn is_bucket_full(&self, index: usize) -> bool {
+ debug_assert!(index < self.buckets());
+ is_full(*self.ctrl(index))
+ }
+
#[inline]
fn num_ctrl_bytes(&self) -> usize {
self.bucket_mask + 1 + Group::WIDTH
@@ -1339,25 +2823,45 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.bucket_mask == 0
}
+ /// Attempts to allocate a new hash table with at least enough capacity
+ /// for inserting the given number of elements without reallocating,
+ /// and returns it inside a `ScopeGuard` to protect against a panic in the
+ /// hash function.
+ ///
+ /// # Note
+ ///
+ /// It is recommended (but not required):
+ ///
+ /// * That the new table's `capacity` be greater than or equal to `self.items`.
+ ///
+ /// * That `alloc` is the same [`Allocator`] as the `Allocator` used
+ /// to allocate this table.
+ ///
+ /// * That `table_layout` is the same [`TableLayout`] as the `TableLayout` used
+ /// to allocate this table.
+ ///
+ /// If `table_layout` does not match the `TableLayout` that was used to allocate
+ /// this table, then using `mem::swap` with the `self` and the new table returned
+ /// by this function results in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::mut_mut)]
#[inline]
- unsafe fn prepare_resize(
+ fn prepare_resize<'a, A>(
&self,
+ alloc: &'a A,
table_layout: TableLayout,
capacity: usize,
fallibility: Fallibility,
- ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> {
+ ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
+ where
+ A: Allocator,
+ {
debug_assert!(self.items <= capacity);
// Allocate and initialize the new table.
- let mut new_table = RawTableInner::fallible_with_capacity(
- self.alloc.clone(),
- table_layout,
- capacity,
- fallibility,
- )?;
- new_table.growth_left -= self.items;
- new_table.items = self.items;
+ let new_table =
+ RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
// The hash function may panic, in which case we simply free the new
// table without dropping any elements that may have been copied into
@@ -1367,7 +2871,11 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// the comment at the bottom of this function.
Ok(guard(new_table, move |self_| {
if !self_.is_empty_singleton() {
- self_.free_buckets(table_layout);
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+ // 2. We know for sure that the `alloc` and `table_layout` matches the
+ // [`Allocator`] and [`TableLayout`] used to allocate this table.
+ unsafe { self_.free_buckets(alloc, table_layout) };
}
}))
}
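The panic-safety pattern used here (allocate the new table, wrap it in a guard that frees it on unwind, and only let the guard go once the elements have been dealt with) can be sketched with a minimal drop guard. This is a stand-in for the crate's internal `scopeguard`, not its actual API; as the `mem::swap` at the end of `resize_inner` below suggests, the real guard is often not defused at all but left to free whichever table it ends up holding.

use core::mem::ManuallyDrop;
use core::ptr;

// Runs `cleanup(&mut value)` on drop unless the guard is defused first.
struct Guard<T, F: FnMut(&mut T)> {
    value: T,
    cleanup: F,
}

impl<T, F: FnMut(&mut T)> Guard<T, F> {
    fn new(value: T, cleanup: F) -> Self {
        Guard { value, cleanup }
    }

    // Defuse the guard and hand back the protected value.
    fn into_inner(self) -> T {
        let this = ManuallyDrop::new(self);
        // SAFETY: the guard is never dropped, so `value` is read exactly once.
        unsafe { ptr::read(&this.value) }
    }
}

impl<T, F: FnMut(&mut T)> Drop for Guard<T, F> {
    fn drop(&mut self) {
        // Runs the cleanup, e.g. freeing a half-built table if the hash
        // function panicked before the guard was defused.
        (self.cleanup)(&mut self.value);
    }
}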
@@ -1376,16 +2884,38 @@ impl<A: Allocator + Clone> RawTableInner<A> {
///
/// This uses dynamic dispatch to reduce the amount of
/// code generated, but it is eliminated by LLVM optimizations when inlined.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`undefined behavior`]:
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
+ /// to allocate this table.
+ ///
+ /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// used to allocate this table.
+ ///
+ /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
+ /// the elements stored in the table.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::inline_always)]
#[inline(always)]
- unsafe fn reserve_rehash_inner(
+ unsafe fn reserve_rehash_inner<A>(
&mut self,
+ alloc: &A,
additional: usize,
hasher: &dyn Fn(&mut Self, usize) -> u64,
fallibility: Fallibility,
layout: TableLayout,
drop: Option<fn(*mut u8)>,
- ) -> Result<(), TryReserveError> {
+ ) -> Result<(), TryReserveError>
+ where
+ A: Allocator,
+ {
// Avoid `Option::ok_or_else` because it bloats LLVM IR.
let new_items = match self.items.checked_add(additional) {
Some(new_items) => new_items,
@@ -1395,12 +2925,30 @@ impl<A: Allocator + Clone> RawTableInner<A> {
if new_items <= full_capacity / 2 {
// Rehash in-place without re-allocating if we have plenty of spare
// capacity that is locked up due to DELETED entries.
+
+ // SAFETY:
+ // 1. We know for sure that `[`RawTableInner`]` has already been allocated
+ // (since new_items <= full_capacity / 2);
+ // 2. The caller ensures that `drop` function is the actual drop function of
+ // the elements stored in the table.
+ // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
+ // used to allocate this table.
+ // 4. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
self.rehash_in_place(hasher, layout.size, drop);
Ok(())
} else {
// Otherwise, conservatively resize to at least the next size up
// to avoid churning deletes into frequent rehashes.
+ //
+ // SAFETY:
+ // 1. We know for sure that `capacity >= self.items`.
+ // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
+ // [`TableLayout`] that were used to allocate this table.
+ // 3. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
self.resize_inner(
+ alloc,
usize::max(new_items, full_capacity + 1),
hasher,
fallibility,
@@ -1409,48 +2957,160 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
+ /// Returns an iterator over full buckets indices in the table.
+ ///
+ /// # Safety
+ ///
+ /// Behavior is undefined if any of the following conditions are violated:
+ ///
+ /// * The caller has to ensure that the `RawTableInner` outlives the
+ /// `FullBucketsIndices`. Because we cannot make the `next` method
+ /// unsafe on the `FullBucketsIndices` struct, we have to make the
+ /// `full_buckets_indices` method unsafe.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ #[inline(always)]
+ unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
+ // SAFETY:
+ // 1. Since the caller of this function ensures that the control bytes
+ // are properly initialized and `self.ctrl(0)` points to the start
+ // of the array of control bytes, therefore: `ctrl` is valid for reads,
+ // properly aligned to `Group::WIDTH` and points to the properly initialized
+ // control bytes.
+ // 2. The value of `items` is equal to the amount of data (values) added
+ // to the table.
+ //
+ // `ctrl` points here (to the start
+ // of the first control byte `CT0`)
+ // ∨
+ // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
+ // \________ ________/
+ // \/
+ // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`.
+ let ctrl = NonNull::new_unchecked(self.ctrl(0));
+
+ FullBucketsIndices {
+ // Load the first group
+ // SAFETY: See explanation above.
+ current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(),
+ group_first_index: 0,
+ ctrl,
+ items: self.items,
+ }
+ }
+
/// Allocates a new table of a different size and moves the contents of the
/// current table into it.
///
/// This uses dynamic dispatch to reduce the amount of
/// code generated, but it is eliminated by LLVM optimizations when inlined.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`undefined behavior`]:
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
+ /// to allocate this table;
+ ///
+ /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// used to allocate this table;
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+ /// The caller of this function must ensure that `capacity >= self.items`
+ /// otherwise:
+ ///
+ /// * If `self.items != 0`, calling this function with `capacity == 0`
+ /// results in [`undefined behavior`].
+ ///
+ /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
+ /// `self.items > capacity_to_buckets(capacity)`, calling this function
+ /// results in [`undefined behavior`].
+ ///
+ /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
+ /// `self.items > capacity_to_buckets(capacity)`, this function never
+ /// returns (it goes into an infinite loop).
+ ///
+ /// Note: It is recommended (but not required) that the new table's `capacity`
+ /// be greater than or equal to `self.items`. If `capacity <= self.items`,
+ /// this function may never return. See [`RawTableInner::find_insert_slot`] for
+ /// more information.
+ ///
+ /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::inline_always)]
#[inline(always)]
- unsafe fn resize_inner(
+ unsafe fn resize_inner<A>(
&mut self,
+ alloc: &A,
capacity: usize,
hasher: &dyn Fn(&mut Self, usize) -> u64,
fallibility: Fallibility,
layout: TableLayout,
- ) -> Result<(), TryReserveError> {
- let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
-
- // Copy all elements to the new table.
- for i in 0..self.buckets() {
- if !is_full(*self.ctrl(i)) {
- continue;
- }
-
+ ) -> Result<(), TryReserveError>
+ where
+ A: Allocator,
+ {
+ // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
+ // that were used to allocate this table.
+ let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
+
+ // SAFETY: We know for sure that RawTableInner will outlive the
+ // returned `FullBucketsIndices` iterator, and the caller of this
+ // function ensures that the control bytes are properly initialized.
+ for full_byte_index in self.full_buckets_indices() {
// This may panic.
- let hash = hasher(self, i);
+ let hash = hasher(self, full_byte_index);
+ // SAFETY:
// We can use a simpler version of insert() here since:
- // - there are no DELETED entries.
- // - we know there is enough space in the table.
- // - all elements are unique.
- let (index, _) = new_table.prepare_insert_slot(hash);
-
+ // 1. There are no DELETED entries.
+ // 2. We know there is enough space in the table.
+ // 3. All elements are unique.
+ // 4. The caller of this function guarantees that `capacity > 0`
+ // so `new_table` must already have some allocated memory.
+ // 5. We set `growth_left` and `items` fields of the new table
+ // after the loop.
+ // 6. We insert into the table, at the returned index, the data
+ // matching the given hash immediately after calling this function.
+ let (new_index, _) = new_table.prepare_insert_slot(hash);
+
+ // SAFETY:
+ //
+ // * `src` is valid for reads of `layout.size` bytes, since the
+ // table is alive and the `full_byte_index` is guaranteed to be
+ // within bounds (see `FullBucketsIndices::next_impl`);
+ //
+ // * `dst` is valid for writes of `layout.size` bytes, since the
+ // caller ensures that `table_layout` matches the [`TableLayout`]
+ // that was used to allocate the old table and we have the `new_index`
+ // returned by `prepare_insert_slot`.
+ //
+ // * Both `src` and `dst` are properly aligned.
+ //
+ // * Both `src` and `dst` point to different region of memory.
ptr::copy_nonoverlapping(
- self.bucket_ptr(i, layout.size),
- new_table.bucket_ptr(index, layout.size),
+ self.bucket_ptr(full_byte_index, layout.size),
+ new_table.bucket_ptr(new_index, layout.size),
layout.size,
);
}
+ // The hash function didn't panic, so we can safely set the
+ // `growth_left` and `items` fields of the new table.
+ new_table.growth_left -= self.items;
+ new_table.items = self.items;
+
// We successfully copied all elements without panicking. Now replace
// self with the new table. The old table will have its memory freed but
// the items will not be dropped (since they have been moved into the
// new table).
+ // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
+ // that was used to allocate this table.
mem::swap(self, &mut new_table);
Ok(())
@@ -1463,6 +3123,21 @@ impl<A: Allocator + Clone> RawTableInner<A> {
///
/// This uses dynamic dispatch to reduce the amount of
/// code generated, but it is eliminated by LLVM optimizations when inlined.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+ ///
+ /// * The `size_of` must be equal to the size of the elements stored in the table;
+ ///
+ /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
+ /// the elements stored in the table.
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::inline_always)]
#[cfg_attr(feature = "inline-more", inline(always))]
#[cfg_attr(not(feature = "inline-more"), inline)]
@@ -1506,8 +3181,10 @@ impl<A: Allocator + Clone> RawTableInner<A> {
let hash = hasher(*guard, i);
// Search for a suitable place to put it
- let new_i = guard.find_insert_slot(hash);
- let new_i_p = guard.bucket_ptr(new_i, size_of);
+ //
+ // SAFETY: Caller of this function ensures that the control bytes
+ // are properly initialized.
+ let new_i = guard.find_insert_slot(hash).index;
// Probing works by scanning through all of the control
// bytes in groups, which may not be aligned to the group
@@ -1519,6 +3196,8 @@ impl<A: Allocator + Clone> RawTableInner<A> {
continue 'outer;
}
+ let new_i_p = guard.bucket_ptr(new_i, size_of);
+
// We are moving the current item to a new position. Write
// our H2 to the control byte of the new position.
let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
@@ -1545,17 +3224,107 @@ impl<A: Allocator + Clone> RawTableInner<A> {
mem::forget(guard);
}
+ /// Deallocates the table without dropping any entries.
+ ///
+ /// # Note
+ ///
+ /// This function must only be called after [`drop_elements`](RawTableInner::drop_elements),
+ /// otherwise it can lead to a memory leak. Calling this function also invalidates
+ /// (leaves dangling) all instances of buckets ([`Bucket`]) as well as the `ctrl`
+ /// field of the table.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
+ /// to allocate this table.
+ ///
+ /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
+ /// to allocate this table.
+ ///
+ /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
+ ///
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
+ /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
+ #[inline]
+ unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
+ where
+ A: Allocator,
+ {
+ // SAFETY: The caller must uphold the safety contract for `free_buckets`
+ // method.
+ let (ptr, layout) = self.allocation_info(table_layout);
+ alloc.deallocate(ptr, layout);
+ }
+
+ /// Returns a pointer to the allocated memory and the layout that was used to
+ /// allocate the table.
+ ///
+ /// # Safety
+ ///
+ /// Caller of this function must observe the following safety rules:
+ ///
+ /// * The [`RawTableInner`] has already been allocated, otherwise
+ /// calling this function results in [`undefined behavior`]
+ ///
+ /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// that was used to allocate this table. Failure to comply with this condition
+ /// may result in [`undefined behavior`].
+ ///
+ /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
+ /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
#[inline]
- unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
+ unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
+ debug_assert!(
+ !self.is_empty_singleton(),
+ "this function can only be called on non-empty tables"
+ );
+
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
Some(lco) => lco,
- None => hint::unreachable_unchecked(),
+ None => unsafe { hint::unreachable_unchecked() },
};
- self.alloc.deallocate(
- NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
+ (
+ // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
+ unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
layout,
- );
+ )
+ }
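A simplified view of the layout/offset relationship relied on here: the allocation places the data array first and the control bytes after it, so the stored `ctrl` pointer sits `ctrl_offset` bytes into the allocation and deallocation has to subtract that offset again. This is not the crate's actual `TableLayout::calculate_layout_for`; in particular, aligning the control bytes to `Group::WIDTH` is ignored for brevity.

use core::alloc::Layout;

// Data array first, then `buckets + group_width` control bytes.
fn layout_for<T>(buckets: usize, group_width: usize) -> Option<(Layout, usize)> {
    let data = Layout::array::<T>(buckets).ok()?;
    let ctrl = Layout::array::<u8>(buckets + group_width).ok()?;
    // `ctrl_offset` is where the control bytes begin inside the allocation;
    // `self.ctrl - ctrl_offset` therefore recovers the allocation start.
    let (layout, ctrl_offset) = data.extend(ctrl).ok()?;
    Some((layout, ctrl_offset))
}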
+
+ /// Returns a pointer to the allocated memory and the layout that was used to
+ /// allocate the table. If the [`RawTableInner`] has not been allocated, this
+ /// function returns a dangling pointer and the layout of the `()` (unit) type.
+ ///
+ /// # Safety
+ ///
+ /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// that was used to allocate this table. Failure to comply with this condition
+ /// may result in [`undefined behavior`].
+ ///
+ /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
+ /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
+ #[cfg(feature = "raw")]
+ unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
+ if self.is_empty_singleton() {
+ (NonNull::dangling(), Layout::new::<()>())
+ } else {
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+ // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
+ // that was used to allocate this table.
+ unsafe { self.allocation_info(table_layout) }
+ }
}
/// Marks all table buckets as empty without dropping their contents.
@@ -1570,27 +3339,95 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
}
+ /// Erases the [`Bucket`]'s control byte at the given index so that it is no longer
+ /// treated as full, decreases the `items` count of the table and, where possible,
+ /// increases `self.growth_left`.
+ ///
+ /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
+ /// does not make any changes to the `data` parts of the table. The caller of this
+ /// function must take care to properly drop the `data`, otherwise calling this
+ /// function may result in a memory leak.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The control byte at the given `index` must be full;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// Calling this function on a table with no elements is unspecified, and calling subsequent
+ /// functions is likely to result in [`undefined behavior`] because the subtraction overflows
+ /// (`self.items -= 1` overflows when `self.items == 0`).
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information on how to properly remove
+ /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn erase(&mut self, index: usize) {
- debug_assert!(is_full(*self.ctrl(index)));
+ debug_assert!(self.is_bucket_full(index));
+
+ // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
+ // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
+ // SAFETY:
+ // - The caller must uphold the safety contract for `erase` method;
+ // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
let empty_before = Group::load(self.ctrl(index_before)).match_empty();
let empty_after = Group::load(self.ctrl(index)).match_empty();
- // If we are inside a continuous block of Group::WIDTH full or deleted
- // cells then a probe window may have seen a full block when trying to
- // insert. We therefore need to keep that block non-empty so that
- // lookups will continue searching to the next probe window.
+ // Inserting and searching in the map is performed by two key functions:
+ //
+ // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED`
+ // slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED`
+ // slot immediately in the first group, it jumps to the next `Group` looking for it,
+ // and so on until it has gone through all the groups in the control bytes.
+ //
+ // - The `find_inner` function that looks for the index of the desired element by looking
+ // at all the `FULL` bytes in the group. If it did not find the element right away, and
+ // there is no `EMPTY` byte in the group, then this means that the `find_insert_slot`
+ // function may have found a suitable slot in the next group. Therefore, `find_inner`
+ // jumps further, and if it does not find the desired element and again there is no `EMPTY`
+ // byte, then it jumps further, and so on. The search stops only if `find_inner` function
+ // finds the desired element or hits an `EMPTY` slot/byte.
+ //
+ // Accordingly, this leads to two consequences:
//
- // Note that in this context `leading_zeros` refers to the bytes at the
- // end of a group, while `trailing_zeros` refers to the bytes at the
- // beginning of a group.
+ // - The map must have `EMPTY` slots (bytes);
+ //
+ // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner`
+ // function may stumble upon an `EMPTY` byte before finding the desired element and stop
+ // searching.
+ //
+ // Thus it is necessary to check all bytes after and before the erased element. If we are in
+ // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes
+ // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
+ // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there
+ // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
+ // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well.
+ //
+ // Finally, for tables smaller than the group width (self.buckets() < Group::WIDTH) we have
+ // `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`, so both
+ // `Group` loads above read the same group. Given all of the above, such tables can never
+ // contain `DELETED` bytes.
+ //
+ // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
+ // `trailing_zeros` refers to the bytes at the beginning of a group.
let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
DELETED
} else {
self.growth_left += 1;
EMPTY
};
+ // SAFETY: the caller must uphold the safety contract for `erase` method.
self.set_ctrl(index, ctrl);
self.items -= 1;
}
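The DELETED-vs-EMPTY decision explained above boils down to: if the erased slot sits inside a run of at least `Group::WIDTH` consecutive non-EMPTY bytes (measured across the group ending just before it and the group starting at it), some probe window may have seen no EMPTY byte there, so the slot must become DELETED. A sketch with an 8-wide group and `u8` bitmasks standing in for the SIMD `Group` / `BitMask` types:

const GROUP_WIDTH: u32 = 8; // illustration only; the real width is target-dependent

// `empty_before` / `empty_after` are bitmasks of EMPTY slots in the group
// ending at the erased index and in the group starting at it; bit 0 is the
// first byte of each group, so "trailing" bits sit at a group's start and
// "leading" bits at its end, matching the comment above.
fn erased_ctrl(empty_before: u8, empty_after: u8) -> &'static str {
    if empty_before.leading_zeros() + empty_after.trailing_zeros() >= GROUP_WIDTH {
        "DELETED"
    } else {
        "EMPTY"
    }
}

fn demo() {
    // No EMPTY byte in either neighbouring group: must become DELETED.
    assert_eq!(erased_ctrl(0b0000_0000, 0b0000_0000), "DELETED");
    // EMPTY bytes right next to the erased slot on both sides: every probe
    // window covering the slot also sees an EMPTY byte, so EMPTY is fine.
    assert_eq!(erased_ctrl(0b1000_0000, 0b0000_0010), "EMPTY");
    // EMPTY bytes exist but only at the far ends of the two groups: a full
    // window of non-EMPTY bytes still fits around the slot, so DELETED.
    assert_eq!(erased_ctrl(0b0000_0001, 0b1000_0000), "DELETED");
}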
@@ -1599,12 +3436,16 @@ impl<A: Allocator + Clone> RawTableInner<A> {
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
fn clone(&self) -> Self {
if self.table.is_empty_singleton() {
- Self::new_in(self.table.alloc.clone())
+ Self::new_in(self.alloc.clone())
} else {
unsafe {
// Avoid `Result::ok_or_else` because it bloats LLVM IR.
- let new_table = match Self::new_uninitialized(
- self.table.alloc.clone(),
+ //
+ // SAFETY: This is safe as we are taking the size of an already allocated table
+ // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
+ // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
+ let mut new_table = match Self::new_uninitialized(
+ self.alloc.clone(),
self.table.buckets(),
Fallibility::Infallible,
) {
@@ -1612,24 +3453,32 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
Err(_) => hint::unreachable_unchecked(),
};
- // If cloning fails then we need to free the allocation for the
- // new table. However we don't run its drop since its control
- // bytes are not initialized yet.
- let mut guard = guard(ManuallyDrop::new(new_table), |new_table| {
- new_table.free_buckets();
- });
-
- guard.clone_from_spec(self);
-
- // Disarm the scope guard and return the newly created table.
- ManuallyDrop::into_inner(ScopeGuard::into_inner(guard))
+ // Cloning elements may fail (the clone function may panic). But we don't
+ // need to worry about uninitialized control bits, since:
+ // 1. The number of items (elements) in the table is zero, which means that
+ // the control bits will not be read by the `Drop` function.
+ // 2. The `clone_from_spec` method will first copy all control bits from
+ // `self` (thus initializing them). But this will not affect the `Drop`
+ // function, since the `clone_from_spec` function sets `items` only after
+ // successfully cloning all elements.
+ new_table.clone_from_spec(self);
+ new_table
}
}
}
fn clone_from(&mut self, source: &Self) {
if source.table.is_empty_singleton() {
- *self = Self::new_in(self.table.alloc.clone());
+ let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+ // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If any element's drop function panics, then there will only be a memory leak,
+ // because we have replaced the inner table with a new one.
+ old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
+ }
} else {
unsafe {
// Make sure that if any panics occurs, we clear the table and
@@ -1644,27 +3493,38 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
//
// This leak is unavoidable: we can't try dropping more elements
// since this could lead to another panic and abort the process.
- self_.drop_elements();
+ //
+ // SAFETY: If something goes wrong, we clear our table right after
+ // dropping the elements, so there is no double drop, since `items`
+ // will be equal to zero.
+ self_.table.drop_elements::<T>();
// If necessary, resize our table to match the source.
if self_.buckets() != source.buckets() {
- // Skip our drop by using ptr::write.
- if !self_.table.is_empty_singleton() {
- self_.free_buckets();
+ let new_inner = match RawTableInner::new_uninitialized(
+ &self_.alloc,
+ Self::TABLE_LAYOUT,
+ source.buckets(),
+ Fallibility::Infallible,
+ ) {
+ Ok(table) => table,
+ Err(_) => hint::unreachable_unchecked(),
+ };
+ // Replace the old inner with the new uninitialized one. This is ok, since if something
+ // goes wrong the `ScopeGuard` will initialize all control bytes and leave an empty table.
+ let mut old_inner = mem::replace(&mut self_.table, new_inner);
+ if !old_inner.is_empty_singleton() {
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+ // 2. We know for sure that `alloc` and `table_layout` matches
+ // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
+ old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
}
- (&mut **self_ as *mut Self).write(
- // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- match Self::new_uninitialized(
- self_.table.alloc.clone(),
- source.buckets(),
- Fallibility::Infallible,
- ) {
- Ok(table) => table,
- Err(_) => hint::unreachable_unchecked(),
- },
- );
}
+ // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
+ // inside the `clone_from_impl` function will take care of that, dropping all
+ // cloned elements if necessary. Our `ScopeGuard` will clear the table.
self_.clone_from_spec(source);
// Disarm the scope guard if cloning was successful.
@@ -1696,7 +3556,8 @@ impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
.copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
source
.data_start()
- .copy_to_nonoverlapping(self.data_start(), self.table.buckets());
+ .as_ptr()
+ .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
self.table.items = source.table.items;
self.table.growth_left = source.table.growth_left;
@@ -1720,9 +3581,9 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
// to make sure we drop only the elements that have been
// cloned so far.
let mut guard = guard((0, &mut *self), |(index, self_)| {
- if mem::needs_drop::<T>() && !self_.is_empty() {
- for i in 0..=*index {
- if is_full(*self_.table.ctrl(i)) {
+ if T::NEEDS_DROP {
+ for i in 0..*index {
+ if self_.is_bucket_full(i) {
self_.bucket(i).drop();
}
}
@@ -1735,7 +3596,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
to.write(from.as_ref().clone());
// Update the index in case we need to unwind.
- guard.0 = index;
+ guard.0 = index + 1;
}
// Successfully cloned all items, no need to clean up.
@@ -1757,7 +3618,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
{
self.clear();
- let guard_self = guard(&mut *self, |self_| {
+ let mut guard_self = guard(&mut *self, |self_| {
// Clear the partially copied table if a panic occurs, otherwise
// items and growth_left will be out of sync with the contents
// of the table.
@@ -1790,7 +3651,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
}
}
-impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
+impl<T, A: Allocator + Default> Default for RawTable<T, A> {
#[inline]
fn default() -> Self {
Self::new_in(Default::default())
@@ -1798,31 +3659,41 @@ impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
}
#[cfg(feature = "nightly")]
-unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
- if !self.table.is_empty_singleton() {
- unsafe {
- self.drop_elements();
- self.free_buckets();
- }
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+ // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If the drop function of any element fails, then only a memory leak will occur,
+ // and we don't care because we are inside the `Drop` function of the `RawTable`,
+ // so there won't be any table left in an inconsistent state.
+ self.table
+ .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
}
}
}
#[cfg(not(feature = "nightly"))]
-impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
+impl<T, A: Allocator> Drop for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
- if !self.table.is_empty_singleton() {
- unsafe {
- self.drop_elements();
- self.free_buckets();
- }
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+ // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If the drop function of any element fails, then only a memory leak will occur,
+ // and we don't care because we are inside the `Drop` function of the `RawTable`,
+ // so there won't be any table left in an inconsistent state.
+ self.table
+ .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
}
}
}
-impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
+impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
type Item = T;
type IntoIter = RawIntoIter<T, A>;
@@ -1840,7 +3711,7 @@ impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
pub(crate) struct RawIterRange<T> {
// Mask of full buckets in the current group. Bits are cleared from this
// mask as each element is processed.
- current_group: BitMask,
+ current_group: BitMaskIter,
// Pointer to the buckets for the current group.
data: Bucket<T>,
@@ -1856,19 +3727,44 @@ pub(crate) struct RawIterRange<T> {
impl<T> RawIterRange<T> {
/// Returns a `RawIterRange` covering a subset of a table.
///
- /// The control byte address must be aligned to the group size.
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`undefined behavior`]:
+ ///
+ /// * `ctrl` must be [valid] for reads, i.e. the table outlives the `RawIterRange`;
+ ///
+ /// * `ctrl` must be properly aligned to the group size (Group::WIDTH);
+ ///
+ /// * `ctrl` must point to the array of properly initialized control bytes;
+ ///
+ /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
+ ///
+ /// * the value of `len` must be less than or equal to the number of table buckets,
+ /// and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
+ /// must be positive.
+ ///
+ /// * The `ctrl.add(len)` pointer must be either in bounds or one
+ /// byte past the end of the same [allocated table].
+ ///
+ /// * The `len` must be a power of two.
+ ///
+ /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
debug_assert_ne!(len, 0);
debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
+ // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] function.
let end = ctrl.add(len);
// Load the first group and advance ctrl to point to the next group
+ // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] function.
let current_group = Group::load_aligned(ctrl).match_full();
let next_ctrl = ctrl.add(Group::WIDTH);
Self {
- current_group,
+ current_group: current_group.into_iter(),
data,
next_ctrl,
end,
@@ -1925,8 +3821,7 @@ impl<T> RawIterRange<T> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
loop {
- if let Some(index) = self.current_group.lowest_set_bit() {
- self.current_group = self.current_group.remove_lowest_bit();
+ if let Some(index) = self.current_group.next() {
return Some(self.data.next_n(index));
}
@@ -1939,7 +3834,86 @@ impl<T> RawIterRange<T> {
// than the group size where the trailing control bytes are all
// EMPTY. On larger tables self.end is guaranteed to be aligned
// to the group size (since tables are power-of-two sized).
- self.current_group = Group::load_aligned(self.next_ctrl).match_full();
+ self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
+ self.data = self.data.next_n(Group::WIDTH);
+ self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
+ }
+ }
+
+ /// Folds every element into an accumulator by applying an operation,
+ /// returning the final result.
+ ///
+ /// `fold_impl()` takes three arguments: the number of items remaining in
+ /// the iterator, an initial value, and a closure with two arguments: an
+ /// 'accumulator', and an element. The closure returns the value that the
+ /// accumulator should have for the next iteration.
+ ///
+ /// The initial value is the value the accumulator will have on the first call.
+ ///
+ /// After applying this closure to every element of the iterator, `fold_impl()`
+ /// returns the accumulator.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`Undefined Behavior`]:
+ ///
+ /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
+ /// i.e. the table outlives the `RawIterRange`;
+ ///
+ /// * The provided `n` value must match the actual number of items
+ /// in the table.
+ ///
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[allow(clippy::while_let_on_iterator)]
+ #[cfg_attr(feature = "inline-more", inline)]
+ unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
+ where
+ F: FnMut(B, Bucket<T>) -> B,
+ {
+ loop {
+ while let Some(index) = self.current_group.next() {
+ // The returned `index` will always be in the range `0..Group::WIDTH`,
+ // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
+ debug_assert!(n != 0);
+ let bucket = self.data.next_n(index);
+ acc = f(acc, bucket);
+ n -= 1;
+ }
+
+ if n == 0 {
+ return acc;
+ }
+
+ // SAFETY: The caller of this function ensures that:
+ //
+ // 1. The provided `n` value matches the actual number of items in the table;
+ // 2. The table is alive and has not been moved.
+ //
+ // Taking the above into account, we always stay within the bounds, because:
+ //
+ // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
+ // we will never end up in the given branch, since we should have already
+ // yielded all the elements of the table.
+ //
+ // 2. For tables larger than the group width: the number of buckets is a
+ // power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k).
+ // Since `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we start
+ // from the start of the array of control bytes, and never try to iterate after
+ // getting all the elements, the last `self.current_group` will read bytes
+ // from the `self.buckets() - Group::WIDTH` index. We also know that
+ // `self.current_group.next()` will always return indices within the range
+ // `0..Group::WIDTH`.
+ //
+ // Knowing all of the above and taking into account that we are synchronizing
+ // the `self.data` index with the index we used to read the `self.current_group`,
+ // the subsequent `self.data.next_n(index)` will always return a bucket with
+ // an index number less than `self.buckets()`.
+ //
+ // The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
+ // actually be read, since we should have already yielded all the elements of
+ // the table.
+ self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
self.data = self.data.next_n(Group::WIDTH);
self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
}
@@ -2016,7 +3990,7 @@ impl<T> RawIter<T> {
/// This method should be called _before_ the removal is made. It is not necessary to call this
/// method if you are removing an item that this iterator yielded in the past.
#[cfg(feature = "raw")]
- pub fn reflect_remove(&mut self, b: &Bucket<T>) {
+ pub unsafe fn reflect_remove(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, false);
}
@@ -2030,36 +4004,76 @@ impl<T> RawIter<T> {
///
/// This method should be called _after_ the given insert is made.
#[cfg(feature = "raw")]
- pub fn reflect_insert(&mut self, b: &Bucket<T>) {
+ pub unsafe fn reflect_insert(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, true);
}
/// Refresh the iterator so that it reflects a change to the state of the given bucket.
#[cfg(feature = "raw")]
- fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
- unsafe {
- if b.as_ptr() > self.iter.data.as_ptr() {
- // The iterator has already passed the bucket's group.
- // So the toggle isn't relevant to this iterator.
- return;
+ unsafe fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
+ if b.as_ptr() > self.iter.data.as_ptr() {
+ // The iterator has already passed the bucket's group.
+ // So the toggle isn't relevant to this iterator.
+ return;
+ }
+
+ if self.iter.next_ctrl < self.iter.end
+ && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
+ {
+ // The iterator has not yet reached the bucket's group.
+ // We don't need to reload anything, but we do need to adjust the item count.
+
+ if cfg!(debug_assertions) {
+ // Double-check that the user isn't lying to us by checking the bucket state.
+ // To do that, we need to find its control byte. We know that self.iter.data is
+ // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
+ let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+ let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
+ // This method should be called _before_ a removal, or _after_ an insert,
+ // so in both cases the ctrl byte should indicate that the bucket is full.
+ assert!(is_full(*ctrl));
}
- if self.iter.next_ctrl < self.iter.end
- && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
- {
- // The iterator has not yet reached the bucket's group.
- // We don't need to reload anything, but we do need to adjust the item count.
+ if is_insert {
+ self.items += 1;
+ } else {
+ self.items -= 1;
+ }
- if cfg!(debug_assertions) {
- // Double-check that the user isn't lying to us by checking the bucket state.
- // To do that, we need to find its control byte. We know that self.iter.data is
- // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
- let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
- let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
- // This method should be called _before_ a removal, or _after_ an insert,
- // so in both cases the ctrl byte should indicate that the bucket is full.
- assert!(is_full(*ctrl));
- }
+ return;
+ }
+
+ // The iterator is at the bucket group that the toggled bucket is in.
+ // We need to do two things:
+ //
+ // - Determine if the iterator already yielded the toggled bucket.
+ // If it did, we're done.
+ // - Otherwise, update the iterator cached group so that it won't
+ // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
+ // We'll also need to update the item count accordingly.
+ if let Some(index) = self.iter.current_group.0.lowest_set_bit() {
+ let next_bucket = self.iter.data.next_n(index);
+ if b.as_ptr() > next_bucket.as_ptr() {
+ // The toggled bucket is "before" the bucket the iterator would yield next. We
+ // therefore don't need to do anything --- the iterator has already passed the
+ // bucket in question.
+ //
+ // The item count must already be correct, since a removal or insert "prior" to
+ // the iterator's position wouldn't affect the item count.
+ } else {
+ // The removed bucket is an upcoming bucket. We need to make sure it does _not_
+ // get yielded, and also that it's no longer included in the item count.
+ //
+ // NOTE: We can't just reload the group here, both since that might reflect
+ // inserts we've already passed, and because that might inadvertently unset the
+ // bits for _other_ removals. If we do that, we'd have to also decrement the
+ // item count for those other bits that we unset. But the presumably subsequent
+ // call to reflect for those buckets might _also_ decrement the item count.
+ // Instead, we _just_ flip the bit for the particular bucket the caller asked
+ // us to reflect.
+ let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+ let was_full = self.iter.current_group.flip(our_bit);
+ debug_assert_ne!(was_full, is_insert);
if is_insert {
self.items += 1;
@@ -2067,65 +4081,23 @@ impl<T> RawIter<T> {
self.items -= 1;
}
- return;
- }
-
- // The iterator is at the bucket group that the toggled bucket is in.
- // We need to do two things:
- //
- // - Determine if the iterator already yielded the toggled bucket.
- // If it did, we're done.
- // - Otherwise, update the iterator cached group so that it won't
- // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
- // We'll also need to update the item count accordingly.
- if let Some(index) = self.iter.current_group.lowest_set_bit() {
- let next_bucket = self.iter.data.next_n(index);
- if b.as_ptr() > next_bucket.as_ptr() {
- // The toggled bucket is "before" the bucket the iterator would yield next. We
- // therefore don't need to do anything --- the iterator has already passed the
- // bucket in question.
- //
- // The item count must already be correct, since a removal or insert "prior" to
- // the iterator's position wouldn't affect the item count.
- } else {
- // The removed bucket is an upcoming bucket. We need to make sure it does _not_
- // get yielded, and also that it's no longer included in the item count.
- //
- // NOTE: We can't just reload the group here, both since that might reflect
- // inserts we've already passed, and because that might inadvertently unset the
- // bits for _other_ removals. If we do that, we'd have to also decrement the
- // item count for those other bits that we unset. But the presumably subsequent
- // call to reflect for those buckets might _also_ decrement the item count.
- // Instead, we _just_ flip the bit for the particular bucket the caller asked
- // us to reflect.
- let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
- let was_full = self.iter.current_group.flip(our_bit);
- debug_assert_ne!(was_full, is_insert);
-
- if is_insert {
- self.items += 1;
+ if cfg!(debug_assertions) {
+ if b.as_ptr() == next_bucket.as_ptr() {
+ // The removed bucket should no longer be next
+ debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index));
} else {
- self.items -= 1;
- }
-
- if cfg!(debug_assertions) {
- if b.as_ptr() == next_bucket.as_ptr() {
- // The removed bucket should no longer be next
- debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
- } else {
- // We should not have changed what bucket comes next.
- debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
- }
+ // We should not have changed what bucket comes next.
+ debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index));
}
}
- } else {
- // We must have already iterated past the removed item.
}
+ } else {
+ // We must have already iterated past the removed item.
}
}
unsafe fn drop_elements(&mut self) {
- if mem::needs_drop::<T>() && self.len() != 0 {
+ if T::NEEDS_DROP && self.items != 0 {
for item in self {
item.drop();
}
@@ -2159,9 +4131,8 @@ impl<T> Iterator for RawIter<T> {
self.iter.next_impl::<false>()
};
- if nxt.is_some() {
- self.items -= 1;
- }
+ debug_assert!(nxt.is_some());
+ self.items -= 1;
nxt
}
@@ -2170,33 +4141,160 @@ impl<T> Iterator for RawIter<T> {
fn size_hint(&self) -> (usize, Option<usize>) {
(self.items, Some(self.items))
}
+
+ #[inline]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ unsafe { self.iter.fold_impl(self.items, init, f) }
+ }
}
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
+/// Iterator which returns an index of every full bucket in the table.
+///
+/// For maximum flexibility this iterator is not bound by a lifetime, but you
+/// must observe several rules when using it:
+/// - You must not free the hash table while iterating (including via growing/shrinking).
+/// - It is fine to erase a bucket that has been yielded by the iterator.
+/// - Erasing a bucket that has not yet been yielded by the iterator may still
+/// result in the iterator yielding the index of that bucket.
+/// - It is unspecified whether an element inserted after the iterator was
+/// created will be yielded by that iterator.
+/// - The order in which the iterator yields indices of the buckets is unspecified
+/// and may change in the future.
+pub(crate) struct FullBucketsIndices {
+ // Mask of full buckets in the current group. Bits are cleared from this
+ // mask as each element is processed.
+ current_group: BitMaskIter,
+
+ // Index of the first control byte of the current group (relative
+ // to the start of the control bytes).
+ group_first_index: usize,
+
+ // Pointer to the current group of control bytes.
+ // Must be aligned to the group size (Group::WIDTH).
+ ctrl: NonNull<u8>,
+
+ // Number of elements in the table.
+ items: usize,
+}
+
+impl FullBucketsIndices {
+ /// Advances the iterator and returns the next value.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`Undefined Behavior`]:
+ ///
+ /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
+ /// i.e. the table outlives the `FullBucketsIndices`;
+ ///
+ /// * It never tries to iterate after getting all elements.
+ ///
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline(always)]
+ unsafe fn next_impl(&mut self) -> Option<usize> {
+ loop {
+ if let Some(index) = self.current_group.next() {
+ // The returned `self.group_first_index + index` will always
+ // be in the range `0..self.buckets()`. See explanation below.
+ return Some(self.group_first_index + index);
+ }
+
+ // SAFETY: The caller of this function ensures that:
+ //
+ // 1. It never tries to iterate after getting all the elements;
+ // 2. The table is alive and has not been moved;
+ // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
+ //
+ // Taking the above into account, we always stay within the bounds, because:
+ //
+ // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
+ // we will never end up in the given branch, since we should have already
+ // yielded all the elements of the table.
+ //
+ // 2. For tables larger than the group width: the number of buckets is a
+ // power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k).
+ // Since `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we start
+ // from the start of the array of control bytes, and never try to iterate after
+ // getting all the elements, the last `self.ctrl` will be equal to
+ // the `self.buckets() - Group::WIDTH` index, so `self.current_group.next()`
+ // will always contain indices within the range `0..Group::WIDTH`,
+ // and the subsequent `self.group_first_index + index` will always return a
+ // number less than `self.buckets()`.
+ self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
+
+ // SAFETY: See explanation above.
+ self.current_group = Group::load_aligned(self.ctrl.as_ptr())
+ .match_full()
+ .into_iter();
+ self.group_first_index += Group::WIDTH;
+ }
+ }
+}
+
+impl Iterator for FullBucketsIndices {
+ type Item = usize;
+
+ /// Advances the iterator and returns the next value. It is up to
+ /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
+ /// because we cannot make the `next` method unsafe.
+ #[inline(always)]
+ fn next(&mut self) -> Option<usize> {
+ // Return if we already yielded all items.
+ if self.items == 0 {
+ return None;
+ }
+
+ let nxt = unsafe {
+ // SAFETY:
+            // 1. We check the number of items left to yield via the `items` field.
+ // 2. The caller ensures that the table is alive and has not moved.
+ self.next_impl()
+ };
+
+ debug_assert!(nxt.is_some());
+ self.items -= 1;
+
+ nxt
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.items, Some(self.items))
+ }
+}
+
+impl ExactSizeIterator for FullBucketsIndices {}
+impl FusedIterator for FullBucketsIndices {}
+
/// Iterator which consumes a table and returns elements.
-pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
+pub struct RawIntoIter<T, A: Allocator = Global> {
iter: RawIter<T>,
- allocation: Option<(NonNull<u8>, Layout)>,
+ allocation: Option<(NonNull<u8>, Layout, A)>,
marker: PhantomData<T>,
- alloc: A,
}
-impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
+impl<T, A: Allocator> RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn iter(&self) -> RawIter<T> {
self.iter.clone()
}
}
-unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A>
+unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
where
T: Send,
A: Send,
{
}
-unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A>
+unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
where
T: Sync,
A: Sync,
@@ -2204,7 +4302,7 @@ where
}
#[cfg(feature = "nightly")]
-unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
@@ -2212,14 +4310,14 @@ unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
self.iter.drop_elements();
// Free the table
- if let Some((ptr, layout)) = self.allocation {
- self.alloc.deallocate(ptr, layout);
+ if let Some((ptr, layout, ref alloc)) = self.allocation {
+ alloc.deallocate(ptr, layout);
}
}
}
}
#[cfg(not(feature = "nightly"))]
-impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
+impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
@@ -2227,14 +4325,14 @@ impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
self.iter.drop_elements();
// Free the table
- if let Some((ptr, layout)) = self.allocation {
- self.alloc.deallocate(ptr, layout);
+ if let Some((ptr, layout, ref alloc)) = self.allocation {
+ alloc.deallocate(ptr, layout);
}
}
}
}
-impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
+impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
@@ -2248,45 +4346,45 @@ impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
}
}
-impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
-impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
+impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
+impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
/// Iterator which consumes elements without freeing the table storage.
-pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
+pub struct RawDrain<'a, T, A: Allocator = Global> {
iter: RawIter<T>,
// The table is moved into the iterator for the duration of the drain. This
// ensures that an empty table is left if the drain iterator is leaked
// without dropping.
- table: ManuallyDrop<RawTable<T, A>>,
- orig_table: NonNull<RawTable<T, A>>,
+ table: RawTableInner,
+ orig_table: NonNull<RawTableInner>,
// We don't use a &'a mut RawTable<T> because we want RawDrain to be
// covariant over T.
marker: PhantomData<&'a RawTable<T, A>>,
}
-impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
+impl<T, A: Allocator> RawDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn iter(&self) -> RawIter<T> {
self.iter.clone()
}
}
-unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A>
+unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
where
T: Send,
A: Send,
{
}
-unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A>
+unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
where
T: Sync,
A: Sync,
{
}
-impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
+impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
@@ -2300,12 +4398,12 @@ impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
// Move the now empty table back to its original location.
self.orig_table
.as_ptr()
- .copy_from_nonoverlapping(&*self.table, 1);
+ .copy_from_nonoverlapping(&self.table, 1);
}
}
}
-impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
+impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
@@ -2322,21 +4420,36 @@ impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
}
}
-impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {}
-impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
+impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
+impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
/// Iterator over occupied buckets that could match a given hash.
///
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
/// items that have a hash value different than the one provided. You should
/// always validate the returned values before using them.
-pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
- inner: RawIterHashInner<'a, A>,
+///
+/// For maximum flexibility this iterator is not bound by a lifetime, but you
+/// must observe several rules when using it:
+/// - You must not free the hash table while iterating (including via growing/shrinking).
+/// - It is fine to erase a bucket that has been yielded by the iterator.
+/// - Erasing a bucket that has not yet been yielded by the iterator may still
+/// result in the iterator yielding that bucket.
+/// - It is unspecified whether an element inserted after the iterator was
+/// created will be yielded by that iterator.
+/// - The order in which the iterator yields buckets is unspecified and may
+/// change in the future.
+pub struct RawIterHash<T> {
+ inner: RawIterHashInner,
_marker: PhantomData<T>,
}
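+
+// A minimal sketch of the validation pattern described above (illustrative
+// only; it assumes the `raw`-feature `RawTable::iter_hash` constructor, a
+// `(K, V)` entry type, and a `hash` computed the same way as on insertion):
+//
+//     // SAFETY: `table` outlives the iterator and is not resized in the loop.
+//     for bucket in unsafe { table.iter_hash(hash) } {
+//         let (k, _v) = unsafe { bucket.as_ref() };
+//         if *k == key {
+//             // Only the top 7 bits of the hash matched so far; this full key
+//             // comparison is what actually confirms the entry.
+//         }
+//     }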
-struct RawIterHashInner<'a, A: Allocator + Clone> {
- table: &'a RawTableInner<A>,
+struct RawIterHashInner {
+ // See `RawTableInner`'s corresponding fields for details.
+ // We can't store a `*const RawTableInner` as it would get
+ // invalidated by the user calling `&mut` methods on `RawTable`.
+ bucket_mask: usize,
+ ctrl: NonNull<u8>,
// The top 7 bits of the hash.
h2_hash: u8,
@@ -2350,71 +4463,105 @@ struct RawIterHashInner<'a, A: Allocator + Clone> {
bitmask: BitMaskIter,
}
-impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
+impl<T> RawIterHash<T> {
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
+ unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
RawIterHash {
inner: RawIterHashInner::new(&table.table, hash),
_marker: PhantomData,
}
}
}
-impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> {
+impl RawIterHashInner {
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- fn new(table: &'a RawTableInner<A>, hash: u64) -> Self {
- unsafe {
- let h2_hash = h2(hash);
- let probe_seq = table.probe_seq(hash);
- let group = Group::load(table.ctrl(probe_seq.pos));
- let bitmask = group.match_byte(h2_hash).into_iter();
-
- RawIterHashInner {
- table,
- h2_hash,
- probe_seq,
- group,
- bitmask,
- }
+ unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
+ let h2_hash = h2(hash);
+ let probe_seq = table.probe_seq(hash);
+ let group = Group::load(table.ctrl(probe_seq.pos));
+ let bitmask = group.match_byte(h2_hash).into_iter();
+
+ RawIterHashInner {
+ bucket_mask: table.bucket_mask,
+ ctrl: table.ctrl,
+ h2_hash,
+ probe_seq,
+ group,
+ bitmask,
}
}
}
-impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
+impl<T> Iterator for RawIterHash<T> {
type Item = Bucket<T>;
fn next(&mut self) -> Option<Bucket<T>> {
unsafe {
match self.inner.next() {
- Some(index) => Some(self.inner.table.bucket(index)),
+ Some(index) => {
+ // Can't use `RawTable::bucket` here as we don't have
+ // an actual `RawTable` reference to use.
+ debug_assert!(index <= self.inner.bucket_mask);
+ let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
+ Some(bucket)
+ }
None => None,
}
}
}
}
-impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
+impl Iterator for RawIterHashInner {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
loop {
if let Some(bit) = self.bitmask.next() {
- let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
+ let index = (self.probe_seq.pos + bit) & self.bucket_mask;
return Some(index);
}
if likely(self.group.match_empty().any_bit_set()) {
return None;
}
- self.probe_seq.move_next(self.table.bucket_mask);
- self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
+ self.probe_seq.move_next(self.bucket_mask);
+
+ // Can't use `RawTableInner::ctrl` here as we don't have
+ // an actual `RawTableInner` reference to use.
+ let index = self.probe_seq.pos;
+ debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
+ let group_ctrl = self.ctrl.as_ptr().add(index);
+
+ self.group = Group::load(group_ctrl);
self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
}
}
}
}
+pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
+ pub iter: RawIter<T>,
+ pub table: &'a mut RawTable<T, A>,
+}
+
+impl<T, A: Allocator> RawExtractIf<'_, T, A> {
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ unsafe {
+ for item in &mut self.iter {
+ if f(item.as_mut()) {
+ return Some(self.table.remove(item).0);
+ }
+ }
+ }
+ None
+ }
+}
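+
+// A minimal crate-internal usage sketch (illustrative only): drain the even
+// values out of a table while keeping its storage allocated. The hash of each
+// value is taken to be the value itself, as in the tests below.
+//
+//     let mut table = RawTable::new();
+//     for i in 0..8u64 {
+//         table.insert(i, i, |v| *v);
+//     }
+//     let mut extract = RawExtractIf {
+//         // SAFETY: `table` outlives the iterator and is only mutated through
+//         // `RawExtractIf::next` below.
+//         iter: unsafe { table.iter() },
+//         table: &mut table,
+//     };
+//     while let Some(v) = extract.next(|v| *v % 2 == 0) {
+//         assert_eq!(v % 2, 0);
+//     }
+//     assert_eq!(table.len(), 4);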
+
#[cfg(test)]
mod test_map {
use super::*;
@@ -2457,4 +4604,214 @@ mod test_map {
assert!(table.find(i + 100, |x| *x == i + 100).is_none());
}
}
+
+ /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
+ /// AN UNINITIALIZED TABLE DURING THE DROP
+ #[test]
+ fn test_drop_uninitialized() {
+ use ::alloc::vec::Vec;
+
+ let table = unsafe {
+            // SAFETY: The number of `buckets` is a power of two and we're not
+ // trying to actually use the returned RawTable.
+ RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
+ .unwrap()
+ };
+ drop(table);
+ }
+
+ /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
+ /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
+ #[test]
+ fn test_drop_zero_items() {
+ use ::alloc::vec::Vec;
+ unsafe {
+            // SAFETY: The number of `buckets` is a power of two and we're not
+ // trying to actually use the returned RawTable.
+ let table =
+ RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
+ .unwrap();
+
+ // WE SIMULATE, AS IT WERE, A FULL TABLE.
+
+ // SAFETY: We checked that the table is allocated and therefore the table already has
+ // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
+ // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
+ table
+ .table
+ .ctrl(0)
+ .write_bytes(EMPTY, table.table.num_ctrl_bytes());
+
+ // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
+ table.table.ctrl(0).write_bytes(0, table.capacity());
+
+ // Fix up the trailing control bytes. See the comments in set_ctrl
+ // for the handling of tables smaller than the group width.
+ if table.buckets() < Group::WIDTH {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
+ // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
+ // `Group::WIDTH` is safe
+ table
+ .table
+ .ctrl(0)
+ .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
+ } else {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
+                // control bytes, so copying `Group::WIDTH` bytes with offset equal
+ // to `self.buckets() == self.bucket_mask + 1` is safe
+ table
+ .table
+ .ctrl(0)
+ .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
+ }
+ drop(table);
+ }
+ }
+
+    /// CHECKING THAT WE DON'T LEAK MEMORY OR DOUBLE-DROP ELEMENTS WHEN A
+    /// CLONE PANICS INSIDE `clone_from`, AND THAT THE TABLE IS LEFT EMPTY.
+ #[test]
+ fn test_catch_panic_clone_from() {
+ use ::alloc::sync::Arc;
+ use ::alloc::vec::Vec;
+ use allocator_api2::alloc::{AllocError, Allocator, Global};
+ use core::sync::atomic::{AtomicI8, Ordering};
+ use std::thread;
+
+ struct MyAllocInner {
+ drop_count: Arc<AtomicI8>,
+ }
+
+ #[derive(Clone)]
+ struct MyAlloc {
+ _inner: Arc<MyAllocInner>,
+ }
+
+ impl Drop for MyAllocInner {
+ fn drop(&mut self) {
+ println!("MyAlloc freed.");
+ self.drop_count.fetch_sub(1, Ordering::SeqCst);
+ }
+ }
+
+ unsafe impl Allocator for MyAlloc {
+ fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
+ let g = Global;
+ g.allocate(layout)
+ }
+
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ let g = Global;
+ g.deallocate(ptr, layout)
+ }
+ }
+
+ const DISARMED: bool = false;
+ const ARMED: bool = true;
+
+ struct CheckedCloneDrop {
+ panic_in_clone: bool,
+ dropped: bool,
+ need_drop: Vec<u64>,
+ }
+
+ impl Clone for CheckedCloneDrop {
+ fn clone(&self) -> Self {
+ if self.panic_in_clone {
+ panic!("panic in clone")
+ }
+ Self {
+ panic_in_clone: self.panic_in_clone,
+ dropped: self.dropped,
+ need_drop: self.need_drop.clone(),
+ }
+ }
+ }
+
+ impl Drop for CheckedCloneDrop {
+ fn drop(&mut self) {
+ if self.dropped {
+ panic!("double drop");
+ }
+ self.dropped = true;
+ }
+ }
+
+ let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+ let mut table = RawTable::new_in(MyAlloc {
+ _inner: Arc::new(MyAllocInner {
+ drop_count: dropped.clone(),
+ }),
+ });
+
+ for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
+ let idx = idx as u64;
+ table.insert(
+ idx,
+ (
+ idx,
+ CheckedCloneDrop {
+ panic_in_clone,
+ dropped: false,
+ need_drop: vec![idx],
+ },
+ ),
+ |(k, _)| *k,
+ );
+ }
+
+ assert_eq!(table.len(), 7);
+
+ thread::scope(|s| {
+ let result = s.spawn(|| {
+ let armed_flags = [
+ DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+ ];
+ let mut scope_table = RawTable::new_in(MyAlloc {
+ _inner: Arc::new(MyAllocInner {
+ drop_count: dropped.clone(),
+ }),
+ });
+ for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
+ let idx = idx as u64;
+ scope_table.insert(
+ idx,
+ (
+ idx,
+ CheckedCloneDrop {
+ panic_in_clone,
+ dropped: false,
+ need_drop: vec![idx + 100],
+ },
+ ),
+ |(k, _)| *k,
+ );
+ }
+ table.clone_from(&scope_table);
+ });
+ assert!(result.join().is_err());
+ });
+
+ // Let's check that all iterators work fine and do not return elements
+ // (especially `RawIterRange`, which does not depend on the number of
+ // elements in the table, but looks directly at the control bytes)
+ //
+ // SAFETY: We know for sure that `RawTable` will outlive
+ // the returned `RawIter / RawIterRange` iterator.
+ assert_eq!(table.len(), 0);
+ assert_eq!(unsafe { table.iter().count() }, 0);
+ assert_eq!(unsafe { table.iter().iter.count() }, 0);
+
+ for idx in 0..table.buckets() {
+ let idx = idx as u64;
+ assert!(
+ table.find(idx, |(k, _)| *k == idx).is_none(),
+ "Index: {idx}"
+ );
+ }
+
+        // Only `table`'s own allocator should still be alive at this point;
+        // the allocator used by `scope_table` must already have been dropped.
+ assert_eq!(dropped.load(Ordering::SeqCst), 1);
+ }
}
diff --git a/third_party/rust/hashbrown/src/raw/neon.rs b/third_party/rust/hashbrown/src/raw/neon.rs
new file mode 100644
index 0000000000..44e82d57d5
--- /dev/null
+++ b/third_party/rust/hashbrown/src/raw/neon.rs
@@ -0,0 +1,124 @@
+use super::bitmask::BitMask;
+use super::EMPTY;
+use core::arch::aarch64 as neon;
+use core::mem;
+use core::num::NonZeroU64;
+
+pub(crate) type BitMaskWord = u64;
+pub(crate) type NonZeroBitMaskWord = NonZeroU64;
+pub(crate) const BITMASK_STRIDE: usize = 8;
+pub(crate) const BITMASK_MASK: BitMaskWord = !0;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080;
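+
+// Note on the mask encoding: unlike SSE2, NEON has no movemask instruction,
+// so the comparisons below produce a full 0xFF/0x00 byte per lane. Each
+// control byte therefore owns 8 bits of the `u64` mask (hence
+// `BITMASK_STRIDE = 8`), and `BITMASK_ITER_MASK` keeps only the high bit of
+// each lane so that iteration visits every match exactly once.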
+
+/// Abstraction over a group of control bytes which can be scanned in
+/// parallel.
+///
+/// This implementation uses a 64-bit NEON value.
+#[derive(Copy, Clone)]
+pub(crate) struct Group(neon::uint8x8_t);
+
+#[allow(clippy::use_self)]
+impl Group {
+ /// Number of bytes in the group.
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
+
+ /// Returns a full group of empty bytes, suitable for use as the initial
+ /// value for an empty hash table.
+ ///
+ /// This is guaranteed to be aligned to the group size.
+ #[inline]
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ #[repr(C)]
+ struct AlignedBytes {
+ _align: [Group; 0],
+ bytes: [u8; Group::WIDTH],
+ }
+ const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
+ _align: [],
+ bytes: [EMPTY; Group::WIDTH],
+ };
+ &ALIGNED_BYTES.bytes
+ }
+
+ /// Loads a group of bytes starting at the given address.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)] // unaligned load
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
+ Group(neon::vld1_u8(ptr))
+ }
+
+ /// Loads a group of bytes starting at the given address, which must be
+ /// aligned to `mem::align_of::<Group>()`.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
+ // FIXME: use align_offset once it stabilizes
+ debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
+ Group(neon::vld1_u8(ptr))
+ }
+
+ /// Stores the group of bytes to the given address, which must be
+ /// aligned to `mem::align_of::<Group>()`.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
+ // FIXME: use align_offset once it stabilizes
+ debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
+ neon::vst1_u8(ptr, self.0);
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which *may*
+ /// have the given value.
+ #[inline]
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
+ unsafe {
+ let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(byte));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are
+ /// `EMPTY`.
+ #[inline]
+ pub(crate) fn match_empty(self) -> BitMask {
+ self.match_byte(EMPTY)
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are
+ /// `EMPTY` or `DELETED`.
+ #[inline]
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
+ unsafe {
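+            // `EMPTY` (0xFF) and `DELETED` (0x80) both have the top bit set,
+            // while full buckets store a 7-bit hash with the top bit clear, so
+            // a signed "< 0" comparison selects exactly the special bytes.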
+ let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are full.
+ #[inline]
+ pub(crate) fn match_full(self) -> BitMask {
+ unsafe {
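+            // The inverse of the check above: a signed ">= 0" comparison keeps
+            // the bytes whose top bit is clear, i.e. the full buckets.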
+ let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Performs the following transformation on all bytes in the group:
+ /// - `EMPTY => EMPTY`
+ /// - `DELETED => EMPTY`
+ /// - `FULL => DELETED`
+ #[inline]
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
+ // and high_bit = 0 (FULL) to 1000_0000
+ //
+ // Here's this logic expanded to concrete values:
+ // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false)
+ // 1111_1111 | 1000_0000 = 1111_1111
+ // 0000_0000 | 1000_0000 = 1000_0000
+ unsafe {
+ let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
+ Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80)))
+ }
+ }
+}
diff --git a/third_party/rust/hashbrown/src/raw/sse2.rs b/third_party/rust/hashbrown/src/raw/sse2.rs
index a0bf6da804..956ba5d265 100644
--- a/third_party/rust/hashbrown/src/raw/sse2.rs
+++ b/third_party/rust/hashbrown/src/raw/sse2.rs
@@ -1,28 +1,31 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::mem;
+use core::num::NonZeroU16;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as x86;
-pub type BitMaskWord = u16;
-pub const BITMASK_STRIDE: usize = 1;
-pub const BITMASK_MASK: BitMaskWord = 0xffff;
+pub(crate) type BitMaskWord = u16;
+pub(crate) type NonZeroBitMaskWord = NonZeroU16;
+pub(crate) const BITMASK_STRIDE: usize = 1;
+pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit SSE value.
#[derive(Copy, Clone)]
-pub struct Group(x86::__m128i);
+pub(crate) struct Group(x86::__m128i);
// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
- pub const WIDTH: usize = mem::size_of::<Self>();
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
@@ -30,7 +33,7 @@ impl Group {
/// This is guaranteed to be aligned to the group size.
#[inline]
#[allow(clippy::items_after_statements)]
- pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
@@ -46,7 +49,7 @@ impl Group {
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
- pub unsafe fn load(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
Group(x86::_mm_loadu_si128(ptr.cast()))
}
@@ -54,7 +57,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(x86::_mm_load_si128(ptr.cast()))
@@ -64,7 +67,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn store_aligned(self, ptr: *mut u8) {
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
x86::_mm_store_si128(ptr.cast(), self.0);
@@ -73,7 +76,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which have
/// the given value.
#[inline]
- pub fn match_byte(self, byte: u8) -> BitMask {
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
#[allow(
clippy::cast_possible_wrap, // byte: u8 as i8
// byte: i32 as u16
@@ -91,14 +94,14 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
- pub fn match_empty(self) -> BitMask {
+ pub(crate) fn match_empty(self) -> BitMask {
self.match_byte(EMPTY)
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
- pub fn match_empty_or_deleted(self) -> BitMask {
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
#[allow(
// byte: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
@@ -114,7 +117,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
- pub fn match_full(&self) -> BitMask {
+ pub(crate) fn match_full(&self) -> BitMask {
self.match_empty_or_deleted().invert()
}
@@ -123,7 +126,7 @@ impl Group {
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
- pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//