| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-04 12:47:55 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-04 12:47:55 +0000 |
| commit | 2aadc03ef15cb5ca5cc2af8a7c08e070742f0ac4 (patch) | |
| tree | 033cc839730fda84ff08db877037977be94e5e3a /vendor/hashbrown/src/raw/bitmask.rs | |
| parent | Initial commit. (diff) | |
Adding upstream version 0.70.1+ds1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/hashbrown/src/raw/bitmask.rs')
-rw-r--r-- | vendor/hashbrown/src/raw/bitmask.rs | 133 |
1 file changed, 133 insertions, 0 deletions
diff --git a/vendor/hashbrown/src/raw/bitmask.rs b/vendor/hashbrown/src/raw/bitmask.rs
new file mode 100644
index 0000000..6576b3c
--- /dev/null
+++ b/vendor/hashbrown/src/raw/bitmask.rs
@@ -0,0 +1,133 @@
+use super::imp::{
+    BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE,
+};
+
+/// A bit mask which contains the result of a `Match` operation on a `Group` and
+/// allows iterating through them.
+///
+/// The bit mask is arranged so that low-order bits represent lower memory
+/// addresses for group match results.
+///
+/// For implementation reasons, the bits in the set may be sparsely packed with
+/// groups of 8 bits representing one element. If any of these bits are non-zero
+/// then this element is considered to be true in the mask. If this is the
+/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
+/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
+/// similarly a mask of all the actually-used bits.
+///
+/// To iterate over a bit mask, it must be converted to a form where only 1 bit
+/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the
+/// mask bits.
+#[derive(Copy, Clone)]
+pub(crate) struct BitMask(pub(crate) BitMaskWord);
+
+#[allow(clippy::use_self)]
+impl BitMask {
+    /// Returns a new `BitMask` with all bits inverted.
+    #[inline]
+    #[must_use]
+    #[allow(dead_code)]
+    pub(crate) fn invert(self) -> Self {
+        BitMask(self.0 ^ BITMASK_MASK)
+    }
+
+    /// Returns a new `BitMask` with the lowest bit removed.
+    #[inline]
+    #[must_use]
+    fn remove_lowest_bit(self) -> Self {
+        BitMask(self.0 & (self.0 - 1))
+    }
+
+    /// Returns whether the `BitMask` has at least one set bit.
+    #[inline]
+    pub(crate) fn any_bit_set(self) -> bool {
+        self.0 != 0
+    }
+
+    /// Returns the first set bit in the `BitMask`, if there is one.
+    #[inline]
+    pub(crate) fn lowest_set_bit(self) -> Option<usize> {
+        if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) {
+            Some(Self::nonzero_trailing_zeros(nonzero))
+        } else {
+            None
+        }
+    }
+
+    /// Returns the number of trailing zeroes in the `BitMask`.
+    #[inline]
+    pub(crate) fn trailing_zeros(self) -> usize {
+        // ARM doesn't have a trailing_zeroes instruction, and instead uses
+        // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
+        // versions (pre-ARMv7) don't have RBIT and need to emulate it
+        // instead. Since we only have 1 bit set in each byte on ARM, we can
+        // use swap_bytes (REV) + leading_zeroes instead.
+        if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
+            self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE
+        } else {
+            self.0.trailing_zeros() as usize / BITMASK_STRIDE
+        }
+    }
+
+    /// Same as above but takes a `NonZeroBitMaskWord`.
+    #[inline]
+    fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize {
+        if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
+            // SAFETY: A byte-swapped non-zero value is still non-zero.
+            let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) };
+            swapped.leading_zeros() as usize / BITMASK_STRIDE
+        } else {
+            nonzero.trailing_zeros() as usize / BITMASK_STRIDE
+        }
+    }
+
+    /// Returns the number of leading zeroes in the `BitMask`.
+    #[inline]
+    pub(crate) fn leading_zeros(self) -> usize {
+        self.0.leading_zeros() as usize / BITMASK_STRIDE
+    }
+}
+
+impl IntoIterator for BitMask {
+    type Item = usize;
+    type IntoIter = BitMaskIter;
+
+    #[inline]
+    fn into_iter(self) -> BitMaskIter {
+        // A BitMask only requires each element (group of bits) to be non-zero.
+        // However for iteration we need each element to only contain 1 bit.
+        BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK))
+    }
+}
+
+/// Iterator over the contents of a `BitMask`, returning the indices of set
+/// bits.
+#[derive(Copy, Clone)]
+pub(crate) struct BitMaskIter(pub(crate) BitMask);
+
+impl BitMaskIter {
+    /// Flip the bit in the mask for the entry at the given index.
+    ///
+    /// Returns the bit's previous state.
+    #[inline]
+    #[allow(clippy::cast_ptr_alignment)]
+    #[cfg(feature = "raw")]
+    pub(crate) unsafe fn flip(&mut self, index: usize) -> bool {
+        // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
+        let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
+        self.0 .0 ^= mask;
+        // The bit was set if the bit is now 0.
+        self.0 .0 & mask == 0
+    }
+}
+
+impl Iterator for BitMaskIter {
+    type Item = usize;
+
+    #[inline]
+    fn next(&mut self) -> Option<usize> {
+        let bit = self.0.lowest_set_bit()?;
+        self.0 = self.0.remove_lowest_bit();
+        Some(bit)
+    }
+}
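The doc comment in the added file describes a stride-8 layout in which each group slot occupies one byte of the mask word, and `BITMASK_ITER_MASK` reduces each byte to a single bit so that `trailing_zeros / BITMASK_STRIDE` maps directly to a slot index. The following standalone sketch illustrates that idea; the constants and the `iter_indices` helper are illustrative assumptions mirroring the byte-per-slot case, not hashbrown's actual items.

```rust
// Minimal sketch of the stride-8 bitmask layout described above.
// These constants are assumptions for illustration, not hashbrown's.
const BITMASK_STRIDE: usize = 8;
const BITMASK_ITER_MASK: u64 = 0x8080_8080_8080_8080; // keep one bit per byte

/// Hypothetical helper: yields the slot indices whose byte is non-zero,
/// lowest memory address first.
fn iter_indices(mut word: u64) -> impl Iterator<Item = usize> {
    // Reduce each 8-bit element to at most one set bit so that
    // `trailing_zeros / BITMASK_STRIDE` maps cleanly to a slot index.
    word &= BITMASK_ITER_MASK;
    std::iter::from_fn(move || {
        if word == 0 {
            return None;
        }
        let index = word.trailing_zeros() as usize / BITMASK_STRIDE;
        word &= word - 1; // clear the lowest set bit, i.e. consume this element
        Some(index)
    })
}

fn main() {
    // Slots 0, 3 and 5 matched: their bytes are 0xFF, everything else is 0.
    let word: u64 = 0x0000_FF00_FF00_00FF;
    let hits: Vec<usize> = iter_indices(word).collect();
    assert_eq!(hits, vec![0, 3, 5]);

    // The ARM path in `trailing_zeros` relies on the same layout: with at
    // most one bit set per byte, byte-swapping and counting leading zeros
    // selects the same slot as counting trailing zeros directly.
    let masked = word & BITMASK_ITER_MASK;
    assert_eq!(
        masked.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE,
        masked.trailing_zeros() as usize / BITMASK_STRIDE,
    );
    println!("matching slots: {hits:?}");
}
```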