path: root/compiler/rustc_index
Diffstat (limited to 'compiler/rustc_index')
-rw-r--r--   compiler/rustc_index/Cargo.toml              |   13
-rw-r--r--   compiler/rustc_index/src/bit_set.rs          | 2098
-rw-r--r--   compiler/rustc_index/src/bit_set/tests.rs    |  873
-rw-r--r--   compiler/rustc_index/src/interval.rs         |  305
-rw-r--r--   compiler/rustc_index/src/interval/tests.rs   |  199
-rw-r--r--   compiler/rustc_index/src/lib.rs              |   23
-rw-r--r--   compiler/rustc_index/src/vec.rs              |  409
-rw-r--r--   compiler/rustc_index/src/vec/tests.rs        |   55
8 files changed, 3975 insertions, 0 deletions
diff --git a/compiler/rustc_index/Cargo.toml b/compiler/rustc_index/Cargo.toml
new file mode 100644
index 000000000..8a81a93a9
--- /dev/null
+++ b/compiler/rustc_index/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "rustc_index"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+arrayvec = { version = "0.7", default-features = false }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_macros = { path = "../rustc_macros" }
+smallvec = "1.8.1"
diff --git a/compiler/rustc_index/src/bit_set.rs b/compiler/rustc_index/src/bit_set.rs
new file mode 100644
index 000000000..777112442
--- /dev/null
+++ b/compiler/rustc_index/src/bit_set.rs
@@ -0,0 +1,2098 @@
+use crate::vec::{Idx, IndexVec};
+use arrayvec::ArrayVec;
+use std::fmt;
+use std::iter;
+use std::marker::PhantomData;
+use std::mem;
+use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
+use std::rc::Rc;
+use std::slice;
+
+use rustc_macros::{Decodable, Encodable};
+
+use Chunk::*;
+
+#[cfg(test)]
+mod tests;
+
+type Word = u64;
+const WORD_BYTES: usize = mem::size_of::<Word>();
+const WORD_BITS: usize = WORD_BYTES * 8;
+
+// The choice of chunk size has some trade-offs.
+//
+// A big chunk size tends to favour cases where many large `ChunkedBitSet`s are
+// present, because they require fewer `Chunk`s, reducing the number of
+// allocations and reducing peak memory usage. Also, fewer chunk operations are
+// required, though more of them might be `Mixed`.
+//
+// A small chunk size tends to favour cases where many small `ChunkedBitSet`s
+// are present, because less space is wasted at the end of the final chunk (if
+// it's not full).
+const CHUNK_WORDS: usize = 32;
+const CHUNK_BITS: usize = CHUNK_WORDS * WORD_BITS; // 2048 bits
+
+/// ChunkSize is small to keep `Chunk` small. The static assertion ensures it's
+/// not too small.
+type ChunkSize = u16;
+const _: () = assert!(CHUNK_BITS <= ChunkSize::MAX as usize);
+
+pub trait BitRelations<Rhs> {
+ fn union(&mut self, other: &Rhs) -> bool;
+ fn subtract(&mut self, other: &Rhs) -> bool;
+ fn intersect(&mut self, other: &Rhs) -> bool;
+}
+
+#[inline]
+fn inclusive_start_end<T: Idx>(
+ range: impl RangeBounds<T>,
+ domain: usize,
+) -> Option<(usize, usize)> {
+ // Both start and end are inclusive.
+ let start = match range.start_bound().cloned() {
+ Bound::Included(start) => start.index(),
+ Bound::Excluded(start) => start.index() + 1,
+ Bound::Unbounded => 0,
+ };
+ let end = match range.end_bound().cloned() {
+ Bound::Included(end) => end.index(),
+ Bound::Excluded(end) => end.index().checked_sub(1)?,
+ Bound::Unbounded => domain - 1,
+ };
+ assert!(end < domain);
+ if start > end {
+ return None;
+ }
+ Some((start, end))
+}
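+// Worked example: for the range `3..7` over a domain of 10, the bounds
+// normalize to the inclusive pair `(3, 6)`; an empty range such as `5..5`
+// yields `None`.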
+
+macro_rules! bit_relations_inherent_impls {
+ () => {
+ /// Sets `self = self | other` and returns `true` if `self` changed
+ /// (i.e., if new bits were added).
+ pub fn union<Rhs>(&mut self, other: &Rhs) -> bool
+ where
+ Self: BitRelations<Rhs>,
+ {
+ <Self as BitRelations<Rhs>>::union(self, other)
+ }
+
+ /// Sets `self = self - other` and returns `true` if `self` changed
+ /// (i.e., if any bits were removed).
+ pub fn subtract<Rhs>(&mut self, other: &Rhs) -> bool
+ where
+ Self: BitRelations<Rhs>,
+ {
+ <Self as BitRelations<Rhs>>::subtract(self, other)
+ }
+
+ /// Sets `self = self & other` and returns `true` if `self` changed
+ /// (i.e., if any bits were removed).
+ pub fn intersect<Rhs>(&mut self, other: &Rhs) -> bool
+ where
+ Self: BitRelations<Rhs>,
+ {
+ <Self as BitRelations<Rhs>>::intersect(self, other)
+ }
+ };
+}
+
+/// A fixed-size bitset type with a dense representation.
+///
+/// NOTE: Use [`GrowableBitSet`] if you need support for resizing after creation.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// All operations that involve an element will panic if the element is equal
+/// to or greater than the domain size. All operations that involve two bitsets
+/// will panic if the bitsets have differing domain sizes.
+///
+#[derive(Eq, PartialEq, Hash, Decodable, Encodable)]
+pub struct BitSet<T> {
+ domain_size: usize,
+ words: Vec<Word>,
+ marker: PhantomData<T>,
+}
+
+impl<T> BitSet<T> {
+ /// Gets the domain size.
+ pub fn domain_size(&self) -> usize {
+ self.domain_size
+ }
+}
+
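+// A minimal usage sketch, assuming the `Idx` impl for plain `usize` indices:
+//
+//     let mut set: BitSet<usize> = BitSet::new_empty(128);
+//     assert!(set.insert(7));    // newly inserted, so `true`
+//     assert!(!set.insert(7));   // already present, so `false`
+//     assert!(set.contains(7));
+//     assert!(set.remove(7));
+//     assert!(set.is_empty());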
+impl<T: Idx> BitSet<T> {
+ /// Creates a new, empty bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_empty(domain_size: usize) -> BitSet<T> {
+ let num_words = num_words(domain_size);
+ BitSet { domain_size, words: vec![0; num_words], marker: PhantomData }
+ }
+
+ /// Creates a new, filled bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_filled(domain_size: usize) -> BitSet<T> {
+ let num_words = num_words(domain_size);
+ let mut result = BitSet { domain_size, words: vec![!0; num_words], marker: PhantomData };
+ result.clear_excess_bits();
+ result
+ }
+
+ /// Clear all elements.
+ #[inline]
+ pub fn clear(&mut self) {
+ self.words.fill(0);
+ }
+
+ /// Clear excess bits in the final word.
+ fn clear_excess_bits(&mut self) {
+ clear_excess_bits_in_final_word(self.domain_size, &mut self.words);
+ }
+
+ /// Count the number of set bits in the set.
+ pub fn count(&self) -> usize {
+ self.words.iter().map(|e| e.count_ones() as usize).sum()
+ }
+
+ /// Returns `true` if `self` contains `elem`.
+ #[inline]
+ pub fn contains(&self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let (word_index, mask) = word_index_and_mask(elem);
+ (self.words[word_index] & mask) != 0
+ }
+
+ /// Is `self` a (non-strict) superset of `other`?
+ #[inline]
+ pub fn superset(&self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ self.words.iter().zip(&other.words).all(|(a, b)| (a & b) == *b)
+ }
+
+ /// Is the set empty?
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.words.iter().all(|a| *a == 0)
+ }
+
+ /// Insert `elem`. Returns whether the set has changed.
+ #[inline]
+ pub fn insert(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let (word_index, mask) = word_index_and_mask(elem);
+ let word_ref = &mut self.words[word_index];
+ let word = *word_ref;
+ let new_word = word | mask;
+ *word_ref = new_word;
+ new_word != word
+ }
+
+ #[inline]
+ pub fn insert_range(&mut self, elems: impl RangeBounds<T>) {
+ let Some((start, end)) = inclusive_start_end(elems, self.domain_size) else {
+ return;
+ };
+
+ let (start_word_index, start_mask) = word_index_and_mask(start);
+ let (end_word_index, end_mask) = word_index_and_mask(end);
+
+ // Set all words in between start and end (exclusively of both).
+ for word_index in (start_word_index + 1)..end_word_index {
+ self.words[word_index] = !0;
+ }
+
+ if start_word_index != end_word_index {
+ // Start and end are in different words, so we handle each in turn.
+ //
+ // We set all leading bits. This includes the start_mask bit.
+ self.words[start_word_index] |= !(start_mask - 1);
+ // And all trailing bits (i.e. from 0..=end) in the end word,
+ // including the end.
+ self.words[end_word_index] |= end_mask | (end_mask - 1);
+ } else {
+ self.words[start_word_index] |= end_mask | (end_mask - start_mask);
+ }
+ }
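+ // Worked example for `insert_range`: on a 192-bit set, `insert_range(3..=130)`
+ // fills bits 3..=63 of word 0, all of word 1, and bits 0..=2 of word 2.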
+
+ /// Sets all bits to true.
+ pub fn insert_all(&mut self) {
+ self.words.fill(!0);
+ self.clear_excess_bits();
+ }
+
+ /// Returns `true` if the set has changed.
+ #[inline]
+ pub fn remove(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let (word_index, mask) = word_index_and_mask(elem);
+ let word_ref = &mut self.words[word_index];
+ let word = *word_ref;
+ let new_word = word & !mask;
+ *word_ref = new_word;
+ new_word != word
+ }
+
+ /// Gets a slice of the underlying words.
+ pub fn words(&self) -> &[Word] {
+ &self.words
+ }
+
+ /// Iterates over the indices of set bits in a sorted order.
+ #[inline]
+ pub fn iter(&self) -> BitIter<'_, T> {
+ BitIter::new(&self.words)
+ }
+
+ /// Duplicates the set as a hybrid set.
+ pub fn to_hybrid(&self) -> HybridBitSet<T> {
+ // Note: we currently don't bother trying to make a Sparse set.
+ HybridBitSet::Dense(self.to_owned())
+ }
+
+ /// Sets `self = self | other`. In contrast to `union`, this returns `true` if `self`
+ /// contains at least one bit that is not in `other` (i.e. `other` is not a superset of `self`).
+ ///
+ /// This is an optimization for union of a hybrid bitset.
+ fn reverse_union_sparse(&mut self, sparse: &SparseBitSet<T>) -> bool {
+ assert!(sparse.domain_size == self.domain_size);
+ self.clear_excess_bits();
+
+ let mut not_already = false;
+ // Index of the current word not yet merged.
+ let mut current_index = 0;
+ // Mask of bits that came from the sparse set in the current word.
+ let mut new_bit_mask = 0;
+ for (word_index, mask) in sparse.iter().map(|x| word_index_and_mask(*x)) {
+ // Next bit is in a word not inspected yet.
+ if word_index > current_index {
+ self.words[current_index] |= new_bit_mask;
+ // Were there any bits in the old word that did not occur in the sparse set?
+ not_already |= (self.words[current_index] ^ new_bit_mask) != 0;
+ // Check all words we skipped for any set bit.
+ not_already |= self.words[current_index + 1..word_index].iter().any(|&x| x != 0);
+ // Update next word.
+ current_index = word_index;
+ // Reset bit mask, no bits have been merged yet.
+ new_bit_mask = 0;
+ }
+ // Add bit and mark it as coming from the sparse set.
+ // self.words[word_index] |= mask;
+ new_bit_mask |= mask;
+ }
+ self.words[current_index] |= new_bit_mask;
+ // Any bits in the last inspected word that were not in the sparse set?
+ not_already |= (self.words[current_index] ^ new_bit_mask) != 0;
+ // Any bits in the tail? Note `clear_excess_bits` before.
+ not_already |= self.words[current_index + 1..].iter().any(|&x| x != 0);
+
+ not_already
+ }
+
+ fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
+ let (start, end) = inclusive_start_end(range, self.domain_size)?;
+ let (start_word_index, _) = word_index_and_mask(start);
+ let (end_word_index, end_mask) = word_index_and_mask(end);
+
+ let end_word = self.words[end_word_index] & (end_mask | (end_mask - 1));
+ if end_word != 0 {
+ let pos = max_bit(end_word) + WORD_BITS * end_word_index;
+ if start <= pos {
+ return Some(T::new(pos));
+ }
+ }
+
+ // We exclude end_word_index from the range here, because we don't want
+ // to limit ourselves to *just* the last word: the bits set in it may be
+ // after `end`, so it may not work out.
+ if let Some(offset) =
+ self.words[start_word_index..end_word_index].iter().rposition(|&w| w != 0)
+ {
+ let word_idx = start_word_index + offset;
+ let start_word = self.words[word_idx];
+ let pos = max_bit(start_word) + WORD_BITS * word_idx;
+ if start <= pos {
+ return Some(T::new(pos));
+ }
+ }
+
+ None
+ }
+
+ bit_relations_inherent_impls! {}
+}
+
+// dense REL dense
+impl<T: Idx> BitRelations<BitSet<T>> for BitSet<T> {
+ fn union(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ bitwise(&mut self.words, &other.words, |a, b| a | b)
+ }
+
+ fn subtract(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ bitwise(&mut self.words, &other.words, |a, b| a & !b)
+ }
+
+ fn intersect(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ bitwise(&mut self.words, &other.words, |a, b| a & b)
+ }
+}
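+// Sketch of the relation methods on two same-sized dense sets; each call
+// returns whether the left-hand set changed:
+//
+//     let mut a: BitSet<usize> = BitSet::new_empty(64);
+//     let mut b: BitSet<usize> = BitSet::new_empty(64);
+//     a.insert(1);
+//     b.insert(1);
+//     b.insert(2);
+//     assert!(a.union(&b));      // `a` gains bit 2
+//     assert!(a.subtract(&b));   // `a` loses bits 1 and 2
+//     assert!(!a.intersect(&b)); // `a` is already empty, so no change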
+
+impl<T: Idx> From<GrowableBitSet<T>> for BitSet<T> {
+ fn from(bit_set: GrowableBitSet<T>) -> Self {
+ bit_set.bit_set
+ }
+}
+
+/// A fixed-size bitset type with a partially dense, partially sparse
+/// representation. The bitset is broken into chunks, and chunks that are all
+/// zeros or all ones are represented and handled very efficiently.
+///
+/// This type is especially efficient for sets that typically have a large
+/// `domain_size` with significant stretches of all zeros or all ones, and also
+/// some stretches with lots of 0s and 1s mixed in a way that causes trouble
+/// for `IntervalSet`.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// All operations that involve an element will panic if the element is equal
+/// to or greater than the domain size. All operations that involve two bitsets
+/// will panic if the bitsets have differing domain sizes.
+#[derive(Debug, PartialEq, Eq)]
+pub struct ChunkedBitSet<T> {
+ domain_size: usize,
+
+ /// The chunks. Each one contains exactly CHUNK_BITS values, except the
+ /// last one which contains 1..=CHUNK_BITS values.
+ chunks: Box<[Chunk]>,
+
+ marker: PhantomData<T>,
+}
+
+// Note: the chunk domain size is duplicated in each variant. This is a bit
+// inconvenient, but it allows the type size to be smaller than if we had an
+// outer struct containing a chunk domain size plus the `Chunk`, because the
+// compiler can place the chunk domain size after the tag.
+#[derive(Clone, Debug, PartialEq, Eq)]
+enum Chunk {
+ /// A chunk that is all zeros; we don't represent the zeros explicitly.
+ Zeros(ChunkSize),
+
+ /// A chunk that is all ones; we don't represent the ones explicitly.
+ Ones(ChunkSize),
+
+ /// A chunk that has a mix of zeros and ones, which are represented
+ /// explicitly and densely. It never has all zeros or all ones.
+ ///
+ /// If this is the final chunk there may be excess, unused words. This
+ /// turns out to be both simpler and have better performance than
+ /// allocating the minimum number of words, largely because we avoid having
+ /// to store the length, which would make this type larger. These excess
+ /// words are always zero, as are any excess bits in the final in-use
+ /// word.
+ ///
+ /// The second field is the count of 1s set in the chunk, and must satisfy
+ /// `0 < count < chunk_domain_size`.
+ ///
+ /// The words are within an `Rc` because it's surprisingly common to
+ /// duplicate an entire chunk, e.g. in `ChunkedBitSet::clone_from()`, or
+ /// when a `Mixed` chunk is union'd into a `Zeros` chunk. When we do need
+ /// to modify a chunk we use `Rc::make_mut`.
+ Mixed(ChunkSize, ChunkSize, Rc<[Word; CHUNK_WORDS]>),
+}
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+crate::static_assert_size!(Chunk, 16);
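+// Size arithmetic behind the assertion above, assuming a 64-bit target: the
+// largest variant, `Mixed`, holds two `ChunkSize` (`u16`) fields plus an `Rc`
+// pointer (8 bytes), which with alignment and the discriminant fits in 16
+// bytes. The words themselves live behind the `Rc`: CHUNK_WORDS (32) * 8
+// bytes = 256 bytes per mixed chunk, covering CHUNK_BITS = 32 * 64 = 2048
+// bits.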
+
+impl<T> ChunkedBitSet<T> {
+ pub fn domain_size(&self) -> usize {
+ self.domain_size
+ }
+
+ #[cfg(test)]
+ fn assert_valid(&self) {
+ if self.domain_size == 0 {
+ assert!(self.chunks.is_empty());
+ return;
+ }
+
+ assert!((self.chunks.len() - 1) * CHUNK_BITS <= self.domain_size);
+ assert!(self.chunks.len() * CHUNK_BITS >= self.domain_size);
+ for chunk in self.chunks.iter() {
+ chunk.assert_valid();
+ }
+ }
+}
+
+impl<T: Idx> ChunkedBitSet<T> {
+ /// Creates a new bitset with a given `domain_size` and chunk kind.
+ fn new(domain_size: usize, is_empty: bool) -> Self {
+ let chunks = if domain_size == 0 {
+ Box::new([])
+ } else {
+ // All the chunks have a chunk_domain_size of `CHUNK_BITS` except
+ // the final one.
+ let final_chunk_domain_size = {
+ let n = domain_size % CHUNK_BITS;
+ if n == 0 { CHUNK_BITS } else { n }
+ };
+ let mut chunks =
+ vec![Chunk::new(CHUNK_BITS, is_empty); num_chunks(domain_size)].into_boxed_slice();
+ *chunks.last_mut().unwrap() = Chunk::new(final_chunk_domain_size, is_empty);
+ chunks
+ };
+ ChunkedBitSet { domain_size, chunks, marker: PhantomData }
+ }
+
+ /// Creates a new, empty bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_empty(domain_size: usize) -> Self {
+ ChunkedBitSet::new(domain_size, /* is_empty */ true)
+ }
+
+ /// Creates a new, filled bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_filled(domain_size: usize) -> Self {
+ ChunkedBitSet::new(domain_size, /* is_empty */ false)
+ }
+
+ #[cfg(test)]
+ fn chunks(&self) -> &[Chunk] {
+ &self.chunks
+ }
+
+ /// Count the number of bits in the set.
+ pub fn count(&self) -> usize {
+ self.chunks.iter().map(|chunk| chunk.count()).sum()
+ }
+
+ /// Returns `true` if `self` contains `elem`.
+ #[inline]
+ pub fn contains(&self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let chunk = &self.chunks[chunk_index(elem)];
+ match &chunk {
+ Zeros(_) => false,
+ Ones(_) => true,
+ Mixed(_, _, words) => {
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ (words[word_index] & mask) != 0
+ }
+ }
+ }
+
+ #[inline]
+ pub fn iter(&self) -> ChunkedBitIter<'_, T> {
+ ChunkedBitIter::new(self)
+ }
+
+ /// Insert `elem`. Returns whether the set has changed.
+ pub fn insert(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let chunk_index = chunk_index(elem);
+ let chunk = &mut self.chunks[chunk_index];
+ match *chunk {
+ Zeros(chunk_domain_size) => {
+ if chunk_domain_size > 1 {
+ // We take some effort to avoid copying the words.
+ let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
+ // SAFETY: `words` can safely be all zeroes.
+ let mut words = unsafe { words.assume_init() };
+ let words_ref = Rc::get_mut(&mut words).unwrap();
+
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ words_ref[word_index] |= mask;
+ *chunk = Mixed(chunk_domain_size, 1, words);
+ } else {
+ *chunk = Ones(chunk_domain_size);
+ }
+ true
+ }
+ Ones(_) => false,
+ Mixed(chunk_domain_size, ref mut count, ref mut words) => {
+ // We skip all the work if the bit is already set.
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ if (words[word_index] & mask) == 0 {
+ *count += 1;
+ if *count < chunk_domain_size {
+ let words = Rc::make_mut(words);
+ words[word_index] |= mask;
+ } else {
+ *chunk = Ones(chunk_domain_size);
+ }
+ true
+ } else {
+ false
+ }
+ }
+ }
+ }
+
+ /// Sets all bits to true.
+ pub fn insert_all(&mut self) {
+ for chunk in self.chunks.iter_mut() {
+ *chunk = match *chunk {
+ Zeros(chunk_domain_size)
+ | Ones(chunk_domain_size)
+ | Mixed(chunk_domain_size, ..) => Ones(chunk_domain_size),
+ }
+ }
+ }
+
+ /// Returns `true` if the set has changed.
+ pub fn remove(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let chunk_index = chunk_index(elem);
+ let chunk = &mut self.chunks[chunk_index];
+ match *chunk {
+ Zeros(_) => false,
+ Ones(chunk_domain_size) => {
+ if chunk_domain_size > 1 {
+ // We take some effort to avoid copying the words.
+ let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
+ // SAFETY: `words` can safely be all zeroes.
+ let mut words = unsafe { words.assume_init() };
+ let words_ref = Rc::get_mut(&mut words).unwrap();
+
+ // Set only the bits in use.
+ let num_words = num_words(chunk_domain_size as usize);
+ words_ref[..num_words].fill(!0);
+ clear_excess_bits_in_final_word(
+ chunk_domain_size as usize,
+ &mut words_ref[..num_words],
+ );
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ words_ref[word_index] &= !mask;
+ *chunk = Mixed(chunk_domain_size, chunk_domain_size - 1, words);
+ } else {
+ *chunk = Zeros(chunk_domain_size);
+ }
+ true
+ }
+ Mixed(chunk_domain_size, ref mut count, ref mut words) => {
+ // We skip all the work if the bit is already clear.
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ if (words[word_index] & mask) != 0 {
+ *count -= 1;
+ if *count > 0 {
+ let words = Rc::make_mut(words);
+ words[word_index] &= !mask;
+ } else {
+ *chunk = Zeros(chunk_domain_size);
+ }
+ true
+ } else {
+ false
+ }
+ }
+ }
+ }
+
+ bit_relations_inherent_impls! {}
+}
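+// Sketch of how the chunk representations evolve, assuming `usize` indices
+// and a domain spanning two chunks:
+//
+//     let mut set: ChunkedBitSet<usize> = ChunkedBitSet::new_empty(3000);
+//     // chunks: [Zeros(2048), Zeros(952)]
+//     set.insert(10);
+//     // chunks: [Mixed(2048, 1, ..), Zeros(952)]
+//     set.insert_all();
+//     // chunks: [Ones(2048), Ones(952)]
+//     assert_eq!(set.count(), 3000);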
+
+impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
+ fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ debug_assert_eq!(self.chunks.len(), other.chunks.len());
+
+ let mut changed = false;
+ for (mut self_chunk, other_chunk) in self.chunks.iter_mut().zip(other.chunks.iter()) {
+ match (&mut self_chunk, &other_chunk) {
+ (_, Zeros(_)) | (Ones(_), _) => {}
+ (Zeros(self_chunk_domain_size), Ones(other_chunk_domain_size))
+ | (Mixed(self_chunk_domain_size, ..), Ones(other_chunk_domain_size))
+ | (Zeros(self_chunk_domain_size), Mixed(other_chunk_domain_size, ..)) => {
+ // `other_chunk` fully overwrites `self_chunk`
+ debug_assert_eq!(self_chunk_domain_size, other_chunk_domain_size);
+ *self_chunk = other_chunk.clone();
+ changed = true;
+ }
+ (
+ Mixed(
+ self_chunk_domain_size,
+ ref mut self_chunk_count,
+ ref mut self_chunk_words,
+ ),
+ Mixed(_other_chunk_domain_size, _other_chunk_count, other_chunk_words),
+ ) => {
+ // First check if the operation would change
+ // `self_chunk.words`. If not, we can avoid allocating some
+ // words, and this happens often enough that it's a
+ // performance win. Also, we only need to operate on the
+ // in-use words, hence the slicing.
+ let op = |a, b| a | b;
+ let num_words = num_words(*self_chunk_domain_size as usize);
+ if bitwise_changes(
+ &self_chunk_words[0..num_words],
+ &other_chunk_words[0..num_words],
+ op,
+ ) {
+ let self_chunk_words = Rc::make_mut(self_chunk_words);
+ let has_changed = bitwise(
+ &mut self_chunk_words[0..num_words],
+ &other_chunk_words[0..num_words],
+ op,
+ );
+ debug_assert!(has_changed);
+ *self_chunk_count = self_chunk_words[0..num_words]
+ .iter()
+ .map(|w| w.count_ones() as ChunkSize)
+ .sum();
+ if *self_chunk_count == *self_chunk_domain_size {
+ *self_chunk = Ones(*self_chunk_domain_size);
+ }
+ changed = true;
+ }
+ }
+ }
+ }
+ changed
+ }
+
+ fn subtract(&mut self, _other: &ChunkedBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+
+ fn intersect(&mut self, _other: &ChunkedBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+}
+
+impl<T: Idx> BitRelations<HybridBitSet<T>> for ChunkedBitSet<T> {
+ fn union(&mut self, other: &HybridBitSet<T>) -> bool {
+ // FIXME: This is slow if `other` is dense, but it hasn't been a problem
+ // in practice so far.
+ // If a faster implementation of this operation is required, consider
+ // reopening https://github.com/rust-lang/rust/pull/94625
+ assert_eq!(self.domain_size, other.domain_size());
+ sequential_update(|elem| self.insert(elem), other.iter())
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
+ // FIXME: This is slow if `other` is dense, but it hasn't been a problem
+ // in practice so far.
+ // If a faster implementation of this operation is required, consider
+ // reopening https://github.com/rust-lang/rust/pull/94625
+ assert_eq!(self.domain_size, other.domain_size());
+ sequential_update(|elem| self.remove(elem), other.iter())
+ }
+
+ fn intersect(&mut self, _other: &HybridBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+}
+
+impl<T: Idx> BitRelations<ChunkedBitSet<T>> for BitSet<T> {
+ fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
+ sequential_update(|elem| self.insert(elem), other.iter())
+ }
+
+ fn subtract(&mut self, _other: &ChunkedBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+
+ fn intersect(&mut self, other: &ChunkedBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ let mut changed = false;
+ for (i, chunk) in other.chunks.iter().enumerate() {
+ let mut words = &mut self.words[i * CHUNK_WORDS..];
+ if words.len() > CHUNK_WORDS {
+ words = &mut words[..CHUNK_WORDS];
+ }
+ match chunk {
+ Chunk::Zeros(..) => {
+ for word in words {
+ if *word != 0 {
+ changed = true;
+ *word = 0;
+ }
+ }
+ }
+ Chunk::Ones(..) => (),
+ Chunk::Mixed(_, _, data) => {
+ for (i, word) in words.iter_mut().enumerate() {
+ let new_val = *word & data[i];
+ if new_val != *word {
+ changed = true;
+ *word = new_val;
+ }
+ }
+ }
+ }
+ }
+ changed
+ }
+}
+
+impl<T> Clone for ChunkedBitSet<T> {
+ fn clone(&self) -> Self {
+ ChunkedBitSet {
+ domain_size: self.domain_size,
+ chunks: self.chunks.clone(),
+ marker: PhantomData,
+ }
+ }
+
+ /// WARNING: this implementation of clone_from will panic if the two
+ /// bitsets have different domain sizes. This constraint is not inherent to
+ /// `clone_from`, but it works with the existing call sites and allows a
+ /// faster implementation, which is important because this function is hot.
+ fn clone_from(&mut self, from: &Self) {
+ assert_eq!(self.domain_size, from.domain_size);
+ debug_assert_eq!(self.chunks.len(), from.chunks.len());
+
+ self.chunks.clone_from(&from.chunks)
+ }
+}
+
+pub struct ChunkedBitIter<'a, T: Idx> {
+ index: usize,
+ bitset: &'a ChunkedBitSet<T>,
+}
+
+impl<'a, T: Idx> ChunkedBitIter<'a, T> {
+ #[inline]
+ fn new(bitset: &'a ChunkedBitSet<T>) -> ChunkedBitIter<'a, T> {
+ ChunkedBitIter { index: 0, bitset }
+ }
+}
+
+impl<'a, T: Idx> Iterator for ChunkedBitIter<'a, T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ while self.index < self.bitset.domain_size() {
+ let elem = T::new(self.index);
+ let chunk = &self.bitset.chunks[chunk_index(elem)];
+ match &chunk {
+ Zeros(chunk_domain_size) => {
+ self.index += *chunk_domain_size as usize;
+ }
+ Ones(_chunk_domain_size) => {
+ self.index += 1;
+ return Some(elem);
+ }
+ Mixed(_chunk_domain_size, _, words) => loop {
+ let elem = T::new(self.index);
+ self.index += 1;
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ if (words[word_index] & mask) != 0 {
+ return Some(elem);
+ }
+ if self.index % CHUNK_BITS == 0 {
+ break;
+ }
+ },
+ }
+ }
+ None
+ }
+
+ fn fold<B, F>(mut self, mut init: B, mut f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ // If `next` has already been called, we may not be at the start of a chunk, so we first
+ // advance the iterator to the start of the next chunk, before proceeding in chunk sized
+ // steps.
+ while self.index % CHUNK_BITS != 0 {
+ let Some(item) = self.next() else {
+ return init
+ };
+ init = f(init, item);
+ }
+ let start_chunk = self.index / CHUNK_BITS;
+ let chunks = &self.bitset.chunks[start_chunk..];
+ for (i, chunk) in chunks.iter().enumerate() {
+ let base = (start_chunk + i) * CHUNK_BITS;
+ match chunk {
+ Chunk::Zeros(_) => (),
+ Chunk::Ones(limit) => {
+ for j in 0..(*limit as usize) {
+ init = f(init, T::new(base + j));
+ }
+ }
+ Chunk::Mixed(_, _, words) => {
+ init = BitIter::new(&**words).fold(init, |val, mut item: T| {
+ item.increment_by(base);
+ f(val, item)
+ });
+ }
+ }
+ }
+ init
+ }
+}
+
+impl Chunk {
+ #[cfg(test)]
+ fn assert_valid(&self) {
+ match *self {
+ Zeros(chunk_domain_size) | Ones(chunk_domain_size) => {
+ assert!(chunk_domain_size as usize <= CHUNK_BITS);
+ }
+ Mixed(chunk_domain_size, count, ref words) => {
+ assert!(chunk_domain_size as usize <= CHUNK_BITS);
+ assert!(0 < count && count < chunk_domain_size);
+
+ // Check the number of set bits matches `count`.
+ assert_eq!(
+ words.iter().map(|w| w.count_ones() as ChunkSize).sum::<ChunkSize>(),
+ count
+ );
+
+ // Check the not-in-use words are all zeroed.
+ let num_words = num_words(chunk_domain_size as usize);
+ if num_words < CHUNK_WORDS {
+ assert_eq!(
+ words[num_words..]
+ .iter()
+ .map(|w| w.count_ones() as ChunkSize)
+ .sum::<ChunkSize>(),
+ 0
+ );
+ }
+ }
+ }
+ }
+
+ fn new(chunk_domain_size: usize, is_empty: bool) -> Self {
+ debug_assert!(chunk_domain_size <= CHUNK_BITS);
+ let chunk_domain_size = chunk_domain_size as ChunkSize;
+ if is_empty { Zeros(chunk_domain_size) } else { Ones(chunk_domain_size) }
+ }
+
+ /// Count the number of 1s in the chunk.
+ fn count(&self) -> usize {
+ match *self {
+ Zeros(_) => 0,
+ Ones(chunk_domain_size) => chunk_domain_size as usize,
+ Mixed(_, count, _) => count as usize,
+ }
+ }
+}
+
+// Applies a function to mutate a bitset, and returns true if any
+// of the applications return true
+fn sequential_update<T: Idx>(
+ mut self_update: impl FnMut(T) -> bool,
+ it: impl Iterator<Item = T>,
+) -> bool {
+ it.fold(false, |changed, elem| self_update(elem) | changed)
+}
+
+// Optimization of intersection for SparseBitSet that's generic
+// over the RHS
+fn sparse_intersect<T: Idx>(
+ set: &mut SparseBitSet<T>,
+ other_contains: impl Fn(&T) -> bool,
+) -> bool {
+ let size = set.elems.len();
+ set.elems.retain(|elem| other_contains(elem));
+ set.elems.len() != size
+}
+
+// Optimization of dense/sparse intersection. The resulting set is
+// guaranteed to be at most the size of the sparse set, and hence can be
+// represented as a sparse set. Therefore the sparse set is copied and filtered,
+// then returned as the new set.
+fn dense_sparse_intersect<T: Idx>(
+ dense: &BitSet<T>,
+ sparse: &SparseBitSet<T>,
+) -> (SparseBitSet<T>, bool) {
+ let mut sparse_copy = sparse.clone();
+ sparse_intersect(&mut sparse_copy, |el| dense.contains(*el));
+ let n = sparse_copy.len();
+ (sparse_copy, n != dense.count())
+}
+
+// hybrid REL dense
+impl<T: Idx> BitRelations<BitSet<T>> for HybridBitSet<T> {
+ fn union(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ match self {
+ HybridBitSet::Sparse(sparse) => {
+ // `self` is sparse and `other` is dense. To
+ // merge them, we have two available strategies:
+ // * Densify `self` then merge other
+ // * Clone other then integrate bits from `self`
+ // The second strategy requires dedicated method
+ // since the usual `union` returns the wrong
+ // result. In the dedicated case the computation
+ // is slightly faster if the bits of the sparse
+ // bitset map to only few words of the dense
+ // representation, i.e. indices are near each
+ // other.
+ //
+ // Benchmarking seems to suggest that the second
+ // option is worth it.
+ let mut new_dense = other.clone();
+ let changed = new_dense.reverse_union_sparse(sparse);
+ *self = HybridBitSet::Dense(new_dense);
+ changed
+ }
+
+ HybridBitSet::Dense(dense) => dense.union(other),
+ }
+ }
+
+ fn subtract(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ match self {
+ HybridBitSet::Sparse(sparse) => {
+ sequential_update(|elem| sparse.remove(elem), other.iter())
+ }
+ HybridBitSet::Dense(dense) => dense.subtract(other),
+ }
+ }
+
+ fn intersect(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse_intersect(sparse, |elem| other.contains(*elem)),
+ HybridBitSet::Dense(dense) => dense.intersect(other),
+ }
+ }
+}
+
+// dense REL hybrid
+impl<T: Idx> BitRelations<HybridBitSet<T>> for BitSet<T> {
+ fn union(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size());
+ match other {
+ HybridBitSet::Sparse(sparse) => {
+ sequential_update(|elem| self.insert(elem), sparse.iter().cloned())
+ }
+ HybridBitSet::Dense(dense) => self.union(dense),
+ }
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size());
+ match other {
+ HybridBitSet::Sparse(sparse) => {
+ sequential_update(|elem| self.remove(elem), sparse.iter().cloned())
+ }
+ HybridBitSet::Dense(dense) => self.subtract(dense),
+ }
+ }
+
+ fn intersect(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size());
+ match other {
+ HybridBitSet::Sparse(sparse) => {
+ let (updated, changed) = dense_sparse_intersect(self, sparse);
+
+ // We can't directly assign the SparseBitSet to the BitSet, and
+ // doing `*self = updated.to_dense()` would cause a drop / reallocation. Instead,
+ // the BitSet is cleared and `updated` is copied into `self`.
+ self.clear();
+ for elem in updated.iter() {
+ self.insert(*elem);
+ }
+ changed
+ }
+ HybridBitSet::Dense(dense) => self.intersect(dense),
+ }
+ }
+}
+
+// hybrid REL hybrid
+impl<T: Idx> BitRelations<HybridBitSet<T>> for HybridBitSet<T> {
+ fn union(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size());
+ match self {
+ HybridBitSet::Sparse(_) => {
+ match other {
+ HybridBitSet::Sparse(other_sparse) => {
+ // Both sets are sparse. Add the elements in
+ // `other_sparse` to `self` one at a time. This
+ // may or may not cause `self` to be densified.
+ let mut changed = false;
+ for elem in other_sparse.iter() {
+ changed |= self.insert(*elem);
+ }
+ changed
+ }
+
+ HybridBitSet::Dense(other_dense) => self.union(other_dense),
+ }
+ }
+
+ HybridBitSet::Dense(self_dense) => self_dense.union(other),
+ }
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size());
+ match self {
+ HybridBitSet::Sparse(self_sparse) => {
+ sequential_update(|elem| self_sparse.remove(elem), other.iter())
+ }
+ HybridBitSet::Dense(self_dense) => self_dense.subtract(other),
+ }
+ }
+
+ fn intersect(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size());
+ match self {
+ HybridBitSet::Sparse(self_sparse) => {
+ sparse_intersect(self_sparse, |elem| other.contains(*elem))
+ }
+ HybridBitSet::Dense(self_dense) => match other {
+ HybridBitSet::Sparse(other_sparse) => {
+ let (updated, changed) = dense_sparse_intersect(self_dense, other_sparse);
+ *self = HybridBitSet::Sparse(updated);
+ changed
+ }
+ HybridBitSet::Dense(other_dense) => self_dense.intersect(other_dense),
+ },
+ }
+ }
+}
+
+impl<T> Clone for BitSet<T> {
+ fn clone(&self) -> Self {
+ BitSet { domain_size: self.domain_size, words: self.words.clone(), marker: PhantomData }
+ }
+
+ fn clone_from(&mut self, from: &Self) {
+ self.domain_size = from.domain_size;
+ self.words.clone_from(&from.words);
+ }
+}
+
+impl<T: Idx> fmt::Debug for BitSet<T> {
+ fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+ w.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<T: Idx> ToString for BitSet<T> {
+ fn to_string(&self) -> String {
+ let mut result = String::new();
+ let mut sep = '[';
+
+ // Note: this is a little endian printout of bytes.
+
+ // i tracks how many bits we have printed so far.
+ let mut i = 0;
+ for word in &self.words {
+ let mut word = *word;
+ for _ in 0..WORD_BYTES {
+ // for each byte in `word`:
+ let remain = self.domain_size - i;
+ // If less than a byte remains, then mask just that many bits.
+ let mask = if remain <= 8 { (1 << remain) - 1 } else { 0xFF };
+ assert!(mask <= 0xFF);
+ let byte = word & mask;
+
+ result.push_str(&format!("{}{:02x}", sep, byte));
+
+ if remain <= 8 {
+ break;
+ }
+ word >>= 8;
+ i += 8;
+ sep = '-';
+ }
+ sep = '|';
+ }
+ result.push(']');
+
+ result
+ }
+}
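+// Worked example of the format above: a 16-bit domain with bits {0, 1, 2, 3}
+// set prints as "[0f-00]". Bytes are emitted little-endian; '-' separates
+// bytes within a word and '|' separates words.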
+
+pub struct BitIter<'a, T: Idx> {
+ /// A copy of the current word, but with any already-visited bits cleared.
+ /// (This lets us use `trailing_zeros()` to find the next set bit.) When it
+ /// is reduced to 0, we move onto the next word.
+ word: Word,
+
+ /// The offset (measured in bits) of the current word.
+ offset: usize,
+
+ /// Underlying iterator over the words.
+ iter: slice::Iter<'a, Word>,
+
+ marker: PhantomData<T>,
+}
+
+impl<'a, T: Idx> BitIter<'a, T> {
+ #[inline]
+ fn new(words: &'a [Word]) -> BitIter<'a, T> {
+ // We initialize `word` and `offset` to degenerate values. On the first
+ // call to `next()` we will fall through to getting the first word from
+ // `iter`, which sets `word` to the first word (if there is one) and
+ // `offset` to 0. Doing it this way saves us from having to maintain
+ // additional state about whether we have started.
+ BitIter {
+ word: 0,
+ offset: usize::MAX - (WORD_BITS - 1),
+ iter: words.iter(),
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<'a, T: Idx> Iterator for BitIter<'a, T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ loop {
+ if self.word != 0 {
+ // Get the position of the next set bit in the current word,
+ // then clear the bit.
+ let bit_pos = self.word.trailing_zeros() as usize;
+ let bit = 1 << bit_pos;
+ self.word ^= bit;
+ return Some(T::new(bit_pos + self.offset));
+ }
+
+ // Move onto the next word. `wrapping_add()` is needed to handle
+ // the degenerate initial value given to `offset` in `new()`.
+ let word = self.iter.next()?;
+ self.word = *word;
+ self.offset = self.offset.wrapping_add(WORD_BITS);
+ }
+ }
+}
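+// Worked example of the iteration above: for the word 0b1010_0100 at offset
+// 0, successive `next()` calls see `trailing_zeros()` of 2, 5 and 7, clear
+// each bit in turn, and yield the indices 2, 5 and 7 before moving on to the
+// next word.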
+
+#[inline]
+fn bitwise<Op>(out_vec: &mut [Word], in_vec: &[Word], op: Op) -> bool
+where
+ Op: Fn(Word, Word) -> Word,
+{
+ assert_eq!(out_vec.len(), in_vec.len());
+ let mut changed = 0;
+ for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
+ let old_val = *out_elem;
+ let new_val = op(old_val, *in_elem);
+ *out_elem = new_val;
+ // This is essentially equivalent to a != with changed being a bool, but
+ // in practice this code gets auto-vectorized by the compiler for most
+ // operators. Using != here causes us to generate quite poor code as the
+ // compiler tries to go back to a boolean on each loop iteration.
+ changed |= old_val ^ new_val;
+ }
+ changed != 0
+}
+
+/// Does this bitwise operation change `out_vec`?
+#[inline]
+fn bitwise_changes<Op>(out_vec: &[Word], in_vec: &[Word], op: Op) -> bool
+where
+ Op: Fn(Word, Word) -> Word,
+{
+ assert_eq!(out_vec.len(), in_vec.len());
+ for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
+ let old_val = *out_elem;
+ let new_val = op(old_val, *in_elem);
+ if old_val != new_val {
+ return true;
+ }
+ }
+ false
+}
+
+const SPARSE_MAX: usize = 8;
+
+/// A fixed-size bitset type with a sparse representation and a maximum of
+/// `SPARSE_MAX` elements. The elements are stored as a sorted `ArrayVec` with
+/// no duplicates.
+///
+/// This type is used by `HybridBitSet`; do not use directly.
+#[derive(Clone, Debug)]
+pub struct SparseBitSet<T> {
+ domain_size: usize,
+ elems: ArrayVec<T, SPARSE_MAX>,
+}
+
+impl<T: Idx> SparseBitSet<T> {
+ fn new_empty(domain_size: usize) -> Self {
+ SparseBitSet { domain_size, elems: ArrayVec::new() }
+ }
+
+ fn len(&self) -> usize {
+ self.elems.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.elems.len() == 0
+ }
+
+ fn contains(&self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ self.elems.contains(&elem)
+ }
+
+ fn insert(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let changed = if let Some(i) = self.elems.iter().position(|&e| e.index() >= elem.index()) {
+ if self.elems[i] == elem {
+ // `elem` is already in the set.
+ false
+ } else {
+ // `elem` is smaller than one or more existing elements.
+ self.elems.insert(i, elem);
+ true
+ }
+ } else {
+ // `elem` is larger than all existing elements.
+ self.elems.push(elem);
+ true
+ };
+ assert!(self.len() <= SPARSE_MAX);
+ changed
+ }
+
+ fn remove(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ if let Some(i) = self.elems.iter().position(|&e| e == elem) {
+ self.elems.remove(i);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn to_dense(&self) -> BitSet<T> {
+ let mut dense = BitSet::new_empty(self.domain_size);
+ for elem in self.elems.iter() {
+ dense.insert(*elem);
+ }
+ dense
+ }
+
+ fn iter(&self) -> slice::Iter<'_, T> {
+ self.elems.iter()
+ }
+
+ bit_relations_inherent_impls! {}
+}
+
+impl<T: Idx + Ord> SparseBitSet<T> {
+ fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
+ let mut last_leq = None;
+ for e in self.iter() {
+ if range.contains(e) {
+ last_leq = Some(*e);
+ }
+ }
+ last_leq
+ }
+}
+
+/// A fixed-size bitset type with a hybrid representation: sparse when there
+/// are up to a `SPARSE_MAX` elements in the set, but dense when there are more
+/// than `SPARSE_MAX`.
+///
+/// This type is especially efficient for sets that typically have a small
+/// number of elements, but a large `domain_size`, and are cleared frequently.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// All operations that involve an element will panic if the element is equal
+/// to or greater than the domain size. All operations that involve two bitsets
+/// will panic if the bitsets have differing domain sizes.
+#[derive(Clone)]
+pub enum HybridBitSet<T> {
+ Sparse(SparseBitSet<T>),
+ Dense(BitSet<T>),
+}
+
+impl<T: Idx> fmt::Debug for HybridBitSet<T> {
+ fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Sparse(b) => b.fmt(w),
+ Self::Dense(b) => b.fmt(w),
+ }
+ }
+}
+
+impl<T: Idx> HybridBitSet<T> {
+ pub fn new_empty(domain_size: usize) -> Self {
+ HybridBitSet::Sparse(SparseBitSet::new_empty(domain_size))
+ }
+
+ pub fn domain_size(&self) -> usize {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.domain_size,
+ HybridBitSet::Dense(dense) => dense.domain_size,
+ }
+ }
+
+ pub fn clear(&mut self) {
+ let domain_size = self.domain_size();
+ *self = HybridBitSet::new_empty(domain_size);
+ }
+
+ pub fn contains(&self, elem: T) -> bool {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.contains(elem),
+ HybridBitSet::Dense(dense) => dense.contains(elem),
+ }
+ }
+
+ pub fn superset(&self, other: &HybridBitSet<T>) -> bool {
+ match (self, other) {
+ (HybridBitSet::Dense(self_dense), HybridBitSet::Dense(other_dense)) => {
+ self_dense.superset(other_dense)
+ }
+ _ => {
+ assert!(self.domain_size() == other.domain_size());
+ other.iter().all(|elem| self.contains(elem))
+ }
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.is_empty(),
+ HybridBitSet::Dense(dense) => dense.is_empty(),
+ }
+ }
+
+ /// Returns the last element present in the bitset within `range`,
+ /// inclusive of the range's bounds. That is, it will return `Some(elem)`
+ /// if `elem` is in the bitset and is the largest element within `range`.
+ pub fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T>
+ where
+ T: Ord,
+ {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.last_set_in(range),
+ HybridBitSet::Dense(dense) => dense.last_set_in(range),
+ }
+ }
+
+ pub fn insert(&mut self, elem: T) -> bool {
+ // No need to check `elem` against `self.domain_size` here because all
+ // the match cases check it, one way or another.
+ match self {
+ HybridBitSet::Sparse(sparse) if sparse.len() < SPARSE_MAX => {
+ // The set is sparse and has space for `elem`.
+ sparse.insert(elem)
+ }
+ HybridBitSet::Sparse(sparse) if sparse.contains(elem) => {
+ // The set is sparse and does not have space for `elem`, but
+ // that doesn't matter because `elem` is already present.
+ false
+ }
+ HybridBitSet::Sparse(sparse) => {
+ // The set is sparse and full. Convert to a dense set.
+ let mut dense = sparse.to_dense();
+ let changed = dense.insert(elem);
+ assert!(changed);
+ *self = HybridBitSet::Dense(dense);
+ changed
+ }
+ HybridBitSet::Dense(dense) => dense.insert(elem),
+ }
+ }
+
+ pub fn insert_range(&mut self, elems: impl RangeBounds<T>) {
+ // No need to check `elem` against `self.domain_size` here because all
+ // the match cases check it, one way or another.
+ let start = match elems.start_bound().cloned() {
+ Bound::Included(start) => start.index(),
+ Bound::Excluded(start) => start.index() + 1,
+ Bound::Unbounded => 0,
+ };
+ let end = match elems.end_bound().cloned() {
+ Bound::Included(end) => end.index() + 1,
+ Bound::Excluded(end) => end.index(),
+ Bound::Unbounded => self.domain_size() - 1,
+ };
+ let Some(len) = end.checked_sub(start) else { return };
+ match self {
+ HybridBitSet::Sparse(sparse) if sparse.len() + len < SPARSE_MAX => {
+ // The set is sparse and has space for `elems`.
+ for elem in start..end {
+ sparse.insert(T::new(elem));
+ }
+ }
+ HybridBitSet::Sparse(sparse) => {
+ // The set is sparse and full. Convert to a dense set.
+ let mut dense = sparse.to_dense();
+ dense.insert_range(elems);
+ *self = HybridBitSet::Dense(dense);
+ }
+ HybridBitSet::Dense(dense) => dense.insert_range(elems),
+ }
+ }
+
+ pub fn insert_all(&mut self) {
+ let domain_size = self.domain_size();
+ match self {
+ HybridBitSet::Sparse(_) => {
+ *self = HybridBitSet::Dense(BitSet::new_filled(domain_size));
+ }
+ HybridBitSet::Dense(dense) => dense.insert_all(),
+ }
+ }
+
+ pub fn remove(&mut self, elem: T) -> bool {
+ // Note: we currently don't bother going from Dense back to Sparse.
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.remove(elem),
+ HybridBitSet::Dense(dense) => dense.remove(elem),
+ }
+ }
+
+ /// Converts to a dense set, consuming itself in the process.
+ pub fn to_dense(self) -> BitSet<T> {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.to_dense(),
+ HybridBitSet::Dense(dense) => dense,
+ }
+ }
+
+ pub fn iter(&self) -> HybridIter<'_, T> {
+ match self {
+ HybridBitSet::Sparse(sparse) => HybridIter::Sparse(sparse.iter()),
+ HybridBitSet::Dense(dense) => HybridIter::Dense(dense.iter()),
+ }
+ }
+
+ bit_relations_inherent_impls! {}
+}
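+// Sketch of the sparse-to-dense promotion, assuming `usize` indices: the set
+// stays `Sparse` for up to SPARSE_MAX (8) elements and switches to `Dense`
+// on the ninth insertion.
+//
+//     let mut set: HybridBitSet<usize> = HybridBitSet::new_empty(1000);
+//     for i in 0..8 {
+//         set.insert(i); // still HybridBitSet::Sparse
+//     }
+//     set.insert(8);     // converts to HybridBitSet::Dense
+//     assert!(set.contains(8));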
+
+pub enum HybridIter<'a, T: Idx> {
+ Sparse(slice::Iter<'a, T>),
+ Dense(BitIter<'a, T>),
+}
+
+impl<'a, T: Idx> Iterator for HybridIter<'a, T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ match self {
+ HybridIter::Sparse(sparse) => sparse.next().copied(),
+ HybridIter::Dense(dense) => dense.next(),
+ }
+ }
+}
+
+/// A resizable bitset type with a dense representation.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// All operations that involve an element will panic if the element is equal
+/// to or greater than the domain size.
+#[derive(Clone, Debug, PartialEq)]
+pub struct GrowableBitSet<T: Idx> {
+ bit_set: BitSet<T>,
+}
+
+impl<T: Idx> Default for GrowableBitSet<T> {
+ fn default() -> Self {
+ GrowableBitSet::new_empty()
+ }
+}
+
+impl<T: Idx> GrowableBitSet<T> {
+ /// Ensure that the set can hold at least `min_domain_size` elements.
+ pub fn ensure(&mut self, min_domain_size: usize) {
+ if self.bit_set.domain_size < min_domain_size {
+ self.bit_set.domain_size = min_domain_size;
+ }
+
+ let min_num_words = num_words(min_domain_size);
+ if self.bit_set.words.len() < min_num_words {
+ self.bit_set.words.resize(min_num_words, 0)
+ }
+ }
+
+ pub fn new_empty() -> GrowableBitSet<T> {
+ GrowableBitSet { bit_set: BitSet::new_empty(0) }
+ }
+
+ pub fn with_capacity(capacity: usize) -> GrowableBitSet<T> {
+ GrowableBitSet { bit_set: BitSet::new_empty(capacity) }
+ }
+
+ /// Returns `true` if the set has changed.
+ #[inline]
+ pub fn insert(&mut self, elem: T) -> bool {
+ self.ensure(elem.index() + 1);
+ self.bit_set.insert(elem)
+ }
+
+ /// Returns `true` if the set has changed.
+ #[inline]
+ pub fn remove(&mut self, elem: T) -> bool {
+ self.ensure(elem.index() + 1);
+ self.bit_set.remove(elem)
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.bit_set.is_empty()
+ }
+
+ #[inline]
+ pub fn contains(&self, elem: T) -> bool {
+ let (word_index, mask) = word_index_and_mask(elem);
+ self.bit_set.words.get(word_index).map_or(false, |word| (word & mask) != 0)
+ }
+
+ #[inline]
+ pub fn iter(&self) -> BitIter<'_, T> {
+ self.bit_set.iter()
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.bit_set.count()
+ }
+}
+
+impl<T: Idx> From<BitSet<T>> for GrowableBitSet<T> {
+ fn from(bit_set: BitSet<T>) -> Self {
+ Self { bit_set }
+ }
+}
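+// Sketch of the growable variant, assuming `usize` indices: the domain grows
+// on demand via `ensure`, so inserting past the current domain size never
+// panics.
+//
+//     let mut set: GrowableBitSet<usize> = GrowableBitSet::new_empty();
+//     set.insert(1000);          // grows the domain to at least 1001
+//     assert!(set.contains(1000));
+//     assert!(!set.contains(5));
+//     assert_eq!(set.len(), 1);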
+
+/// A fixed-size 2D bit matrix type with a dense representation.
+///
+/// `R` and `C` are index types used to identify rows and columns respectively;
+/// typically newtyped `usize` wrappers, but they can also just be `usize`.
+///
+/// All operations that involve a row and/or column index will panic if the
+/// index exceeds the relevant bound.
+#[derive(Clone, Eq, PartialEq, Hash, Decodable, Encodable)]
+pub struct BitMatrix<R: Idx, C: Idx> {
+ num_rows: usize,
+ num_columns: usize,
+ words: Vec<Word>,
+ marker: PhantomData<(R, C)>,
+}
+
+impl<R: Idx, C: Idx> BitMatrix<R, C> {
+ /// Creates a new `rows x columns` matrix, initially empty.
+ pub fn new(num_rows: usize, num_columns: usize) -> BitMatrix<R, C> {
+ // Every row needs one bit per column, packed into
+ // whole words, so round the per-row word count up.
+ let words_per_row = num_words(num_columns);
+ BitMatrix {
+ num_rows,
+ num_columns,
+ words: vec![0; num_rows * words_per_row],
+ marker: PhantomData,
+ }
+ }
+
+ /// Creates a new matrix, with `row` used as the value for every row.
+ pub fn from_row_n(row: &BitSet<C>, num_rows: usize) -> BitMatrix<R, C> {
+ let num_columns = row.domain_size();
+ let words_per_row = num_words(num_columns);
+ assert_eq!(words_per_row, row.words().len());
+ BitMatrix {
+ num_rows,
+ num_columns,
+ words: iter::repeat(row.words()).take(num_rows).flatten().cloned().collect(),
+ marker: PhantomData,
+ }
+ }
+
+ pub fn rows(&self) -> impl Iterator<Item = R> {
+ (0..self.num_rows).map(R::new)
+ }
+
+ /// The range of bits for a given row.
+ fn range(&self, row: R) -> (usize, usize) {
+ let words_per_row = num_words(self.num_columns);
+ let start = row.index() * words_per_row;
+ (start, start + words_per_row)
+ }
+
+ /// Sets the cell at `(row, column)` to true. Put another way, insert
+ /// `column` to the bitset for `row`.
+ ///
+ /// Returns `true` if this changed the matrix.
+ pub fn insert(&mut self, row: R, column: C) -> bool {
+ assert!(row.index() < self.num_rows && column.index() < self.num_columns);
+ let (start, _) = self.range(row);
+ let (word_index, mask) = word_index_and_mask(column);
+ let words = &mut self.words[..];
+ let word = words[start + word_index];
+ let new_word = word | mask;
+ words[start + word_index] = new_word;
+ word != new_word
+ }
+
+ /// Do the bits from `row` contain `column`? Put another way, is
+ /// the matrix cell at `(row, column)` true? Put yet another way,
+ /// if the matrix represents (transitive) reachability, can
+ /// `row` reach `column`?
+ pub fn contains(&self, row: R, column: C) -> bool {
+ assert!(row.index() < self.num_rows && column.index() < self.num_columns);
+ let (start, _) = self.range(row);
+ let (word_index, mask) = word_index_and_mask(column);
+ (self.words[start + word_index] & mask) != 0
+ }
+
+ /// Returns those indices that are true in both `row1` and `row2`. This
+ /// is an *O*(*n*) operation where *n* is the number of columns
+ /// (largely independent of the actual size of the intersection).
+ pub fn intersect_rows(&self, row1: R, row2: R) -> Vec<C> {
+ assert!(row1.index() < self.num_rows && row2.index() < self.num_rows);
+ let (row1_start, row1_end) = self.range(row1);
+ let (row2_start, row2_end) = self.range(row2);
+ let mut result = Vec::with_capacity(self.num_columns);
+ for (base, (i, j)) in (row1_start..row1_end).zip(row2_start..row2_end).enumerate() {
+ let mut v = self.words[i] & self.words[j];
+ for bit in 0..WORD_BITS {
+ if v == 0 {
+ break;
+ }
+ if v & 0x1 != 0 {
+ result.push(C::new(base * WORD_BITS + bit));
+ }
+ v >>= 1;
+ }
+ }
+ result
+ }
+
+ /// Adds the bits from row `read` to the bits from row `write`, and
+ /// returns `true` if anything changed.
+ ///
+ /// This is used when computing transitive reachability: if you have
+ /// an edge `write -> read`, then `write` can reach everything that
+ /// `read` can (and potentially more).
+ pub fn union_rows(&mut self, read: R, write: R) -> bool {
+ assert!(read.index() < self.num_rows && write.index() < self.num_rows);
+ let (read_start, read_end) = self.range(read);
+ let (write_start, write_end) = self.range(write);
+ let words = &mut self.words[..];
+ let mut changed = false;
+ for (read_index, write_index) in iter::zip(read_start..read_end, write_start..write_end) {
+ let word = words[write_index];
+ let new_word = word | words[read_index];
+ words[write_index] = new_word;
+ changed |= word != new_word;
+ }
+ changed
+ }
+
+ /// Adds the bits from `with` to the bits from row `write`, and
+ /// returns `true` if anything changed.
+ pub fn union_row_with(&mut self, with: &BitSet<C>, write: R) -> bool {
+ assert!(write.index() < self.num_rows);
+ assert_eq!(with.domain_size(), self.num_columns);
+ let (write_start, write_end) = self.range(write);
+ let mut changed = false;
+ for (read_index, write_index) in iter::zip(0..with.words().len(), write_start..write_end) {
+ let word = self.words[write_index];
+ let new_word = word | with.words()[read_index];
+ self.words[write_index] = new_word;
+ changed |= word != new_word;
+ }
+ changed
+ }
+
+ /// Sets every cell in `row` to true.
+ pub fn insert_all_into_row(&mut self, row: R) {
+ assert!(row.index() < self.num_rows);
+ let (start, end) = self.range(row);
+ let words = &mut self.words[..];
+ for index in start..end {
+ words[index] = !0;
+ }
+ clear_excess_bits_in_final_word(self.num_columns, &mut self.words[..end]);
+ }
+
+ /// Gets a slice of the underlying words.
+ pub fn words(&self) -> &[Word] {
+ &self.words
+ }
+
+ /// Iterates through all the columns set to true in a given row of
+ /// the matrix.
+ pub fn iter(&self, row: R) -> BitIter<'_, C> {
+ assert!(row.index() < self.num_rows);
+ let (start, end) = self.range(row);
+ BitIter::new(&self.words[start..end])
+ }
+
+ /// Returns the number of elements in `row`.
+ pub fn count(&self, row: R) -> usize {
+ let (start, end) = self.range(row);
+ self.words[start..end].iter().map(|e| e.count_ones() as usize).sum()
+ }
+}
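+// Sketch of the dense matrix, assuming `usize` row and column indices:
+//
+//     let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(3, 10);
+//     matrix.insert(1, 4);
+//     matrix.insert(2, 7);
+//     matrix.union_rows(2, 1);   // row 1 |= row 2
+//     assert!(matrix.contains(1, 7));
+//     assert_eq!(matrix.intersect_rows(1, 2), vec![7]);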
+
+impl<R: Idx, C: Idx> fmt::Debug for BitMatrix<R, C> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// Forces its contents to print in regular mode instead of alternate mode.
+ struct OneLinePrinter<T>(T);
+ impl<T: fmt::Debug> fmt::Debug for OneLinePrinter<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}", self.0)
+ }
+ }
+
+ write!(fmt, "BitMatrix({}x{}) ", self.num_rows, self.num_columns)?;
+ let items = self.rows().flat_map(|r| self.iter(r).map(move |c| (r, c)));
+ fmt.debug_set().entries(items.map(OneLinePrinter)).finish()
+ }
+}
+
+/// A fixed-column-size, variable-row-size 2D bit matrix with a moderately
+/// sparse representation.
+///
+/// Initially, every row has no explicit representation. If any bit within a
+/// row is set, the entire row is instantiated as `Some(<HybridBitSet>)`.
+/// Furthermore, any previously uninstantiated rows prior to it will be
+/// instantiated as `None`. Those prior rows may themselves become fully
+/// instantiated later on if any of their bits are set.
+///
+/// `R` and `C` are index types used to identify rows and columns respectively;
+/// typically newtyped `usize` wrappers, but they can also just be `usize`.
+#[derive(Clone, Debug)]
+pub struct SparseBitMatrix<R, C>
+where
+ R: Idx,
+ C: Idx,
+{
+ num_columns: usize,
+ rows: IndexVec<R, Option<HybridBitSet<C>>>,
+}
+
+impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
+ /// Creates a new empty sparse bit matrix with no rows or columns.
+ pub fn new(num_columns: usize) -> Self {
+ Self { num_columns, rows: IndexVec::new() }
+ }
+
+ fn ensure_row(&mut self, row: R) -> &mut HybridBitSet<C> {
+ // Instantiate any missing rows up to and including row `row` with an empty
+ // HybridBitSet, and return a mutable reference to that row's set.
+ self.rows.get_or_insert_with(row, || HybridBitSet::new_empty(self.num_columns))
+ }
+
+ /// Sets the cell at `(row, column)` to true. Put another way, insert
+ /// `column` to the bitset for `row`.
+ ///
+ /// Returns `true` if this changed the matrix.
+ pub fn insert(&mut self, row: R, column: C) -> bool {
+ self.ensure_row(row).insert(column)
+ }
+
+ /// Sets the cell at `(row, column)` to false. Put another way, delete
+ /// `column` from the bitset for `row`. Has no effect if `row` does not
+ /// exist.
+ ///
+ /// Returns `true` if this changed the matrix.
+ pub fn remove(&mut self, row: R, column: C) -> bool {
+ match self.rows.get_mut(row) {
+ Some(Some(row)) => row.remove(column),
+ _ => false,
+ }
+ }
+
+ /// Sets all columns at `row` to false. Has no effect if `row` does
+ /// not exist.
+ pub fn clear(&mut self, row: R) {
+ if let Some(Some(row)) = self.rows.get_mut(row) {
+ row.clear();
+ }
+ }
+
+ /// Do the bits from `row` contain `column`? Put another way, is
+ /// the matrix cell at `(row, column)` true? Put yet another way,
+ /// if the matrix represents (transitive) reachability, can
+ /// `row` reach `column`?
+ pub fn contains(&self, row: R, column: C) -> bool {
+ self.row(row).map_or(false, |r| r.contains(column))
+ }
+
+ /// Adds the bits from row `read` to the bits from row `write`, and
+ /// returns `true` if anything changed.
+ ///
+ /// This is used when computing transitive reachability: if you have
+ /// an edge `write -> read`, then `write` can reach everything that
+ /// `read` can (and potentially more).
+ pub fn union_rows(&mut self, read: R, write: R) -> bool {
+ if read == write || self.row(read).is_none() {
+ return false;
+ }
+
+ self.ensure_row(write);
+ if let (Some(read_row), Some(write_row)) = self.rows.pick2_mut(read, write) {
+ write_row.union(read_row)
+ } else {
+ unreachable!()
+ }
+ }
+
+    /// Inserts all bits in the given row.
+ pub fn insert_all_into_row(&mut self, row: R) {
+ self.ensure_row(row).insert_all();
+ }
+
+ pub fn rows(&self) -> impl Iterator<Item = R> {
+ self.rows.indices()
+ }
+
+ /// Iterates through all the columns set to true in a given row of
+ /// the matrix.
+ pub fn iter<'a>(&'a self, row: R) -> impl Iterator<Item = C> + 'a {
+ self.row(row).into_iter().flat_map(|r| r.iter())
+ }
+
+ pub fn row(&self, row: R) -> Option<&HybridBitSet<C>> {
+ self.rows.get(row)?.as_ref()
+ }
+
+ /// Intersects `row` with `set`. `set` can be either `BitSet` or
+ /// `HybridBitSet`. Has no effect if `row` does not exist.
+ ///
+ /// Returns true if the row was changed.
+ pub fn intersect_row<Set>(&mut self, row: R, set: &Set) -> bool
+ where
+ HybridBitSet<C>: BitRelations<Set>,
+ {
+ match self.rows.get_mut(row) {
+ Some(Some(row)) => row.intersect(set),
+ _ => false,
+ }
+ }
+
+    /// Subtracts `set` from `row`. `set` can be either `BitSet` or
+ /// `HybridBitSet`. Has no effect if `row` does not exist.
+ ///
+ /// Returns true if the row was changed.
+ pub fn subtract_row<Set>(&mut self, row: R, set: &Set) -> bool
+ where
+ HybridBitSet<C>: BitRelations<Set>,
+ {
+ match self.rows.get_mut(row) {
+ Some(Some(row)) => row.subtract(set),
+ _ => false,
+ }
+ }
+
+ /// Unions `row` with `set`. `set` can be either `BitSet` or
+ /// `HybridBitSet`.
+ ///
+ /// Returns true if the row was changed.
+ pub fn union_row<Set>(&mut self, row: R, set: &Set) -> bool
+ where
+ HybridBitSet<C>: BitRelations<Set>,
+ {
+ self.ensure_row(row).union(set)
+ }
+}
+
+#[inline]
+fn num_words<T: Idx>(domain_size: T) -> usize {
+ (domain_size.index() + WORD_BITS - 1) / WORD_BITS
+}
+
+#[inline]
+fn num_chunks<T: Idx>(domain_size: T) -> usize {
+ assert!(domain_size.index() > 0);
+ (domain_size.index() + CHUNK_BITS - 1) / CHUNK_BITS
+}
+
+#[inline]
+fn word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
+ let elem = elem.index();
+ let word_index = elem / WORD_BITS;
+ let mask = 1 << (elem % WORD_BITS);
+ (word_index, mask)
+}
+
+#[inline]
+fn chunk_index<T: Idx>(elem: T) -> usize {
+ elem.index() / CHUNK_BITS
+}
+
+#[inline]
+fn chunk_word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
+ let chunk_elem = elem.index() % CHUNK_BITS;
+ word_index_and_mask(chunk_elem)
+}
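+
+// Worked example (illustrative) for the helpers above: with `WORD_BITS == 64`
+// and `CHUNK_BITS == 2048`, the element 2500 lives in chunk
+// `chunk_index(2500) == 1`, and within that chunk
+// `chunk_word_index_and_mask(2500) == (7, 1 << 4)`, because 2500 % 2048 == 452,
+// 452 / 64 == 7, and 452 % 64 == 4. Likewise, `word_index_and_mask(70) == (1, 1 << 6)`.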
+
+fn clear_excess_bits_in_final_word(domain_size: usize, words: &mut [Word]) {
+ let num_bits_in_final_word = domain_size % WORD_BITS;
+ if num_bits_in_final_word > 0 {
+ let mask = (1 << num_bits_in_final_word) - 1;
+ words[words.len() - 1] &= mask;
+ }
+}
+
+#[inline]
+fn max_bit(word: Word) -> usize {
+ WORD_BITS - 1 - word.leading_zeros() as usize
+}
+
+/// Integral type used to represent the bit set.
+pub trait FiniteBitSetTy:
+ BitAnd<Output = Self>
+ + BitAndAssign
+ + BitOrAssign
+ + Clone
+ + Copy
+ + Shl
+ + Not<Output = Self>
+ + PartialEq
+ + Sized
+{
+ /// Size of the domain representable by this type, e.g. 64 for `u64`.
+ const DOMAIN_SIZE: u32;
+
+ /// Value which represents the `FiniteBitSet` having every bit set.
+ const FILLED: Self;
+ /// Value which represents the `FiniteBitSet` having no bits set.
+ const EMPTY: Self;
+
+ /// Value for one as the integral type.
+ const ONE: Self;
+ /// Value for zero as the integral type.
+ const ZERO: Self;
+
+ /// Perform a checked left shift on the integral type.
+ fn checked_shl(self, rhs: u32) -> Option<Self>;
+ /// Perform a checked right shift on the integral type.
+ fn checked_shr(self, rhs: u32) -> Option<Self>;
+}
+
+impl FiniteBitSetTy for u32 {
+ const DOMAIN_SIZE: u32 = 32;
+
+ const FILLED: Self = Self::MAX;
+ const EMPTY: Self = Self::MIN;
+
+ const ONE: Self = 1u32;
+ const ZERO: Self = 0u32;
+
+ fn checked_shl(self, rhs: u32) -> Option<Self> {
+ self.checked_shl(rhs)
+ }
+
+ fn checked_shr(self, rhs: u32) -> Option<Self> {
+ self.checked_shr(rhs)
+ }
+}
+
+impl std::fmt::Debug for FiniteBitSet<u32> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:032b}", self.0)
+ }
+}
+
+impl FiniteBitSetTy for u64 {
+ const DOMAIN_SIZE: u32 = 64;
+
+ const FILLED: Self = Self::MAX;
+ const EMPTY: Self = Self::MIN;
+
+ const ONE: Self = 1u64;
+ const ZERO: Self = 0u64;
+
+ fn checked_shl(self, rhs: u32) -> Option<Self> {
+ self.checked_shl(rhs)
+ }
+
+ fn checked_shr(self, rhs: u32) -> Option<Self> {
+ self.checked_shr(rhs)
+ }
+}
+
+impl std::fmt::Debug for FiniteBitSet<u64> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:064b}", self.0)
+ }
+}
+
+impl FiniteBitSetTy for u128 {
+ const DOMAIN_SIZE: u32 = 128;
+
+ const FILLED: Self = Self::MAX;
+ const EMPTY: Self = Self::MIN;
+
+ const ONE: Self = 1u128;
+ const ZERO: Self = 0u128;
+
+ fn checked_shl(self, rhs: u32) -> Option<Self> {
+ self.checked_shl(rhs)
+ }
+
+ fn checked_shr(self, rhs: u32) -> Option<Self> {
+ self.checked_shr(rhs)
+ }
+}
+
+impl std::fmt::Debug for FiniteBitSet<u128> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:0128b}", self.0)
+ }
+}
+
+/// A fixed-size bitset type represented by an integer type. Indices outside
+/// the range representable by `T` are considered set.
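+///
+/// # Example
+///
+/// A minimal illustrative sketch:
+///
+/// ```
+/// # use rustc_index::bit_set::FiniteBitSet;
+/// let mut set = FiniteBitSet::<u32>::new_empty();
+/// set.set(3);
+/// set.set_range(8..11); // sets bits 8, 9 and 10
+/// assert_eq!(set.contains(3), Some(true));
+/// assert_eq!(set.contains(4), Some(false));
+/// assert_eq!(set.contains(40), None); // outside the `u32` domain
+/// set.clear(3);
+/// assert_eq!(set.contains(3), Some(false));
+/// ```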
+#[derive(Copy, Clone, Eq, PartialEq, Decodable, Encodable)]
+pub struct FiniteBitSet<T: FiniteBitSetTy>(pub T);
+
+impl<T: FiniteBitSetTy> FiniteBitSet<T> {
+ /// Creates a new, empty bitset.
+ pub fn new_empty() -> Self {
+ Self(T::EMPTY)
+ }
+
+ /// Sets the `index`th bit.
+ pub fn set(&mut self, index: u32) {
+ self.0 |= T::ONE.checked_shl(index).unwrap_or(T::ZERO);
+ }
+
+ /// Unsets the `index`th bit.
+ pub fn clear(&mut self, index: u32) {
+ self.0 &= !T::ONE.checked_shl(index).unwrap_or(T::ZERO);
+ }
+
+    /// Sets all bits in the half-open `range`, i.e. bits `range.start` up to,
+    /// but not including, `range.end`.
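+    ///
+    /// For example (illustrative), `set_range(2..5)` ORs in the mask `0b1_1100`,
+    /// i.e. bits 2, 3 and 4.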
+ pub fn set_range(&mut self, range: Range<u32>) {
+ let bits = T::FILLED
+ .checked_shl(range.end - range.start)
+ .unwrap_or(T::ZERO)
+ .not()
+ .checked_shl(range.start)
+ .unwrap_or(T::ZERO);
+ self.0 |= bits;
+ }
+
+ /// Is the set empty?
+ pub fn is_empty(&self) -> bool {
+ self.0 == T::EMPTY
+ }
+
+    /// Returns `true` if `index` is within the domain of the bitset.
+ pub fn within_domain(&self, index: u32) -> bool {
+ index < T::DOMAIN_SIZE
+ }
+
+    /// Returns whether the `index`th bit is set, or `None` if `index` is
+    /// outside the domain.
+ pub fn contains(&self, index: u32) -> Option<bool> {
+ self.within_domain(index)
+ .then(|| ((self.0.checked_shr(index).unwrap_or(T::ONE)) & T::ONE) == T::ONE)
+ }
+}
+
+impl<T: FiniteBitSetTy> Default for FiniteBitSet<T> {
+ fn default() -> Self {
+ Self::new_empty()
+ }
+}
diff --git a/compiler/rustc_index/src/bit_set/tests.rs b/compiler/rustc_index/src/bit_set/tests.rs
new file mode 100644
index 000000000..351d62fee
--- /dev/null
+++ b/compiler/rustc_index/src/bit_set/tests.rs
@@ -0,0 +1,873 @@
+use super::*;
+
+extern crate test;
+use std::hint::black_box;
+use test::Bencher;
+
+#[test]
+fn test_new_filled() {
+ for i in 0..128 {
+ let idx_buf = BitSet::new_filled(i);
+ let elems: Vec<usize> = idx_buf.iter().collect();
+ let expected: Vec<usize> = (0..i).collect();
+ assert_eq!(elems, expected);
+ }
+}
+
+#[test]
+fn bitset_iter_works() {
+ let mut bitset: BitSet<usize> = BitSet::new_empty(100);
+ bitset.insert(1);
+ bitset.insert(10);
+ bitset.insert(19);
+ bitset.insert(62);
+ bitset.insert(63);
+ bitset.insert(64);
+ bitset.insert(65);
+ bitset.insert(66);
+ bitset.insert(99);
+ assert_eq!(bitset.iter().collect::<Vec<_>>(), [1, 10, 19, 62, 63, 64, 65, 66, 99]);
+}
+
+#[test]
+fn bitset_iter_works_2() {
+ let mut bitset: BitSet<usize> = BitSet::new_empty(320);
+ bitset.insert(0);
+ bitset.insert(127);
+ bitset.insert(191);
+ bitset.insert(255);
+ bitset.insert(319);
+ assert_eq!(bitset.iter().collect::<Vec<_>>(), [0, 127, 191, 255, 319]);
+}
+
+#[test]
+fn bitset_clone_from() {
+ let mut a: BitSet<usize> = BitSet::new_empty(10);
+ a.insert(4);
+ a.insert(7);
+ a.insert(9);
+
+ let mut b = BitSet::new_empty(2);
+ b.clone_from(&a);
+ assert_eq!(b.domain_size(), 10);
+ assert_eq!(b.iter().collect::<Vec<_>>(), [4, 7, 9]);
+
+ b.clone_from(&BitSet::new_empty(40));
+ assert_eq!(b.domain_size(), 40);
+ assert_eq!(b.iter().collect::<Vec<_>>(), []);
+}
+
+#[test]
+fn union_two_sets() {
+ let mut set1: BitSet<usize> = BitSet::new_empty(65);
+ let mut set2: BitSet<usize> = BitSet::new_empty(65);
+ assert!(set1.insert(3));
+ assert!(!set1.insert(3));
+ assert!(set2.insert(5));
+ assert!(set2.insert(64));
+ assert!(set1.union(&set2));
+ assert!(!set1.union(&set2));
+ assert!(set1.contains(3));
+ assert!(!set1.contains(4));
+ assert!(set1.contains(5));
+ assert!(!set1.contains(63));
+ assert!(set1.contains(64));
+}
+
+#[test]
+fn hybrid_bitset() {
+ let mut sparse038: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ assert!(sparse038.is_empty());
+ assert!(sparse038.insert(0));
+ assert!(sparse038.insert(1));
+ assert!(sparse038.insert(8));
+ assert!(sparse038.insert(3));
+ assert!(!sparse038.insert(3));
+ assert!(sparse038.remove(1));
+ assert!(!sparse038.is_empty());
+ assert_eq!(sparse038.iter().collect::<Vec<_>>(), [0, 3, 8]);
+
+ for i in 0..256 {
+ if i == 0 || i == 3 || i == 8 {
+ assert!(sparse038.contains(i));
+ } else {
+ assert!(!sparse038.contains(i));
+ }
+ }
+
+ let mut sparse01358 = sparse038.clone();
+ assert!(sparse01358.insert(1));
+ assert!(sparse01358.insert(5));
+ assert_eq!(sparse01358.iter().collect::<Vec<_>>(), [0, 1, 3, 5, 8]);
+
+ let mut dense10 = HybridBitSet::new_empty(256);
+ for i in 0..10 {
+ assert!(dense10.insert(i));
+ }
+ assert!(!dense10.is_empty());
+ assert_eq!(dense10.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut dense256 = HybridBitSet::new_empty(256);
+ assert!(dense256.is_empty());
+ dense256.insert_all();
+ assert!(!dense256.is_empty());
+ for i in 0..256 {
+ assert!(dense256.contains(i));
+ }
+
+ assert!(sparse038.superset(&sparse038)); // sparse + sparse (self)
+ assert!(sparse01358.superset(&sparse038)); // sparse + sparse
+ assert!(dense10.superset(&sparse038)); // dense + sparse
+ assert!(dense10.superset(&dense10)); // dense + dense (self)
+ assert!(dense256.superset(&dense10)); // dense + dense
+
+ let mut hybrid = sparse038.clone();
+ assert!(!sparse01358.union(&hybrid)); // no change
+ assert!(hybrid.union(&sparse01358));
+ assert!(hybrid.superset(&sparse01358) && sparse01358.superset(&hybrid));
+ assert!(!dense256.union(&dense10));
+
+ // dense / sparse where dense superset sparse
+ assert!(!dense10.clone().union(&sparse01358));
+ assert!(sparse01358.clone().union(&dense10));
+ assert!(dense10.clone().intersect(&sparse01358));
+ assert!(!sparse01358.clone().intersect(&dense10));
+ assert!(dense10.clone().subtract(&sparse01358));
+ assert!(sparse01358.clone().subtract(&dense10));
+
+ // dense / sparse where sparse superset dense
+ let dense038 = sparse038.to_dense();
+ assert!(!sparse01358.clone().union(&dense038));
+ assert!(dense038.clone().union(&sparse01358));
+ assert!(sparse01358.clone().intersect(&dense038));
+ assert!(!dense038.clone().intersect(&sparse01358));
+ assert!(sparse01358.clone().subtract(&dense038));
+ assert!(dense038.clone().subtract(&sparse01358));
+
+ let mut dense = dense10.clone();
+ assert!(dense.union(&dense256));
+ assert!(dense.superset(&dense256) && dense256.superset(&dense));
+ assert!(hybrid.union(&dense256));
+ assert!(hybrid.superset(&dense256) && dense256.superset(&hybrid));
+
+ assert!(!dense10.clone().intersect(&dense256));
+ assert!(dense256.clone().intersect(&dense10));
+ assert!(dense10.clone().subtract(&dense256));
+ assert!(dense256.clone().subtract(&dense10));
+
+ assert_eq!(dense256.iter().count(), 256);
+ let mut dense0 = dense256;
+ for i in 0..256 {
+ assert!(dense0.remove(i));
+ }
+ assert!(!dense0.remove(0));
+ assert!(dense0.is_empty());
+}
+
+#[test]
+fn chunked_bitset() {
+ let mut b0 = ChunkedBitSet::<usize>::new_empty(0);
+ let b0b = b0.clone();
+ assert_eq!(b0, ChunkedBitSet { domain_size: 0, chunks: Box::new([]), marker: PhantomData });
+
+ // There are no valid insert/remove/contains operations on a 0-domain
+ // bitset, but we can test `union`.
+ b0.assert_valid();
+ assert!(!b0.union(&b0b));
+ assert_eq!(b0.chunks(), vec![]);
+ assert_eq!(b0.count(), 0);
+ b0.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b1 = ChunkedBitSet::<usize>::new_empty(1);
+ assert_eq!(
+ b1,
+ ChunkedBitSet { domain_size: 1, chunks: Box::new([Zeros(1)]), marker: PhantomData }
+ );
+
+ b1.assert_valid();
+ assert!(!b1.contains(0));
+ assert_eq!(b1.count(), 0);
+ assert!(b1.insert(0));
+ assert!(b1.contains(0));
+ assert_eq!(b1.count(), 1);
+ assert_eq!(b1.chunks(), [Ones(1)]);
+ assert!(!b1.insert(0));
+ assert!(b1.remove(0));
+ assert!(!b1.contains(0));
+ assert_eq!(b1.count(), 0);
+ assert_eq!(b1.chunks(), [Zeros(1)]);
+ b1.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b100 = ChunkedBitSet::<usize>::new_filled(100);
+ assert_eq!(
+ b100,
+ ChunkedBitSet { domain_size: 100, chunks: Box::new([Ones(100)]), marker: PhantomData }
+ );
+
+ b100.assert_valid();
+ for i in 0..100 {
+ assert!(b100.contains(i));
+ }
+ assert_eq!(b100.count(), 100);
+ assert!(b100.remove(3));
+ assert!(b100.insert(3));
+ assert_eq!(b100.chunks(), vec![Ones(100)]);
+ assert!(
+ b100.remove(20) && b100.remove(30) && b100.remove(40) && b100.remove(99) && b100.insert(30)
+ );
+ assert_eq!(b100.count(), 97);
+ assert!(!b100.contains(20) && b100.contains(30) && !b100.contains(99) && b100.contains(50));
+ assert_eq!(
+ b100.chunks(),
+ vec![Mixed(
+ 100,
+ 97,
+ #[rustfmt::skip]
+ Rc::new([
+ 0b11111111_11111111_11111110_11111111_11111111_11101111_11111111_11111111,
+ 0b00000000_00000000_00000000_00000111_11111111_11111111_11111111_11111111,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ ])
+ )],
+ );
+ b100.assert_valid();
+ let mut num_removed = 0;
+ for i in 0..100 {
+ if b100.remove(i) {
+ num_removed += 1;
+ }
+ }
+ assert_eq!(num_removed, 97);
+ assert_eq!(b100.chunks(), vec![Zeros(100)]);
+ b100.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b2548 = ChunkedBitSet::<usize>::new_empty(2548);
+ assert_eq!(
+ b2548,
+ ChunkedBitSet {
+ domain_size: 2548,
+ chunks: Box::new([Zeros(2048), Zeros(500)]),
+ marker: PhantomData,
+ }
+ );
+
+ b2548.assert_valid();
+ b2548.insert(14);
+ b2548.remove(14);
+ assert_eq!(b2548.chunks(), vec![Zeros(2048), Zeros(500)]);
+ b2548.insert_all();
+ for i in 0..2548 {
+ assert!(b2548.contains(i));
+ }
+ assert_eq!(b2548.count(), 2548);
+ assert_eq!(b2548.chunks(), vec![Ones(2048), Ones(500)]);
+ b2548.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b4096 = ChunkedBitSet::<usize>::new_empty(4096);
+ assert_eq!(
+ b4096,
+ ChunkedBitSet {
+ domain_size: 4096,
+ chunks: Box::new([Zeros(2048), Zeros(2048)]),
+ marker: PhantomData,
+ }
+ );
+
+ b4096.assert_valid();
+ for i in 0..4096 {
+ assert!(!b4096.contains(i));
+ }
+ assert!(b4096.insert(0) && b4096.insert(4095) && !b4096.insert(4095));
+ assert!(
+ b4096.contains(0) && !b4096.contains(2047) && !b4096.contains(2048) && b4096.contains(4095)
+ );
+ assert_eq!(
+ b4096.chunks(),
+ #[rustfmt::skip]
+ vec![
+ Mixed(2048, 1, Rc::new([
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ])),
+ Mixed(2048, 1, Rc::new([
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x8000_0000_0000_0000
+ ])),
+ ],
+ );
+ assert_eq!(b4096.count(), 2);
+ b4096.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b10000 = ChunkedBitSet::<usize>::new_empty(10000);
+ assert_eq!(
+ b10000,
+ ChunkedBitSet {
+ domain_size: 10000,
+ chunks: Box::new([Zeros(2048), Zeros(2048), Zeros(2048), Zeros(2048), Zeros(1808),]),
+ marker: PhantomData,
+ }
+ );
+
+ b10000.assert_valid();
+ assert!(b10000.insert(3000) && b10000.insert(5000));
+ assert_eq!(
+ b10000.chunks(),
+ #[rustfmt::skip]
+ vec![
+ Zeros(2048),
+ Mixed(2048, 1, Rc::new([
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0100_0000_0000_0000, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ])),
+ Mixed(2048, 1, Rc::new([
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0100, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ])),
+ Zeros(2048),
+ Zeros(1808),
+ ],
+ );
+ let mut b10000b = ChunkedBitSet::<usize>::new_empty(10000);
+ b10000b.clone_from(&b10000);
+ assert_eq!(b10000, b10000b);
+ for i in 6000..7000 {
+ b10000b.insert(i);
+ }
+ assert_eq!(b10000b.count(), 1002);
+ b10000b.assert_valid();
+ b10000b.clone_from(&b10000);
+ assert_eq!(b10000b.count(), 2);
+ for i in 2000..8000 {
+ b10000b.insert(i);
+ }
+ b10000.union(&b10000b);
+ assert_eq!(b10000.count(), 6000);
+ b10000.union(&b10000b);
+ assert_eq!(b10000.count(), 6000);
+ b10000.assert_valid();
+ b10000b.assert_valid();
+}
+
+fn with_elements_chunked(elements: &[usize], domain_size: usize) -> ChunkedBitSet<usize> {
+ let mut s = ChunkedBitSet::new_empty(domain_size);
+ for &e in elements {
+ assert!(s.insert(e));
+ }
+ s
+}
+
+fn with_elements_standard(elements: &[usize], domain_size: usize) -> BitSet<usize> {
+ let mut s = BitSet::new_empty(domain_size);
+ for &e in elements {
+ assert!(s.insert(e));
+ }
+ s
+}
+
+#[test]
+fn chunked_bitset_into_bitset_operations() {
+ let a = vec![1, 5, 7, 11, 15, 2000, 3000];
+ let b = vec![3, 4, 11, 3000, 4000];
+ let aub = vec![1, 3, 4, 5, 7, 11, 15, 2000, 3000, 4000];
+ let aib = vec![11, 3000];
+
+ let b = with_elements_chunked(&b, 9876);
+
+ let mut union = with_elements_standard(&a, 9876);
+ assert!(union.union(&b));
+ assert!(!union.union(&b));
+ assert!(union.iter().eq(aub.iter().copied()));
+
+ let mut intersection = with_elements_standard(&a, 9876);
+ assert!(intersection.intersect(&b));
+ assert!(!intersection.intersect(&b));
+ assert!(intersection.iter().eq(aib.iter().copied()));
+}
+
+#[test]
+fn chunked_bitset_iter() {
+ fn check_iter(bit: &ChunkedBitSet<usize>, vec: &Vec<usize>) {
+ // Test collecting via both `.next()` and `.fold()` calls, to make sure both are correct
+ let mut collect_next = Vec::new();
+ let mut bit_iter = bit.iter();
+ while let Some(item) = bit_iter.next() {
+ collect_next.push(item);
+ }
+ assert_eq!(vec, &collect_next);
+
+ let collect_fold = bit.iter().fold(Vec::new(), |mut v, item| {
+ v.push(item);
+ v
+ });
+ assert_eq!(vec, &collect_fold);
+ }
+
+ // Empty
+ let vec: Vec<usize> = Vec::new();
+ let bit = with_elements_chunked(&vec, 9000);
+ check_iter(&bit, &vec);
+
+ // Filled
+ let n = 10000;
+ let vec: Vec<usize> = (0..n).collect();
+ let bit = with_elements_chunked(&vec, n);
+ check_iter(&bit, &vec);
+
+ // Filled with trailing zeros
+ let n = 10000;
+ let vec: Vec<usize> = (0..n).collect();
+ let bit = with_elements_chunked(&vec, 2 * n);
+ check_iter(&bit, &vec);
+
+ // Mixed
+ let n = 12345;
+ let vec: Vec<usize> = vec![0, 1, 2, 2010, 2047, 2099, 6000, 6002, 6004];
+ let bit = with_elements_chunked(&vec, n);
+ check_iter(&bit, &vec);
+}
+
+#[test]
+fn grow() {
+ let mut set: GrowableBitSet<usize> = GrowableBitSet::with_capacity(65);
+ for index in 0..65 {
+ assert!(set.insert(index));
+ assert!(!set.insert(index));
+ }
+ set.ensure(128);
+
+ // Check if the bits set before growing are still set
+ for index in 0..65 {
+ assert!(set.contains(index));
+ }
+
+ // Check if the new bits are all un-set
+ for index in 65..128 {
+ assert!(!set.contains(index));
+ }
+
+ // Check that we can set all new bits without running out of bounds
+ for index in 65..128 {
+ assert!(set.insert(index));
+ assert!(!set.insert(index));
+ }
+}
+
+#[test]
+fn matrix_intersection() {
+ let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(200, 200);
+
+ // (*) Elements reachable from both 2 and 65.
+
+ matrix.insert(2, 3);
+ matrix.insert(2, 6);
+ matrix.insert(2, 10); // (*)
+ matrix.insert(2, 64); // (*)
+ matrix.insert(2, 65);
+ matrix.insert(2, 130);
+ matrix.insert(2, 160); // (*)
+
+ matrix.insert(64, 133);
+
+ matrix.insert(65, 2);
+ matrix.insert(65, 8);
+ matrix.insert(65, 10); // (*)
+ matrix.insert(65, 64); // (*)
+ matrix.insert(65, 68);
+ matrix.insert(65, 133);
+ matrix.insert(65, 160); // (*)
+
+ let intersection = matrix.intersect_rows(2, 64);
+ assert!(intersection.is_empty());
+
+ let intersection = matrix.intersect_rows(2, 65);
+ assert_eq!(intersection, &[10, 64, 160]);
+}
+
+#[test]
+fn matrix_iter() {
+ let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(64, 100);
+ matrix.insert(3, 22);
+ matrix.insert(3, 75);
+ matrix.insert(2, 99);
+ matrix.insert(4, 0);
+ matrix.union_rows(3, 5);
+ matrix.insert_all_into_row(6);
+
+ let expected = [99];
+ let mut iter = expected.iter();
+ for i in matrix.iter(2) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ assert_eq!(matrix.count(3), expected.len());
+ for i in matrix.iter(3) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [0];
+ let mut iter = expected.iter();
+ assert_eq!(matrix.count(4), expected.len());
+ for i in matrix.iter(4) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ assert_eq!(matrix.count(5), expected.len());
+ for i in matrix.iter(5) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ assert_eq!(matrix.count(6), 100);
+ let mut count = 0;
+ for (idx, i) in matrix.iter(6).enumerate() {
+ assert_eq!(idx, i);
+ count += 1;
+ }
+ assert_eq!(count, 100);
+
+ if let Some(i) = matrix.iter(7).next() {
+ panic!("expected no elements in row, but contains element {:?}", i);
+ }
+}
+
+#[test]
+fn sparse_matrix_iter() {
+ let mut matrix: SparseBitMatrix<usize, usize> = SparseBitMatrix::new(100);
+ matrix.insert(3, 22);
+ matrix.insert(3, 75);
+ matrix.insert(2, 99);
+ matrix.insert(4, 0);
+ matrix.union_rows(3, 5);
+
+ let expected = [99];
+ let mut iter = expected.iter();
+ for i in matrix.iter(2) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ for i in matrix.iter(3) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [0];
+ let mut iter = expected.iter();
+ for i in matrix.iter(4) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ for i in matrix.iter(5) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+}
+
+#[test]
+fn sparse_matrix_operations() {
+ let mut matrix: SparseBitMatrix<usize, usize> = SparseBitMatrix::new(100);
+ matrix.insert(3, 22);
+ matrix.insert(3, 75);
+ matrix.insert(2, 99);
+ matrix.insert(4, 0);
+
+ let mut disjoint: HybridBitSet<usize> = HybridBitSet::new_empty(100);
+ disjoint.insert(33);
+
+ let mut superset = HybridBitSet::new_empty(100);
+ superset.insert(22);
+ superset.insert(75);
+ superset.insert(33);
+
+ let mut subset = HybridBitSet::new_empty(100);
+ subset.insert(22);
+
+ // SparseBitMatrix::remove
+ {
+ let mut matrix = matrix.clone();
+ matrix.remove(3, 22);
+ assert!(!matrix.row(3).unwrap().contains(22));
+ matrix.remove(0, 0);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::clear
+ {
+ let mut matrix = matrix.clone();
+ matrix.clear(3);
+ assert!(!matrix.row(3).unwrap().contains(75));
+ matrix.clear(0);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::intersect_row
+ {
+ let mut matrix = matrix.clone();
+ assert!(!matrix.intersect_row(3, &superset));
+ assert!(matrix.intersect_row(3, &subset));
+ matrix.intersect_row(0, &disjoint);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::subtract_row
+ {
+ let mut matrix = matrix.clone();
+ assert!(!matrix.subtract_row(3, &disjoint));
+ assert!(matrix.subtract_row(3, &subset));
+ assert!(matrix.subtract_row(3, &superset));
+ matrix.intersect_row(0, &disjoint);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::union_row
+ {
+ let mut matrix = matrix.clone();
+ assert!(!matrix.union_row(3, &subset));
+ assert!(matrix.union_row(3, &disjoint));
+ matrix.union_row(0, &disjoint);
+ assert!(matrix.row(0).is_some());
+ }
+}
+
+#[test]
+fn dense_insert_range() {
+ #[track_caller]
+ fn check<R>(domain: usize, range: R)
+ where
+ R: RangeBounds<usize> + Clone + IntoIterator<Item = usize> + std::fmt::Debug,
+ {
+ let mut set = BitSet::new_empty(domain);
+ set.insert_range(range.clone());
+ for i in set.iter() {
+ assert!(range.contains(&i));
+ }
+ for i in range.clone() {
+ assert!(set.contains(i), "{} in {:?}, inserted {:?}", i, set, range);
+ }
+ }
+ check(300, 10..10);
+ check(300, WORD_BITS..WORD_BITS * 2);
+ check(300, WORD_BITS - 1..WORD_BITS * 2);
+ check(300, WORD_BITS - 1..WORD_BITS);
+ check(300, 10..100);
+ check(300, 10..30);
+ check(300, 0..5);
+ check(300, 0..250);
+ check(300, 200..250);
+
+ check(300, 10..=10);
+ check(300, WORD_BITS..=WORD_BITS * 2);
+ check(300, WORD_BITS - 1..=WORD_BITS * 2);
+ check(300, WORD_BITS - 1..=WORD_BITS);
+ check(300, 10..=100);
+ check(300, 10..=30);
+ check(300, 0..=5);
+ check(300, 0..=250);
+ check(300, 200..=250);
+
+ for i in 0..WORD_BITS * 2 {
+ for j in i..WORD_BITS * 2 {
+ check(WORD_BITS * 2, i..j);
+ check(WORD_BITS * 2, i..=j);
+ check(300, i..j);
+ check(300, i..=j);
+ }
+ }
+}
+
+#[test]
+fn dense_last_set_before() {
+ fn easy(set: &BitSet<usize>, needle: impl RangeBounds<usize>) -> Option<usize> {
+ let mut last_leq = None;
+ for e in set.iter() {
+ if needle.contains(&e) {
+ last_leq = Some(e);
+ }
+ }
+ last_leq
+ }
+
+ #[track_caller]
+ fn cmp(set: &BitSet<usize>, needle: impl RangeBounds<usize> + Clone + std::fmt::Debug) {
+ assert_eq!(
+ set.last_set_in(needle.clone()),
+ easy(set, needle.clone()),
+ "{:?} in {:?}",
+ needle,
+ set
+ );
+ }
+ let mut set = BitSet::new_empty(300);
+ cmp(&set, 50..=50);
+ set.insert(WORD_BITS);
+ cmp(&set, WORD_BITS..=WORD_BITS);
+ set.insert(WORD_BITS - 1);
+ cmp(&set, 0..=WORD_BITS - 1);
+ cmp(&set, 0..=5);
+ cmp(&set, 10..100);
+ set.insert(100);
+ cmp(&set, 100..110);
+ cmp(&set, 99..100);
+ cmp(&set, 99..=100);
+
+ for i in 0..=WORD_BITS * 2 {
+ for j in i..=WORD_BITS * 2 {
+ for k in 0..WORD_BITS * 2 {
+ let mut set = BitSet::new_empty(300);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ set.insert(k);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ }
+ }
+ }
+}
+
+/// Merge dense hybrid set into empty sparse hybrid set.
+#[bench]
+fn union_hybrid_sparse_empty_to_dense(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ for i in 0..10 {
+ assert!(pre_dense.insert(i));
+ }
+ let pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into full hybrid set with same indices.
+#[bench]
+fn union_hybrid_sparse_full_to_dense(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ for i in 0..10 {
+ assert!(pre_dense.insert(i));
+ }
+ let mut pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_sparse.insert(i));
+ }
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into full hybrid set with indices over the whole domain.
+#[bench]
+fn union_hybrid_sparse_domain_to_dense(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX * 64);
+ for i in 0..10 {
+ assert!(pre_dense.insert(i));
+ }
+ let mut pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX * 64);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_sparse.insert(i * 64));
+ }
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into empty hybrid set where the domain is very small.
+#[bench]
+fn union_hybrid_sparse_empty_small_domain(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_dense.insert(i));
+ }
+ let pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into full hybrid set where the domain is very small.
+#[bench]
+fn union_hybrid_sparse_full_small_domain(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_dense.insert(i));
+ }
+ let mut pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_sparse.insert(i));
+ }
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+#[bench]
+fn bench_insert(b: &mut Bencher) {
+ let mut bs = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ black_box(bs.insert(black_box(100u32)));
+ });
+}
+
+#[bench]
+fn bench_remove(b: &mut Bencher) {
+ let mut bs = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ black_box(bs.remove(black_box(100u32)));
+ });
+}
+
+#[bench]
+fn bench_iter(b: &mut Bencher) {
+ let bs = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ bs.iter().map(|b: usize| black_box(b)).for_each(drop);
+ });
+}
+
+#[bench]
+fn bench_intersect(b: &mut Bencher) {
+ let mut ba: BitSet<u32> = BitSet::new_filled(99999usize);
+ let bb = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ ba.intersect(black_box(&bb));
+ });
+}
diff --git a/compiler/rustc_index/src/interval.rs b/compiler/rustc_index/src/interval.rs
new file mode 100644
index 000000000..3592fb330
--- /dev/null
+++ b/compiler/rustc_index/src/interval.rs
@@ -0,0 +1,305 @@
+use std::iter::Step;
+use std::marker::PhantomData;
+use std::ops::RangeBounds;
+use std::ops::{Bound, Range};
+
+use crate::vec::Idx;
+use crate::vec::IndexVec;
+use smallvec::SmallVec;
+
+#[cfg(test)]
+mod tests;
+
+/// Stores a set of intervals on the indices.
+///
+/// The elements in `map` are sorted and non-adjacent, which means the second
+/// value of each element is smaller than the first value of the following
+/// element by at least two; otherwise the two intervals would have been
+/// merged into one.
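+///
+/// # Example
+///
+/// A minimal illustrative sketch (in the style of this crate's own tests):
+///
+/// ```
+/// # use rustc_index::interval::IntervalSet;
+/// let mut set = IntervalSet::<u32>::new(300);
+/// set.insert_range(10..20);
+/// set.insert(25);
+/// assert!(set.contains(12));
+/// assert!(!set.contains(20));
+/// // Overlapping and adjacent insertions are merged into a single interval.
+/// set.insert_range(20..=25);
+/// assert_eq!(set.iter_intervals().collect::<Vec<_>>(), [10..26]);
+/// ```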
+#[derive(Debug, Clone)]
+pub struct IntervalSet<I> {
+ // Start, end
+ map: SmallVec<[(u32, u32); 4]>,
+ domain: usize,
+ _data: PhantomData<I>,
+}
+
+#[inline]
+fn inclusive_start<T: Idx>(range: impl RangeBounds<T>) -> u32 {
+ match range.start_bound() {
+ Bound::Included(start) => start.index() as u32,
+ Bound::Excluded(start) => start.index() as u32 + 1,
+ Bound::Unbounded => 0,
+ }
+}
+
+#[inline]
+fn inclusive_end<T: Idx>(domain: usize, range: impl RangeBounds<T>) -> Option<u32> {
+ let end = match range.end_bound() {
+ Bound::Included(end) => end.index() as u32,
+ Bound::Excluded(end) => end.index().checked_sub(1)? as u32,
+ Bound::Unbounded => domain.checked_sub(1)? as u32,
+ };
+ Some(end)
+}
+
+impl<I: Idx> IntervalSet<I> {
+ pub fn new(domain: usize) -> IntervalSet<I> {
+ IntervalSet { map: SmallVec::new(), domain, _data: PhantomData }
+ }
+
+ pub fn clear(&mut self) {
+ self.map.clear();
+ }
+
+ pub fn iter(&self) -> impl Iterator<Item = I> + '_
+ where
+ I: Step,
+ {
+ self.iter_intervals().flatten()
+ }
+
+ /// Iterates through intervals stored in the set, in order.
+ pub fn iter_intervals(&self) -> impl Iterator<Item = std::ops::Range<I>> + '_
+ where
+ I: Step,
+ {
+ self.map.iter().map(|&(start, end)| I::new(start as usize)..I::new(end as usize + 1))
+ }
+
+ /// Returns true if we increased the number of elements present.
+ pub fn insert(&mut self, point: I) -> bool {
+ self.insert_range(point..=point)
+ }
+
+ /// Returns true if we increased the number of elements present.
+ pub fn insert_range(&mut self, range: impl RangeBounds<I> + Clone) -> bool {
+ let start = inclusive_start(range.clone());
+ let Some(end) = inclusive_end(self.domain, range) else {
+ // empty range
+ return false;
+ };
+ if start > end {
+ return false;
+ }
+
+ // This condition looks a bit weird, but actually makes sense.
+ //
+ // if r.0 == end + 1, then we're actually adjacent, so we want to
+ // continue to the next range. We're looking here for the first
+ // range which starts *non-adjacently* to our end.
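+        //
+        // Illustrative example: with `map == [(0, 3), (10, 20)]` and an
+        // inserted range `5..=8`, we have `end + 1 == 9`, so `next == 1`
+        // (only `(0, 3)` starts at or before 9). The predecessor `(0, 3)`
+        // ends non-adjacently before 5, so the new interval is inserted
+        // as-is, giving `[(0, 3), (5, 8), (10, 20)]`.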
+ let next = self.map.partition_point(|r| r.0 <= end + 1);
+ let result = if let Some(right) = next.checked_sub(1) {
+ let (prev_start, prev_end) = self.map[right];
+ if prev_end + 1 >= start {
+ // If the start for the inserted range is adjacent to the
+ // end of the previous, we can extend the previous range.
+ if start < prev_start {
+                    // Find the first range whose end is adjacent to (or past)
+                    // our start; everything from it through `right` will be
+                    // merged. We can also ensure that `left <= right`.
+ let left = self.map.partition_point(|l| l.1 + 1 < start);
+ let min = std::cmp::min(self.map[left].0, start);
+ let max = std::cmp::max(prev_end, end);
+ self.map[right] = (min, max);
+ if left != right {
+ self.map.drain(left..right);
+ }
+ true
+ } else {
+ // We overlap with the previous range, increase it to
+ // include us.
+ //
+ // Make sure we're actually going to *increase* it though --
+ // it may be that end is just inside the previously existing
+ // set.
+ if end > prev_end {
+ self.map[right].1 = end;
+ true
+ } else {
+ false
+ }
+ }
+ } else {
+ // Otherwise, we don't overlap, so just insert
+ self.map.insert(right + 1, (start, end));
+ true
+ }
+ } else {
+ if self.map.is_empty() {
+ // Quite common in practice, and expensive to call memcpy
+ // with length zero.
+ self.map.push((start, end));
+ } else {
+ self.map.insert(next, (start, end));
+ }
+ true
+ };
+ debug_assert!(
+ self.check_invariants(),
+ "wrong intervals after insert {:?}..={:?} to {:?}",
+ start,
+ end,
+ self
+ );
+ result
+ }
+
+ pub fn contains(&self, needle: I) -> bool {
+ let needle = needle.index() as u32;
+ let Some(last) = self.map.partition_point(|r| r.0 <= needle).checked_sub(1) else {
+            // All ranges in the map start after `needle`, so it cannot be contained.
+ return false;
+ };
+ let (_, prev_end) = &self.map[last];
+ needle <= *prev_end
+ }
+
+ pub fn superset(&self, other: &IntervalSet<I>) -> bool
+ where
+ I: Step,
+ {
+ let mut sup_iter = self.iter_intervals();
+ let mut current = None;
+ let contains = |sup: Range<I>, sub: Range<I>, current: &mut Option<Range<I>>| {
+ if sup.end < sub.start {
+ // if `sup.end == sub.start`, the next sup doesn't contain `sub.start`
+ None // continue to the next sup
+ } else if sup.end >= sub.end && sup.start <= sub.start {
+ *current = Some(sup); // save the current sup
+ Some(true)
+ } else {
+ Some(false)
+ }
+ };
+ other.iter_intervals().all(|sub| {
+ current
+ .take()
+ .and_then(|sup| contains(sup, sub.clone(), &mut current))
+ .or_else(|| sup_iter.find_map(|sup| contains(sup, sub.clone(), &mut current)))
+ .unwrap_or(false)
+ })
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.map.is_empty()
+ }
+
+ /// Returns the maximum (last) element present in the set from `range`.
+ pub fn last_set_in(&self, range: impl RangeBounds<I> + Clone) -> Option<I> {
+ let start = inclusive_start(range.clone());
+ let Some(end) = inclusive_end(self.domain, range) else {
+ // empty range
+ return None;
+ };
+ if start > end {
+ return None;
+ }
+ let Some(last) = self.map.partition_point(|r| r.0 <= end).checked_sub(1) else {
+            // All ranges in the map start after the end of the queried range
+ return None;
+ };
+ let (_, prev_end) = &self.map[last];
+ if start <= *prev_end { Some(I::new(std::cmp::min(*prev_end, end) as usize)) } else { None }
+ }
+
+ pub fn insert_all(&mut self) {
+ self.clear();
+ if let Some(end) = self.domain.checked_sub(1) {
+ self.map.push((0, end.try_into().unwrap()));
+ }
+ debug_assert!(self.check_invariants());
+ }
+
+ pub fn union(&mut self, other: &IntervalSet<I>) -> bool
+ where
+ I: Step,
+ {
+ assert_eq!(self.domain, other.domain);
+ let mut did_insert = false;
+ for range in other.iter_intervals() {
+ did_insert |= self.insert_range(range);
+ }
+ debug_assert!(self.check_invariants());
+ did_insert
+ }
+
+    // Checks that the intervals are sorted, non-overlapping, non-adjacent, and within the domain.
+ fn check_invariants(&self) -> bool {
+ let mut current: Option<u32> = None;
+ for (start, end) in &self.map {
+ if start > end || current.map_or(false, |x| x + 1 >= *start) {
+ return false;
+ }
+ current = Some(*end);
+ }
+ current.map_or(true, |x| x < self.domain as u32)
+ }
+}
+
+/// This data structure optimizes for cases where the stored bits in each row
+/// are expected to be highly contiguous (long ranges of 1s or 0s), in contrast
+/// to `BitMatrix` and `SparseBitMatrix` which are optimized for
+/// "random"/non-contiguous bits and cheap(er) point queries at the expense of
+/// memory usage.
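+///
+/// # Example
+///
+/// A minimal illustrative sketch:
+///
+/// ```
+/// # use rustc_index::interval::SparseIntervalMatrix;
+/// let mut matrix: SparseIntervalMatrix<usize, u32> = SparseIntervalMatrix::new(1000);
+/// matrix.insert_range(0, 100..200);
+/// matrix.insert(0, 205);
+/// assert!(matrix.contains(0, 150));
+/// assert!(!matrix.contains(0, 200));
+/// matrix.union_rows(0, 1); // row 1 now covers everything row 0 does
+/// assert!(matrix.contains(1, 205));
+/// ```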
+#[derive(Clone)]
+pub struct SparseIntervalMatrix<R, C>
+where
+ R: Idx,
+ C: Idx,
+{
+ rows: IndexVec<R, IntervalSet<C>>,
+ column_size: usize,
+}
+
+impl<R: Idx, C: Step + Idx> SparseIntervalMatrix<R, C> {
+ pub fn new(column_size: usize) -> SparseIntervalMatrix<R, C> {
+ SparseIntervalMatrix { rows: IndexVec::new(), column_size }
+ }
+
+ pub fn rows(&self) -> impl Iterator<Item = R> {
+ self.rows.indices()
+ }
+
+ pub fn row(&self, row: R) -> Option<&IntervalSet<C>> {
+ self.rows.get(row)
+ }
+
+ fn ensure_row(&mut self, row: R) -> &mut IntervalSet<C> {
+ self.rows.ensure_contains_elem(row, || IntervalSet::new(self.column_size));
+ &mut self.rows[row]
+ }
+
+ pub fn union_row(&mut self, row: R, from: &IntervalSet<C>) -> bool
+ where
+ C: Step,
+ {
+ self.ensure_row(row).union(from)
+ }
+
+ pub fn union_rows(&mut self, read: R, write: R) -> bool
+ where
+ C: Step,
+ {
+ if read == write || self.rows.get(read).is_none() {
+ return false;
+ }
+ self.ensure_row(write);
+ let (read_row, write_row) = self.rows.pick2_mut(read, write);
+ write_row.union(read_row)
+ }
+
+ pub fn insert_all_into_row(&mut self, row: R) {
+ self.ensure_row(row).insert_all();
+ }
+
+ pub fn insert_range(&mut self, row: R, range: impl RangeBounds<C> + Clone) {
+ self.ensure_row(row).insert_range(range);
+ }
+
+ pub fn insert(&mut self, row: R, point: C) -> bool {
+ self.ensure_row(row).insert(point)
+ }
+
+ pub fn contains(&self, row: R, point: C) -> bool {
+ self.row(row).map_or(false, |r| r.contains(point))
+ }
+}
diff --git a/compiler/rustc_index/src/interval/tests.rs b/compiler/rustc_index/src/interval/tests.rs
new file mode 100644
index 000000000..375af60f6
--- /dev/null
+++ b/compiler/rustc_index/src/interval/tests.rs
@@ -0,0 +1,199 @@
+use super::*;
+
+#[test]
+fn insert_collapses() {
+ let mut set = IntervalSet::<u32>::new(10000);
+ set.insert_range(9831..=9837);
+ set.insert_range(43..=9830);
+ assert_eq!(set.iter_intervals().collect::<Vec<_>>(), [43..9838]);
+}
+
+#[test]
+fn contains() {
+ let mut set = IntervalSet::new(300);
+ set.insert(0u32);
+ assert!(set.contains(0));
+ set.insert_range(0..10);
+ assert!(set.contains(9));
+ assert!(!set.contains(10));
+ set.insert_range(10..11);
+ assert!(set.contains(10));
+}
+
+#[test]
+fn insert() {
+ for i in 0..30usize {
+ let mut set = IntervalSet::new(300);
+ for j in i..30usize {
+ set.insert(j);
+ for k in i..j {
+ assert!(set.contains(k));
+ }
+ }
+ }
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..1u32);
+ assert!(set.contains(0), "{:?}", set.map);
+ assert!(!set.contains(1));
+ set.insert_range(1..1);
+ assert!(set.contains(0));
+ assert!(!set.contains(1));
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(4..5u32);
+ set.insert_range(5..10);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [4, 5, 6, 7, 8, 9]);
+ set.insert_range(3..7);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(3..5);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(0..3);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(0..10);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(5..10);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(5..13);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
+}
+
+#[test]
+fn insert_range() {
+ #[track_caller]
+ fn check<R>(range: R)
+ where
+ R: RangeBounds<usize> + Clone + IntoIterator<Item = usize> + std::fmt::Debug,
+ {
+ let mut set = IntervalSet::new(300);
+ set.insert_range(range.clone());
+ for i in set.iter() {
+ assert!(range.contains(&i));
+ }
+ for i in range.clone() {
+ assert!(set.contains(i), "A: {} in {:?}, inserted {:?}", i, set, range);
+ }
+ set.insert_range(range.clone());
+ for i in set.iter() {
+ assert!(range.contains(&i), "{} in {:?}", i, set);
+ }
+ for i in range.clone() {
+ assert!(set.contains(i), "B: {} in {:?}, inserted {:?}", i, set, range);
+ }
+ }
+ check(10..10);
+ check(10..100);
+ check(10..30);
+ check(0..5);
+ check(0..250);
+ check(200..250);
+
+ check(10..=10);
+ check(10..=100);
+ check(10..=30);
+ check(0..=5);
+ check(0..=250);
+ check(200..=250);
+
+ for i in 0..30 {
+ for j in i..30 {
+ check(i..j);
+ check(i..=j);
+ }
+ }
+}
+
+#[test]
+fn insert_range_dual() {
+ let mut set = IntervalSet::<u32>::new(300);
+ set.insert_range(0..3);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2]);
+ set.insert_range(5..7);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 5, 6]);
+ set.insert_range(3..4);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 5, 6]);
+ set.insert_range(3..5);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn last_set_before_adjacent() {
+ let mut set = IntervalSet::<u32>::new(300);
+ set.insert_range(0..3);
+ set.insert_range(3..5);
+ assert_eq!(set.last_set_in(0..3), Some(2));
+ assert_eq!(set.last_set_in(0..5), Some(4));
+ assert_eq!(set.last_set_in(3..5), Some(4));
+ set.insert_range(2..5);
+ assert_eq!(set.last_set_in(0..3), Some(2));
+ assert_eq!(set.last_set_in(0..5), Some(4));
+ assert_eq!(set.last_set_in(3..5), Some(4));
+}
+
+#[test]
+fn last_set_in() {
+ fn easy(set: &IntervalSet<usize>, needle: impl RangeBounds<usize>) -> Option<usize> {
+ let mut last_leq = None;
+ for e in set.iter() {
+ if needle.contains(&e) {
+ last_leq = Some(e);
+ }
+ }
+ last_leq
+ }
+
+ #[track_caller]
+ fn cmp(set: &IntervalSet<usize>, needle: impl RangeBounds<usize> + Clone + std::fmt::Debug) {
+ assert_eq!(
+ set.last_set_in(needle.clone()),
+ easy(set, needle.clone()),
+ "{:?} in {:?}",
+ needle,
+ set
+ );
+ }
+ let mut set = IntervalSet::new(300);
+ cmp(&set, 50..=50);
+ set.insert(64);
+ cmp(&set, 64..=64);
+ set.insert(64 - 1);
+ cmp(&set, 0..=64 - 1);
+ cmp(&set, 0..=5);
+ cmp(&set, 10..100);
+ set.insert(100);
+ cmp(&set, 100..110);
+ cmp(&set, 99..100);
+ cmp(&set, 99..=100);
+
+ for i in 0..=30 {
+ for j in i..=30 {
+ for k in 0..30 {
+ let mut set = IntervalSet::new(100);
+ cmp(&set, ..j);
+ cmp(&set, i..);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ set.insert(k);
+ cmp(&set, ..j);
+ cmp(&set, i..);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
new file mode 100644
index 000000000..33c3c536f
--- /dev/null
+++ b/compiler/rustc_index/src/lib.rs
@@ -0,0 +1,23 @@
+#![feature(allow_internal_unstable)]
+#![feature(bench_black_box)]
+#![feature(extend_one)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(new_uninit)]
+#![feature(step_trait)]
+#![feature(stmt_expr_attributes)]
+#![feature(test)]
+
+pub mod bit_set;
+pub mod interval;
+pub mod vec;
+
+pub use rustc_macros::newtype_index;
+
+/// Type size assertion. The first argument is a type and the second argument is its expected size.
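+///
+/// For example (illustrative), `static_assert_size!(u64, 8);` compiles, whereas a
+/// mismatched size is rejected at compile time.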
+#[macro_export]
+macro_rules! static_assert_size {
+ ($ty:ty, $size:expr) => {
+ const _: [(); $size] = [(); ::std::mem::size_of::<$ty>()];
+ };
+}
diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs
new file mode 100644
index 000000000..30ff36421
--- /dev/null
+++ b/compiler/rustc_index/src/vec.rs
@@ -0,0 +1,409 @@
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+use std::fmt;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::iter::FromIterator;
+use std::marker::PhantomData;
+use std::ops::{Index, IndexMut, RangeBounds};
+use std::slice;
+use std::vec;
+
+/// Represents some newtyped `usize` wrapper.
+///
+/// Purpose: avoid mixing indexes for different bitvector domains.
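+///
+/// Index types are normally defined with the `newtype_index!` macro; the manual
+/// impl below is only an illustrative sketch of what the trait requires.
+///
+/// ```
+/// # use rustc_index::vec::Idx;
+/// #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+/// struct NodeId(usize);
+///
+/// impl Idx for NodeId {
+///     fn new(idx: usize) -> Self {
+///         NodeId(idx)
+///     }
+///     fn index(self) -> usize {
+///         self.0
+///     }
+/// }
+/// ```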
+pub trait Idx: Copy + 'static + Eq + PartialEq + Debug + Hash {
+ fn new(idx: usize) -> Self;
+
+ fn index(self) -> usize;
+
+ fn increment_by(&mut self, amount: usize) {
+ *self = self.plus(amount);
+ }
+
+ fn plus(self, amount: usize) -> Self {
+ Self::new(self.index() + amount)
+ }
+}
+
+impl Idx for usize {
+ #[inline]
+ fn new(idx: usize) -> Self {
+ idx
+ }
+ #[inline]
+ fn index(self) -> usize {
+ self
+ }
+}
+
+impl Idx for u32 {
+ #[inline]
+ fn new(idx: usize) -> Self {
+ assert!(idx <= u32::MAX as usize);
+ idx as u32
+ }
+ #[inline]
+ fn index(self) -> usize {
+ self as usize
+ }
+}
+
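+/// A `Vec` indexed by a newtyped index `I` instead of `usize`.
+///
+/// # Example
+///
+/// A minimal illustrative sketch (`u32` implements `Idx`, so it can serve as
+/// the index type here):
+///
+/// ```
+/// # use rustc_index::vec::IndexVec;
+/// let mut names: IndexVec<u32, &str> = IndexVec::new();
+/// let a = names.push("alpha");
+/// let b = names.push("beta");
+/// assert_eq!(a, 0);
+/// assert_eq!(names[b], "beta");
+/// for (idx, name) in names.iter_enumerated() {
+///     println!("{:?}: {}", idx, name);
+/// }
+/// ```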
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct IndexVec<I: Idx, T> {
+ pub raw: Vec<T>,
+ _marker: PhantomData<fn(&I)>,
+}
+
+// Whether `IndexVec` is `Send` depends only on the data,
+// not the phantom data.
+unsafe impl<I: Idx, T> Send for IndexVec<I, T> where T: Send {}
+
+impl<S: Encoder, I: Idx, T: Encodable<S>> Encodable<S> for IndexVec<I, T> {
+ fn encode(&self, s: &mut S) {
+ Encodable::encode(&self.raw, s);
+ }
+}
+
+impl<D: Decoder, I: Idx, T: Decodable<D>> Decodable<D> for IndexVec<I, T> {
+ fn decode(d: &mut D) -> Self {
+ IndexVec { raw: Decodable::decode(d), _marker: PhantomData }
+ }
+}
+
+impl<I: Idx, T: fmt::Debug> fmt::Debug for IndexVec<I, T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.raw, fmt)
+ }
+}
+
+impl<I: Idx, T> IndexVec<I, T> {
+ #[inline]
+ pub fn new() -> Self {
+ IndexVec { raw: Vec::new(), _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn from_raw(raw: Vec<T>) -> Self {
+ IndexVec { raw, _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ IndexVec { raw: Vec::with_capacity(capacity), _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn from_elem<S>(elem: T, universe: &IndexVec<I, S>) -> Self
+ where
+ T: Clone,
+ {
+ IndexVec { raw: vec![elem; universe.len()], _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn from_elem_n(elem: T, n: usize) -> Self
+ where
+ T: Clone,
+ {
+ IndexVec { raw: vec![elem; n], _marker: PhantomData }
+ }
+
+ /// Create an `IndexVec` with `n` elements, where the value of each
+ /// element is the result of `func(i)`. (The underlying vector will
+ /// be allocated only once, with a capacity of at least `n`.)
+ #[inline]
+ pub fn from_fn_n(func: impl FnMut(I) -> T, n: usize) -> Self {
+ let indices = (0..n).map(I::new);
+ Self::from_raw(indices.map(func).collect())
+ }
+
+ #[inline]
+ pub fn push(&mut self, d: T) -> I {
+ let idx = I::new(self.len());
+ self.raw.push(d);
+ idx
+ }
+
+ #[inline]
+ pub fn pop(&mut self) -> Option<T> {
+ self.raw.pop()
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.raw.len()
+ }
+
+ /// Gives the next index that will be assigned when `push` is
+ /// called.
+ #[inline]
+ pub fn next_index(&self) -> I {
+ I::new(self.len())
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.raw.is_empty()
+ }
+
+ #[inline]
+ pub fn into_iter(self) -> vec::IntoIter<T> {
+ self.raw.into_iter()
+ }
+
+ #[inline]
+ pub fn into_iter_enumerated(
+ self,
+ ) -> impl DoubleEndedIterator<Item = (I, T)> + ExactSizeIterator {
+ self.raw.into_iter().enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn iter(&self) -> slice::Iter<'_, T> {
+ self.raw.iter()
+ }
+
+ #[inline]
+ pub fn iter_enumerated(
+ &self,
+ ) -> impl DoubleEndedIterator<Item = (I, &T)> + ExactSizeIterator + '_ {
+ self.raw.iter().enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn indices(&self) -> impl DoubleEndedIterator<Item = I> + ExactSizeIterator + 'static {
+ (0..self.len()).map(|n| I::new(n))
+ }
+
+ #[inline]
+ pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> {
+ self.raw.iter_mut()
+ }
+
+ #[inline]
+ pub fn iter_enumerated_mut(
+ &mut self,
+ ) -> impl DoubleEndedIterator<Item = (I, &mut T)> + ExactSizeIterator + '_ {
+ self.raw.iter_mut().enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn drain<'a, R: RangeBounds<usize>>(
+ &'a mut self,
+ range: R,
+ ) -> impl Iterator<Item = T> + 'a {
+ self.raw.drain(range)
+ }
+
+ #[inline]
+ pub fn drain_enumerated<'a, R: RangeBounds<usize>>(
+ &'a mut self,
+ range: R,
+ ) -> impl Iterator<Item = (I, T)> + 'a {
+ self.raw.drain(range).enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn last(&self) -> Option<I> {
+ self.len().checked_sub(1).map(I::new)
+ }
+
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.raw.shrink_to_fit()
+ }
+
+ #[inline]
+ pub fn swap(&mut self, a: I, b: I) {
+ self.raw.swap(a.index(), b.index())
+ }
+
+ #[inline]
+ pub fn truncate(&mut self, a: usize) {
+ self.raw.truncate(a)
+ }
+
+ #[inline]
+ pub fn get(&self, index: I) -> Option<&T> {
+ self.raw.get(index.index())
+ }
+
+ #[inline]
+ pub fn get_mut(&mut self, index: I) -> Option<&mut T> {
+ self.raw.get_mut(index.index())
+ }
+
+ /// Returns mutable references to two distinct elements, `a` and `b`.
+ ///
+ /// Panics if `a == b`.
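+    ///
+    /// For example (illustrative):
+    ///
+    /// ```
+    /// # use rustc_index::vec::IndexVec;
+    /// let mut v: IndexVec<usize, i32> = IndexVec::from_raw(vec![10, 20, 30]);
+    /// let (a, c) = v.pick2_mut(0, 2);
+    /// std::mem::swap(a, c);
+    /// assert_eq!(v.raw, vec![30, 20, 10]);
+    /// ```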
+ #[inline]
+ pub fn pick2_mut(&mut self, a: I, b: I) -> (&mut T, &mut T) {
+ let (ai, bi) = (a.index(), b.index());
+ assert!(ai != bi);
+
+ if ai < bi {
+ let (c1, c2) = self.raw.split_at_mut(bi);
+ (&mut c1[ai], &mut c2[0])
+ } else {
+ let (c2, c1) = self.pick2_mut(b, a);
+ (c1, c2)
+ }
+ }
+
+ /// Returns mutable references to three distinct elements.
+ ///
+ /// Panics if the elements are not distinct.
+ #[inline]
+ pub fn pick3_mut(&mut self, a: I, b: I, c: I) -> (&mut T, &mut T, &mut T) {
+ let (ai, bi, ci) = (a.index(), b.index(), c.index());
+ assert!(ai != bi && bi != ci && ci != ai);
+ let len = self.raw.len();
+ assert!(ai < len && bi < len && ci < len);
+ let ptr = self.raw.as_mut_ptr();
+ unsafe { (&mut *ptr.add(ai), &mut *ptr.add(bi), &mut *ptr.add(ci)) }
+ }
+
+ pub fn convert_index_type<Ix: Idx>(self) -> IndexVec<Ix, T> {
+ IndexVec { raw: self.raw, _marker: PhantomData }
+ }
+
+    /// Grows the index vector so that it contains an entry for
+    /// `elem`; if that is already true, this has no effect. Otherwise,
+    /// new values are inserted as needed by invoking `fill_value`.
+ #[inline]
+ pub fn ensure_contains_elem(&mut self, elem: I, fill_value: impl FnMut() -> T) {
+ let min_new_len = elem.index() + 1;
+ if self.len() < min_new_len {
+ self.raw.resize_with(min_new_len, fill_value);
+ }
+ }
+
+ #[inline]
+ pub fn resize_to_elem(&mut self, elem: I, fill_value: impl FnMut() -> T) {
+ let min_new_len = elem.index() + 1;
+ self.raw.resize_with(min_new_len, fill_value);
+ }
+}
+
+/// `IndexVec` is often used as a map, so it provides some map-like APIs.
+impl<I: Idx, T> IndexVec<I, Option<T>> {
+ #[inline]
+ pub fn insert(&mut self, index: I, value: T) -> Option<T> {
+ self.ensure_contains_elem(index, || None);
+ self[index].replace(value)
+ }
+
+ #[inline]
+ pub fn get_or_insert_with(&mut self, index: I, value: impl FnOnce() -> T) -> &mut T {
+ self.ensure_contains_elem(index, || None);
+ self[index].get_or_insert_with(value)
+ }
+
+ #[inline]
+ pub fn remove(&mut self, index: I) -> Option<T> {
+ self.ensure_contains_elem(index, || None);
+ self[index].take()
+ }
+}
+
+impl<I: Idx, T: Clone> IndexVec<I, T> {
+ #[inline]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ self.raw.resize(new_len, value)
+ }
+}
+
+impl<I: Idx, T: Ord> IndexVec<I, T> {
+ #[inline]
+ pub fn binary_search(&self, value: &T) -> Result<I, I> {
+ match self.raw.binary_search(value) {
+ Ok(i) => Ok(Idx::new(i)),
+ Err(i) => Err(Idx::new(i)),
+ }
+ }
+}
+
+impl<I: Idx, T> Index<I> for IndexVec<I, T> {
+ type Output = T;
+
+ #[inline]
+ fn index(&self, index: I) -> &T {
+ &self.raw[index.index()]
+ }
+}
+
+impl<I: Idx, T> IndexMut<I> for IndexVec<I, T> {
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut T {
+ &mut self.raw[index.index()]
+ }
+}
+
+impl<I: Idx, T> Default for IndexVec<I, T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<I: Idx, T> Extend<T> for IndexVec<I, T> {
+ #[inline]
+ fn extend<J: IntoIterator<Item = T>>(&mut self, iter: J) {
+ self.raw.extend(iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.raw.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.raw.reserve(additional);
+ }
+}
+
+impl<I: Idx, T> FromIterator<T> for IndexVec<I, T> {
+ #[inline]
+ fn from_iter<J>(iter: J) -> Self
+ where
+ J: IntoIterator<Item = T>,
+ {
+ IndexVec { raw: FromIterator::from_iter(iter), _marker: PhantomData }
+ }
+}
+
+impl<I: Idx, T> IntoIterator for IndexVec<I, T> {
+ type Item = T;
+ type IntoIter = vec::IntoIter<T>;
+
+ #[inline]
+ fn into_iter(self) -> vec::IntoIter<T> {
+ self.raw.into_iter()
+ }
+}
+
+impl<'a, I: Idx, T> IntoIterator for &'a IndexVec<I, T> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ #[inline]
+ fn into_iter(self) -> slice::Iter<'a, T> {
+ self.raw.iter()
+ }
+}
+
+impl<'a, I: Idx, T> IntoIterator for &'a mut IndexVec<I, T> {
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ #[inline]
+ fn into_iter(self) -> slice::IterMut<'a, T> {
+ self.raw.iter_mut()
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_index/src/vec/tests.rs b/compiler/rustc_index/src/vec/tests.rs
new file mode 100644
index 000000000..915d2e8bc
--- /dev/null
+++ b/compiler/rustc_index/src/vec/tests.rs
@@ -0,0 +1,55 @@
+#![allow(dead_code)]
+
+// Allows the macro invocation below to work
+use crate as rustc_index;
+
+rustc_macros::newtype_index!(struct MyIdx { MAX = 0xFFFF_FFFA });
+
+#[test]
+fn index_size_is_optimized() {
+ use std::mem::size_of;
+
+ assert_eq!(size_of::<MyIdx>(), 4);
+ // Uses 0xFFFF_FFFB
+ assert_eq!(size_of::<Option<MyIdx>>(), 4);
+ // Uses 0xFFFF_FFFC
+ assert_eq!(size_of::<Option<Option<MyIdx>>>(), 4);
+ // Uses 0xFFFF_FFFD
+ assert_eq!(size_of::<Option<Option<Option<MyIdx>>>>(), 4);
+ // Uses 0xFFFF_FFFE
+ assert_eq!(size_of::<Option<Option<Option<Option<MyIdx>>>>>(), 4);
+ // Uses 0xFFFF_FFFF
+ assert_eq!(size_of::<Option<Option<Option<Option<Option<MyIdx>>>>>>(), 4);
+ // Uses a tag
+ assert_eq!(size_of::<Option<Option<Option<Option<Option<Option<MyIdx>>>>>>>(), 8);
+}
+
+#[test]
+fn range_iterator_iterates_forwards() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(
+ range.collect::<Vec<_>>(),
+ [MyIdx::from_u32(1), MyIdx::from_u32(2), MyIdx::from_u32(3)]
+ );
+}
+
+#[test]
+fn range_iterator_iterates_backwards() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(
+ range.rev().collect::<Vec<_>>(),
+ [MyIdx::from_u32(3), MyIdx::from_u32(2), MyIdx::from_u32(1)]
+ );
+}
+
+#[test]
+fn range_count_is_correct() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(range.count(), 3);
+}
+
+#[test]
+fn range_size_hint_is_correct() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(range.size_hint(), (3, Some(3)));
+}