Diffstat (limited to 'compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs')

 compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs | 330
 1 file changed, 250 insertions(+), 80 deletions(-)
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
index 82e9a961a..dcb56a175 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
@@ -1,3 +1,6 @@
+#[cfg(test)]
+mod tests;
+
use std::hash;
use std::iter;
use std::ops::Range;
@@ -10,20 +13,185 @@ type Block = u64;
/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false`, the byte is uninitialized.
-// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
-// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
-#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable)]
-#[derive(HashStable)]
+/// The actual bits are only materialized when needed, and we try to keep this data lazy as long as
+/// possible. Currently, if all the blocks have the same value, then the mask represents either a
+/// fully initialized or a fully uninitialized const allocation, so we only need to store that
+/// single value.
+#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
pub struct InitMask {
- blocks: Vec<Block>,
+ blocks: InitMaskBlocks,
len: Size,
}
+#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+enum InitMaskBlocks {
+ Lazy {
+ /// Whether the lazy init mask is fully initialized or uninitialized.
+ state: bool,
+ },
+ Materialized(InitMaskMaterialized),
+}
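To make the lazy/materialized split concrete, here is a minimal, self-contained sketch of the same idea (simplified stand-in types named `Mask` and `Blocks`, not the `rustc_middle` definitions): a uniform mask is a single `bool`, and only a write that mixes states would force allocating real blocks.

```rust
// Sketch of the lazy/materialized split with simplified stand-in types.
// A uniform mask stores a single bool; mixed states need real blocks.
#[allow(dead_code)]
enum Blocks {
    Lazy { state: bool },
    Materialized(Vec<u64>), // one bit per byte, 64 bytes per block
}

struct Mask {
    blocks: Blocks,
    len: u64, // length in bytes
}

impl Mask {
    fn new(len: u64, state: bool) -> Self {
        // Like `InitMask::new`: start lazy, whatever the size.
        Mask { blocks: Blocks::Lazy { state }, len }
    }
}

fn main() {
    let mask = Mask::new(1024, true); // fully initialized, no Vec allocated
    assert!(matches!(&mask.blocks, Blocks::Lazy { state: true }));
    assert_eq!(mask.len, 1024);
}
```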
+
+impl InitMask {
+ pub fn new(size: Size, state: bool) -> Self {
+ // Blocks start lazily allocated, until we have to materialize them.
+ let blocks = InitMaskBlocks::Lazy { state };
+ InitMask { len: size, blocks }
+ }
+
+ /// Checks whether the `range` is entirely initialized.
+ ///
+ /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+ /// indexes for the first contiguous span of the uninitialized access.
+ #[inline]
+ pub fn is_range_initialized(&self, range: AllocRange) -> Result<(), AllocRange> {
+ let end = range.end();
+ if end > self.len {
+ return Err(AllocRange::from(self.len..end));
+ }
+
+ match self.blocks {
+ InitMaskBlocks::Lazy { state } => {
+ // Lazily allocated blocks represent the full mask, and cover the requested range by
+ // definition.
+ if state { Ok(()) } else { Err(range) }
+ }
+ InitMaskBlocks::Materialized(ref blocks) => {
+ blocks.is_range_initialized(range.start, end)
+ }
+ }
+ }
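As a sketch of the lazy fast path above (a plain `Range<u64>` standing in for `AllocRange`): a uniform mask answers a range query without touching any bits, and on failure the reported span is the whole requested range.

```rust
use std::ops::Range;

// Lazy fast path: the uniform state covers any in-bounds range, so the
// answer never requires materialized bits.
fn is_range_initialized_lazy(state: bool, range: Range<u64>) -> Result<(), Range<u64>> {
    if state { Ok(()) } else { Err(range) }
}

fn main() {
    assert_eq!(is_range_initialized_lazy(true, 0..16), Ok(()));
    assert_eq!(is_range_initialized_lazy(false, 4..8), Err(4..8));
}
```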
+
+ /// Sets a specified range to a value. If the range is out-of-bounds, the mask will grow to
+ /// accommodate it entirely.
+ pub fn set_range(&mut self, range: AllocRange, new_state: bool) {
+ let start = range.start;
+ let end = range.end();
+
+ let is_full_overwrite = start == Size::ZERO && end >= self.len;
+
+ // Optimize the cases of a full init/uninit state, while handling growth if needed.
+ match self.blocks {
+ InitMaskBlocks::Lazy { ref mut state } if is_full_overwrite => {
+ // This is fully overwriting the mask, and we'll still have a single initialization
+ // state: the blocks can stay lazy.
+ *state = new_state;
+ self.len = end;
+ }
+ InitMaskBlocks::Materialized(_) if is_full_overwrite => {
+ // This is also fully overwriting materialized blocks with a single initialization
+ // state: we'll have no need for these blocks anymore and can make them lazy.
+ self.blocks = InitMaskBlocks::Lazy { state: new_state };
+ self.len = end;
+ }
+ InitMaskBlocks::Lazy { state } if state == new_state => {
+ // Here we're partially overwriting the mask but the initialization state doesn't
+ // change: the blocks can stay lazy.
+ if end > self.len {
+ self.len = end;
+ }
+ }
+ _ => {
+ // Otherwise, we have a partial overwrite that can result in a mix of initialization
+ // states, so we'll need materialized blocks.
+ let len = self.len;
+ let blocks = self.materialize_blocks();
+
+ // There are 3 cases of interest here, if we have:
+ //
+ // [--------]
+ // ^        ^
+ // 0        len
+ //
+ // 1) the range to set can be in-bounds:
+ //
+ //    xxxx = [start, end]
+ // [--------]
+ // ^        ^
+ // 0        len
+ //
+ // Here, we'll simply set the single `start` to `end` range.
+ //
+ // 2) the range to set can be partially out-of-bounds:
+ //
+ //        xxxx = [start, end]
+ // [--------]
+ // ^        ^
+ // 0        len
+ //
+ // We have 2 subranges to handle:
+ // - we'll set the existing `start` to `len` range.
+ // - we'll grow and set the `len` to `end` range.
+ //
+ // 3) the range to set can be fully out-of-bounds:
+ //
+ //           ---xxxx = [start, end]
+ // [--------]
+ // ^        ^
+ // 0        len
+ //
+ // Since we're growing the mask to a single `new_state` value, we consider the gap
+ // from `len` to `start` to be part of the range, and have a single subrange to
+ // handle: we'll grow and set the `len` to `end` range.
+ //
+ // Note that we have to materialize, set blocks, and grow the mask. We could
+ // therefore slightly optimize things in situations where these writes overlap.
+ // However, as of this writing, growing the mask doesn't happen in practice yet, so
+ // we don't do this micro-optimization.
+
+ if end <= len {
+ // Handle case 1.
+ blocks.set_range_inbounds(start, end, new_state);
+ } else {
+ if start < len {
+ // Handle the first subrange of case 2.
+ blocks.set_range_inbounds(start, len, new_state);
+ }
+
+ // Handle the second subrange of case 2, and case 3.
+ blocks.grow(len, end - len, new_state); // `Size` operation
+ self.len = end;
+ }
+ }
+ }
+ }
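A hedged sketch of the case analysis above, with plain `u64` offsets standing in for `Size` (the `WriteKind`/`classify` names are hypothetical):

```rust
// The three `set_range` cases, classified against the current length.
#[derive(Debug, PartialEq)]
enum WriteKind {
    InBounds,             // case 1: end <= len
    PartiallyOutOfBounds, // case 2: start < len < end
    FullyOutOfBounds,     // case 3: len <= start
}

fn classify(start: u64, end: u64, len: u64) -> WriteKind {
    if end <= len {
        WriteKind::InBounds
    } else if start < len {
        WriteKind::PartiallyOutOfBounds
    } else {
        WriteKind::FullyOutOfBounds
    }
}

fn main() {
    let len = 8;
    assert_eq!(classify(2, 6, len), WriteKind::InBounds);
    assert_eq!(classify(6, 12, len), WriteKind::PartiallyOutOfBounds);
    assert_eq!(classify(10, 12, len), WriteKind::FullyOutOfBounds);
}
```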
+
+ /// Materializes this mask's blocks when the mask is lazy.
+ #[inline]
+ fn materialize_blocks(&mut self) -> &mut InitMaskMaterialized {
+ if let InitMaskBlocks::Lazy { state } = self.blocks {
+ self.blocks = InitMaskBlocks::Materialized(InitMaskMaterialized::new(self.len, state));
+ }
+
+ let InitMaskBlocks::Materialized(ref mut blocks) = self.blocks else {
+ bug!("initmask blocks must be materialized here")
+ };
+ blocks
+ }
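The materialization above uses a common upgrade-in-place shape: overwrite the lazy variant, then re-match to borrow the materialized data. A standalone sketch with a stand-in `Blocks` type:

```rust
// Upgrade-in-place: replace the lazy variant, then re-match to borrow
// the freshly materialized blocks. `Blocks` is a stand-in type.
enum Blocks {
    Lazy { state: bool },
    Materialized(Vec<u64>),
}

fn materialize(blocks: &mut Blocks, num_blocks: usize) -> &mut Vec<u64> {
    if let Blocks::Lazy { state } = *blocks {
        // Pre-fill with the uniform state so no second write is needed.
        let block = if state { u64::MAX } else { 0 };
        *blocks = Blocks::Materialized(vec![block; num_blocks]);
    }
    let Blocks::Materialized(ref mut v) = *blocks else {
        unreachable!("blocks were materialized just above");
    };
    v
}

fn main() {
    let mut blocks = Blocks::Lazy { state: true };
    let v = materialize(&mut blocks, 2);
    assert_eq!(v.as_slice(), [u64::MAX, u64::MAX]);
}
```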
+
+ /// Returns the initialization state at the specified in-bounds index.
+ #[inline]
+ pub fn get(&self, idx: Size) -> bool {
+ match self.blocks {
+ InitMaskBlocks::Lazy { state } => state,
+ InitMaskBlocks::Materialized(ref blocks) => blocks.get(idx),
+ }
+ }
+}
+
+/// The actual materialized blocks of the bitmask, when we can't keep the `InitMask` lazy.
+// Note: for performance reasons when interning, some of the fields can be partially
+// hashed (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, HashStable)]
+struct InitMaskMaterialized {
+ blocks: Vec<Block>,
+}
+
// Const allocations are only hashed for interning. However, they can be large, making the hashing
// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
// big buffers like the allocation's init mask. We can partially hash some fields when they're
// large.
-impl hash::Hash for InitMask {
+impl hash::Hash for InitMaskMaterialized {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
@@ -41,18 +209,15 @@ impl hash::Hash for InitMask {
} else {
self.blocks.hash(state);
}
-
- // Hash the other fields as usual.
- self.len.hash(state);
}
}
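A sketch of the partial-hashing strategy used here, with illustrative constants (the compiler derives its actual limits from `MAX_BYTES_TO_HASH` and `MAX_HASHED_BUFFER_LEN`): small buffers hash in full, large ones hash their length plus a head and tail sample.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative limits, not the compiler's values.
const MAX_BLOCKS_TO_HASH: usize = 8;
const MAX_BLOCKS_LEN: usize = 2 * MAX_BLOCKS_TO_HASH;

fn hash_blocks<H: Hasher>(blocks: &[u64], state: &mut H) {
    if blocks.len() > MAX_BLOCKS_LEN {
        // Partial hashing: the length separates differently-sized buffers,
        // and the head + tail samples catch most real differences cheaply.
        blocks.len().hash(state);
        blocks[..MAX_BLOCKS_TO_HASH].hash(state);
        blocks[blocks.len() - MAX_BLOCKS_TO_HASH..].hash(state);
    } else {
        blocks.hash(state);
    }
}

fn main() {
    let mut h = DefaultHasher::new();
    hash_blocks(&vec![0u64; 100], &mut h);
    println!("{:x}", h.finish());
}
```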
-impl InitMask {
+impl InitMaskMaterialized {
pub const BLOCK_SIZE: u64 = 64;
- pub fn new(size: Size, state: bool) -> Self {
- let mut m = InitMask { blocks: vec![], len: Size::ZERO };
- m.grow(size, state);
+ fn new(size: Size, state: bool) -> Self {
+ let mut m = InitMaskMaterialized { blocks: vec![] };
+ m.grow(Size::ZERO, size, state);
m
}
@@ -62,8 +227,8 @@ impl InitMask {
// Each bit in a `Block` represents the initialization state of one byte of an allocation,
// so we use `.bytes()` here.
let bits = bits.bytes();
- let a = bits / InitMask::BLOCK_SIZE;
- let b = bits % InitMask::BLOCK_SIZE;
+ let a = bits / Self::BLOCK_SIZE;
+ let b = bits % Self::BLOCK_SIZE;
(usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
@@ -71,7 +236,7 @@ impl InitMask {
fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
let block = block.try_into().ok().unwrap();
let bit = bit.try_into().ok().unwrap();
- Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+ Size::from_bytes(block * Self::BLOCK_SIZE + bit)
}
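For example, with 64 bytes tracked per block, byte index 150 lands in block 2 at bit 22. A quick sketch of the round trip between the two helpers above (simplified integer types):

```rust
// Round trip between a byte index and its (block, bit) coordinates.
const BLOCK_SIZE: u64 = 64;

fn bit_index(byte: u64) -> (usize, usize) {
    ((byte / BLOCK_SIZE) as usize, (byte % BLOCK_SIZE) as usize)
}

fn size_from_bit_index(block: usize, bit: usize) -> u64 {
    block as u64 * BLOCK_SIZE + bit as u64
}

fn main() {
    assert_eq!(bit_index(150), (2, 22)); // 150 = 2 * 64 + 22
    assert_eq!(size_from_bit_index(2, 22), 150);
}
```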
/// Checks whether the `range` is entirely initialized.
@@ -79,13 +244,8 @@ impl InitMask {
/// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
/// indexes for the first contiguous span of the uninitialized access.
#[inline]
- pub fn is_range_initialized(&self, range: AllocRange) -> Result<(), AllocRange> {
- let end = range.end();
- if end > self.len {
- return Err(AllocRange::from(self.len..end));
- }
-
- let uninit_start = self.find_bit(range.start, end, false);
+ fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
+ let uninit_start = self.find_bit(start, end, false);
match uninit_start {
Some(uninit_start) => {
@@ -96,81 +256,80 @@ impl InitMask {
}
}
- pub fn set_range(&mut self, range: AllocRange, new_state: bool) {
- let end = range.end();
- let len = self.len;
- if end > len {
- self.grow(end - len, new_state);
- }
- self.set_range_inbounds(range.start, end, new_state);
- }
-
fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
- let (blocka, bita) = Self::bit_index(start);
- let (blockb, bitb) = Self::bit_index(end);
- if blocka == blockb {
- // First set all bits except the first `bita`,
- // then unset the last `64 - bitb` bits.
- let range = if bitb == 0 {
- u64::MAX << bita
+ let (block_a, bit_a) = Self::bit_index(start);
+ let (block_b, bit_b) = Self::bit_index(end);
+ if block_a == block_b {
+ // First set all bits except the first `bit_a`,
+ // then unset the last `64 - bit_b` bits.
+ let range = if bit_b == 0 {
+ u64::MAX << bit_a
} else {
- (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
+ (u64::MAX << bit_a) & (u64::MAX >> (64 - bit_b))
};
if new_state {
- self.blocks[blocka] |= range;
+ self.blocks[block_a] |= range;
} else {
- self.blocks[blocka] &= !range;
+ self.blocks[block_a] &= !range;
}
return;
}
// across block boundaries
if new_state {
- // Set `bita..64` to `1`.
- self.blocks[blocka] |= u64::MAX << bita;
- // Set `0..bitb` to `1`.
- if bitb != 0 {
- self.blocks[blockb] |= u64::MAX >> (64 - bitb);
+ // Set `bit_a..64` to `1`.
+ self.blocks[block_a] |= u64::MAX << bit_a;
+ // Set `0..bit_b` to `1`.
+ if bit_b != 0 {
+ self.blocks[block_b] |= u64::MAX >> (64 - bit_b);
}
// Fill in all the other blocks (much faster than one bit at a time).
- for block in (blocka + 1)..blockb {
+ for block in (block_a + 1)..block_b {
self.blocks[block] = u64::MAX;
}
} else {
- // Set `bita..64` to `0`.
- self.blocks[blocka] &= !(u64::MAX << bita);
- // Set `0..bitb` to `0`.
- if bitb != 0 {
- self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
+ // Set `bit_a..64` to `0`.
+ self.blocks[block_a] &= !(u64::MAX << bit_a);
+ // Set `0..bit_b` to `0`.
+ if bit_b != 0 {
+ self.blocks[block_b] &= !(u64::MAX >> (64 - bit_b));
}
// Fill in all the other blocks (much faster than one bit at a time).
- for block in (blocka + 1)..blockb {
+ for block in (block_a + 1)..block_b {
self.blocks[block] = 0;
}
}
}
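The single-block branch relies on `(u64::MAX << bit_a) & (u64::MAX >> (64 - bit_b))` having ones exactly in `bit_a..bit_b`, with `bit_b == 0` meaning "through the end of the block" (which also avoids the out-of-range shift `u64::MAX >> 64`). A small sketch checking the formula:

```rust
// Ones exactly in `bit_a..bit_b`; `bit_b == 0` means the block's end.
fn mask(bit_a: u32, bit_b: u32) -> u64 {
    if bit_b == 0 {
        u64::MAX << bit_a
    } else {
        (u64::MAX << bit_a) & (u64::MAX >> (64 - bit_b))
    }
}

fn main() {
    // Bits 3..10 set: 0b11_1111_1000.
    assert_eq!(mask(3, 10), 0x3F8);
    // Everything from bit 0 through the end of the block.
    assert_eq!(mask(0, 0), u64::MAX);
}
```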
#[inline]
- pub fn get(&self, i: Size) -> bool {
+ fn get(&self, i: Size) -> bool {
let (block, bit) = Self::bit_index(i);
(self.blocks[block] & (1 << bit)) != 0
}
- fn grow(&mut self, amount: Size, new_state: bool) {
+ fn grow(&mut self, len: Size, amount: Size, new_state: bool) {
if amount.bytes() == 0 {
return;
}
let unused_trailing_bits =
- u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
+ u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - len.bytes();
+
+ // If there's not enough capacity in the currently allocated blocks, allocate some more.
if amount.bytes() > unused_trailing_bits {
let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
- self.blocks.extend(
- // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
- iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
- );
+
+ // We allocate the blocks to the correct value for the requested init state, so we won't
+ // have to manually set them with another write.
+ let block = if new_state { u64::MAX } else { 0 };
+ self.blocks
+ .extend(iter::repeat(block).take(usize::try_from(additional_blocks).unwrap()));
+ }
+
+ // New blocks have already been set here, so we only need to set the unused trailing bits,
+ // if any.
+ if unused_trailing_bits > 0 {
+ let in_bounds_tail = Size::from_bytes(unused_trailing_bits);
+ self.set_range_inbounds(len, len + in_bounds_tail, new_state); // `Size` operation
}
- let start = self.len;
- self.len += amount;
- self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
}
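Worked example of the growth arithmetic above (illustrative values): with 10 bytes tracked in a single 64-bit block there are 54 unused trailing bits, so growing by up to 54 bytes reuses the existing block, while a larger growth appends pre-filled blocks.

```rust
// Growth arithmetic sketch: spare capacity vs. blocks to append.
const BLOCK_SIZE: u64 = 64;

fn main() {
    let (num_blocks, len) = (1u64, 10u64); // one block, 10 bytes tracked
    let unused_trailing_bits = num_blocks * BLOCK_SIZE - len;
    assert_eq!(unused_trailing_bits, 54);

    // Growing by 100 bytes exceeds the spare capacity...
    let amount = 100u64;
    assert!(amount > unused_trailing_bits);
    // ...so `grow` appends `amount / BLOCK_SIZE + 1` pre-filled blocks.
    assert_eq!(amount / BLOCK_SIZE + 1, 2);
}
```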
/// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
@@ -188,7 +347,7 @@ impl InitMask {
/// ```
/// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
fn find_bit_fast(
- init_mask: &InitMask,
+ init_mask: &InitMaskMaterialized,
start: Size,
end: Size,
is_init: bool,
@@ -223,7 +382,7 @@ impl InitMask {
None
} else {
let bit = bits.trailing_zeros();
- Some(InitMask::size_from_bit_index(block, bit))
+ Some(InitMaskMaterialized::size_from_bit_index(block, bit))
}
}
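The fast path scans whole blocks and locates the first matching bit with `trailing_zeros`. A standalone sketch of the per-block step (simplified; `find_in_block` is a hypothetical helper, and the masking here is an assumption rather than the exact compiler code):

```rust
// Per-block search step: invert when looking for a 0 bit, mask off bits
// before `start_bit`, and let `trailing_zeros` find the first match.
fn find_in_block(block: u64, start_bit: u32, is_init: bool) -> Option<u32> {
    let bits = if is_init { block } else { !block }; // now searching for a 1
    let masked = bits & (u64::MAX << start_bit); // ignore bits before start_bit
    if masked == 0 { None } else { Some(masked.trailing_zeros()) }
}

fn main() {
    let block = (1u64 << 5) | (1 << 9); // bits 5 and 9 set
    assert_eq!(find_in_block(block, 0, true), Some(5));
    assert_eq!(find_in_block(block, 6, true), Some(9));
    assert_eq!(find_in_block(block, 0, false), Some(0)); // first 0 bit
}
```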
@@ -253,9 +412,9 @@ impl InitMask {
// This provides the desired behavior of searching blocks 0 and 1 for (a),
// and searching only block 0 for (b).
// There is no concern of overflows since we checked for `start >= end` above.
- let (start_block, start_bit) = InitMask::bit_index(start);
+ let (start_block, start_bit) = InitMaskMaterialized::bit_index(start);
let end_inclusive = Size::from_bytes(end.bytes() - 1);
- let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+ let (end_block_inclusive, _) = InitMaskMaterialized::bit_index(end_inclusive);
// Handle first block: need to skip `start_bit` bits.
//
@@ -340,7 +499,7 @@ impl InitMask {
#[cfg_attr(not(debug_assertions), allow(dead_code))]
fn find_bit_slow(
- init_mask: &InitMask,
+ init_mask: &InitMaskMaterialized,
start: Size,
end: Size,
is_init: bool,
@@ -436,10 +595,19 @@ impl<'a> Iterator for InitChunkIter<'a> {
return None;
}
- let end_of_chunk =
- self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+ let end_of_chunk = match self.init_mask.blocks {
+ InitMaskBlocks::Lazy { .. } => {
+ // If we're iterating over the chunks of lazy blocks, we just emit a single
+ // full-size chunk.
+ self.end
+ }
+ InitMaskBlocks::Materialized(ref blocks) => {
+ let end_of_chunk =
+ blocks.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+ end_of_chunk
+ }
+ };
let range = self.start..end_of_chunk;
-
let ret =
Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
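In the lazy case the iterator therefore yields exactly one chunk spanning the requested range; a sketch with stand-in types (`Chunk` mirroring `InitChunk`):

```rust
use std::ops::Range;

#[derive(Debug, PartialEq)]
enum Chunk {
    Init(Range<u64>),
    Uninit(Range<u64>),
}

// A uniform (lazy) mask has no bit boundaries to find: the whole
// requested range is one chunk of the uniform state.
fn lazy_chunks(state: bool, start: u64, end: u64) -> Vec<Chunk> {
    let range = start..end;
    vec![if state { Chunk::Init(range) } else { Chunk::Uninit(range) }]
}

fn main() {
    assert_eq!(lazy_chunks(true, 0, 32), vec![Chunk::Init(0..32)]);
}
```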
@@ -504,17 +672,19 @@ impl InitMask {
/// Applies multiple instances of the run-length encoding to the initialization mask.
pub fn apply_copy(&mut self, defined: InitCopy, range: AllocRange, repeat: u64) {
- // An optimization where we can just overwrite an entire range of initialization
- // bits if they are going to be uniformly `1` or `0`.
+ // An optimization where we can just overwrite an entire range of initialization bits if
+ // they are going to be uniformly `1` or `0`. If this happens to be a full-range overwrite,
+ // we won't need materialized blocks either.
if defined.ranges.len() <= 1 {
- self.set_range_inbounds(
- range.start,
- range.start + range.size * repeat, // `Size` operations
- defined.initial,
- );
+ let start = range.start;
+ let end = range.start + range.size * repeat; // `Size` operations
+ self.set_range(AllocRange::from(start..end), defined.initial);
return;
}
+ // We're about to do one or more partial writes, so we ensure the blocks are materialized.
+ let blocks = self.materialize_blocks();
+
for mut j in 0..repeat {
j *= range.size.bytes();
j += range.start.bytes();
@@ -522,7 +692,7 @@ impl InitMask {
for range in &defined.ranges {
let old_j = j;
j += range;
- self.set_range_inbounds(Size::from_bytes(old_j), Size::from_bytes(j), cur);
+ blocks.set_range_inbounds(Size::from_bytes(old_j), Size::from_bytes(j), cur);
cur = !cur;
}
}
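To illustrate the run-length encoding that `apply_copy` consumes (a sketch with stand-in types, not the `InitCopy` struct itself): `initial` is the state of the first run, each entry in `ranges` is a run length in bytes, and the state flips between runs.

```rust
// Expand an init-mask run-length encoding into per-byte states.
fn expand_rle(initial: bool, ranges: &[u64]) -> Vec<bool> {
    let mut out = Vec::new();
    let mut cur = initial;
    for &len in ranges {
        out.extend(std::iter::repeat(cur).take(len as usize));
        cur = !cur; // the state alternates between runs
    }
    out
}

fn main() {
    // 3 initialized bytes, then 2 uninitialized, then 1 initialized.
    assert_eq!(
        expand_rle(true, &[3, 2, 1]),
        vec![true, true, true, false, false, true]
    );
}
```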