Diffstat (limited to 'compiler/rustc_middle/src/mir/interpret')
9 files changed, 991 insertions, 822 deletions
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index 37ec04b07..221105ac4 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -1,16 +1,20 @@ //! The virtual memory representation of the MIR interpreter. +mod init_mask; +mod provenance_map; +#[cfg(test)] +mod tests; + use std::borrow::Cow; -use std::convert::{TryFrom, TryInto}; use std::fmt; use std::hash; -use std::iter; -use std::ops::{Deref, Range}; +use std::ops::Range; use std::ptr; +use either::{Left, Right}; + use rustc_ast::Mutability; use rustc_data_structures::intern::Interned; -use rustc_data_structures::sorted_map::SortedMap; use rustc_span::DUMMY_SP; use rustc_target::abi::{Align, HasDataLayout, Size}; @@ -20,6 +24,10 @@ use super::{ UnsupportedOpInfo, }; use crate::ty; +use init_mask::*; +use provenance_map::*; + +pub use init_mask::{InitChunk, InitChunkIter}; /// This type represents an Allocation in the Miri/CTFE core engine. /// @@ -28,9 +36,9 @@ use crate::ty; /// module provides higher-level access. // Note: for performance reasons when interning, some of the `Allocation` fields can be partially // hashed. (see the `Hash` impl below for more details), so the impl is not derived. -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)] +#[derive(Clone, Eq, PartialEq, TyEncodable, TyDecodable)] #[derive(HashStable)] -pub struct Allocation<Prov = AllocId, Extra = ()> { +pub struct Allocation<Prov: Provenance = AllocId, Extra = ()> { /// The actual bytes of the allocation. /// Note that the bytes of a pointer represent the offset of the pointer. bytes: Box<[u8]>, @@ -95,27 +103,25 @@ impl hash::Hash for Allocation { /// Interned types generally have an `Outer` type and an `Inner` type, where /// `Outer` is a newtype around `Interned<Inner>`, and all the operations are /// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an -/// outer type and `TyS` is its inner type. +/// outer type and `TyKind` is its inner type. /// /// Here things are different because only const allocations are interned. This /// means that both the inner type (`Allocation`) and the outer type /// (`ConstAllocation`) are used quite a bit. -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable)] #[rustc_pass_by_value] -pub struct ConstAllocation<'tcx, Prov = AllocId, Extra = ()>( - pub Interned<'tcx, Allocation<Prov, Extra>>, -); +pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>); impl<'tcx> fmt::Debug for ConstAllocation<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // This matches how `Allocation` is printed. We print it like this to - // avoid having to update expected output in a lot of tests. - write!(f, "{:?}", self.inner()) + // The debug representation of this is very verbose and basically useless, + // so don't print it. + write!(f, "ConstAllocation {{ .. 
}}") } } -impl<'tcx, Prov, Extra> ConstAllocation<'tcx, Prov, Extra> { - pub fn inner(self) -> &'tcx Allocation<Prov, Extra> { +impl<'tcx> ConstAllocation<'tcx> { + pub fn inner(self) -> &'tcx Allocation { self.0.0 } } @@ -183,12 +189,21 @@ pub fn alloc_range(start: Size, size: Size) -> AllocRange { AllocRange { start, size } } -impl AllocRange { +impl From<Range<Size>> for AllocRange { #[inline] - pub fn from(r: Range<Size>) -> Self { + fn from(r: Range<Size>) -> Self { alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked) } +} +impl From<Range<usize>> for AllocRange { + #[inline] + fn from(r: Range<usize>) -> Self { + AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end)) + } +} + +impl AllocRange { #[inline(always)] pub fn end(self) -> Size { self.start + self.size // This does overflow checking. @@ -205,7 +220,7 @@ impl AllocRange { } // The constructors are all without extra; the extra gets added by a machine hook later. -impl<Prov> Allocation<Prov> { +impl<Prov: Provenance> Allocation<Prov> { /// Creates an allocation initialized by the given bytes pub fn from_bytes<'a>( slice: impl Into<Cow<'a, [u8]>>, @@ -263,7 +278,7 @@ impl<Prov> Allocation<Prov> { impl Allocation { /// Adjust allocation from the ones in tcx to a custom Machine instance /// with a different Provenance and Extra type. - pub fn adjust_from_tcx<Prov, Extra, Err>( + pub fn adjust_from_tcx<Prov: Provenance, Extra, Err>( self, cx: &impl HasDataLayout, extra: Extra, @@ -271,10 +286,10 @@ impl Allocation { ) -> Result<Allocation<Prov, Extra>, Err> { // Compute new pointer provenance, which also adjusts the bytes. let mut bytes = self.bytes; - let mut new_provenance = Vec::with_capacity(self.provenance.0.len()); + let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len()); let ptr_size = cx.data_layout().pointer_size.bytes_usize(); let endian = cx.data_layout().endian; - for &(offset, alloc_id) in self.provenance.iter() { + for &(offset, alloc_id) in self.provenance.ptrs().iter() { let idx = offset.bytes_usize(); let ptr_bytes = &mut bytes[idx..idx + ptr_size]; let bits = read_target_uint(endian, ptr_bytes).unwrap(); @@ -286,7 +301,7 @@ impl Allocation { // Create allocation. Ok(Allocation { bytes, - provenance: ProvenanceMap::from_presorted(new_provenance), + provenance: ProvenanceMap::from_presorted_ptrs(new_provenance), init_mask: self.init_mask, align: self.align, mutability: self.mutability, @@ -296,7 +311,7 @@ impl Allocation { } /// Raw accessors. Provide access to otherwise private bytes. 
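As an aside on the `From<Range<Size>>` and `From<Range<usize>>` conversions added earlier in this hunk: `AllocRange` stores a start offset plus a size rather than an end-exclusive pair, and the subtraction is overflow-checked. A minimal standalone sketch of the same idea, using plain `u64` offsets and a hypothetical `ByteRange` type instead of rustc's `Size`/`AllocRange`:

```rust
// Illustrative stand-in for `AllocRange`: a start offset plus a size.
#[derive(Debug, PartialEq)]
struct ByteRange {
    start: u64,
    size: u64,
}

impl From<std::ops::Range<u64>> for ByteRange {
    fn from(r: std::ops::Range<u64>) -> Self {
        // `checked_sub` plays the role of the overflow-checked `Size` subtraction.
        ByteRange { start: r.start, size: r.end.checked_sub(r.start).expect("range end before start") }
    }
}

fn main() {
    let r: ByteRange = (4..12).into();
    assert_eq!(r, ByteRange { start: 4, size: 8 });
}
```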
-impl<Prov, Extra> Allocation<Prov, Extra> { +impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { pub fn len(&self) -> usize { self.bytes.len() } @@ -349,9 +364,14 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { cx: &impl HasDataLayout, range: AllocRange, ) -> AllocResult<&[u8]> { - self.check_init(range)?; + self.init_mask.is_range_initialized(range).map_err(|uninit_range| { + AllocError::InvalidUninitBytes(Some(UninitBytesAccess { + access: range, + uninit: uninit_range, + })) + })?; if !Prov::OFFSET_IS_ADDR { - if self.range_has_provenance(cx, range) { + if !self.provenance.range_empty(range, cx) { return Err(AllocError::ReadPointerAsBytes); } } @@ -370,7 +390,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { range: AllocRange, ) -> AllocResult<&mut [u8]> { self.mark_init(range, true); - self.clear_provenance(cx, range)?; + self.provenance.clear(range, cx)?; Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]) } @@ -382,7 +402,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { range: AllocRange, ) -> AllocResult<*mut [u8]> { self.mark_init(range, true); - self.clear_provenance(cx, range)?; + self.provenance.clear(range, cx)?; assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize()); @@ -393,6 +413,15 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { /// Reading and writing. impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { + /// Sets the init bit for the given range. + fn mark_init(&mut self, range: AllocRange, is_init: bool) { + if range.size.bytes() == 0 { + return; + } + assert!(self.mutability == Mutability::Mut); + self.init_mask.set_range(range, is_init); + } + /// Reads a *non-ZST* scalar. /// /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine @@ -410,7 +439,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { read_provenance: bool, ) -> AllocResult<Scalar<Prov>> { // First and foremost, if anything is uninit, bail. - if self.is_init(range).is_err() { + if self.init_mask.is_range_initialized(range).is_err() { return Err(AllocError::InvalidUninitBytes(None)); } @@ -423,7 +452,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { // When reading data with provenance, the easy case is finding provenance exactly where we // are reading, then we can put data and provenance back together and return that. - if let Some(&prov) = self.provenance.get(&range.start) { + if let Some(prov) = self.provenance.get_ptr(range.start) { // Now we can return the bits, with their appropriate provenance. let ptr = Pointer::new(prov, Size::from_bytes(bits)); return Ok(Scalar::from_pointer(ptr, cx)); @@ -431,10 +460,9 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { // If we can work on pointers byte-wise, join the byte-wise provenances. if Prov::OFFSET_IS_ADDR { - let mut prov = self.offset_get_provenance(cx, range.start); - for offset in 1..range.size.bytes() { - let this_prov = - self.offset_get_provenance(cx, range.start + Size::from_bytes(offset)); + let mut prov = self.provenance.get(range.start, cx); + for offset in Size::from_bytes(1)..range.size { + let this_prov = self.provenance.get(range.start + offset, cx); prov = Prov::join(prov, this_prov); } // Now use this provenance. @@ -452,7 +480,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { // Fallback path for when we cannot treat provenance bytewise or ignore it. 
assert!(!Prov::OFFSET_IS_ADDR); - if self.range_has_provenance(cx, range) { + if !self.provenance.range_empty(range, cx) { return Err(AllocError::ReadPointerAsBytes); } // There is no provenance, we can just return the bits. @@ -466,7 +494,6 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { /// /// It is the caller's responsibility to check bounds and alignment beforehand. /// Most likely, you want to call `InterpCx::write_scalar` instead of this method. - #[instrument(skip(self, cx), level = "debug")] pub fn write_scalar( &mut self, cx: &impl HasDataLayout, @@ -478,11 +505,11 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { // `to_bits_or_ptr_internal` is the right method because we just want to store this data // as-is into memory. let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? { - Err(val) => { - let (provenance, offset) = val.into_parts(); + Right(ptr) => { + let (provenance, offset) = ptr.into_parts(); (u128::from(offset.bytes()), Some(provenance)) } - Ok(data) => (data, None), + Left(data) => (data, None), }; let endian = cx.data_layout().endian; @@ -491,7 +518,8 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { // See if we have to also store some provenance. if let Some(provenance) = provenance { - self.provenance.0.insert(range.start, provenance); + assert_eq!(range.size, cx.data_layout().pointer_size); + self.provenance.insert_ptr(range.start, provenance, cx); } Ok(()) @@ -500,750 +528,25 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { /// Write "uninit" to the given memory range. pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { self.mark_init(range, false); - self.clear_provenance(cx, range)?; + self.provenance.clear(range, cx)?; return Ok(()); } -} - -/// Provenance. -impl<Prov: Copy, Extra> Allocation<Prov, Extra> { - /// Returns all provenance overlapping with the given pointer-offset pair. - fn range_get_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] { - // We have to go back `pointer_size - 1` bytes, as that one would still overlap with - // the beginning of this range. - let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1); - self.provenance.range(Size::from_bytes(start)..range.end()) - } - - /// Get the provenance of a single byte. - fn offset_get_provenance(&self, cx: &impl HasDataLayout, offset: Size) -> Option<Prov> { - let prov = self.range_get_provenance(cx, alloc_range(offset, Size::from_bytes(1))); - assert!(prov.len() <= 1); - prov.first().map(|(_offset, prov)| *prov) - } - - /// Returns whether this allocation has provenance overlapping with the given range. - /// - /// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat - /// limit access to provenance outside of the `Allocation` abstraction. - /// - pub fn range_has_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool { - !self.range_get_provenance(cx, range).is_empty() - } - - /// Removes all provenance inside the given range. - /// If there is provenance overlapping with the edges, they - /// are removed as well *and* the bytes they cover are marked as - /// uninitialized. This is a somewhat odd "spooky action at a distance", - /// but it allows strictly more code to run than if we would just error - /// immediately in that case. 
- fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult - where - Prov: Provenance, - { - // Find the start and end of the given range and its outermost provenance. - let (first, last) = { - // Find all provenance overlapping the given range. - let provenance = self.range_get_provenance(cx, range); - if provenance.is_empty() { - return Ok(()); - } - - ( - provenance.first().unwrap().0, - provenance.last().unwrap().0 + cx.data_layout().pointer_size, - ) - }; - let start = range.start; - let end = range.end(); - - // We need to handle clearing the provenance from parts of a pointer. - // FIXME: Miri should preserve partial provenance; see - // https://github.com/rust-lang/miri/issues/2181. - if first < start { - if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE { - return Err(AllocError::PartialPointerOverwrite(first)); - } - warn!( - "Partial pointer overwrite! De-initializing memory at offsets {first:?}..{start:?}." - ); - self.init_mask.set_range(first, start, false); - } - if last > end { - if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE { - return Err(AllocError::PartialPointerOverwrite( - last - cx.data_layout().pointer_size, - )); - } - warn!( - "Partial pointer overwrite! De-initializing memory at offsets {end:?}..{last:?}." - ); - self.init_mask.set_range(end, last, false); - } - - // Forget all the provenance. - // Since provenance do not overlap, we know that removing until `last` (exclusive) is fine, - // i.e., this will not remove any other provenance just after the ones we care about. - self.provenance.0.remove_range(first..last); - - Ok(()) - } -} - -/// Stores the provenance information of pointers stored in memory. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] -pub struct ProvenanceMap<Prov = AllocId>(SortedMap<Size, Prov>); - -impl<Prov> ProvenanceMap<Prov> { - pub fn new() -> Self { - ProvenanceMap(SortedMap::new()) - } - - // The caller must guarantee that the given provenance list is already sorted - // by address and contain no duplicates. - pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self { - ProvenanceMap(SortedMap::from_presorted_elements(r)) - } -} - -impl<Prov> Deref for ProvenanceMap<Prov> { - type Target = SortedMap<Size, Prov>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// A partial, owned list of provenance to transfer into another allocation. -/// -/// Offsets are already adjusted to the destination allocation. -pub struct AllocationProvenance<Prov> { - dest_provenance: Vec<(Size, Prov)>, -} - -impl<Prov: Copy, Extra> Allocation<Prov, Extra> { - pub fn prepare_provenance_copy( - &self, - cx: &impl HasDataLayout, - src: AllocRange, - dest: Size, - count: u64, - ) -> AllocationProvenance<Prov> { - let provenance = self.range_get_provenance(cx, src); - if provenance.is_empty() { - return AllocationProvenance { dest_provenance: Vec::new() }; - } - - let size = src.size; - let mut new_provenance = Vec::with_capacity(provenance.len() * (count as usize)); - - // If `count` is large, this is rather wasteful -- we are allocating a big array here, which - // is mostly filled with redundant information since it's just N copies of the same `Prov`s - // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range` - // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces - // the right sequence of provenance for all N copies. 
- for i in 0..count { - new_provenance.extend(provenance.iter().map(|&(offset, reloc)| { - // compute offset for current repetition - let dest_offset = dest + size * i; // `Size` operations - ( - // shift offsets from source allocation to destination allocation - (offset + dest_offset) - src.start, // `Size` operations - reloc, - ) - })); - } - - AllocationProvenance { dest_provenance: new_provenance } - } - /// Applies a provenance copy. - /// The affected range, as defined in the parameters to `prepare_provenance_copy` is expected + /// Applies a previously prepared provenance copy. + /// The affected range, as defined in the parameters to `provenance().prepare_copy` is expected /// to be clear of provenance. /// /// This is dangerous to use as it can violate internal `Allocation` invariants! /// It only exists to support an efficient implementation of `mem_copy_repeatedly`. - pub fn mark_provenance_range(&mut self, provenance: AllocationProvenance<Prov>) { - self.provenance.0.insert_presorted(provenance.dest_provenance); - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Uninitialized byte tracking -//////////////////////////////////////////////////////////////////////////////// - -type Block = u64; - -/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte -/// is initialized. If it is `false` the byte is uninitialized. -// Note: for performance reasons when interning, some of the `InitMask` fields can be partially -// hashed. (see the `Hash` impl below for more details), so the impl is not derived. -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)] -#[derive(HashStable)] -pub struct InitMask { - blocks: Vec<Block>, - len: Size, -} - -// Const allocations are only hashed for interning. However, they can be large, making the hashing -// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially -// big buffers like the allocation's init mask. We can partially hash some fields when they're -// large. -impl hash::Hash for InitMask { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - const MAX_BLOCKS_TO_HASH: usize = MAX_BYTES_TO_HASH / std::mem::size_of::<Block>(); - const MAX_BLOCKS_LEN: usize = MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>(); - - // Partially hash the `blocks` buffer when it is large. To limit collisions with common - // prefixes and suffixes, we hash the length and some slices of the buffer. - let block_count = self.blocks.len(); - if block_count > MAX_BLOCKS_LEN { - // Hash the buffer's length. - block_count.hash(state); - - // And its head and tail. - self.blocks[..MAX_BLOCKS_TO_HASH].hash(state); - self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state); - } else { - self.blocks.hash(state); - } - - // Hash the other fields as usual. - self.len.hash(state); - } -} - -impl InitMask { - pub const BLOCK_SIZE: u64 = 64; - - #[inline] - fn bit_index(bits: Size) -> (usize, usize) { - // BLOCK_SIZE is the number of bits that can fit in a `Block`. - // Each bit in a `Block` represents the initialization state of one byte of an allocation, - // so we use `.bytes()` here. 
- let bits = bits.bytes(); - let a = bits / InitMask::BLOCK_SIZE; - let b = bits % InitMask::BLOCK_SIZE; - (usize::try_from(a).unwrap(), usize::try_from(b).unwrap()) - } - - #[inline] - fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size { - let block = block.try_into().ok().unwrap(); - let bit = bit.try_into().ok().unwrap(); - Size::from_bytes(block * InitMask::BLOCK_SIZE + bit) - } - - pub fn new(size: Size, state: bool) -> Self { - let mut m = InitMask { blocks: vec![], len: Size::ZERO }; - m.grow(size, state); - m - } - - pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { - let len = self.len; - if end > len { - self.grow(end - len, new_state); - } - self.set_range_inbounds(start, end, new_state); - } - - pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { - let (blocka, bita) = Self::bit_index(start); - let (blockb, bitb) = Self::bit_index(end); - if blocka == blockb { - // First set all bits except the first `bita`, - // then unset the last `64 - bitb` bits. - let range = if bitb == 0 { - u64::MAX << bita - } else { - (u64::MAX << bita) & (u64::MAX >> (64 - bitb)) - }; - if new_state { - self.blocks[blocka] |= range; - } else { - self.blocks[blocka] &= !range; - } - return; - } - // across block boundaries - if new_state { - // Set `bita..64` to `1`. - self.blocks[blocka] |= u64::MAX << bita; - // Set `0..bitb` to `1`. - if bitb != 0 { - self.blocks[blockb] |= u64::MAX >> (64 - bitb); - } - // Fill in all the other blocks (much faster than one bit at a time). - for block in (blocka + 1)..blockb { - self.blocks[block] = u64::MAX; - } - } else { - // Set `bita..64` to `0`. - self.blocks[blocka] &= !(u64::MAX << bita); - // Set `0..bitb` to `0`. - if bitb != 0 { - self.blocks[blockb] &= !(u64::MAX >> (64 - bitb)); - } - // Fill in all the other blocks (much faster than one bit at a time). - for block in (blocka + 1)..blockb { - self.blocks[block] = 0; - } - } - } - - #[inline] - pub fn get(&self, i: Size) -> bool { - let (block, bit) = Self::bit_index(i); - (self.blocks[block] & (1 << bit)) != 0 + pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) { + self.provenance.apply_copy(copy) } - #[inline] - pub fn set(&mut self, i: Size, new_state: bool) { - let (block, bit) = Self::bit_index(i); - self.set_bit(block, bit, new_state); - } - - #[inline] - fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) { - if new_state { - self.blocks[block] |= 1 << bit; - } else { - self.blocks[block] &= !(1 << bit); - } - } - - pub fn grow(&mut self, amount: Size, new_state: bool) { - if amount.bytes() == 0 { - return; - } - let unused_trailing_bits = - u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes(); - if amount.bytes() > unused_trailing_bits { - let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1; - self.blocks.extend( - // FIXME(oli-obk): optimize this by repeating `new_state as Block`. - iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()), - ); - } - let start = self.len; - self.len += amount; - self.set_range_inbounds(start, start + amount, new_state); // `Size` operation - } - - /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init. - fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> { - /// A fast implementation of `find_bit`, - /// which skips over an entire block at a time if it's all 0s (resp. 1s), - /// and finds the first 1 (resp. 
0) bit inside a block using `trailing_zeros` instead of a loop. - /// - /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity, - /// and with the least significant bit (and lowest block) first: - /// ```text - /// 00000000|00000000 - /// ^ ^ ^ ^ - /// index: 0 7 8 15 - /// ``` - /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit. - fn find_bit_fast( - init_mask: &InitMask, - start: Size, - end: Size, - is_init: bool, - ) -> Option<Size> { - /// Search one block, returning the index of the first bit equal to `is_init`. - fn search_block( - bits: Block, - block: usize, - start_bit: usize, - is_init: bool, - ) -> Option<Size> { - // For the following examples, assume this function was called with: - // bits = 0b00111011 - // start_bit = 3 - // is_init = false - // Note that, for the examples in this function, the most significant bit is written first, - // which is backwards compared to the comments in `find_bit`/`find_bit_fast`. - - // Invert bits so we're always looking for the first set bit. - // ! 0b00111011 - // bits = 0b11000100 - let bits = if is_init { bits } else { !bits }; - // Mask off unused start bits. - // 0b11000100 - // & 0b11111000 - // bits = 0b11000000 - let bits = bits & (!0 << start_bit); - // Find set bit, if any. - // bit = trailing_zeros(0b11000000) - // bit = 6 - if bits == 0 { - None - } else { - let bit = bits.trailing_zeros(); - Some(InitMask::size_from_bit_index(block, bit)) - } - } - - if start >= end { - return None; - } - - // Convert `start` and `end` to block indexes and bit indexes within each block. - // We must convert `end` to an inclusive bound to handle block boundaries correctly. - // - // For example: - // - // (a) 00000000|00000000 (b) 00000000| - // ^~~~~~~~~~~^ ^~~~~~~~~^ - // start end start end - // - // In both cases, the block index of `end` is 1. - // But we do want to search block 1 in (a), and we don't in (b). - // - // We subtract 1 from both end positions to make them inclusive: - // - // (a) 00000000|00000000 (b) 00000000| - // ^~~~~~~~~~^ ^~~~~~~^ - // start end_inclusive start end_inclusive - // - // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0. - // This provides the desired behavior of searching blocks 0 and 1 for (a), - // and searching only block 0 for (b). - // There is no concern of overflows since we checked for `start >= end` above. - let (start_block, start_bit) = InitMask::bit_index(start); - let end_inclusive = Size::from_bytes(end.bytes() - 1); - let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive); - - // Handle first block: need to skip `start_bit` bits. - // - // We need to handle the first block separately, - // because there may be bits earlier in the block that should be ignored, - // such as the bit marked (1) in this example: - // - // (1) - // -|------ - // (c) 01000000|00000000|00000001 - // ^~~~~~~~~~~~~~~~~~^ - // start end - if let Some(i) = - search_block(init_mask.blocks[start_block], start_block, start_bit, is_init) - { - // If the range is less than a block, we may find a matching bit after `end`. - // - // For example, we shouldn't successfully find bit (2), because it's after `end`: - // - // (2) - // -------| - // (d) 00000001|00000000|00000001 - // ^~~~~^ - // start end - // - // An alternative would be to mask off end bits in the same way as we do for start bits, - // but performing this check afterwards is faster and simpler to implement. 
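The `search_block` helper being removed here (it reappears unchanged in the new `init_mask.rs` below) rests on one bit trick: invert the block when searching for a 0, mask off everything below `start_bit`, and let `trailing_zeros` report the hit. A standalone sketch over 8-bit blocks, reusing the worked example from the comment above (`bits = 0b00111011`, `start_bit = 3`, `is_init = false`); this free function is illustrative only, not the rustc code itself:

```rust
/// Find the first bit at index >= `start_bit` in `bits` that equals `is_init`,
/// using the invert + mask + trailing_zeros trick described above (8-bit blocks).
fn search_block(bits: u8, start_bit: u32, is_init: bool) -> Option<u32> {
    // Invert so we are always looking for the first *set* bit.
    let bits = if is_init { bits } else { !bits };
    // Mask off the bits below `start_bit`.
    let bits = bits & (!0u8 << start_bit);
    if bits == 0 { None } else { Some(bits.trailing_zeros()) }
}

fn main() {
    // The example from the comment: the first uninit (0) bit at index >= 3 is bit 6.
    assert_eq!(search_block(0b0011_1011, 3, false), Some(6));
    // Searching for an init (1) bit starting at index 2 finds bit 3.
    assert_eq!(search_block(0b0011_1011, 2, true), Some(3));
}
```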
- if i < end { - return Some(i); - } else { - return None; - } - } - - // Handle remaining blocks. - // - // We can skip over an entire block at once if it's all 0s (resp. 1s). - // The block marked (3) in this example is the first block that will be handled by this loop, - // and it will be skipped for that reason: - // - // (3) - // -------- - // (e) 01000000|00000000|00000001 - // ^~~~~~~~~~~~~~~~~~^ - // start end - if start_block < end_block_inclusive { - // This loop is written in a specific way for performance. - // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`, - // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`, - // because both alternatives result in significantly worse codegen. - // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`, - // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte). - for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1] - .iter() - .zip(start_block + 1..) - { - if let Some(i) = search_block(bits, block, 0, is_init) { - // If this is the last block, we may find a matching bit after `end`. - // - // For example, we shouldn't successfully find bit (4), because it's after `end`: - // - // (4) - // -------| - // (f) 00000001|00000000|00000001 - // ^~~~~~~~~~~~~~~~~~^ - // start end - // - // As above with example (d), we could handle the end block separately and mask off end bits, - // but unconditionally searching an entire block at once and performing this check afterwards - // is faster and much simpler to implement. - if i < end { - return Some(i); - } else { - return None; - } - } - } - } - - None - } - - #[cfg_attr(not(debug_assertions), allow(dead_code))] - fn find_bit_slow( - init_mask: &InitMask, - start: Size, - end: Size, - is_init: bool, - ) -> Option<Size> { - (start..end).find(|&i| init_mask.get(i) == is_init) - } - - let result = find_bit_fast(self, start, end, is_init); - - debug_assert_eq!( - result, - find_bit_slow(self, start, end, is_init), - "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}", - start, - end, - is_init, - self - ); - - result - } -} - -/// A contiguous chunk of initialized or uninitialized memory. -pub enum InitChunk { - Init(Range<Size>), - Uninit(Range<Size>), -} - -impl InitChunk { - #[inline] - pub fn is_init(&self) -> bool { - match self { - Self::Init(_) => true, - Self::Uninit(_) => false, - } - } - - #[inline] - pub fn range(&self) -> Range<Size> { - match self { - Self::Init(r) => r.clone(), - Self::Uninit(r) => r.clone(), - } - } -} - -impl InitMask { - /// Checks whether the range `start..end` (end-exclusive) is entirely initialized. - /// - /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte - /// indexes for the first contiguous span of the uninitialized access. 
- #[inline] - pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> { - if end > self.len { - return Err(AllocRange::from(self.len..end)); - } - - let uninit_start = self.find_bit(start, end, false); - - match uninit_start { - Some(uninit_start) => { - let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end); - Err(AllocRange::from(uninit_start..uninit_end)) - } - None => Ok(()), - } - } - - /// Returns an iterator, yielding a range of byte indexes for each contiguous region - /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive). - /// - /// The iterator guarantees the following: - /// - Chunks are nonempty. - /// - Chunks are adjacent (each range's start is equal to the previous range's end). - /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`). - /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`]. - #[inline] - pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> { - assert!(end <= self.len); - - let is_init = if start < end { - self.get(start) - } else { - // `start..end` is empty: there are no chunks, so use some arbitrary value - false - }; - - InitChunkIter { init_mask: self, is_init, start, end } - } -} - -/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`]. -#[derive(Clone)] -pub struct InitChunkIter<'a> { - init_mask: &'a InitMask, - /// Whether the next chunk we will return is initialized. - /// If there are no more chunks, contains some arbitrary value. - is_init: bool, - /// The current byte index into `init_mask`. - start: Size, - /// The end byte index into `init_mask`. - end: Size, -} - -impl<'a> Iterator for InitChunkIter<'a> { - type Item = InitChunk; - - #[inline] - fn next(&mut self) -> Option<Self::Item> { - if self.start >= self.end { - return None; - } - - let end_of_chunk = - self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end); - let range = self.start..end_of_chunk; - - let ret = - Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) }); - - self.is_init = !self.is_init; - self.start = end_of_chunk; - - ret - } -} - -/// Uninitialized bytes. -impl<Prov: Copy, Extra> Allocation<Prov, Extra> { - /// Checks whether the given range is entirely initialized. - /// - /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte - /// indexes of the first contiguous uninitialized access. - fn is_init(&self, range: AllocRange) -> Result<(), AllocRange> { - self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition - } - - /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes` - /// error which will report the first range of bytes which is uninitialized. - fn check_init(&self, range: AllocRange) -> AllocResult { - self.is_init(range).map_err(|uninit_range| { - AllocError::InvalidUninitBytes(Some(UninitBytesAccess { - access: range, - uninit: uninit_range, - })) - }) - } - - fn mark_init(&mut self, range: AllocRange, is_init: bool) { - if range.size.bytes() == 0 { - return; - } - assert!(self.mutability == Mutability::Mut); - self.init_mask.set_range(range.start, range.end(), is_init); - } -} - -/// Run-length encoding of the uninit mask. -/// Used to copy parts of a mask multiple times to another allocation. -pub struct InitMaskCompressed { - /// Whether the first range is initialized. - initial: bool, - /// The lengths of ranges that are run-length encoded. 
- /// The initialization state of the ranges alternate starting with `initial`. - ranges: smallvec::SmallVec<[u64; 1]>, -} - -impl InitMaskCompressed { - pub fn no_bytes_init(&self) -> bool { - // The `ranges` are run-length encoded and of alternating initialization state. - // So if `ranges.len() > 1` then the second block is an initialized range. - !self.initial && self.ranges.len() == 1 - } -} - -/// Transferring the initialization mask to other allocations. -impl<Prov, Extra> Allocation<Prov, Extra> { - /// Creates a run-length encoding of the initialization mask; panics if range is empty. - /// - /// This is essentially a more space-efficient version of - /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`. - pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed { - // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`), - // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from - // the source and write it to the destination. Even if we optimized the memory accesses, - // we'd be doing all of this `repeat` times. - // Therefore we precompute a compressed version of the initialization mask of the source value and - // then write it back `repeat` times without computing any more information from the source. - - // A precomputed cache for ranges of initialized / uninitialized bits - // 0000010010001110 will become - // `[5, 1, 2, 1, 3, 3, 1]`, - // where each element toggles the state. - - let mut ranges = smallvec::SmallVec::<[u64; 1]>::new(); - - let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable(); - - let initial = chunks.peek().expect("range should be nonempty").is_init(); - - // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks. - for chunk in chunks { - let len = chunk.range().end.bytes() - chunk.range().start.bytes(); - ranges.push(len); - } - - InitMaskCompressed { ranges, initial } - } - - /// Applies multiple instances of the run-length encoding to the initialization mask. + /// Applies a previously prepared copy of the init mask. /// /// This is dangerous to use as it can violate internal `Allocation` invariants! /// It only exists to support an efficient implementation of `mem_copy_repeatedly`. - pub fn mark_compressed_init_range( - &mut self, - defined: &InitMaskCompressed, - range: AllocRange, - repeat: u64, - ) { - // An optimization where we can just overwrite an entire range of initialization - // bits if they are going to be uniformly `1` or `0`. 
- if defined.ranges.len() <= 1 { - self.init_mask.set_range_inbounds( - range.start, - range.start + range.size * repeat, // `Size` operations - defined.initial, - ); - return; - } - - for mut j in 0..repeat { - j *= range.size.bytes(); - j += range.start.bytes(); - let mut cur = defined.initial; - for range in &defined.ranges { - let old_j = j; - j += range; - self.init_mask.set_range_inbounds( - Size::from_bytes(old_j), - Size::from_bytes(j), - cur, - ); - cur = !cur; - } - } + pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) { + self.init_mask.apply_copy(copy, range, repeat) } } diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs new file mode 100644 index 000000000..82e9a961a --- /dev/null +++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs @@ -0,0 +1,530 @@ +use std::hash; +use std::iter; +use std::ops::Range; + +use rustc_target::abi::Size; + +use super::AllocRange; + +type Block = u64; + +/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte +/// is initialized. If it is `false` the byte is uninitialized. +// Note: for performance reasons when interning, some of the `InitMask` fields can be partially +// hashed. (see the `Hash` impl below for more details), so the impl is not derived. +#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable)] +#[derive(HashStable)] +pub struct InitMask { + blocks: Vec<Block>, + len: Size, +} + +// Const allocations are only hashed for interning. However, they can be large, making the hashing +// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially +// big buffers like the allocation's init mask. We can partially hash some fields when they're +// large. +impl hash::Hash for InitMask { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>(); + const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>(); + + // Partially hash the `blocks` buffer when it is large. To limit collisions with common + // prefixes and suffixes, we hash the length and some slices of the buffer. + let block_count = self.blocks.len(); + if block_count > MAX_BLOCKS_LEN { + // Hash the buffer's length. + block_count.hash(state); + + // And its head and tail. + self.blocks[..MAX_BLOCKS_TO_HASH].hash(state); + self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state); + } else { + self.blocks.hash(state); + } + + // Hash the other fields as usual. + self.len.hash(state); + } +} + +impl InitMask { + pub const BLOCK_SIZE: u64 = 64; + + pub fn new(size: Size, state: bool) -> Self { + let mut m = InitMask { blocks: vec![], len: Size::ZERO }; + m.grow(size, state); + m + } + + #[inline] + fn bit_index(bits: Size) -> (usize, usize) { + // BLOCK_SIZE is the number of bits that can fit in a `Block`. + // Each bit in a `Block` represents the initialization state of one byte of an allocation, + // so we use `.bytes()` here. 
+ let bits = bits.bytes(); + let a = bits / InitMask::BLOCK_SIZE; + let b = bits % InitMask::BLOCK_SIZE; + (usize::try_from(a).unwrap(), usize::try_from(b).unwrap()) + } + + #[inline] + fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size { + let block = block.try_into().ok().unwrap(); + let bit = bit.try_into().ok().unwrap(); + Size::from_bytes(block * InitMask::BLOCK_SIZE + bit) + } + + /// Checks whether the `range` is entirely initialized. + /// + /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte + /// indexes for the first contiguous span of the uninitialized access. + #[inline] + pub fn is_range_initialized(&self, range: AllocRange) -> Result<(), AllocRange> { + let end = range.end(); + if end > self.len { + return Err(AllocRange::from(self.len..end)); + } + + let uninit_start = self.find_bit(range.start, end, false); + + match uninit_start { + Some(uninit_start) => { + let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end); + Err(AllocRange::from(uninit_start..uninit_end)) + } + None => Ok(()), + } + } + + pub fn set_range(&mut self, range: AllocRange, new_state: bool) { + let end = range.end(); + let len = self.len; + if end > len { + self.grow(end - len, new_state); + } + self.set_range_inbounds(range.start, end, new_state); + } + + fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { + let (blocka, bita) = Self::bit_index(start); + let (blockb, bitb) = Self::bit_index(end); + if blocka == blockb { + // First set all bits except the first `bita`, + // then unset the last `64 - bitb` bits. + let range = if bitb == 0 { + u64::MAX << bita + } else { + (u64::MAX << bita) & (u64::MAX >> (64 - bitb)) + }; + if new_state { + self.blocks[blocka] |= range; + } else { + self.blocks[blocka] &= !range; + } + return; + } + // across block boundaries + if new_state { + // Set `bita..64` to `1`. + self.blocks[blocka] |= u64::MAX << bita; + // Set `0..bitb` to `1`. + if bitb != 0 { + self.blocks[blockb] |= u64::MAX >> (64 - bitb); + } + // Fill in all the other blocks (much faster than one bit at a time). + for block in (blocka + 1)..blockb { + self.blocks[block] = u64::MAX; + } + } else { + // Set `bita..64` to `0`. + self.blocks[blocka] &= !(u64::MAX << bita); + // Set `0..bitb` to `0`. + if bitb != 0 { + self.blocks[blockb] &= !(u64::MAX >> (64 - bitb)); + } + // Fill in all the other blocks (much faster than one bit at a time). + for block in (blocka + 1)..blockb { + self.blocks[block] = 0; + } + } + } + + #[inline] + pub fn get(&self, i: Size) -> bool { + let (block, bit) = Self::bit_index(i); + (self.blocks[block] & (1 << bit)) != 0 + } + + fn grow(&mut self, amount: Size, new_state: bool) { + if amount.bytes() == 0 { + return; + } + let unused_trailing_bits = + u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes(); + if amount.bytes() > unused_trailing_bits { + let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1; + self.blocks.extend( + // FIXME(oli-obk): optimize this by repeating `new_state as Block`. + iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()), + ); + } + let start = self.len; + self.len += amount; + self.set_range_inbounds(start, start + amount, new_state); // `Size` operation + } + + /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init. 
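The block/bit arithmetic in `bit_index` and `size_from_bit_index` above is a plain divmod by the block width (64 bits, one bit per byte of the allocation). A tiny standalone illustration with ordinary integers instead of `Size`; the function names here are hypothetical:

```rust
const BLOCK_SIZE: u64 = 64; // bits per block, one bit per byte of the allocation

/// Map a byte offset to (block index, bit index within that block).
fn bit_index(offset: u64) -> (usize, usize) {
    ((offset / BLOCK_SIZE) as usize, (offset % BLOCK_SIZE) as usize)
}

/// The inverse: recover the byte offset from (block, bit).
fn offset_from_bit_index(block: usize, bit: usize) -> u64 {
    block as u64 * BLOCK_SIZE + bit as u64
}

fn main() {
    // Byte 200 of an allocation lives in block 3, bit 8 of the init mask.
    assert_eq!(bit_index(200), (3, 8));
    assert_eq!(offset_from_bit_index(3, 8), 200);
}
```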
+ fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> { + /// A fast implementation of `find_bit`, + /// which skips over an entire block at a time if it's all 0s (resp. 1s), + /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop. + /// + /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity, + /// and with the least significant bit (and lowest block) first: + /// ```text + /// 00000000|00000000 + /// ^ ^ ^ ^ + /// index: 0 7 8 15 + /// ``` + /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit. + fn find_bit_fast( + init_mask: &InitMask, + start: Size, + end: Size, + is_init: bool, + ) -> Option<Size> { + /// Search one block, returning the index of the first bit equal to `is_init`. + fn search_block( + bits: Block, + block: usize, + start_bit: usize, + is_init: bool, + ) -> Option<Size> { + // For the following examples, assume this function was called with: + // bits = 0b00111011 + // start_bit = 3 + // is_init = false + // Note that, for the examples in this function, the most significant bit is written first, + // which is backwards compared to the comments in `find_bit`/`find_bit_fast`. + + // Invert bits so we're always looking for the first set bit. + // ! 0b00111011 + // bits = 0b11000100 + let bits = if is_init { bits } else { !bits }; + // Mask off unused start bits. + // 0b11000100 + // & 0b11111000 + // bits = 0b11000000 + let bits = bits & (!0 << start_bit); + // Find set bit, if any. + // bit = trailing_zeros(0b11000000) + // bit = 6 + if bits == 0 { + None + } else { + let bit = bits.trailing_zeros(); + Some(InitMask::size_from_bit_index(block, bit)) + } + } + + if start >= end { + return None; + } + + // Convert `start` and `end` to block indexes and bit indexes within each block. + // We must convert `end` to an inclusive bound to handle block boundaries correctly. + // + // For example: + // + // (a) 00000000|00000000 (b) 00000000| + // ^~~~~~~~~~~^ ^~~~~~~~~^ + // start end start end + // + // In both cases, the block index of `end` is 1. + // But we do want to search block 1 in (a), and we don't in (b). + // + // We subtract 1 from both end positions to make them inclusive: + // + // (a) 00000000|00000000 (b) 00000000| + // ^~~~~~~~~~^ ^~~~~~~^ + // start end_inclusive start end_inclusive + // + // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0. + // This provides the desired behavior of searching blocks 0 and 1 for (a), + // and searching only block 0 for (b). + // There is no concern of overflows since we checked for `start >= end` above. + let (start_block, start_bit) = InitMask::bit_index(start); + let end_inclusive = Size::from_bytes(end.bytes() - 1); + let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive); + + // Handle first block: need to skip `start_bit` bits. + // + // We need to handle the first block separately, + // because there may be bits earlier in the block that should be ignored, + // such as the bit marked (1) in this example: + // + // (1) + // -|------ + // (c) 01000000|00000000|00000001 + // ^~~~~~~~~~~~~~~~~~^ + // start end + if let Some(i) = + search_block(init_mask.blocks[start_block], start_block, start_bit, is_init) + { + // If the range is less than a block, we may find a matching bit after `end`. 
+ // + // For example, we shouldn't successfully find bit (2), because it's after `end`: + // + // (2) + // -------| + // (d) 00000001|00000000|00000001 + // ^~~~~^ + // start end + // + // An alternative would be to mask off end bits in the same way as we do for start bits, + // but performing this check afterwards is faster and simpler to implement. + if i < end { + return Some(i); + } else { + return None; + } + } + + // Handle remaining blocks. + // + // We can skip over an entire block at once if it's all 0s (resp. 1s). + // The block marked (3) in this example is the first block that will be handled by this loop, + // and it will be skipped for that reason: + // + // (3) + // -------- + // (e) 01000000|00000000|00000001 + // ^~~~~~~~~~~~~~~~~~^ + // start end + if start_block < end_block_inclusive { + // This loop is written in a specific way for performance. + // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`, + // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`, + // because both alternatives result in significantly worse codegen. + // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`, + // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte). + for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1] + .iter() + .zip(start_block + 1..) + { + if let Some(i) = search_block(bits, block, 0, is_init) { + // If this is the last block, we may find a matching bit after `end`. + // + // For example, we shouldn't successfully find bit (4), because it's after `end`: + // + // (4) + // -------| + // (f) 00000001|00000000|00000001 + // ^~~~~~~~~~~~~~~~~~^ + // start end + // + // As above with example (d), we could handle the end block separately and mask off end bits, + // but unconditionally searching an entire block at once and performing this check afterwards + // is faster and much simpler to implement. + if i < end { + return Some(i); + } else { + return None; + } + } + } + } + + None + } + + #[cfg_attr(not(debug_assertions), allow(dead_code))] + fn find_bit_slow( + init_mask: &InitMask, + start: Size, + end: Size, + is_init: bool, + ) -> Option<Size> { + (start..end).find(|&i| init_mask.get(i) == is_init) + } + + let result = find_bit_fast(self, start, end, is_init); + + debug_assert_eq!( + result, + find_bit_slow(self, start, end, is_init), + "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}", + start, + end, + is_init, + self + ); + + result + } +} + +/// A contiguous chunk of initialized or uninitialized memory. +pub enum InitChunk { + Init(Range<Size>), + Uninit(Range<Size>), +} + +impl InitChunk { + #[inline] + pub fn is_init(&self) -> bool { + match self { + Self::Init(_) => true, + Self::Uninit(_) => false, + } + } + + #[inline] + pub fn range(&self) -> Range<Size> { + match self { + Self::Init(r) => r.clone(), + Self::Uninit(r) => r.clone(), + } + } +} + +impl InitMask { + /// Returns an iterator, yielding a range of byte indexes for each contiguous region + /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive). + /// + /// The iterator guarantees the following: + /// - Chunks are nonempty. + /// - Chunks are adjacent (each range's start is equal to the previous range's end). + /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`). 
+ /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`]. + #[inline] + pub fn range_as_init_chunks(&self, range: AllocRange) -> InitChunkIter<'_> { + let start = range.start; + let end = range.end(); + assert!(end <= self.len); + + let is_init = if start < end { + self.get(start) + } else { + // `start..end` is empty: there are no chunks, so use some arbitrary value + false + }; + + InitChunkIter { init_mask: self, is_init, start, end } + } +} + +/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`]. +#[derive(Clone)] +pub struct InitChunkIter<'a> { + init_mask: &'a InitMask, + /// Whether the next chunk we will return is initialized. + /// If there are no more chunks, contains some arbitrary value. + is_init: bool, + /// The current byte index into `init_mask`. + start: Size, + /// The end byte index into `init_mask`. + end: Size, +} + +impl<'a> Iterator for InitChunkIter<'a> { + type Item = InitChunk; + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + if self.start >= self.end { + return None; + } + + let end_of_chunk = + self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end); + let range = self.start..end_of_chunk; + + let ret = + Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) }); + + self.is_init = !self.is_init; + self.start = end_of_chunk; + + ret + } +} + +/// Run-length encoding of the uninit mask. +/// Used to copy parts of a mask multiple times to another allocation. +pub struct InitCopy { + /// Whether the first range is initialized. + initial: bool, + /// The lengths of ranges that are run-length encoded. + /// The initialization state of the ranges alternate starting with `initial`. + ranges: smallvec::SmallVec<[u64; 1]>, +} + +impl InitCopy { + pub fn no_bytes_init(&self) -> bool { + // The `ranges` are run-length encoded and of alternating initialization state. + // So if `ranges.len() > 1` then the second block is an initialized range. + !self.initial && self.ranges.len() == 1 + } +} + +/// Transferring the initialization mask to other allocations. +impl InitMask { + /// Creates a run-length encoding of the initialization mask; panics if range is empty. + /// + /// This is essentially a more space-efficient version of + /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`. + pub fn prepare_copy(&self, range: AllocRange) -> InitCopy { + // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`), + // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from + // the source and write it to the destination. Even if we optimized the memory accesses, + // we'd be doing all of this `repeat` times. + // Therefore we precompute a compressed version of the initialization mask of the source value and + // then write it back `repeat` times without computing any more information from the source. + + // A precomputed cache for ranges of initialized / uninitialized bits + // 0000010010001110 will become + // `[5, 1, 2, 1, 3, 3, 1]`, + // where each element toggles the state. + + let mut ranges = smallvec::SmallVec::<[u64; 1]>::new(); + + let mut chunks = self.range_as_init_chunks(range).peekable(); + + let initial = chunks.peek().expect("range should be nonempty").is_init(); + + // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks. 
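Before the loop that follows, it may help to see the run-length encoding that `prepare_copy` builds, in isolation. This sketch works on a plain `&[bool]` instead of the packed `InitMask`, mirrors the `InitCopy { initial, ranges }` shape, and reproduces the `0000010010001110` to `[5, 1, 2, 1, 3, 3, 1]` example from the comment above; it is an illustration, not the rustc implementation:

```rust
/// Run-length encode an init mask given as per-byte init flags.
/// Returns the state of the first run plus the run lengths; each length toggles the state.
fn rle(mask: &[bool]) -> (bool, Vec<u64>) {
    let initial = *mask.first().expect("range should be nonempty");
    let mut ranges = Vec::new();
    let mut cur = initial;
    let mut len = 0u64;
    for &bit in mask {
        if bit == cur {
            len += 1;
        } else {
            ranges.push(len);
            cur = bit;
            len = 1;
        }
    }
    ranges.push(len);
    (initial, ranges)
}

fn main() {
    // The example from the comment above: 0000010010001110.
    let mask: Vec<bool> = "0000010010001110".chars().map(|c| c == '1').collect();
    assert_eq!(rle(&mask), (false, vec![5, 1, 2, 1, 3, 3, 1]));
}
```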
+ for chunk in chunks { + let len = chunk.range().end.bytes() - chunk.range().start.bytes(); + ranges.push(len); + } + + InitCopy { ranges, initial } + } + + /// Applies multiple instances of the run-length encoding to the initialization mask. + pub fn apply_copy(&mut self, defined: InitCopy, range: AllocRange, repeat: u64) { + // An optimization where we can just overwrite an entire range of initialization + // bits if they are going to be uniformly `1` or `0`. + if defined.ranges.len() <= 1 { + self.set_range_inbounds( + range.start, + range.start + range.size * repeat, // `Size` operations + defined.initial, + ); + return; + } + + for mut j in 0..repeat { + j *= range.size.bytes(); + j += range.start.bytes(); + let mut cur = defined.initial; + for range in &defined.ranges { + let old_j = j; + j += range; + self.set_range_inbounds(Size::from_bytes(old_j), Size::from_bytes(j), cur); + cur = !cur; + } + } + } +} diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs new file mode 100644 index 000000000..ddd3f3943 --- /dev/null +++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs @@ -0,0 +1,321 @@ +//! Store the provenance for each byte in the range, with a more efficient +//! representation for the common case where PTR_SIZE consecutive bytes have the same provenance. + +use std::cmp; + +use rustc_data_structures::sorted_map::SortedMap; +use rustc_target::abi::{HasDataLayout, Size}; + +use super::{alloc_range, AllocError, AllocId, AllocRange, AllocResult, Provenance}; +use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; + +/// Stores the provenance information of pointers stored in memory. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +#[derive(HashStable)] +pub struct ProvenanceMap<Prov = AllocId> { + /// Provenance in this map applies from the given offset for an entire pointer-size worth of + /// bytes. Two entires in this map are always at least a pointer size apart. + ptrs: SortedMap<Size, Prov>, + /// Provenance in this map only applies to the given single byte. + /// This map is disjoint from the previous. It will always be empty when + /// `Prov::OFFSET_IS_ADDR` is false. + bytes: Option<Box<SortedMap<Size, Prov>>>, +} + +impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> { + fn decode(d: &mut D) -> Self { + assert!(!Prov::OFFSET_IS_ADDR); // only `AllocId` is ever serialized + Self { ptrs: Decodable::decode(d), bytes: None } + } +} + +impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> { + fn encode(&self, s: &mut S) { + let Self { ptrs, bytes } = self; + assert!(!Prov::OFFSET_IS_ADDR); // only `AllocId` is ever serialized + debug_assert!(bytes.is_none()); + ptrs.encode(s) + } +} + +impl<Prov> ProvenanceMap<Prov> { + pub fn new() -> Self { + ProvenanceMap { ptrs: SortedMap::new(), bytes: None } + } + + /// The caller must guarantee that the given provenance list is already sorted + /// by address and contain no duplicates. + pub fn from_presorted_ptrs(r: Vec<(Size, Prov)>) -> Self { + ProvenanceMap { ptrs: SortedMap::from_presorted_elements(r), bytes: None } + } +} + +impl ProvenanceMap { + /// Give access to the ptr-sized provenances (which can also be thought of as relocations, and + /// indeed that is how codegen treats them). + /// + /// Only exposed with `AllocId` provenance, since it panics if there is bytewise provenance. 
+ #[inline] + pub fn ptrs(&self) -> &SortedMap<Size, AllocId> { + debug_assert!(self.bytes.is_none()); // `AllocId::OFFSET_IS_ADDR` is false so this cannot fail + &self.ptrs + } +} + +impl<Prov: Provenance> ProvenanceMap<Prov> { + /// Returns all ptr-sized provenance in the given range. + /// If the range has length 0, returns provenance that crosses the edge between `start-1` and + /// `start`. + fn range_get_ptrs(&self, range: AllocRange, cx: &impl HasDataLayout) -> &[(Size, Prov)] { + // We have to go back `pointer_size - 1` bytes, as that one would still overlap with + // the beginning of this range. + let adjusted_start = Size::from_bytes( + range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1), + ); + self.ptrs.range(adjusted_start..range.end()) + } + + /// Returns all byte-wise provenance in the given range. + fn range_get_bytes(&self, range: AllocRange) -> &[(Size, Prov)] { + if let Some(bytes) = self.bytes.as_ref() { + bytes.range(range.start..range.end()) + } else { + &[] + } + } + + /// Get the provenance of a single byte. + pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> { + let prov = self.range_get_ptrs(alloc_range(offset, Size::from_bytes(1)), cx); + debug_assert!(prov.len() <= 1); + if let Some(entry) = prov.first() { + // If it overlaps with this byte, it is on this byte. + debug_assert!(self.bytes.as_ref().map_or(true, |b| b.get(&offset).is_none())); + Some(entry.1) + } else { + // Look up per-byte provenance. + self.bytes.as_ref().and_then(|b| b.get(&offset).copied()) + } + } + + /// Check if here is ptr-sized provenance at the given index. + /// Does not mean anything for bytewise provenance! But can be useful as an optimization. + pub fn get_ptr(&self, offset: Size) -> Option<Prov> { + self.ptrs.get(&offset).copied() + } + + /// Returns whether this allocation has provenance overlapping with the given range. + /// + /// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat + /// limit access to provenance outside of the `Allocation` abstraction. + /// + pub fn range_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool { + self.range_get_ptrs(range, cx).is_empty() && self.range_get_bytes(range).is_empty() + } + + /// Yields all the provenances stored in this map. + pub fn provenances(&self) -> impl Iterator<Item = Prov> + '_ { + let bytes = self.bytes.iter().flat_map(|b| b.values()); + self.ptrs.values().chain(bytes).copied() + } + + pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) { + debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size), cx)); + self.ptrs.insert(offset, prov); + } + + /// Removes all provenance inside the given range. + /// If there is provenance overlapping with the edges, might result in an error. + pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult { + let start = range.start; + let end = range.end(); + // Clear the bytewise part -- this is easy. + if Prov::OFFSET_IS_ADDR { + if let Some(bytes) = self.bytes.as_mut() { + bytes.remove_range(start..end); + } + } else { + debug_assert!(self.bytes.is_none()); + } + + // For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of + // provenance that overlaps with the given range. + let (first, last) = { + // Find all provenance overlapping the given range. + let provenance = self.range_get_ptrs(range, cx); + if provenance.is_empty() { + // No provenance in this range, we are done. 
+ return Ok(());
+ }
+
+ (
+ provenance.first().unwrap().0,
+ provenance.last().unwrap().0 + cx.data_layout().pointer_size,
+ )
+ };
+
+ // We need to handle clearing the provenance from parts of a pointer.
+ if first < start {
+ if !Prov::OFFSET_IS_ADDR {
+ // We can't split up the provenance into less than a pointer.
+ return Err(AllocError::PartialPointerOverwrite(first));
+ }
+ // Insert the remaining part in the bytewise provenance.
+ let prov = self.ptrs[&first];
+ let bytes = self.bytes.get_or_insert_with(Box::default);
+ for offset in first..start {
+ bytes.insert(offset, prov);
+ }
+ }
+ if last > end {
+ let begin_of_last = last - cx.data_layout().pointer_size;
+ if !Prov::OFFSET_IS_ADDR {
+ // We can't split up the provenance into less than a pointer.
+ return Err(AllocError::PartialPointerOverwrite(begin_of_last));
+ }
+ // Insert the remaining part in the bytewise provenance.
+ let prov = self.ptrs[&begin_of_last];
+ let bytes = self.bytes.get_or_insert_with(Box::default);
+ for offset in end..last {
+ bytes.insert(offset, prov);
+ }
+ }
+
+ // Forget all the provenance.
+ // Since provenances do not overlap, we know that removing until `last` (exclusive) is fine,
+ // i.e., this will not remove any other provenance just after the ones we care about.
+ self.ptrs.remove_range(first..last);
+
+ Ok(())
+ }
+}
+
+/// A partial, owned list of provenance to transfer into another allocation.
+///
+/// Offsets are already adjusted to the destination allocation.
+pub struct ProvenanceCopy<Prov> {
+ dest_ptrs: Option<Box<[(Size, Prov)]>>,
+ dest_bytes: Option<Box<[(Size, Prov)]>>,
+}
+
+impl<Prov: Provenance> ProvenanceMap<Prov> {
+ pub fn prepare_copy(
+ &self,
+ src: AllocRange,
+ dest: Size,
+ count: u64,
+ cx: &impl HasDataLayout,
+ ) -> AllocResult<ProvenanceCopy<Prov>> {
+ let shift_offset = move |idx, offset| {
+ // Compute the offset for the current repetition.
+ let dest_offset = dest + src.size * idx; // `Size` operations
+ // Shift offsets from the source allocation to the destination allocation.
+ (offset - src.start) + dest_offset // `Size` operations
+ };
+ let ptr_size = cx.data_layout().pointer_size;
+
+ // # Pointer-sized provenances
+ // Get the provenances that are entirely within this range.
+ // (Different from `range_get_ptrs` which asks if they overlap the range.)
+ // Only makes sense if we are copying at least one pointer worth of bytes.
+ let mut dest_ptrs_box = None;
+ if src.size >= ptr_size {
+ let adjusted_end = Size::from_bytes(src.end().bytes() - (ptr_size.bytes() - 1));
+ let ptrs = self.ptrs.range(src.start..adjusted_end);
+ // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
+ // is mostly filled with redundant information since it's just N copies of the same `Prov`s
+ // at slightly adjusted offsets. The reason we do this is so that `apply_copy` can use
+ // `insert_presorted`. That wouldn't work with an `Iterator` that just produces
+ // the right sequence of provenance for all N copies.
+ // Basically, this large array would have to be created anyway in the target allocation.
+ let mut dest_ptrs = Vec::with_capacity(ptrs.len() * (count as usize)); + for i in 0..count { + dest_ptrs + .extend(ptrs.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc))); + } + debug_assert_eq!(dest_ptrs.len(), dest_ptrs.capacity()); + dest_ptrs_box = Some(dest_ptrs.into_boxed_slice()); + }; + + // # Byte-sized provenances + // This includes the existing bytewise provenance in the range, and ptr provenance + // that overlaps with the begin/end of the range. + let mut dest_bytes_box = None; + let begin_overlap = self.range_get_ptrs(alloc_range(src.start, Size::ZERO), cx).first(); + let end_overlap = self.range_get_ptrs(alloc_range(src.end(), Size::ZERO), cx).first(); + if !Prov::OFFSET_IS_ADDR { + // There can't be any bytewise provenance, and we cannot split up the begin/end overlap. + if let Some(entry) = begin_overlap { + return Err(AllocError::PartialPointerCopy(entry.0)); + } + if let Some(entry) = end_overlap { + return Err(AllocError::PartialPointerCopy(entry.0)); + } + debug_assert!(self.bytes.is_none()); + } else { + let mut bytes = Vec::new(); + // First, if there is a part of a pointer at the start, add that. + if let Some(entry) = begin_overlap { + trace!("start overlapping entry: {entry:?}"); + // For really small copies, make sure we don't run off the end of the `src` range. + let entry_end = cmp::min(entry.0 + ptr_size, src.end()); + for offset in src.start..entry_end { + bytes.push((offset, entry.1)); + } + } else { + trace!("no start overlapping entry"); + } + // Then the main part, bytewise provenance from `self.bytes`. + if let Some(all_bytes) = self.bytes.as_ref() { + bytes.extend(all_bytes.range(src.start..src.end())); + } + // And finally possibly parts of a pointer at the end. + if let Some(entry) = end_overlap { + trace!("end overlapping entry: {entry:?}"); + // For really small copies, make sure we don't start before `src` does. + let entry_start = cmp::max(entry.0, src.start); + for offset in entry_start..src.end() { + if bytes.last().map_or(true, |bytes_entry| bytes_entry.0 < offset) { + // The last entry, if it exists, has a lower offset than us. + bytes.push((offset, entry.1)); + } else { + // There already is an entry for this offset in there! This can happen when the + // start and end range checks actually end up hitting the same pointer, so we + // already added this in the "pointer at the start" part above. + assert!(entry.0 <= src.start); + } + } + } else { + trace!("no end overlapping entry"); + } + trace!("byte provenances: {bytes:?}"); + + // And again a buffer for the new list on the target side. + let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize)); + for i in 0..count { + dest_bytes + .extend(bytes.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc))); + } + debug_assert_eq!(dest_bytes.len(), dest_bytes.capacity()); + dest_bytes_box = Some(dest_bytes.into_boxed_slice()); + } + + Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box }) + } + + /// Applies a provenance copy. + /// The affected range, as defined in the parameters to `prepare_copy` is expected + /// to be clear of provenance. 
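The `shift_offset` closure in `prepare_copy` above is the heart of the repetition logic: an entry at `offset` in the source range lands at `(offset - src.start) + dest + src.size * idx` for the `idx`-th copy. A standalone sketch of that arithmetic with plain `u64` offsets (hypothetical names, not the rustc API):

    /// Translate a provenance offset from the source range into the destination,
    /// for the `idx`-th of several back-to-back copies.
    fn shift_offset(offset: u64, src_start: u64, src_size: u64, dest: u64, idx: u64) -> u64 {
        let dest_offset = dest + src_size * idx; // where this repetition starts
        (offset - src_start) + dest_offset // keep the offset's position within the range
    }

    fn main() {
        // Copy a 16-byte source range starting at offset 4 to destination offset 100,
        // repeated 3 times; a provenance entry at source offset 8 lands at:
        let hits: Vec<u64> = (0..3).map(|i| shift_offset(8, 4, 16, 100, i)).collect();
        assert_eq!(hits, vec![104, 120, 136]);
    }

Because the resulting lists are already sorted by destination offset, `apply_copy` below only has to bulk-insert them with `insert_presorted`.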
+ pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>) { + if let Some(dest_ptrs) = copy.dest_ptrs { + self.ptrs.insert_presorted(dest_ptrs.into()); + } + if Prov::OFFSET_IS_ADDR { + if let Some(dest_bytes) = copy.dest_bytes && !dest_bytes.is_empty() { + self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into()); + } + } else { + debug_assert!(copy.dest_bytes.is_none()); + } + } +} diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs b/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs new file mode 100644 index 000000000..c9c3c50c5 --- /dev/null +++ b/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs @@ -0,0 +1,19 @@ +use super::*; + +#[test] +fn uninit_mask() { + let mut mask = InitMask::new(Size::from_bytes(500), false); + assert!(!mask.get(Size::from_bytes(499))); + mask.set_range(alloc_range(Size::from_bytes(499), Size::from_bytes(1)), true); + assert!(mask.get(Size::from_bytes(499))); + mask.set_range((100..256).into(), true); + for i in 0..100 { + assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set"); + } + for i in 100..256 { + assert!(mask.get(Size::from_bytes(i)), "{i} should be set"); + } + for i in 256..499 { + assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set"); + } +} diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index b5a50cc15..bd9cd53e1 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -16,8 +16,6 @@ pub enum ErrorHandled { /// Already reported an error for this evaluation, and the compilation is /// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`. Reported(ErrorGuaranteed), - /// Already emitted a lint for this evaluation. - Linted, /// Don't emit an error, the evaluation failed because the MIR was generic /// and the substs didn't fully monomorphize it. TooGeneric, @@ -89,18 +87,6 @@ fn print_backtrace(backtrace: &Backtrace) { eprintln!("\n\nAn error occurred in miri:\n{}", backtrace); } -impl From<ErrorHandled> for InterpErrorInfo<'_> { - fn from(err: ErrorHandled) -> Self { - match err { - ErrorHandled::Reported(ErrorGuaranteed { .. }) | ErrorHandled::Linted => { - err_inval!(ReferencedConstant) - } - ErrorHandled::TooGeneric => err_inval!(TooGeneric), - } - .into() - } -} - impl From<ErrorGuaranteed> for InterpErrorInfo<'_> { fn from(err: ErrorGuaranteed) -> Self { InterpError::InvalidProgram(InvalidProgramInfo::AlreadyReported(err)).into() @@ -138,9 +124,6 @@ impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> { pub enum InvalidProgramInfo<'tcx> { /// Resolution can fail if we are in a too generic context. TooGeneric, - /// Cannot compute this constant because it depends on another one - /// which already produced an error. - ReferencedConstant, /// Abort in case errors are already reported. AlreadyReported(ErrorGuaranteed), /// An error occurred during layout computation. @@ -158,9 +141,11 @@ impl fmt::Display for InvalidProgramInfo<'_> { use InvalidProgramInfo::*; match self { TooGeneric => write!(f, "encountered overly generic constant"), - ReferencedConstant => write!(f, "referenced constant has errors"), AlreadyReported(ErrorGuaranteed { .. 
}) => { - write!(f, "encountered constants with type errors, stopping evaluation") + write!( + f, + "an error has already been reported elsewhere (this should not usually be printed)" + ) } Layout(ref err) => write!(f, "{err}"), FnAbiAdjustForForeignAbi(ref err) => write!(f, "{err}"), @@ -401,16 +386,15 @@ impl fmt::Display for UndefinedBehaviorInfo { pub enum UnsupportedOpInfo { /// Free-form case. Only for errors that are never caught! Unsupported(String), - /// Overwriting parts of a pointer; the resulting state cannot be represented in our - /// `Allocation` data structure. See <https://github.com/rust-lang/miri/issues/2181>. - PartialPointerOverwrite(Pointer<AllocId>), - /// Attempting to `copy` parts of a pointer to somewhere else; the resulting state cannot be - /// represented in our `Allocation` data structure. See - /// <https://github.com/rust-lang/miri/issues/2181>. - PartialPointerCopy(Pointer<AllocId>), // // The variants below are only reachable from CTFE/const prop, miri will never emit them. // + /// Overwriting parts of a pointer; without knowing absolute addresses, the resulting state + /// cannot be represented by the CTFE interpreter. + PartialPointerOverwrite(Pointer<AllocId>), + /// Attempting to `copy` parts of a pointer to somewhere else; without knowing absolute + /// addresses, the resulting state cannot be represented by the CTFE interpreter. + PartialPointerCopy(Pointer<AllocId>), /// Encountered a pointer where we needed raw bytes. ReadPointerAsBytes, /// Accessing thread local statics diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index 5e3dfcbcc..d79cd8b7a 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -106,6 +106,7 @@ use rustc_ast::LitKind; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::{HashMapExt, Lock}; use rustc_data_structures::tiny_list::TinyList; +use rustc_errors::ErrorGuaranteed; use rustc_hir::def_id::DefId; use rustc_macros::HashStable; use rustc_middle::ty::print::with_no_trimmed_paths; @@ -127,8 +128,8 @@ pub use self::error::{ pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar}; pub use self::allocation::{ - alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask, - ProvenanceMap, + alloc_range, AllocError, AllocRange, AllocResult, Allocation, ConstAllocation, InitChunk, + InitChunkIter, }; pub use self::pointer::{Pointer, PointerArithmetic, Provenance}; @@ -176,7 +177,7 @@ pub enum LitToConstError { /// This is used for graceful error handling (`delay_span_bug`) in /// type checking (`Const::from_anon_const`). TypeError, - Reported, + Reported(ErrorGuaranteed), } #[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index 23c2ce647..9c270ba1e 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -103,8 +103,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {} /// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is /// mostly opaque; the `Machine` trait extends it with some more operations that also have access to /// some global state. -/// We don't actually care about this `Debug` bound (we use `Provenance::fmt` to format the entire -/// pointer), but `derive` adds some unnecessary bounds. 
+/// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`.
 pub trait Provenance: Copy + fmt::Debug {
 /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
 /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
@@ -115,14 +114,23 @@ pub trait Provenance: Copy + fmt::Debug {
 /// pointer, and implement ptr-to-int transmutation by stripping provenance.
 const OFFSET_IS_ADDR: bool;

- /// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
- /// (this avoids a separate trait in `allocation.rs` just for this purpose).
- const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
-
 /// Determines how a pointer should be printed.
+ ///
+ /// The default impl is only usable when `OFFSET_IS_ADDR == true`.
 fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
 where
- Self: Sized;
+ Self: Sized,
+ {
+ assert!(Self::OFFSET_IS_ADDR);
+ let (prov, addr) = ptr.into_parts(); // address is absolute
+ write!(f, "{:#x}", addr.bytes())?;
+ if f.alternate() {
+ write!(f, "{prov:#?}")?;
+ } else {
+ write!(f, "{prov:?}")?;
+ }
+ Ok(())
+ }

 /// If `OFFSET_IS_ADDR == false`, provenance must always be able to
 /// identify the allocation this ptr points to (i.e., this must return `Some`).
@@ -139,9 +147,6 @@ impl Provenance for AllocId {
 // so ptr-to-int casts are not possible (since we do not know the global physical offset).
 const OFFSET_IS_ADDR: bool = false;

- // For now, do not allow this, so that we keep our options open.
- const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
-
 fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 // Forward `alternate` flag to `alloc_id` printing.
 if f.alternate() {
@@ -168,7 +173,7 @@ impl Provenance for AllocId {
 /// Represents a pointer in the Miri engine.
 ///
 /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
-#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(Copy, Clone, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
 #[derive(HashStable)]
 pub struct Pointer<Prov = AllocId> {
 pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Prov` type)
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 473894ac1..b6c6e9d55 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -175,6 +175,8 @@ impl<'tcx> TyCtxt<'tcx> {

 impl<'tcx> TyCtxtAt<'tcx> {
 /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ ///
+ /// The span is ignored by the evaluation itself, but it still helps produce better query cycle errors.
 pub fn eval_static_initializer(
 self,
 def_id: DefId,
@@ -187,6 +189,8 @@ impl<'tcx> TyCtxtAt<'tcx> {
 }

 /// Evaluate anything constant-like, returning the allocation of the final memory.
+ ///
+ /// The span is ignored by the evaluation itself, but it still helps produce better query cycle errors.
fn eval_to_allocation( self, gid: GlobalId<'tcx>, diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs index ac5fddb7a..e6636e50e 100644 --- a/compiler/rustc_middle/src/mir/interpret/value.rs +++ b/compiler/rustc_middle/src/mir/interpret/value.rs @@ -1,6 +1,8 @@ use std::convert::{TryFrom, TryInto}; use std::fmt; +use either::{Either, Left, Right}; + use rustc_apfloat::{ ieee::{Double, Single}, Float, @@ -18,15 +20,15 @@ use super::{ /// Represents the result of const evaluation via the `eval_to_allocation` query. #[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)] pub struct ConstAlloc<'tcx> { - // the value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory` - // (so you can use `AllocMap::unwrap_memory`). + /// The value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory` + /// (so you can use `AllocMap::unwrap_memory`). pub alloc_id: AllocId, pub ty: Ty<'tcx>, } /// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for /// array length computations, enum discriminants and the pattern matching logic. -#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash)] #[derive(HashStable, Lift)] pub enum ConstValue<'tcx> { /// Used only for types with `layout::abi::Scalar` ABI. @@ -108,7 +110,7 @@ impl<'tcx> ConstValue<'tcx> { /// /// These variants would be private if there was a convenient way to achieve that in Rust. /// Do *not* match on a `Scalar`! Use the various `to_*` methods instead. -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)] +#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, Hash)] #[derive(HashStable)] pub enum Scalar<Prov = AllocId> { /// The raw bytes of a simple value. @@ -293,10 +295,10 @@ impl<Prov> Scalar<Prov> { pub fn to_bits_or_ptr_internal( self, target_size: Size, - ) -> Result<Result<u128, Pointer<Prov>>, ScalarSizeMismatch> { + ) -> Result<Either<u128, Pointer<Prov>>, ScalarSizeMismatch> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); Ok(match self { - Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| { + Scalar::Int(int) => Left(int.to_bits(target_size).map_err(|size| { ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes() } })?), Scalar::Ptr(ptr, sz) => { @@ -306,7 +308,7 @@ impl<Prov> Scalar<Prov> { data_size: sz.into(), }); } - Err(ptr) + Right(ptr) } }) } @@ -318,8 +320,8 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> { .to_bits_or_ptr_internal(cx.pointer_size()) .map_err(|s| err_ub!(ScalarSizeMismatch(s)))? { - Err(ptr) => Ok(ptr.into()), - Ok(bits) => { + Right(ptr) => Ok(ptr.into()), + Left(bits) => { let addr = u64::try_from(bits).unwrap(); Ok(Pointer::from_addr(addr)) } |
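The switch from the nested `Result<Result<u128, Pointer<Prov>>, ScalarSizeMismatch>` to `either::Either` gives the two success cases of `to_bits_or_ptr_internal` self-describing names instead of overloading `Ok`/`Err`. A minimal sketch of the same pattern with the `either` crate, which the patch itself imports; the toy types below stand in for the raw bits vs. the pointer case and are not the rustc types:

    use either::{Either, Left, Right};

    /// Stand-in for a pointer value; the real code returns `Pointer<Prov>` here.
    #[derive(Debug, PartialEq)]
    struct ToyPtr(u64);

    /// Either the raw bits of an integer scalar, or a pointer that must keep its provenance.
    fn bits_or_ptr(is_ptr: bool, raw: u64) -> Either<u128, ToyPtr> {
        if is_ptr { Right(ToyPtr(raw)) } else { Left(raw as u128) }
    }

    fn main() {
        match bits_or_ptr(false, 42) {
            Left(bits) => assert_eq!(bits, 42),
            Right(_) => unreachable!("not a pointer"),
        }
        match bits_or_ptr(true, 0x1000) {
            Left(_) => unreachable!("this one is a pointer"),
            Right(ptr) => assert_eq!(ptr, ToyPtr(0x1000)),
        }
    }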