path: root/compiler/rustc_middle/src/mir
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:32 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:32 +0000
commit     4547b622d8d29df964fa2914213088b148c498fc (patch)
tree       9fc6b25f3c3add6b745be9a2400a6e96140046e9 /compiler/rustc_middle/src/mir
parent     Releasing progress-linux version 1.66.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_middle/src/mir')
-rw-r--r--  compiler/rustc_middle/src/mir/generic_graphviz.rs                    |   7
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation.rs                | 849
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs      | 530
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs | 321
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/tests.rs          |  19
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/error.rs                     |  36
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/mod.rs                       |   7
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/pointer.rs                   |  27
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/queries.rs                   |   4
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/value.rs                     |  20
-rw-r--r--  compiler/rustc_middle/src/mir/mod.rs                                 | 178
-rw-r--r--  compiler/rustc_middle/src/mir/pretty.rs                              |  73
-rw-r--r--  compiler/rustc_middle/src/mir/syntax.rs                              |  31
-rw-r--r--  compiler/rustc_middle/src/mir/type_foldable.rs                       |   2
-rw-r--r--  compiler/rustc_middle/src/mir/visit.rs                               |  20
15 files changed, 1215 insertions, 909 deletions
diff --git a/compiler/rustc_middle/src/mir/generic_graphviz.rs b/compiler/rustc_middle/src/mir/generic_graphviz.rs
index 11ac45943..ccae7e159 100644
--- a/compiler/rustc_middle/src/mir/generic_graphviz.rs
+++ b/compiler/rustc_middle/src/mir/generic_graphviz.rs
@@ -126,7 +126,7 @@ impl<
write!(
w,
r#"<tr><td align="left" balign="left">{}</td></tr>"#,
- dot::escape_html(&section).replace('\n', "<br/>")
+ dot::escape_html(&section)
)?;
}
@@ -147,7 +147,7 @@ impl<
let src = self.node(source);
let trg = self.node(target);
let escaped_edge_label = if let Some(edge_label) = edge_labels.get(index) {
- dot::escape_html(edge_label).replace('\n', r#"<br align="left"/>"#)
+ dot::escape_html(edge_label)
} else {
"".to_owned()
};
@@ -162,8 +162,7 @@ impl<
where
W: Write,
{
- let lines = label.split('\n').map(|s| dot::escape_html(s)).collect::<Vec<_>>();
- let escaped_label = lines.join(r#"<br align="left"/>"#);
+ let escaped_label = dot::escape_html(label);
writeln!(w, r#" label=<<br/><br/>{}<br align="left"/><br/><br/><br/>>;"#, escaped_label)
}
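All three hunks above stop converting `\n` to `<br/>` by hand, which only works if the escaping helper now does that conversion itself. A minimal sketch of such a helper; this is an assumption about `dot::escape_html`'s new behavior, not something this diff confirms:

```rust
// Hypothetical stand-in for `dot::escape_html` after this change: HTML
// escaping and newline conversion in one place, so callers no longer chain
// `.replace('\n', ...)` themselves.
fn escape_html(s: &str) -> String {
    s.replace('&', "&amp;") // must run first so later escapes aren't re-escaped
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('\n', r#"<br align="left"/>"#)
}

fn main() {
    assert_eq!(escape_html("a<b\nc"), r#"a&lt;b<br align="left"/>c"#);
}
```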
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 37ec04b07..221105ac4 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -1,16 +1,20 @@
//! The virtual memory representation of the MIR interpreter.
+mod init_mask;
+mod provenance_map;
+#[cfg(test)]
+mod tests;
+
use std::borrow::Cow;
-use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::hash;
-use std::iter;
-use std::ops::{Deref, Range};
+use std::ops::Range;
use std::ptr;
+use either::{Left, Right};
+
use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
-use rustc_data_structures::sorted_map::SortedMap;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};
@@ -20,6 +24,10 @@ use super::{
UnsupportedOpInfo,
};
use crate::ty;
+use init_mask::*;
+use provenance_map::*;
+
+pub use init_mask::{InitChunk, InitChunkIter};
/// This type represents an Allocation in the Miri/CTFE core engine.
///
@@ -28,9 +36,9 @@ use crate::ty;
/// module provides higher-level access.
// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
-#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(Clone, Eq, PartialEq, TyEncodable, TyDecodable)]
#[derive(HashStable)]
-pub struct Allocation<Prov = AllocId, Extra = ()> {
+pub struct Allocation<Prov: Provenance = AllocId, Extra = ()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer.
bytes: Box<[u8]>,
@@ -95,27 +103,25 @@ impl hash::Hash for Allocation {
/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
-/// outer type and `TyS` is its inner type.
+/// outer type and `TyKind` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
-pub struct ConstAllocation<'tcx, Prov = AllocId, Extra = ()>(
- pub Interned<'tcx, Allocation<Prov, Extra>>,
-);
+pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);
impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- // This matches how `Allocation` is printed. We print it like this to
- // avoid having to update expected output in a lot of tests.
- write!(f, "{:?}", self.inner())
+ // The debug representation of this is very verbose and basically useless,
+ // so don't print it.
+ write!(f, "ConstAllocation {{ .. }}")
}
}
-impl<'tcx, Prov, Extra> ConstAllocation<'tcx, Prov, Extra> {
- pub fn inner(self) -> &'tcx Allocation<Prov, Extra> {
+impl<'tcx> ConstAllocation<'tcx> {
+ pub fn inner(self) -> &'tcx Allocation {
self.0.0
}
}
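The new `Debug` impl trades a recursive dump for a fixed placeholder, since interned allocations can be huge. A toy illustration of the pattern, with stand-in types:

```rust
use std::fmt;

// Stand-in for a type whose derived `Debug` output would be enormous.
struct ConstAllocation {
    bytes: Vec<u8>,
}

impl fmt::Debug for ConstAllocation {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Elide the payload entirely, as the diff does.
        let _ = &self.bytes;
        write!(f, "ConstAllocation {{ .. }}")
    }
}

fn main() {
    println!("{:?}", ConstAllocation { bytes: vec![0; 4096] });
}
```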
@@ -183,12 +189,21 @@ pub fn alloc_range(start: Size, size: Size) -> AllocRange {
AllocRange { start, size }
}
-impl AllocRange {
+impl From<Range<Size>> for AllocRange {
#[inline]
- pub fn from(r: Range<Size>) -> Self {
+ fn from(r: Range<Size>) -> Self {
alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
}
+}
+impl From<Range<usize>> for AllocRange {
+ #[inline]
+ fn from(r: Range<usize>) -> Self {
+ AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
+ }
+}
+
+impl AllocRange {
#[inline(always)]
pub fn end(self) -> Size {
self.start + self.size // This does overflow checking.
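Converting the inherent `from` into `From` impls means both `Size` and `usize` ranges now convert through the standard trait. A self-contained toy of the same shape (plain `u64` stands in for `Size`):

```rust
// A half-open byte range becomes a (start, size) pair; the subtraction is
// overflow-checked, matching the `Size` arithmetic noted in the diff.
#[derive(Debug, PartialEq)]
struct AllocRange {
    start: u64,
    size: u64,
}

impl From<std::ops::Range<u64>> for AllocRange {
    fn from(r: std::ops::Range<u64>) -> Self {
        AllocRange { start: r.start, size: r.end.checked_sub(r.start).unwrap() }
    }
}

fn main() {
    let r: AllocRange = (4u64..12).into();
    assert_eq!(r, AllocRange { start: 4, size: 8 });
}
```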
@@ -205,7 +220,7 @@ impl AllocRange {
}
// The constructors are all without extra; the extra gets added by a machine hook later.
-impl<Prov> Allocation<Prov> {
+impl<Prov: Provenance> Allocation<Prov> {
/// Creates an allocation initialized by the given bytes
pub fn from_bytes<'a>(
slice: impl Into<Cow<'a, [u8]>>,
@@ -263,7 +278,7 @@ impl<Prov> Allocation<Prov> {
impl Allocation {
/// Adjust allocation from the ones in tcx to a custom Machine instance
/// with a different Provenance and Extra type.
- pub fn adjust_from_tcx<Prov, Extra, Err>(
+ pub fn adjust_from_tcx<Prov: Provenance, Extra, Err>(
self,
cx: &impl HasDataLayout,
extra: Extra,
@@ -271,10 +286,10 @@ impl Allocation {
) -> Result<Allocation<Prov, Extra>, Err> {
// Compute new pointer provenance, which also adjusts the bytes.
let mut bytes = self.bytes;
- let mut new_provenance = Vec::with_capacity(self.provenance.0.len());
+ let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
let ptr_size = cx.data_layout().pointer_size.bytes_usize();
let endian = cx.data_layout().endian;
- for &(offset, alloc_id) in self.provenance.iter() {
+ for &(offset, alloc_id) in self.provenance.ptrs().iter() {
let idx = offset.bytes_usize();
let ptr_bytes = &mut bytes[idx..idx + ptr_size];
let bits = read_target_uint(endian, ptr_bytes).unwrap();
@@ -286,7 +301,7 @@ impl Allocation {
// Create allocation.
Ok(Allocation {
bytes,
- provenance: ProvenanceMap::from_presorted(new_provenance),
+ provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
init_mask: self.init_mask,
align: self.align,
mutability: self.mutability,
@@ -296,7 +311,7 @@ impl Allocation {
}
/// Raw accessors. Provide access to otherwise private bytes.
-impl<Prov, Extra> Allocation<Prov, Extra> {
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
pub fn len(&self) -> usize {
self.bytes.len()
}
@@ -349,9 +364,14 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
cx: &impl HasDataLayout,
range: AllocRange,
) -> AllocResult<&[u8]> {
- self.check_init(range)?;
+ self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
+ AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
+ access: range,
+ uninit: uninit_range,
+ }))
+ })?;
if !Prov::OFFSET_IS_ADDR {
- if self.range_has_provenance(cx, range) {
+ if !self.provenance.range_empty(range, cx) {
return Err(AllocError::ReadPointerAsBytes);
}
}
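The hunk above inlines what the deleted `check_init` helper used to do: a low-level query reports the first uninitialized subrange, and the caller wraps it into a rich access error. A self-contained toy of that two-layer shape (all names illustrative):

```rust
use std::ops::Range;

// Rich error carrying both the attempted access and the offending subrange.
#[derive(Debug)]
struct UninitAccess {
    access: Range<u64>,
    uninit: Range<u64>,
}

// Low-level check: find the first contiguous uninitialized subrange, if any.
fn is_range_initialized(mask: &[bool], r: Range<u64>) -> Result<(), Range<u64>> {
    match (r.start..r.end).find(|&i| !mask[i as usize]) {
        Some(s) => {
            let e = (s..r.end).find(|&i| mask[i as usize]).unwrap_or(r.end);
            Err(s..e)
        }
        None => Ok(()),
    }
}

fn main() {
    let mask = [true, false, false, true];
    let err = is_range_initialized(&mask, 0..4)
        .map_err(|uninit| UninitAccess { access: 0..4, uninit })
        .unwrap_err();
    assert_eq!(err.uninit, 1..3);
}
```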
@@ -370,7 +390,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<&mut [u8]> {
self.mark_init(range, true);
- self.clear_provenance(cx, range)?;
+ self.provenance.clear(range, cx)?;
Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
}
@@ -382,7 +402,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<*mut [u8]> {
self.mark_init(range, true);
- self.clear_provenance(cx, range)?;
+ self.provenance.clear(range, cx)?;
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
@@ -393,6 +413,15 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Reading and writing.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
+ /// Sets the init bit for the given range.
+ fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+ if range.size.bytes() == 0 {
+ return;
+ }
+ assert!(self.mutability == Mutability::Mut);
+ self.init_mask.set_range(range, is_init);
+ }
+
/// Reads a *non-ZST* scalar.
///
/// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
@@ -410,7 +439,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
read_provenance: bool,
) -> AllocResult<Scalar<Prov>> {
// First and foremost, if anything is uninit, bail.
- if self.is_init(range).is_err() {
+ if self.init_mask.is_range_initialized(range).is_err() {
return Err(AllocError::InvalidUninitBytes(None));
}
@@ -423,7 +452,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// When reading data with provenance, the easy case is finding provenance exactly where we
// are reading, then we can put data and provenance back together and return that.
- if let Some(&prov) = self.provenance.get(&range.start) {
+ if let Some(prov) = self.provenance.get_ptr(range.start) {
// Now we can return the bits, with their appropriate provenance.
let ptr = Pointer::new(prov, Size::from_bytes(bits));
return Ok(Scalar::from_pointer(ptr, cx));
@@ -431,10 +460,9 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// If we can work on pointers byte-wise, join the byte-wise provenances.
if Prov::OFFSET_IS_ADDR {
- let mut prov = self.offset_get_provenance(cx, range.start);
- for offset in 1..range.size.bytes() {
- let this_prov =
- self.offset_get_provenance(cx, range.start + Size::from_bytes(offset));
+ let mut prov = self.provenance.get(range.start, cx);
+ for offset in Size::from_bytes(1)..range.size {
+ let this_prov = self.provenance.get(range.start + offset, cx);
prov = Prov::join(prov, this_prov);
}
// Now use this provenance.
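The rewritten loop folds per-byte provenance through `Prov::join`. The diff doesn't show `join`'s body; one plausible semantics, agree or degrade to `None`, in a self-contained toy:

```rust
// Toy join: the result is `Some(p)` only if both sides agree on `p`; any
// mismatch or missing byte degrades the whole read to `None`.
fn join(a: Option<u32>, b: Option<u32>) -> Option<u32> {
    if a == b { a } else { None }
}

fn main() {
    let uniform = [Some(7), Some(7), Some(7), Some(7)];
    assert_eq!(uniform.into_iter().reduce(join).unwrap(), Some(7));
    let mixed = [Some(7), Some(8), Some(7), Some(7)];
    assert_eq!(mixed.into_iter().reduce(join).unwrap(), None);
}
```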
@@ -452,7 +480,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// Fallback path for when we cannot treat provenance bytewise or ignore it.
assert!(!Prov::OFFSET_IS_ADDR);
- if self.range_has_provenance(cx, range) {
+ if !self.provenance.range_empty(range, cx) {
return Err(AllocError::ReadPointerAsBytes);
}
// There is no provenance, we can just return the bits.
@@ -466,7 +494,6 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
- #[instrument(skip(self, cx), level = "debug")]
pub fn write_scalar(
&mut self,
cx: &impl HasDataLayout,
@@ -478,11 +505,11 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// `to_bits_or_ptr_internal` is the right method because we just want to store this data
// as-is into memory.
let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
- Err(val) => {
- let (provenance, offset) = val.into_parts();
+ Right(ptr) => {
+ let (provenance, offset) = ptr.into_parts();
(u128::from(offset.bytes()), Some(provenance))
}
- Ok(data) => (data, None),
+ Left(data) => (data, None),
};
let endian = cx.data_layout().endian;
@@ -491,7 +518,8 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
// See if we have to also store some provenance.
if let Some(provenance) = provenance {
- self.provenance.0.insert(range.start, provenance);
+ assert_eq!(range.size, cx.data_layout().pointer_size);
+ self.provenance.insert_ptr(range.start, provenance, cx);
}
Ok(())
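The `Ok`/`Err` → `Left`/`Right` switch reflects `to_bits_or_ptr_internal` now returning `either::Either` (imported at the top of this file): neither outcome is an error, so `Result` was the wrong vocabulary. A minimal demonstration of the `either` crate's API as used here; the pointer payload is a toy stand-in:

```rust
// Requires the `either` crate. `Left` carries plain bits, `Right` a pointer;
// the pointer payload here is a toy (provenance id, offset) pair.
use either::{Either, Left, Right};

fn to_bits_or_ptr(val: u128, is_ptr: bool) -> Either<u128, (u64, u64)> {
    if is_ptr { Right((1, val as u64)) } else { Left(val) }
}

fn main() {
    match to_bits_or_ptr(42, false) {
        Left(bits) => println!("raw bits: {bits}"),
        Right((prov, offset)) => println!("pointer: provenance {prov}, offset {offset}"),
    }
}
```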
@@ -500,750 +528,25 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Write "uninit" to the given memory range.
pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
self.mark_init(range, false);
- self.clear_provenance(cx, range)?;
+ self.provenance.clear(range, cx)?;
return Ok(());
}
-}
-
-/// Provenance.
-impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
- /// Returns all provenance overlapping with the given pointer-offset pair.
- fn range_get_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
- // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
- // the beginning of this range.
- let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
- self.provenance.range(Size::from_bytes(start)..range.end())
- }
-
- /// Get the provenance of a single byte.
- fn offset_get_provenance(&self, cx: &impl HasDataLayout, offset: Size) -> Option<Prov> {
- let prov = self.range_get_provenance(cx, alloc_range(offset, Size::from_bytes(1)));
- assert!(prov.len() <= 1);
- prov.first().map(|(_offset, prov)| *prov)
- }
-
- /// Returns whether this allocation has provenance overlapping with the given range.
- ///
- /// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat
- /// limit access to provenance outside of the `Allocation` abstraction.
- ///
- pub fn range_has_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
- !self.range_get_provenance(cx, range).is_empty()
- }
-
- /// Removes all provenance inside the given range.
- /// If there is provenance overlapping with the edges, it
- /// is removed as well *and* the bytes it covers are marked as
- /// uninitialized. This is a somewhat odd "spooky action at a distance",
- /// but it allows strictly more code to run than if we would just error
- /// immediately in that case.
- fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
- where
- Prov: Provenance,
- {
- // Find the start and end of the given range and its outermost provenance.
- let (first, last) = {
- // Find all provenance overlapping the given range.
- let provenance = self.range_get_provenance(cx, range);
- if provenance.is_empty() {
- return Ok(());
- }
-
- (
- provenance.first().unwrap().0,
- provenance.last().unwrap().0 + cx.data_layout().pointer_size,
- )
- };
- let start = range.start;
- let end = range.end();
-
- // We need to handle clearing the provenance from parts of a pointer.
- // FIXME: Miri should preserve partial provenance; see
- // https://github.com/rust-lang/miri/issues/2181.
- if first < start {
- if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
- return Err(AllocError::PartialPointerOverwrite(first));
- }
- warn!(
- "Partial pointer overwrite! De-initializing memory at offsets {first:?}..{start:?}."
- );
- self.init_mask.set_range(first, start, false);
- }
- if last > end {
- if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
- return Err(AllocError::PartialPointerOverwrite(
- last - cx.data_layout().pointer_size,
- ));
- }
- warn!(
- "Partial pointer overwrite! De-initializing memory at offsets {end:?}..{last:?}."
- );
- self.init_mask.set_range(end, last, false);
- }
-
- // Forget all the provenance.
- // Since provenance do not overlap, we know that removing until `last` (exclusive) is fine,
- // i.e., this will not remove any other provenance just after the ones we care about.
- self.provenance.0.remove_range(first..last);
-
- Ok(())
- }
-}
-
-/// Stores the provenance information of pointers stored in memory.
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-pub struct ProvenanceMap<Prov = AllocId>(SortedMap<Size, Prov>);
-
-impl<Prov> ProvenanceMap<Prov> {
- pub fn new() -> Self {
- ProvenanceMap(SortedMap::new())
- }
-
- // The caller must guarantee that the given provenance list is already sorted
- // by address and contain no duplicates.
- pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
- ProvenanceMap(SortedMap::from_presorted_elements(r))
- }
-}
-
-impl<Prov> Deref for ProvenanceMap<Prov> {
- type Target = SortedMap<Size, Prov>;
-
- fn deref(&self) -> &Self::Target {
- &self.0
- }
-}
-
-/// A partial, owned list of provenance to transfer into another allocation.
-///
-/// Offsets are already adjusted to the destination allocation.
-pub struct AllocationProvenance<Prov> {
- dest_provenance: Vec<(Size, Prov)>,
-}
-
-impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
- pub fn prepare_provenance_copy(
- &self,
- cx: &impl HasDataLayout,
- src: AllocRange,
- dest: Size,
- count: u64,
- ) -> AllocationProvenance<Prov> {
- let provenance = self.range_get_provenance(cx, src);
- if provenance.is_empty() {
- return AllocationProvenance { dest_provenance: Vec::new() };
- }
-
- let size = src.size;
- let mut new_provenance = Vec::with_capacity(provenance.len() * (count as usize));
-
- // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
- // is mostly filled with redundant information since it's just N copies of the same `Prov`s
- // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
- // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
- // the right sequence of provenance for all N copies.
- for i in 0..count {
- new_provenance.extend(provenance.iter().map(|&(offset, reloc)| {
- // compute offset for current repetition
- let dest_offset = dest + size * i; // `Size` operations
- (
- // shift offsets from source allocation to destination allocation
- (offset + dest_offset) - src.start, // `Size` operations
- reloc,
- )
- }));
- }
-
- AllocationProvenance { dest_provenance: new_provenance }
- }
- /// Applies a provenance copy.
- /// The affected range, as defined in the parameters to `prepare_provenance_copy` is expected
+ /// Applies a previously prepared provenance copy.
+ /// The affected range, as defined in the parameters to `provenance().prepare_copy` is expected
/// to be clear of provenance.
///
/// This is dangerous to use as it can violate internal `Allocation` invariants!
/// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
- pub fn mark_provenance_range(&mut self, provenance: AllocationProvenance<Prov>) {
- self.provenance.0.insert_presorted(provenance.dest_provenance);
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Uninitialized byte tracking
-////////////////////////////////////////////////////////////////////////////////
-
-type Block = u64;
-
-/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
-/// is initialized. If it is `false` the byte is uninitialized.
-// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
-// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
-#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
-#[derive(HashStable)]
-pub struct InitMask {
- blocks: Vec<Block>,
- len: Size,
-}
-
-// Const allocations are only hashed for interning. However, they can be large, making the hashing
-// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
-// big buffers like the allocation's init mask. We can partially hash some fields when they're
-// large.
-impl hash::Hash for InitMask {
- fn hash<H: hash::Hasher>(&self, state: &mut H) {
- const MAX_BLOCKS_TO_HASH: usize = MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
- const MAX_BLOCKS_LEN: usize = MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
-
- // Partially hash the `blocks` buffer when it is large. To limit collisions with common
- // prefixes and suffixes, we hash the length and some slices of the buffer.
- let block_count = self.blocks.len();
- if block_count > MAX_BLOCKS_LEN {
- // Hash the buffer's length.
- block_count.hash(state);
-
- // And its head and tail.
- self.blocks[..MAX_BLOCKS_TO_HASH].hash(state);
- self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state);
- } else {
- self.blocks.hash(state);
- }
-
- // Hash the other fields as usual.
- self.len.hash(state);
- }
-}
-
-impl InitMask {
- pub const BLOCK_SIZE: u64 = 64;
-
- #[inline]
- fn bit_index(bits: Size) -> (usize, usize) {
- // BLOCK_SIZE is the number of bits that can fit in a `Block`.
- // Each bit in a `Block` represents the initialization state of one byte of an allocation,
- // so we use `.bytes()` here.
- let bits = bits.bytes();
- let a = bits / InitMask::BLOCK_SIZE;
- let b = bits % InitMask::BLOCK_SIZE;
- (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
- }
-
- #[inline]
- fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
- let block = block.try_into().ok().unwrap();
- let bit = bit.try_into().ok().unwrap();
- Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
- }
-
- pub fn new(size: Size, state: bool) -> Self {
- let mut m = InitMask { blocks: vec![], len: Size::ZERO };
- m.grow(size, state);
- m
- }
-
- pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
- let len = self.len;
- if end > len {
- self.grow(end - len, new_state);
- }
- self.set_range_inbounds(start, end, new_state);
- }
-
- pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
- let (blocka, bita) = Self::bit_index(start);
- let (blockb, bitb) = Self::bit_index(end);
- if blocka == blockb {
- // First set all bits except the first `bita`,
- // then unset the last `64 - bitb` bits.
- let range = if bitb == 0 {
- u64::MAX << bita
- } else {
- (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
- };
- if new_state {
- self.blocks[blocka] |= range;
- } else {
- self.blocks[blocka] &= !range;
- }
- return;
- }
- // across block boundaries
- if new_state {
- // Set `bita..64` to `1`.
- self.blocks[blocka] |= u64::MAX << bita;
- // Set `0..bitb` to `1`.
- if bitb != 0 {
- self.blocks[blockb] |= u64::MAX >> (64 - bitb);
- }
- // Fill in all the other blocks (much faster than one bit at a time).
- for block in (blocka + 1)..blockb {
- self.blocks[block] = u64::MAX;
- }
- } else {
- // Set `bita..64` to `0`.
- self.blocks[blocka] &= !(u64::MAX << bita);
- // Set `0..bitb` to `0`.
- if bitb != 0 {
- self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
- }
- // Fill in all the other blocks (much faster than one bit at a time).
- for block in (blocka + 1)..blockb {
- self.blocks[block] = 0;
- }
- }
- }
-
- #[inline]
- pub fn get(&self, i: Size) -> bool {
- let (block, bit) = Self::bit_index(i);
- (self.blocks[block] & (1 << bit)) != 0
+ pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
+ self.provenance.apply_copy(copy)
}
- #[inline]
- pub fn set(&mut self, i: Size, new_state: bool) {
- let (block, bit) = Self::bit_index(i);
- self.set_bit(block, bit, new_state);
- }
-
- #[inline]
- fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
- if new_state {
- self.blocks[block] |= 1 << bit;
- } else {
- self.blocks[block] &= !(1 << bit);
- }
- }
-
- pub fn grow(&mut self, amount: Size, new_state: bool) {
- if amount.bytes() == 0 {
- return;
- }
- let unused_trailing_bits =
- u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
- if amount.bytes() > unused_trailing_bits {
- let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
- self.blocks.extend(
- // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
- iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
- );
- }
- let start = self.len;
- self.len += amount;
- self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
- }
-
- /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init.
- fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
- /// A fast implementation of `find_bit`,
- /// which skips over an entire block at a time if it's all 0s (resp. 1s),
- /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
- ///
- /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
- /// and with the least significant bit (and lowest block) first:
- /// ```text
- /// 00000000|00000000
- /// ^ ^ ^ ^
- /// index: 0 7 8 15
- /// ```
- /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
- fn find_bit_fast(
- init_mask: &InitMask,
- start: Size,
- end: Size,
- is_init: bool,
- ) -> Option<Size> {
- /// Search one block, returning the index of the first bit equal to `is_init`.
- fn search_block(
- bits: Block,
- block: usize,
- start_bit: usize,
- is_init: bool,
- ) -> Option<Size> {
- // For the following examples, assume this function was called with:
- // bits = 0b00111011
- // start_bit = 3
- // is_init = false
- // Note that, for the examples in this function, the most significant bit is written first,
- // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.
-
- // Invert bits so we're always looking for the first set bit.
- // ! 0b00111011
- // bits = 0b11000100
- let bits = if is_init { bits } else { !bits };
- // Mask off unused start bits.
- // 0b11000100
- // & 0b11111000
- // bits = 0b11000000
- let bits = bits & (!0 << start_bit);
- // Find set bit, if any.
- // bit = trailing_zeros(0b11000000)
- // bit = 6
- if bits == 0 {
- None
- } else {
- let bit = bits.trailing_zeros();
- Some(InitMask::size_from_bit_index(block, bit))
- }
- }
-
- if start >= end {
- return None;
- }
-
- // Convert `start` and `end` to block indexes and bit indexes within each block.
- // We must convert `end` to an inclusive bound to handle block boundaries correctly.
- //
- // For example:
- //
- // (a) 00000000|00000000 (b) 00000000|
- // ^~~~~~~~~~~^ ^~~~~~~~~^
- // start end start end
- //
- // In both cases, the block index of `end` is 1.
- // But we do want to search block 1 in (a), and we don't in (b).
- //
- // We subtract 1 from both end positions to make them inclusive:
- //
- // (a) 00000000|00000000 (b) 00000000|
- // ^~~~~~~~~~^ ^~~~~~~^
- // start end_inclusive start end_inclusive
- //
- // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
- // This provides the desired behavior of searching blocks 0 and 1 for (a),
- // and searching only block 0 for (b).
- // There is no concern of overflows since we checked for `start >= end` above.
- let (start_block, start_bit) = InitMask::bit_index(start);
- let end_inclusive = Size::from_bytes(end.bytes() - 1);
- let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
-
- // Handle first block: need to skip `start_bit` bits.
- //
- // We need to handle the first block separately,
- // because there may be bits earlier in the block that should be ignored,
- // such as the bit marked (1) in this example:
- //
- // (1)
- // -|------
- // (c) 01000000|00000000|00000001
- // ^~~~~~~~~~~~~~~~~~^
- // start end
- if let Some(i) =
- search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
- {
- // If the range is less than a block, we may find a matching bit after `end`.
- //
- // For example, we shouldn't successfully find bit (2), because it's after `end`:
- //
- // (2)
- // -------|
- // (d) 00000001|00000000|00000001
- // ^~~~~^
- // start end
- //
- // An alternative would be to mask off end bits in the same way as we do for start bits,
- // but performing this check afterwards is faster and simpler to implement.
- if i < end {
- return Some(i);
- } else {
- return None;
- }
- }
-
- // Handle remaining blocks.
- //
- // We can skip over an entire block at once if it's all 0s (resp. 1s).
- // The block marked (3) in this example is the first block that will be handled by this loop,
- // and it will be skipped for that reason:
- //
- // (3)
- // --------
- // (e) 01000000|00000000|00000001
- // ^~~~~~~~~~~~~~~~~~^
- // start end
- if start_block < end_block_inclusive {
- // This loop is written in a specific way for performance.
- // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
- // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
- // because both alternatives result in significantly worse codegen.
- // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
- // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
- for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
- .iter()
- .zip(start_block + 1..)
- {
- if let Some(i) = search_block(bits, block, 0, is_init) {
- // If this is the last block, we may find a matching bit after `end`.
- //
- // For example, we shouldn't successfully find bit (4), because it's after `end`:
- //
- // (4)
- // -------|
- // (f) 00000001|00000000|00000001
- // ^~~~~~~~~~~~~~~~~~^
- // start end
- //
- // As above with example (d), we could handle the end block separately and mask off end bits,
- // but unconditionally searching an entire block at once and performing this check afterwards
- // is faster and much simpler to implement.
- if i < end {
- return Some(i);
- } else {
- return None;
- }
- }
- }
- }
-
- None
- }
-
- #[cfg_attr(not(debug_assertions), allow(dead_code))]
- fn find_bit_slow(
- init_mask: &InitMask,
- start: Size,
- end: Size,
- is_init: bool,
- ) -> Option<Size> {
- (start..end).find(|&i| init_mask.get(i) == is_init)
- }
-
- let result = find_bit_fast(self, start, end, is_init);
-
- debug_assert_eq!(
- result,
- find_bit_slow(self, start, end, is_init),
- "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
- start,
- end,
- is_init,
- self
- );
-
- result
- }
-}
-
-/// A contiguous chunk of initialized or uninitialized memory.
-pub enum InitChunk {
- Init(Range<Size>),
- Uninit(Range<Size>),
-}
-
-impl InitChunk {
- #[inline]
- pub fn is_init(&self) -> bool {
- match self {
- Self::Init(_) => true,
- Self::Uninit(_) => false,
- }
- }
-
- #[inline]
- pub fn range(&self) -> Range<Size> {
- match self {
- Self::Init(r) => r.clone(),
- Self::Uninit(r) => r.clone(),
- }
- }
-}
-
-impl InitMask {
- /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
- ///
- /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
- /// indexes for the first contiguous span of the uninitialized access.
- #[inline]
- pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
- if end > self.len {
- return Err(AllocRange::from(self.len..end));
- }
-
- let uninit_start = self.find_bit(start, end, false);
-
- match uninit_start {
- Some(uninit_start) => {
- let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
- Err(AllocRange::from(uninit_start..uninit_end))
- }
- None => Ok(()),
- }
- }
-
- /// Returns an iterator, yielding a range of byte indexes for each contiguous region
- /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
- ///
- /// The iterator guarantees the following:
- /// - Chunks are nonempty.
- /// - Chunks are adjacent (each range's start is equal to the previous range's end).
- /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
- /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
- #[inline]
- pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
- assert!(end <= self.len);
-
- let is_init = if start < end {
- self.get(start)
- } else {
- // `start..end` is empty: there are no chunks, so use some arbitrary value
- false
- };
-
- InitChunkIter { init_mask: self, is_init, start, end }
- }
-}
-
-/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
-#[derive(Clone)]
-pub struct InitChunkIter<'a> {
- init_mask: &'a InitMask,
- /// Whether the next chunk we will return is initialized.
- /// If there are no more chunks, contains some arbitrary value.
- is_init: bool,
- /// The current byte index into `init_mask`.
- start: Size,
- /// The end byte index into `init_mask`.
- end: Size,
-}
-
-impl<'a> Iterator for InitChunkIter<'a> {
- type Item = InitChunk;
-
- #[inline]
- fn next(&mut self) -> Option<Self::Item> {
- if self.start >= self.end {
- return None;
- }
-
- let end_of_chunk =
- self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
- let range = self.start..end_of_chunk;
-
- let ret =
- Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
-
- self.is_init = !self.is_init;
- self.start = end_of_chunk;
-
- ret
- }
-}
-
-/// Uninitialized bytes.
-impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
- /// Checks whether the given range is entirely initialized.
- ///
- /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
- /// indexes of the first contiguous uninitialized access.
- fn is_init(&self, range: AllocRange) -> Result<(), AllocRange> {
- self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
- }
-
- /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
- /// error which will report the first range of bytes which is uninitialized.
- fn check_init(&self, range: AllocRange) -> AllocResult {
- self.is_init(range).map_err(|uninit_range| {
- AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
- access: range,
- uninit: uninit_range,
- }))
- })
- }
-
- fn mark_init(&mut self, range: AllocRange, is_init: bool) {
- if range.size.bytes() == 0 {
- return;
- }
- assert!(self.mutability == Mutability::Mut);
- self.init_mask.set_range(range.start, range.end(), is_init);
- }
-}
-
-/// Run-length encoding of the uninit mask.
-/// Used to copy parts of a mask multiple times to another allocation.
-pub struct InitMaskCompressed {
- /// Whether the first range is initialized.
- initial: bool,
- /// The lengths of ranges that are run-length encoded.
- /// The initialization state of the ranges alternate starting with `initial`.
- ranges: smallvec::SmallVec<[u64; 1]>,
-}
-
-impl InitMaskCompressed {
- pub fn no_bytes_init(&self) -> bool {
- // The `ranges` are run-length encoded and of alternating initialization state.
- // So if `ranges.len() > 1` then the second block is an initialized range.
- !self.initial && self.ranges.len() == 1
- }
-}
-
-/// Transferring the initialization mask to other allocations.
-impl<Prov, Extra> Allocation<Prov, Extra> {
- /// Creates a run-length encoding of the initialization mask; panics if range is empty.
- ///
- /// This is essentially a more space-efficient version of
- /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
- pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
- // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
- // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
- // the source and write it to the destination. Even if we optimized the memory accesses,
- // we'd be doing all of this `repeat` times.
- // Therefore we precompute a compressed version of the initialization mask of the source value and
- // then write it back `repeat` times without computing any more information from the source.
-
- // A precomputed cache for ranges of initialized / uninitialized bits
- // 0000010010001110 will become
- // `[5, 1, 2, 1, 3, 3, 1]`,
- // where each element toggles the state.
-
- let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
-
- let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();
-
- let initial = chunks.peek().expect("range should be nonempty").is_init();
-
- // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
- for chunk in chunks {
- let len = chunk.range().end.bytes() - chunk.range().start.bytes();
- ranges.push(len);
- }
-
- InitMaskCompressed { ranges, initial }
- }
-
- /// Applies multiple instances of the run-length encoding to the initialization mask.
+ /// Applies a previously prepared copy of the init mask.
///
/// This is dangerous to use as it can violate internal `Allocation` invariants!
/// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
- pub fn mark_compressed_init_range(
- &mut self,
- defined: &InitMaskCompressed,
- range: AllocRange,
- repeat: u64,
- ) {
- // An optimization where we can just overwrite an entire range of initialization
- // bits if they are going to be uniformly `1` or `0`.
- if defined.ranges.len() <= 1 {
- self.init_mask.set_range_inbounds(
- range.start,
- range.start + range.size * repeat, // `Size` operations
- defined.initial,
- );
- return;
- }
-
- for mut j in 0..repeat {
- j *= range.size.bytes();
- j += range.start.bytes();
- let mut cur = defined.initial;
- for range in &defined.ranges {
- let old_j = j;
- j += range;
- self.init_mask.set_range_inbounds(
- Size::from_bytes(old_j),
- Size::from_bytes(j),
- cur,
- );
- cur = !cur;
- }
- }
+ pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
+ self.init_mask.apply_copy(copy, range, repeat)
}
}
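The ~750 lines deleted above move, largely verbatim, into the two new files below; `Allocation` keeps only the thin `*_apply_copy` facades. The heart of the init mask is its block/bit split, where one `u64` block tracks 64 bytes. A tiny self-contained demo of that indexing (names illustrative):

```rust
// Each 64-bit block tracks the init state of 64 bytes; a byte offset splits
// into a block index and a bit index within that block.
const BLOCK_SIZE: u64 = 64;

fn bit_index(offset: u64) -> (usize, usize) {
    ((offset / BLOCK_SIZE) as usize, (offset % BLOCK_SIZE) as usize)
}

fn main() {
    assert_eq!(bit_index(70), (1, 6)); // byte 70 lives in block 1, bit 6

    // Mark byte 70 as initialized, then read it back.
    let mut blocks = vec![0u64; 2];
    let (block, bit) = bit_index(70);
    blocks[block] |= 1 << bit;
    assert!(blocks[block] & (1 << bit) != 0);
}
```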
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
new file mode 100644
index 000000000..82e9a961a
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
@@ -0,0 +1,530 @@
+use std::hash;
+use std::iter;
+use std::ops::Range;
+
+use rustc_target::abi::Size;
+
+use super::AllocRange;
+
+type Block = u64;
+
+/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
+/// is initialized. If it is `false` the byte is uninitialized.
+// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
+// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct InitMask {
+ blocks: Vec<Block>,
+ len: Size,
+}
+
+// Const allocations are only hashed for interning. However, they can be large, making the hashing
+// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
+// big buffers like the allocation's init mask. We can partially hash some fields when they're
+// large.
+impl hash::Hash for InitMask {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
+ const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
+
+ // Partially hash the `blocks` buffer when it is large. To limit collisions with common
+ // prefixes and suffixes, we hash the length and some slices of the buffer.
+ let block_count = self.blocks.len();
+ if block_count > MAX_BLOCKS_LEN {
+ // Hash the buffer's length.
+ block_count.hash(state);
+
+ // And its head and tail.
+ self.blocks[..MAX_BLOCKS_TO_HASH].hash(state);
+ self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state);
+ } else {
+ self.blocks.hash(state);
+ }
+
+ // Hash the other fields as usual.
+ self.len.hash(state);
+ }
+}
+
+impl InitMask {
+ pub const BLOCK_SIZE: u64 = 64;
+
+ pub fn new(size: Size, state: bool) -> Self {
+ let mut m = InitMask { blocks: vec![], len: Size::ZERO };
+ m.grow(size, state);
+ m
+ }
+
+ #[inline]
+ fn bit_index(bits: Size) -> (usize, usize) {
+ // BLOCK_SIZE is the number of bits that can fit in a `Block`.
+ // Each bit in a `Block` represents the initialization state of one byte of an allocation,
+ // so we use `.bytes()` here.
+ let bits = bits.bytes();
+ let a = bits / InitMask::BLOCK_SIZE;
+ let b = bits % InitMask::BLOCK_SIZE;
+ (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
+ }
+
+ #[inline]
+ fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
+ let block = block.try_into().ok().unwrap();
+ let bit = bit.try_into().ok().unwrap();
+ Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+ }
+
+ /// Checks whether the `range` is entirely initialized.
+ ///
+ /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+ /// indexes for the first contiguous span of the uninitialized access.
+ #[inline]
+ pub fn is_range_initialized(&self, range: AllocRange) -> Result<(), AllocRange> {
+ let end = range.end();
+ if end > self.len {
+ return Err(AllocRange::from(self.len..end));
+ }
+
+ let uninit_start = self.find_bit(range.start, end, false);
+
+ match uninit_start {
+ Some(uninit_start) => {
+ let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
+ Err(AllocRange::from(uninit_start..uninit_end))
+ }
+ None => Ok(()),
+ }
+ }
+
+ pub fn set_range(&mut self, range: AllocRange, new_state: bool) {
+ let end = range.end();
+ let len = self.len;
+ if end > len {
+ self.grow(end - len, new_state);
+ }
+ self.set_range_inbounds(range.start, end, new_state);
+ }
+
+ fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
+ let (blocka, bita) = Self::bit_index(start);
+ let (blockb, bitb) = Self::bit_index(end);
+ if blocka == blockb {
+ // First set all bits except the first `bita`,
+ // then unset the last `64 - bitb` bits.
+ let range = if bitb == 0 {
+ u64::MAX << bita
+ } else {
+ (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
+ };
+ if new_state {
+ self.blocks[blocka] |= range;
+ } else {
+ self.blocks[blocka] &= !range;
+ }
+ return;
+ }
+ // across block boundaries
+ if new_state {
+ // Set `bita..64` to `1`.
+ self.blocks[blocka] |= u64::MAX << bita;
+ // Set `0..bitb` to `1`.
+ if bitb != 0 {
+ self.blocks[blockb] |= u64::MAX >> (64 - bitb);
+ }
+ // Fill in all the other blocks (much faster than one bit at a time).
+ for block in (blocka + 1)..blockb {
+ self.blocks[block] = u64::MAX;
+ }
+ } else {
+ // Set `bita..64` to `0`.
+ self.blocks[blocka] &= !(u64::MAX << bita);
+ // Set `0..bitb` to `0`.
+ if bitb != 0 {
+ self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
+ }
+ // Fill in all the other blocks (much faster than one bit at a time).
+ for block in (blocka + 1)..blockb {
+ self.blocks[block] = 0;
+ }
+ }
+ }
+
+ #[inline]
+ pub fn get(&self, i: Size) -> bool {
+ let (block, bit) = Self::bit_index(i);
+ (self.blocks[block] & (1 << bit)) != 0
+ }
+
+ fn grow(&mut self, amount: Size, new_state: bool) {
+ if amount.bytes() == 0 {
+ return;
+ }
+ let unused_trailing_bits =
+ u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
+ if amount.bytes() > unused_trailing_bits {
+ let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
+ self.blocks.extend(
+ // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
+ iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
+ );
+ }
+ let start = self.len;
+ self.len += amount;
+ self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
+ }
+
+ /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init.
+ fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
+ /// A fast implementation of `find_bit`,
+ /// which skips over an entire block at a time if it's all 0s (resp. 1s),
+ /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
+ ///
+ /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
+ /// and with the least significant bit (and lowest block) first:
+ /// ```text
+ /// 00000000|00000000
+ /// ^ ^ ^ ^
+ /// index: 0 7 8 15
+ /// ```
+ /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
+ fn find_bit_fast(
+ init_mask: &InitMask,
+ start: Size,
+ end: Size,
+ is_init: bool,
+ ) -> Option<Size> {
+ /// Search one block, returning the index of the first bit equal to `is_init`.
+ fn search_block(
+ bits: Block,
+ block: usize,
+ start_bit: usize,
+ is_init: bool,
+ ) -> Option<Size> {
+ // For the following examples, assume this function was called with:
+ // bits = 0b00111011
+ // start_bit = 3
+ // is_init = false
+ // Note that, for the examples in this function, the most significant bit is written first,
+ // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.
+
+ // Invert bits so we're always looking for the first set bit.
+ // ! 0b00111011
+ // bits = 0b11000100
+ let bits = if is_init { bits } else { !bits };
+ // Mask off unused start bits.
+ // 0b11000100
+ // & 0b11111000
+ // bits = 0b11000000
+ let bits = bits & (!0 << start_bit);
+ // Find set bit, if any.
+ // bit = trailing_zeros(0b11000000)
+ // bit = 6
+ if bits == 0 {
+ None
+ } else {
+ let bit = bits.trailing_zeros();
+ Some(InitMask::size_from_bit_index(block, bit))
+ }
+ }
+
+ if start >= end {
+ return None;
+ }
+
+ // Convert `start` and `end` to block indexes and bit indexes within each block.
+ // We must convert `end` to an inclusive bound to handle block boundaries correctly.
+ //
+ // For example:
+ //
+ // (a) 00000000|00000000 (b) 00000000|
+ // ^~~~~~~~~~~^ ^~~~~~~~~^
+ // start end start end
+ //
+ // In both cases, the block index of `end` is 1.
+ // But we do want to search block 1 in (a), and we don't in (b).
+ //
+ // We subtract 1 from both end positions to make them inclusive:
+ //
+ // (a) 00000000|00000000 (b) 00000000|
+ // ^~~~~~~~~~^ ^~~~~~~^
+ // start end_inclusive start end_inclusive
+ //
+ // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
+ // This provides the desired behavior of searching blocks 0 and 1 for (a),
+ // and searching only block 0 for (b).
+ // There is no concern of overflows since we checked for `start >= end` above.
+ let (start_block, start_bit) = InitMask::bit_index(start);
+ let end_inclusive = Size::from_bytes(end.bytes() - 1);
+ let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+
+ // Handle first block: need to skip `start_bit` bits.
+ //
+ // We need to handle the first block separately,
+ // because there may be bits earlier in the block that should be ignored,
+ // such as the bit marked (1) in this example:
+ //
+ // (1)
+ // -|------
+ // (c) 01000000|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ if let Some(i) =
+ search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
+ {
+ // If the range is less than a block, we may find a matching bit after `end`.
+ //
+ // For example, we shouldn't successfully find bit (2), because it's after `end`:
+ //
+ // (2)
+ // -------|
+ // (d) 00000001|00000000|00000001
+ // ^~~~~^
+ // start end
+ //
+ // An alternative would be to mask off end bits in the same way as we do for start bits,
+ // but performing this check afterwards is faster and simpler to implement.
+ if i < end {
+ return Some(i);
+ } else {
+ return None;
+ }
+ }
+
+ // Handle remaining blocks.
+ //
+ // We can skip over an entire block at once if it's all 0s (resp. 1s).
+ // The block marked (3) in this example is the first block that will be handled by this loop,
+ // and it will be skipped for that reason:
+ //
+ // (3)
+ // --------
+ // (e) 01000000|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ if start_block < end_block_inclusive {
+ // This loop is written in a specific way for performance.
+ // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
+ // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
+ // because both alternatives result in significantly worse codegen.
+ // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
+ // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
+ for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
+ .iter()
+ .zip(start_block + 1..)
+ {
+ if let Some(i) = search_block(bits, block, 0, is_init) {
+ // If this is the last block, we may find a matching bit after `end`.
+ //
+ // For example, we shouldn't successfully find bit (4), because it's after `end`:
+ //
+ // (4)
+ // -------|
+ // (f) 00000001|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ //
+ // As above with example (d), we could handle the end block separately and mask off end bits,
+ // but unconditionally searching an entire block at once and performing this check afterwards
+ // is faster and much simpler to implement.
+ if i < end {
+ return Some(i);
+ } else {
+ return None;
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ #[cfg_attr(not(debug_assertions), allow(dead_code))]
+ fn find_bit_slow(
+ init_mask: &InitMask,
+ start: Size,
+ end: Size,
+ is_init: bool,
+ ) -> Option<Size> {
+ (start..end).find(|&i| init_mask.get(i) == is_init)
+ }
+
+ let result = find_bit_fast(self, start, end, is_init);
+
+ debug_assert_eq!(
+ result,
+ find_bit_slow(self, start, end, is_init),
+ "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
+ start,
+ end,
+ is_init,
+ self
+ );
+
+ result
+ }
+}
+
+/// A contiguous chunk of initialized or uninitialized memory.
+pub enum InitChunk {
+ Init(Range<Size>),
+ Uninit(Range<Size>),
+}
+
+impl InitChunk {
+ #[inline]
+ pub fn is_init(&self) -> bool {
+ match self {
+ Self::Init(_) => true,
+ Self::Uninit(_) => false,
+ }
+ }
+
+ #[inline]
+ pub fn range(&self) -> Range<Size> {
+ match self {
+ Self::Init(r) => r.clone(),
+ Self::Uninit(r) => r.clone(),
+ }
+ }
+}
+
+impl InitMask {
+ /// Returns an iterator, yielding a range of byte indexes for each contiguous region
+ /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
+ ///
+ /// The iterator guarantees the following:
+ /// - Chunks are nonempty.
+ /// - Chunks are adjacent (each range's start is equal to the previous range's end).
+ /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
+ /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
+ #[inline]
+ pub fn range_as_init_chunks(&self, range: AllocRange) -> InitChunkIter<'_> {
+ let start = range.start;
+ let end = range.end();
+ assert!(end <= self.len);
+
+ let is_init = if start < end {
+ self.get(start)
+ } else {
+ // `start..end` is empty: there are no chunks, so use some arbitrary value
+ false
+ };
+
+ InitChunkIter { init_mask: self, is_init, start, end }
+ }
+}
+
+/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
+#[derive(Clone)]
+pub struct InitChunkIter<'a> {
+ init_mask: &'a InitMask,
+ /// Whether the next chunk we will return is initialized.
+ /// If there are no more chunks, contains some arbitrary value.
+ is_init: bool,
+ /// The current byte index into `init_mask`.
+ start: Size,
+ /// The end byte index into `init_mask`.
+ end: Size,
+}
+
+impl<'a> Iterator for InitChunkIter<'a> {
+ type Item = InitChunk;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start >= self.end {
+ return None;
+ }
+
+ let end_of_chunk =
+ self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+ let range = self.start..end_of_chunk;
+
+ let ret =
+ Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
+
+ self.is_init = !self.is_init;
+ self.start = end_of_chunk;
+
+ ret
+ }
+}
+
+/// Run-length encoding of the uninit mask.
+/// Used to copy parts of a mask multiple times to another allocation.
+pub struct InitCopy {
+ /// Whether the first range is initialized.
+ initial: bool,
+ /// The lengths of ranges that are run-length encoded.
+ /// The initialization state of the ranges alternate starting with `initial`.
+ ranges: smallvec::SmallVec<[u64; 1]>,
+}
+
+impl InitCopy {
+ pub fn no_bytes_init(&self) -> bool {
+ // The `ranges` are run-length encoded and of alternating initialization state.
+ // So if `ranges.len() > 1` then the second block is an initialized range.
+ !self.initial && self.ranges.len() == 1
+ }
+}
+
+/// Transferring the initialization mask to other allocations.
+impl InitMask {
+ /// Creates a run-length encoding of the initialization mask; panics if range is empty.
+ ///
+ /// This is essentially a more space-efficient version of
+ /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
+ pub fn prepare_copy(&self, range: AllocRange) -> InitCopy {
+ // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
+ // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
+ // the source and write it to the destination. Even if we optimized the memory accesses,
+ // we'd be doing all of this `repeat` times.
+ // Therefore we precompute a compressed version of the initialization mask of the source value and
+ // then write it back `repeat` times without computing any more information from the source.
+
+ // A precomputed cache for ranges of initialized / uninitialized bits
+ // 0000010010001110 will become
+ // `[5, 1, 2, 1, 3, 3, 1]`,
+ // where each element toggles the state.
+
+ let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
+
+ let mut chunks = self.range_as_init_chunks(range).peekable();
+
+ let initial = chunks.peek().expect("range should be nonempty").is_init();
+
+ // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
+ for chunk in chunks {
+ let len = chunk.range().end.bytes() - chunk.range().start.bytes();
+ ranges.push(len);
+ }
+
+ InitCopy { ranges, initial }
+ }
+
+ /// Applies multiple instances of the run-length encoding to the initialization mask.
+ pub fn apply_copy(&mut self, defined: InitCopy, range: AllocRange, repeat: u64) {
+ // An optimization where we can just overwrite an entire range of initialization
+ // bits if they are going to be uniformly `1` or `0`.
+ if defined.ranges.len() <= 1 {
+ self.set_range_inbounds(
+ range.start,
+ range.start + range.size * repeat, // `Size` operations
+ defined.initial,
+ );
+ return;
+ }
+
+ for mut j in 0..repeat {
+ j *= range.size.bytes();
+ j += range.start.bytes();
+ let mut cur = defined.initial;
+ for range in &defined.ranges {
+ let old_j = j;
+ j += range;
+ self.set_range_inbounds(Size::from_bytes(old_j), Size::from_bytes(j), cur);
+ cur = !cur;
+ }
+ }
+ }
+}
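`prepare_copy`/`apply_copy` form a run-length-encoding pair: compress the source mask once, then replay it `repeat` times into the destination. The same scheme over a plain `bool` mask, as a self-contained toy:

```rust
// Compress a mask into (initial state, alternating run lengths)...
fn prepare(mask: &[bool]) -> (bool, Vec<usize>) {
    let initial = mask[0];
    let (mut runs, mut cur, mut len) = (Vec::new(), initial, 0);
    for &b in mask {
        if b == cur {
            len += 1;
        } else {
            runs.push(len);
            cur = b;
            len = 1;
        }
    }
    runs.push(len);
    (initial, runs)
}

// ...then replay the runs `repeat` times, toggling state after each run.
fn apply(dest: &mut [bool], initial: bool, runs: &[usize], repeat: usize) {
    let chunk: usize = runs.iter().sum();
    for i in 0..repeat {
        let (mut pos, mut cur) = (i * chunk, initial);
        for &len in runs {
            dest[pos..pos + len].fill(cur);
            pos += len;
            cur = !cur;
        }
    }
}

fn main() {
    let src = [false, false, true, true, true, false];
    let (initial, runs) = prepare(&src);
    assert_eq!((initial, runs.as_slice()), (false, &[2, 3, 1][..]));

    let mut dest = vec![true; 12];
    apply(&mut dest, initial, &runs, 2);
    assert_eq!(&dest[..6], &src[..]);
    assert_eq!(&dest[6..], &src[..]);
}
```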
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
new file mode 100644
index 000000000..ddd3f3943
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
@@ -0,0 +1,321 @@
+//! Store the provenance for each byte in the range, with a more efficient
+//! representation for the common case where PTR_SIZE consecutive bytes have the same provenance.
+
+use std::cmp;
+
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use super::{alloc_range, AllocError, AllocId, AllocRange, AllocResult, Provenance};
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+/// Stores the provenance information of pointers stored in memory.
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(HashStable)]
+pub struct ProvenanceMap<Prov = AllocId> {
+ /// Provenance in this map applies from the given offset for an entire pointer-size worth of
+ /// bytes. Two entries in this map are always at least a pointer size apart.
+ ptrs: SortedMap<Size, Prov>,
+ /// Provenance in this map only applies to the given single byte.
+ /// This map is disjoint from the previous one. It is always empty when
+ /// `Prov::OFFSET_IS_ADDR` is false.
+ bytes: Option<Box<SortedMap<Size, Prov>>>,
+}
+
+impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> {
+ fn decode(d: &mut D) -> Self {
+ assert!(!Prov::OFFSET_IS_ADDR); // only `AllocId` is ever serialized
+ Self { ptrs: Decodable::decode(d), bytes: None }
+ }
+}
+
+impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> {
+ fn encode(&self, s: &mut S) {
+ let Self { ptrs, bytes } = self;
+ assert!(!Prov::OFFSET_IS_ADDR); // only `AllocId` is ever serialized
+ debug_assert!(bytes.is_none());
+ ptrs.encode(s)
+ }
+}
+
+impl<Prov> ProvenanceMap<Prov> {
+ pub fn new() -> Self {
+ ProvenanceMap { ptrs: SortedMap::new(), bytes: None }
+ }
+
+ /// The caller must guarantee that the given provenance list is already sorted
+ /// by address and contains no duplicates.
+ pub fn from_presorted_ptrs(r: Vec<(Size, Prov)>) -> Self {
+ ProvenanceMap { ptrs: SortedMap::from_presorted_elements(r), bytes: None }
+ }
+}
+
+impl ProvenanceMap {
+ /// Gives access to the ptr-sized provenances (which can also be thought of as relocations, and
+ /// indeed that is how codegen treats them).
+ ///
+ /// Only exposed with `AllocId` provenance, since it panics if there is bytewise provenance.
+ #[inline]
+ pub fn ptrs(&self) -> &SortedMap<Size, AllocId> {
+ debug_assert!(self.bytes.is_none()); // `AllocId::OFFSET_IS_ADDR` is false so this cannot fail
+ &self.ptrs
+ }
+}
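
The two-tier lookup strategy of `ProvenanceMap::get` (defined below) can be modeled with plain `BTreeMap`s. This is a hedged sketch only; `PTR_SIZE`, `Map`, and the `u32` provenance are assumptions for illustration, not the rustc representation:

    use std::collections::BTreeMap;

    const PTR_SIZE: u64 = 8;

    struct Map {
        ptrs: BTreeMap<u64, u32>,  // an entry at `off` covers off..off + PTR_SIZE
        bytes: BTreeMap<u64, u32>, // an entry covers exactly one byte
    }

    impl Map {
        fn get(&self, offset: u64) -> Option<u32> {
            // A ptr-sized entry starting up to PTR_SIZE - 1 bytes earlier still covers us;
            // entries are at least PTR_SIZE apart, so at most one can match.
            let start = offset.saturating_sub(PTR_SIZE - 1);
            if let Some((_, &prov)) = self.ptrs.range(start..=offset).next() {
                return Some(prov);
            }
            // Fall back to per-byte provenance.
            self.bytes.get(&offset).copied()
        }
    }

    fn main() {
        let mut m = Map { ptrs: BTreeMap::new(), bytes: BTreeMap::new() };
        m.ptrs.insert(16, 1); // covers bytes 16..24
        m.bytes.insert(3, 2); // a single stray provenance byte
        assert_eq!(m.get(20), Some(1));
        assert_eq!(m.get(3), Some(2));
        assert_eq!(m.get(10), None);
    }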
+
+impl<Prov: Provenance> ProvenanceMap<Prov> {
+ /// Returns all ptr-sized provenance in the given range.
+ /// If the range has length 0, returns provenance that crosses the edge between `start-1` and
+ /// `start`.
+ fn range_get_ptrs(&self, range: AllocRange, cx: &impl HasDataLayout) -> &[(Size, Prov)] {
+ // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
+ // the beginning of this range.
+ let adjusted_start = Size::from_bytes(
+ range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1),
+ );
+ self.ptrs.range(adjusted_start..range.end())
+ }
+
+ /// Returns all byte-wise provenance in the given range.
+ fn range_get_bytes(&self, range: AllocRange) -> &[(Size, Prov)] {
+ if let Some(bytes) = self.bytes.as_ref() {
+ bytes.range(range.start..range.end())
+ } else {
+ &[]
+ }
+ }
+
+ /// Get the provenance of a single byte.
+ pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> {
+ let prov = self.range_get_ptrs(alloc_range(offset, Size::from_bytes(1)), cx);
+ debug_assert!(prov.len() <= 1);
+ if let Some(entry) = prov.first() {
+ // If it overlaps with this byte, it is on this byte.
+ debug_assert!(self.bytes.as_ref().map_or(true, |b| b.get(&offset).is_none()));
+ Some(entry.1)
+ } else {
+ // Look up per-byte provenance.
+ self.bytes.as_ref().and_then(|b| b.get(&offset).copied())
+ }
+ }
+
+ /// Checks if there is ptr-sized provenance at the given offset.
+ /// Does not mean anything for bytewise provenance! But can be useful as an optimization.
+ pub fn get_ptr(&self, offset: Size) -> Option<Prov> {
+ self.ptrs.get(&offset).copied()
+ }
+
+ /// Returns whether this allocation has provenance overlapping with the given range.
+ ///
+ /// Note: this function exists to allow `range_get_ptrs`/`range_get_bytes` to be private,
+ /// in order to somewhat limit access to provenance outside of the `Allocation` abstraction.
+ pub fn range_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool {
+ self.range_get_ptrs(range, cx).is_empty() && self.range_get_bytes(range).is_empty()
+ }
+
+ /// Yields all the provenances stored in this map.
+ pub fn provenances(&self) -> impl Iterator<Item = Prov> + '_ {
+ let bytes = self.bytes.iter().flat_map(|b| b.values());
+ self.ptrs.values().chain(bytes).copied()
+ }
+
+ pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) {
+ debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size), cx));
+ self.ptrs.insert(offset, prov);
+ }
+
+ /// Removes all provenance inside the given range.
+ /// If provenance overlaps the edges of the range, this may return an error.
+ pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult {
+ let start = range.start;
+ let end = range.end();
+ // Clear the bytewise part -- this is easy.
+ if Prov::OFFSET_IS_ADDR {
+ if let Some(bytes) = self.bytes.as_mut() {
+ bytes.remove_range(start..end);
+ }
+ } else {
+ debug_assert!(self.bytes.is_none());
+ }
+
+ // For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of
+ // provenance that overlaps with the given range.
+ let (first, last) = {
+ // Find all provenance overlapping the given range.
+ let provenance = self.range_get_ptrs(range, cx);
+ if provenance.is_empty() {
+ // No provenance in this range, we are done.
+ return Ok(());
+ }
+
+ (
+ provenance.first().unwrap().0,
+ provenance.last().unwrap().0 + cx.data_layout().pointer_size,
+ )
+ };
+
+ // We need to handle clearing the provenance from parts of a pointer.
+ if first < start {
+ if !Prov::OFFSET_IS_ADDR {
+ // We can't split up the provenance into less than a pointer.
+ return Err(AllocError::PartialPointerOverwrite(first));
+ }
+ // Insert the remaining part in the bytewise provenance.
+ let prov = self.ptrs[&first];
+ let bytes = self.bytes.get_or_insert_with(Box::default);
+ for offset in first..start {
+ bytes.insert(offset, prov);
+ }
+ }
+ if last > end {
+ let begin_of_last = last - cx.data_layout().pointer_size;
+ if !Prov::OFFSET_IS_ADDR {
+ // We can't split up the provenance into less than a pointer.
+ return Err(AllocError::PartialPointerOverwrite(begin_of_last));
+ }
+ // Insert the remaining part in the bytewise provenance.
+ let prov = self.ptrs[&begin_of_last];
+ let bytes = self.bytes.get_or_insert_with(Box::default);
+ for offset in end..last {
+ bytes.insert(offset, prov);
+ }
+ }
+
+ // Forget all the provenance.
+ // Since provenance entries do not overlap, we know that removing until `last` (exclusive) is fine,
+ // i.e., this will not remove any other provenance just after the ones we care about.
+ self.ptrs.remove_range(first..last);
+
+ Ok(())
+ }
+}
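
What `clear` does at the edges is easiest to see in isolation. A hedged sketch over the same toy maps as above (only legal when offsets are absolute addresses, i.e. `Prov::OFFSET_IS_ADDR`; otherwise the real code errors with `PartialPointerOverwrite`):

    use std::collections::BTreeMap;

    const PTR_SIZE: u64 = 8;

    /// Clear provenance in [start, end), demoting the surviving edge bytes of any
    /// partially-overwritten pointer into the bytewise map.
    fn clear(ptrs: &mut BTreeMap<u64, u32>, bytes: &mut BTreeMap<u64, u32>, start: u64, end: u64) {
        let overlap_from = start.saturating_sub(PTR_SIZE - 1);
        let hits: Vec<(u64, u32)> = ptrs.range(overlap_from..end).map(|(&o, &p)| (o, p)).collect();
        for (off, prov) in hits {
            ptrs.remove(&off);
            for b in off..start.min(off + PTR_SIZE) {
                bytes.insert(b, prov); // leading bytes survive bytewise
            }
            for b in end.max(off)..off + PTR_SIZE {
                bytes.insert(b, prov); // trailing bytes survive bytewise
            }
        }
        let stale: Vec<u64> = bytes.range(start..end).map(|(&b, _)| b).collect();
        for b in stale {
            bytes.remove(&b); // bytewise provenance inside the range just goes away
        }
    }

    fn main() {
        let mut ptrs = BTreeMap::from([(0u64, 7u32)]); // one pointer covering 0..8
        let mut bytes = BTreeMap::new();
        clear(&mut ptrs, &mut bytes, 2, 6);
        assert!(ptrs.is_empty());
        assert_eq!(bytes.keys().copied().collect::<Vec<_>>(), vec![0, 1, 6, 7]);
    }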
+
+/// A partial, owned list of provenance to transfer into another allocation.
+///
+/// Offsets are already adjusted to the destination allocation.
+pub struct ProvenanceCopy<Prov> {
+ dest_ptrs: Option<Box<[(Size, Prov)]>>,
+ dest_bytes: Option<Box<[(Size, Prov)]>>,
+}
+
+impl<Prov: Provenance> ProvenanceMap<Prov> {
+ pub fn prepare_copy(
+ &self,
+ src: AllocRange,
+ dest: Size,
+ count: u64,
+ cx: &impl HasDataLayout,
+ ) -> AllocResult<ProvenanceCopy<Prov>> {
+ let shift_offset = move |idx, offset| {
+ // compute offset for current repetition
+ let dest_offset = dest + src.size * idx; // `Size` operations
+ // shift offsets from source allocation to destination allocation
+ (offset - src.start) + dest_offset // `Size` operations
+ };
+ let ptr_size = cx.data_layout().pointer_size;
+
+ // # Pointer-sized provenances
+ // Get the provenances that are entirely within this range.
+ // (Different from `range_get_ptrs` which asks if they overlap the range.)
+ // Only makes sense if we are copying at least one pointer worth of bytes.
+ let mut dest_ptrs_box = None;
+ if src.size >= ptr_size {
+ let adjusted_end = Size::from_bytes(src.end().bytes() - (ptr_size.bytes() - 1));
+ let ptrs = self.ptrs.range(src.start..adjusted_end);
+ // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
+ // is mostly filled with redundant information since it's just N copies of the same `Prov`s
+ // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
+ // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
+ // the right sequence of provenance for all N copies.
+ // Basically, this large array would have to be created anyway in the target allocation.
+ let mut dest_ptrs = Vec::with_capacity(ptrs.len() * (count as usize));
+ for i in 0..count {
+ dest_ptrs
+ .extend(ptrs.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
+ }
+ debug_assert_eq!(dest_ptrs.len(), dest_ptrs.capacity());
+ dest_ptrs_box = Some(dest_ptrs.into_boxed_slice());
+ };
+
+ // # Byte-sized provenances
+ // This includes the existing bytewise provenance in the range, and ptr provenance
+ // that overlaps with the begin/end of the range.
+ let mut dest_bytes_box = None;
+ let begin_overlap = self.range_get_ptrs(alloc_range(src.start, Size::ZERO), cx).first();
+ let end_overlap = self.range_get_ptrs(alloc_range(src.end(), Size::ZERO), cx).first();
+ if !Prov::OFFSET_IS_ADDR {
+ // There can't be any bytewise provenance, and we cannot split up the begin/end overlap.
+ if let Some(entry) = begin_overlap {
+ return Err(AllocError::PartialPointerCopy(entry.0));
+ }
+ if let Some(entry) = end_overlap {
+ return Err(AllocError::PartialPointerCopy(entry.0));
+ }
+ debug_assert!(self.bytes.is_none());
+ } else {
+ let mut bytes = Vec::new();
+ // First, if there is a part of a pointer at the start, add that.
+ if let Some(entry) = begin_overlap {
+ trace!("start overlapping entry: {entry:?}");
+ // For really small copies, make sure we don't run off the end of the `src` range.
+ let entry_end = cmp::min(entry.0 + ptr_size, src.end());
+ for offset in src.start..entry_end {
+ bytes.push((offset, entry.1));
+ }
+ } else {
+ trace!("no start overlapping entry");
+ }
+ // Then the main part, bytewise provenance from `self.bytes`.
+ if let Some(all_bytes) = self.bytes.as_ref() {
+ bytes.extend(all_bytes.range(src.start..src.end()));
+ }
+ // And finally possibly parts of a pointer at the end.
+ if let Some(entry) = end_overlap {
+ trace!("end overlapping entry: {entry:?}");
+ // For really small copies, make sure we don't start before `src` does.
+ let entry_start = cmp::max(entry.0, src.start);
+ for offset in entry_start..src.end() {
+ if bytes.last().map_or(true, |bytes_entry| bytes_entry.0 < offset) {
+ // The last entry, if it exists, has a lower offset than us.
+ bytes.push((offset, entry.1));
+ } else {
+ // There already is an entry for this offset in there! This can happen when the
+ // start and end range checks actually end up hitting the same pointer, so we
+ // already added this in the "pointer at the start" part above.
+ assert!(entry.0 <= src.start);
+ }
+ }
+ } else {
+ trace!("no end overlapping entry");
+ }
+ trace!("byte provenances: {bytes:?}");
+
+ // And again a buffer for the new list on the target side.
+ let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize));
+ for i in 0..count {
+ dest_bytes
+ .extend(bytes.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
+ }
+ debug_assert_eq!(dest_bytes.len(), dest_bytes.capacity());
+ dest_bytes_box = Some(dest_bytes.into_boxed_slice());
+ }
+
+ Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box })
+ }
+
+ /// Applies a provenance copy.
+ /// The affected range, as defined in the parameters to `prepare_copy`, is expected
+ /// to be clear of provenance.
+ pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
+ if let Some(dest_ptrs) = copy.dest_ptrs {
+ self.ptrs.insert_presorted(dest_ptrs.into());
+ }
+ if Prov::OFFSET_IS_ADDR {
+ if let Some(dest_bytes) = copy.dest_bytes && !dest_bytes.is_empty() {
+ self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into());
+ }
+ } else {
+ debug_assert!(copy.dest_bytes.is_none());
+ }
+ }
+}
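
The offset arithmetic in `shift_offset` is the heart of `prepare_copy`. Stripped of rustc types it looks like this (a hedged sketch; names are illustrative):

    /// Each of `count` copies of the source range lands at `dest + i * size`, so an
    /// entry at `offset` becomes `(offset - src_start) + dest + i * size`.
    fn shift_offsets(
        entries: &[(u64, u32)], // (offset, provenance) within the source range
        src_start: u64,
        size: u64,
        dest: u64,
        count: u64,
    ) -> Vec<(u64, u32)> {
        let mut out = Vec::with_capacity(entries.len() * count as usize);
        for i in 0..count {
            let dest_offset = dest + size * i;
            out.extend(entries.iter().map(|&(off, p)| (off - src_start + dest_offset, p)));
        }
        out
    }

    fn main() {
        // One pointer at source offset 8 in a 16-byte range, copied 3 times to offset 100:
        assert_eq!(
            shift_offsets(&[(8, 42)], 0, 16, 100, 3),
            vec![(108, 42), (124, 42), (140, 42)]
        );
    }

Materializing the flat, presorted result up front is what lets `apply_copy` use `insert_presorted` on the destination map.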
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs b/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs
new file mode 100644
index 000000000..c9c3c50c5
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs
@@ -0,0 +1,19 @@
+use super::*;
+
+#[test]
+fn uninit_mask() {
+ let mut mask = InitMask::new(Size::from_bytes(500), false);
+ assert!(!mask.get(Size::from_bytes(499)));
+ mask.set_range(alloc_range(Size::from_bytes(499), Size::from_bytes(1)), true);
+ assert!(mask.get(Size::from_bytes(499)));
+ mask.set_range((100..256).into(), true);
+ for i in 0..100 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+ for i in 100..256 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+ for i in 256..499 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index b5a50cc15..bd9cd53e1 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -16,8 +16,6 @@ pub enum ErrorHandled {
/// Already reported an error for this evaluation, and the compilation is
/// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`.
Reported(ErrorGuaranteed),
- /// Already emitted a lint for this evaluation.
- Linted,
/// Don't emit an error, the evaluation failed because the MIR was generic
/// and the substs didn't fully monomorphize it.
TooGeneric,
@@ -89,18 +87,6 @@ fn print_backtrace(backtrace: &Backtrace) {
eprintln!("\n\nAn error occurred in miri:\n{}", backtrace);
}
-impl From<ErrorHandled> for InterpErrorInfo<'_> {
- fn from(err: ErrorHandled) -> Self {
- match err {
- ErrorHandled::Reported(ErrorGuaranteed { .. }) | ErrorHandled::Linted => {
- err_inval!(ReferencedConstant)
- }
- ErrorHandled::TooGeneric => err_inval!(TooGeneric),
- }
- .into()
- }
-}
-
impl From<ErrorGuaranteed> for InterpErrorInfo<'_> {
fn from(err: ErrorGuaranteed) -> Self {
InterpError::InvalidProgram(InvalidProgramInfo::AlreadyReported(err)).into()
@@ -138,9 +124,6 @@ impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
pub enum InvalidProgramInfo<'tcx> {
/// Resolution can fail if we are in a too generic context.
TooGeneric,
- /// Cannot compute this constant because it depends on another one
- /// which already produced an error.
- ReferencedConstant,
/// Abort in case errors are already reported.
AlreadyReported(ErrorGuaranteed),
/// An error occurred during layout computation.
@@ -158,9 +141,11 @@ impl fmt::Display for InvalidProgramInfo<'_> {
use InvalidProgramInfo::*;
match self {
TooGeneric => write!(f, "encountered overly generic constant"),
- ReferencedConstant => write!(f, "referenced constant has errors"),
AlreadyReported(ErrorGuaranteed { .. }) => {
- write!(f, "encountered constants with type errors, stopping evaluation")
+ write!(
+ f,
+ "an error has already been reported elsewhere (this should not usually be printed)"
+ )
}
Layout(ref err) => write!(f, "{err}"),
FnAbiAdjustForForeignAbi(ref err) => write!(f, "{err}"),
@@ -401,16 +386,15 @@ impl fmt::Display for UndefinedBehaviorInfo {
pub enum UnsupportedOpInfo {
/// Free-form case. Only for errors that are never caught!
Unsupported(String),
- /// Overwriting parts of a pointer; the resulting state cannot be represented in our
- /// `Allocation` data structure. See <https://github.com/rust-lang/miri/issues/2181>.
- PartialPointerOverwrite(Pointer<AllocId>),
- /// Attempting to `copy` parts of a pointer to somewhere else; the resulting state cannot be
- /// represented in our `Allocation` data structure. See
- /// <https://github.com/rust-lang/miri/issues/2181>.
- PartialPointerCopy(Pointer<AllocId>),
//
// The variants below are only reachable from CTFE/const prop, miri will never emit them.
//
+ /// Overwriting parts of a pointer; without knowing absolute addresses, the resulting state
+ /// cannot be represented by the CTFE interpreter.
+ PartialPointerOverwrite(Pointer<AllocId>),
+ /// Attempting to `copy` parts of a pointer to somewhere else; without knowing absolute
+ /// addresses, the resulting state cannot be represented by the CTFE interpreter.
+ PartialPointerCopy(Pointer<AllocId>),
/// Encountered a pointer where we needed raw bytes.
ReadPointerAsBytes,
/// Accessing thread local statics
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 5e3dfcbcc..d79cd8b7a 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -106,6 +106,7 @@ use rustc_ast::LitKind;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::{HashMapExt, Lock};
use rustc_data_structures::tiny_list::TinyList;
+use rustc_errors::ErrorGuaranteed;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_middle::ty::print::with_no_trimmed_paths;
@@ -127,8 +128,8 @@ pub use self::error::{
pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
pub use self::allocation::{
- alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,
- ProvenanceMap,
+ alloc_range, AllocError, AllocRange, AllocResult, Allocation, ConstAllocation, InitChunk,
+ InitChunkIter,
};
pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
@@ -176,7 +177,7 @@ pub enum LitToConstError {
/// This is used for graceful error handling (`delay_span_bug`) in
/// type checking (`Const::from_anon_const`).
TypeError,
- Reported,
+ Reported(ErrorGuaranteed),
}
#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index 23c2ce647..9c270ba1e 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -103,8 +103,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
/// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is
/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// some global state.
-/// We don't actually care about this `Debug` bound (we use `Provenance::fmt` to format the entire
-/// pointer), but `derive` adds some unnecessary bounds.
+/// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`.
pub trait Provenance: Copy + fmt::Debug {
/// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
/// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
@@ -115,14 +114,23 @@ pub trait Provenance: Copy + fmt::Debug {
/// pointer, and implement ptr-to-int transmutation by stripping provenance.
const OFFSET_IS_ADDR: bool;
- /// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
- /// (this avoids a separate trait in `allocation.rs` just for this purpose).
- const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
-
/// Determines how a pointer should be printed.
+ ///
+ /// Default impl is only good for when `OFFSET_IS_ADDR == true`.
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
where
- Self: Sized;
+ Self: Sized,
+ {
+ assert!(Self::OFFSET_IS_ADDR);
+ let (prov, addr) = ptr.into_parts(); // address is absolute
+ write!(f, "{:#x}", addr.bytes())?;
+ if f.alternate() {
+ write!(f, "{prov:#?}")?;
+ } else {
+ write!(f, "{prov:?}")?;
+ }
+ Ok(())
+ }
/// If `OFFSET_IS_ADDR == false`, provenance must always be able to
/// identify the allocation this ptr points to (i.e., this must return `Some`).
@@ -139,9 +147,6 @@ impl Provenance for AllocId {
// so ptr-to-int casts are not possible (since we do not know the global physical offset).
const OFFSET_IS_ADDR: bool = false;
- // For now, do not allow this, so that we keep our options open.
- const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
-
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {
@@ -168,7 +173,7 @@ impl Provenance for AllocId {
/// Represents a pointer in the Miri engine.
///
/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
-#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(Copy, Clone, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
pub struct Pointer<Prov = AllocId> {
pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Prov` type)
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 473894ac1..b6c6e9d55 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -175,6 +175,8 @@ impl<'tcx> TyCtxt<'tcx> {
impl<'tcx> TyCtxtAt<'tcx> {
/// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ ///
+ /// The span is entirely ignored by evaluation itself, but it makes for better query cycle errors.
pub fn eval_static_initializer(
self,
def_id: DefId,
@@ -187,6 +189,8 @@ impl<'tcx> TyCtxtAt<'tcx> {
}
/// Evaluate anything constant-like, returning the allocation of the final memory.
+ ///
+ /// The span is entirely ignored by evaluation itself, but it makes for better query cycle errors.
fn eval_to_allocation(
self,
gid: GlobalId<'tcx>,
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index ac5fddb7a..e6636e50e 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -1,6 +1,8 @@
use std::convert::{TryFrom, TryInto};
use std::fmt;
+use either::{Either, Left, Right};
+
use rustc_apfloat::{
ieee::{Double, Single},
Float,
@@ -18,15 +20,15 @@ use super::{
/// Represents the result of const evaluation via the `eval_to_allocation` query.
#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
pub struct ConstAlloc<'tcx> {
- // the value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
- // (so you can use `AllocMap::unwrap_memory`).
+ /// The value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
+ /// (so you can use `AllocMap::unwrap_memory`).
pub alloc_id: AllocId,
pub ty: Ty<'tcx>,
}
/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
/// array length computations, enum discriminants and the pattern matching logic.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable, Lift)]
pub enum ConstValue<'tcx> {
/// Used only for types with `layout::abi::Scalar` ABI.
@@ -108,7 +110,7 @@ impl<'tcx> ConstValue<'tcx> {
///
/// These variants would be private if there was a convenient way to achieve that in Rust.
/// Do *not* match on a `Scalar`! Use the various `to_*` methods instead.
-#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
pub enum Scalar<Prov = AllocId> {
/// The raw bytes of a simple value.
@@ -293,10 +295,10 @@ impl<Prov> Scalar<Prov> {
pub fn to_bits_or_ptr_internal(
self,
target_size: Size,
- ) -> Result<Result<u128, Pointer<Prov>>, ScalarSizeMismatch> {
+ ) -> Result<Either<u128, Pointer<Prov>>, ScalarSizeMismatch> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
Ok(match self {
- Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| {
+ Scalar::Int(int) => Left(int.to_bits(target_size).map_err(|size| {
ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes() }
})?),
Scalar::Ptr(ptr, sz) => {
@@ -306,7 +308,7 @@ impl<Prov> Scalar<Prov> {
data_size: sz.into(),
});
}
- Err(ptr)
+ Right(ptr)
}
})
}
@@ -318,8 +320,8 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
.to_bits_or_ptr_internal(cx.pointer_size())
.map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
{
- Err(ptr) => Ok(ptr.into()),
- Ok(bits) => {
+ Right(ptr) => Ok(ptr.into()),
+ Left(bits) => {
let addr = u64::try_from(bits).unwrap();
Ok(Pointer::from_addr(addr))
}
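
The switch from `Result` to `Either` here is purely semantic: neither side is an error. A small standalone sketch of the calling pattern (the `(u64, u64)` provenance pair is a stand-in for a real `Pointer`):

    use either::{Either, Left, Right};

    fn describe(v: Either<u128, (u64, u64)>) -> String {
        match v {
            Left(bits) => format!("raw bits {bits:#x}"),
            Right((alloc_id, offset)) => format!("pointer into alloc{alloc_id} at +{offset}"),
        }
    }

    fn main() {
        assert_eq!(describe(Left(0xff)), "raw bits 0xff");
        assert_eq!(describe(Right((1, 8))), "pointer into alloc1 at +8");
    }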
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 79db35a76..a513444e1 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -10,7 +10,7 @@ use crate::ty::codec::{TyDecoder, TyEncoder};
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable};
use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::visit::{TypeVisitable, TypeVisitor};
-use crate::ty::{self, List, Ty, TyCtxt};
+use crate::ty::{self, DefIdTree, List, Ty, TyCtxt};
use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
use crate::ty::{GenericArg, InternalSubsts, SubstsRef};
@@ -100,13 +100,9 @@ impl<'tcx> HasLocalDecls<'tcx> for Body<'tcx> {
/// pass will be named after the type, and it will consist of a main
/// loop that goes over each available MIR and applies `run_pass`.
pub trait MirPass<'tcx> {
- fn name(&self) -> Cow<'_, str> {
+ fn name(&self) -> &str {
let name = std::any::type_name::<Self>();
- if let Some(tail) = name.rfind(':') {
- Cow::from(&name[tail + 1..])
- } else {
- Cow::from(name)
- }
+ if let Some((_, tail)) = name.rsplit_once(':') { tail } else { name }
}
/// Returns `true` if this pass is enabled with the current combination of compiler flags.
@@ -138,33 +134,46 @@ impl MirPhase {
}
}
}
-}
-impl Display for MirPhase {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- match self {
- MirPhase::Built => write!(f, "built"),
- MirPhase::Analysis(p) => write!(f, "analysis-{}", p),
- MirPhase::Runtime(p) => write!(f, "runtime-{}", p),
+ /// Parses a `MirPhase` from a pair of strings. Panics if this isn't possible for any reason.
+ pub fn parse(dialect: String, phase: Option<String>) -> Self {
+ match &*dialect.to_ascii_lowercase() {
+ "built" => {
+ assert!(phase.is_none(), "Cannot specify a phase for `Built` MIR");
+ MirPhase::Built
+ }
+ "analysis" => Self::Analysis(AnalysisPhase::parse(phase)),
+ "runtime" => Self::Runtime(RuntimePhase::parse(phase)),
+ _ => panic!("Unknown MIR dialect {}", dialect),
}
}
}
-impl Display for AnalysisPhase {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- match self {
- AnalysisPhase::Initial => write!(f, "initial"),
- AnalysisPhase::PostCleanup => write!(f, "post_cleanup"),
+impl AnalysisPhase {
+ pub fn parse(phase: Option<String>) -> Self {
+ let Some(phase) = phase else {
+ return Self::Initial;
+ };
+
+ match &*phase.to_ascii_lowercase() {
+ "initial" => Self::Initial,
+ "post_cleanup" | "post-cleanup" | "postcleanup" => Self::PostCleanup,
+ _ => panic!("Unknown analysis phase {}", phase),
}
}
}
-impl Display for RuntimePhase {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- match self {
- RuntimePhase::Initial => write!(f, "initial"),
- RuntimePhase::PostCleanup => write!(f, "post_cleanup"),
- RuntimePhase::Optimized => write!(f, "optimized"),
+impl RuntimePhase {
+ pub fn parse(phase: Option<String>) -> Self {
+ let Some(phase) = phase else {
+ return Self::Initial;
+ };
+
+ match &*phase.to_ascii_lowercase() {
+ "initial" => Self::Initial,
+ "post_cleanup" | "post-cleanup" | "postcleanup" => Self::PostCleanup,
+ "optimized" => Self::Optimized,
+ _ => panic!("Unknown runtime phase {}", phase),
}
}
}
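
Usage sketch for the new parsers, grounded in the match arms above (the real call sites live in rustc's custom-MIR handling; these asserts are illustrative only):

    assert!(matches!(
        MirPhase::parse("runtime".to_string(), Some("post-cleanup".to_string())),
        MirPhase::Runtime(RuntimePhase::PostCleanup)
    ));
    // Omitting the phase yields the dialect's initial phase:
    assert!(matches!(
        MirPhase::parse("analysis".to_string(), None),
        MirPhase::Analysis(AnalysisPhase::Initial)
    ));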
@@ -293,6 +302,13 @@ pub struct Body<'tcx> {
/// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this.
pub is_polymorphic: bool,
+ /// The phase at which this MIR should be "injected" into the compilation process.
+ ///
+ /// Everything that comes before this `MirPhase` should be skipped.
+ ///
+ /// This is only `Some` if the function that this body comes from was annotated with `rustc_custom_mir`.
+ pub injection_phase: Option<MirPhase>,
+
pub tainted_by_errors: Option<ErrorGuaranteed>,
}
@@ -319,7 +335,7 @@ impl<'tcx> Body<'tcx> {
let mut body = Body {
phase: MirPhase::Built,
- pass_count: 1,
+ pass_count: 0,
source,
basic_blocks: BasicBlocks::new(basic_blocks),
source_scopes,
@@ -339,6 +355,7 @@ impl<'tcx> Body<'tcx> {
span,
required_consts: Vec::new(),
is_polymorphic: false,
+ injection_phase: None,
tainted_by_errors,
};
body.is_polymorphic = body.has_non_region_param();
@@ -353,7 +370,7 @@ impl<'tcx> Body<'tcx> {
pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
let mut body = Body {
phase: MirPhase::Built,
- pass_count: 1,
+ pass_count: 0,
source: MirSource::item(CRATE_DEF_ID.to_def_id()),
basic_blocks: BasicBlocks::new(basic_blocks),
source_scopes: IndexVec::new(),
@@ -366,6 +383,7 @@ impl<'tcx> Body<'tcx> {
required_consts: Vec::new(),
var_debug_info: Vec::new(),
is_polymorphic: false,
+ injection_phase: None,
tainted_by_errors: None,
};
body.is_polymorphic = body.has_non_region_param();
@@ -508,6 +526,14 @@ impl<'tcx> Body<'tcx> {
pub fn generator_kind(&self) -> Option<GeneratorKind> {
self.generator.as_ref().map(|generator| generator.generator_kind)
}
+
+ #[inline]
+ pub fn should_skip(&self) -> bool {
+ let Some(injection_phase) = self.injection_phase else {
+ return false;
+ };
+ injection_phase > self.phase
+ }
}
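
`should_skip` leans on `MirPhase`'s derived ordering, which follows declaration order (`Built < Analysis(_) < Runtime(_)`). An illustrative check, assuming that ordering:

    // Custom MIR injected at the start of the runtime dialect skips all
    // analysis-phase processing:
    let injection = MirPhase::Runtime(RuntimePhase::Initial);
    assert!(injection > MirPhase::Analysis(AnalysisPhase::PostCleanup)); // should_skip() == true
    assert!(!(injection > MirPhase::Runtime(RuntimePhase::Initial)));    // should_skip() == false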
#[derive(Copy, Clone, PartialEq, Eq, Debug, TyEncodable, TyDecodable, HashStable)]
@@ -1012,6 +1038,18 @@ pub enum VarDebugInfoContents<'tcx> {
/// based on a `Local`, not a `Static`, and contains no indexing.
Place(Place<'tcx>),
Const(Constant<'tcx>),
+ /// The user variable's data is split across several fragments,
+ /// each described by a `VarDebugInfoFragment`.
+ /// See DWARF 5's "2.6.1.2 Composite Location Descriptions"
+ /// and LLVM's `DW_OP_LLVM_fragment` for more details on
+ /// the underlying debuginfo feature this relies on.
+ Composite {
+ /// Type of the original user variable.
+ ty: Ty<'tcx>,
+ /// All the parts of the original user variable, which ended
+ /// up in disjoint places, due to optimizations.
+ fragments: Vec<VarDebugInfoFragment<'tcx>>,
+ },
}
impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
@@ -1019,7 +1057,48 @@ impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
match self {
VarDebugInfoContents::Const(c) => write!(fmt, "{}", c),
VarDebugInfoContents::Place(p) => write!(fmt, "{:?}", p),
+ VarDebugInfoContents::Composite { ty, fragments } => {
+ write!(fmt, "{:?}{{ ", ty)?;
+ for f in fragments.iter() {
+ write!(fmt, "{:?}, ", f)?;
+ }
+ write!(fmt, "}}")
+ }
+ }
+ }
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct VarDebugInfoFragment<'tcx> {
+ /// Where in the composite user variable this fragment is,
+ /// represented as a "projection" into the composite variable.
+ /// At lower levels, this corresponds to a byte/bit range.
+ // NOTE(eddyb) there's an unenforced invariant that this contains
+ // only `Field`s, and not into `enum` variants or `union`s.
+ // FIXME(eddyb) support this for `enum`s by either using DWARF's
+ // more advanced control-flow features (unsupported by LLVM?)
+ // to match on the discriminant, or by using custom type debuginfo
+ // with non-overlapping variants for the composite variable.
+ pub projection: Vec<PlaceElem<'tcx>>,
+
+ /// Where the data for this fragment can be found.
+ // NOTE(eddyb) There's an unenforced invariant that this `Place`
+ // contains no indexing (with a non-constant index).
+ pub contents: Place<'tcx>,
+}
+
+impl Debug for VarDebugInfoFragment<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ for elem in self.projection.iter() {
+ match elem {
+ ProjectionElem::Field(field, _) => {
+ write!(fmt, ".{:?}", field.index())?;
+ }
+ _ => bug!("unsupported fragment projection `{:?}`", elem),
+ }
}
+
+ write!(fmt, " => {:?}", self.contents)
}
}
@@ -1186,6 +1265,11 @@ impl<'tcx> BasicBlockData<'tcx> {
pub fn visitable(&self, index: usize) -> &dyn MirVisitable<'tcx> {
if index < self.statements.len() { &self.statements[index] } else { &self.terminator }
}
+
+ /// Does the block have no statements and an unreachable terminator?
+ pub fn is_empty_unreachable(&self) -> bool {
+ self.statements.is_empty() && matches!(self.terminator().kind, TerminatorKind::Unreachable)
+ }
}
impl<O> AssertKind<O> {
@@ -1477,7 +1561,7 @@ impl<'tcx> Place<'tcx> {
/// If MirPhase >= Derefered and if projection contains Deref,
/// It's guaranteed to be in the first place
pub fn has_deref(&self) -> bool {
- // To make sure this is not accidently used in wrong mir phase
+ // To make sure this is not accidentally used in wrong mir phase
debug_assert!(
self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
);
@@ -1767,7 +1851,7 @@ impl<'tcx> Operand<'tcx> {
substs: SubstsRef<'tcx>,
span: Span,
) -> Self {
- let ty = tcx.bound_type_of(def_id).subst(tcx, substs);
+ let ty = tcx.mk_fn_def(def_id, substs);
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
@@ -1893,6 +1977,7 @@ impl BorrowKind {
}
}
+ // FIXME: won't be used after diagnostic migration
pub fn describe_mutability(&self) -> &str {
match *self {
BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => "immutable",
@@ -1997,10 +2082,10 @@ impl<'tcx> Debug for Rvalue<'tcx> {
.print_def_path(variant_def.def_id, substs)?
.into_buffer();
- match variant_def.ctor_kind {
- CtorKind::Const => fmt.write_str(&name),
- CtorKind::Fn => fmt_tuple(fmt, &name),
- CtorKind::Fictive => {
+ match variant_def.ctor_kind() {
+ Some(CtorKind::Const) => fmt.write_str(&name),
+ Some(CtorKind::Fn) => fmt_tuple(fmt, &name),
+ None => {
let mut struct_fmt = fmt.debug_struct(&name);
for (field, place) in iter::zip(&variant_def.fields, places) {
struct_fmt.field(field.name.as_str(), place);
@@ -2186,8 +2271,10 @@ impl<'tcx> ConstantKind<'tcx> {
// FIXME: We might want to have a `try_eval`-like function on `Unevaluated`
match tcx.const_eval_resolve(param_env, uneval, None) {
Ok(val) => Self::Val(val, ty),
- Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => self,
- Err(_) => Self::Ty(tcx.const_error(ty)),
+ Err(ErrorHandled::TooGeneric) => self,
+ Err(ErrorHandled::Reported(guar)) => {
+ Self::Ty(tcx.const_error_with_guaranteed(ty, guar))
+ }
}
}
}
@@ -2403,16 +2490,11 @@ impl<'tcx> ConstantKind<'tcx> {
ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
// Find the name and index of the const parameter by indexing the generics of
// the parent item and construct a `ParamConst`.
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- let item_id = tcx.hir().get_parent_node(hir_id);
- let item_def_id = tcx.hir().local_def_id(item_id);
- let generics = tcx.generics_of(item_def_id.to_def_id());
+ let item_def_id = tcx.parent(def_id);
+ let generics = tcx.generics_of(item_def_id);
let index = generics.param_def_id_to_index[&def_id];
- let name = tcx.hir().name(hir_id);
- let ty_const = tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Param(ty::ParamConst::new(index, name)),
- ty,
- });
+ let name = tcx.item_name(def_id);
+ let ty_const = tcx.mk_const(ty::ParamConst::new(index, name), ty);
debug!(?ty_const);
return Self::Ty(ty_const);
@@ -2839,14 +2921,14 @@ fn pretty_print_const_value<'tcx>(
let cx = cx.print_value_path(variant_def.def_id, substs)?;
fmt.write_str(&cx.into_buffer())?;
- match variant_def.ctor_kind {
- CtorKind::Const => {}
- CtorKind::Fn => {
+ match variant_def.ctor_kind() {
+ Some(CtorKind::Const) => {}
+ Some(CtorKind::Fn) => {
fmt.write_str("(")?;
comma_sep(fmt, fields)?;
fmt.write_str(")")?;
}
- CtorKind::Fictive => {
+ None => {
fmt.write_str(" {{ ")?;
let mut first = true;
for (field_def, field) in iter::zip(&variant_def.fields, fields)
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 05dcfba77..2a4ff4b88 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -12,11 +12,10 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_middle::mir::interpret::{
- read_target_uint, AllocId, Allocation, ConstAllocation, ConstValue, GlobalAlloc, Pointer,
- Provenance,
+ alloc_range, read_target_uint, AllocId, Allocation, ConstAllocation, ConstValue, GlobalAlloc,
+ Pointer, Provenance,
};
use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::MirSource;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use rustc_target::abi::Size;
@@ -74,7 +73,7 @@ pub enum PassWhere {
#[inline]
pub fn dump_mir<'tcx, F>(
tcx: TyCtxt<'tcx>,
- pass_num: Option<&dyn Display>,
+ pass_num: bool,
pass_name: &str,
disambiguator: &dyn Display,
body: &Body<'tcx>,
@@ -111,7 +110,7 @@ pub fn dump_enabled<'tcx>(tcx: TyCtxt<'tcx>, pass_name: &str, def_id: DefId) ->
fn dump_matched_mir_node<'tcx, F>(
tcx: TyCtxt<'tcx>,
- pass_num: Option<&dyn Display>,
+ pass_num: bool,
pass_name: &str,
disambiguator: &dyn Display,
body: &Body<'tcx>,
@@ -120,8 +119,7 @@ fn dump_matched_mir_node<'tcx, F>(
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
let _: io::Result<()> = try {
- let mut file =
- create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, body.source)?;
+ let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, body)?;
// see notes on #41697 above
let def_path =
ty::print::with_forced_impl_filename_line!(tcx.def_path_str(body.source.def_id()));
@@ -143,16 +141,14 @@ fn dump_matched_mir_node<'tcx, F>(
if tcx.sess.opts.unstable_opts.dump_mir_graphviz {
let _: io::Result<()> = try {
- let mut file =
- create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, body.source)?;
+ let mut file = create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, body)?;
write_mir_fn_graphviz(tcx, body, false, &mut file)?;
};
}
if let Some(spanview) = tcx.sess.opts.unstable_opts.dump_mir_spanview {
let _: io::Result<()> = try {
- let file_basename =
- dump_file_basename(tcx, pass_num, pass_name, disambiguator, body.source);
+ let file_basename = dump_file_basename(tcx, pass_num, pass_name, disambiguator, body);
let mut file = create_dump_file_with_basename(tcx, &file_basename, "html")?;
if body.source.def_id().is_local() {
write_mir_fn_spanview(tcx, body, spanview, &file_basename, &mut file)?;
@@ -165,11 +161,12 @@ fn dump_matched_mir_node<'tcx, F>(
/// where we should dump a MIR representation output files.
fn dump_file_basename<'tcx>(
tcx: TyCtxt<'tcx>,
- pass_num: Option<&dyn Display>,
+ pass_num: bool,
pass_name: &str,
disambiguator: &dyn Display,
- source: MirSource<'tcx>,
+ body: &Body<'tcx>,
) -> String {
+ let source = body.source;
let promotion_id = match source.promoted {
Some(id) => format!("-{:?}", id),
None => String::new(),
@@ -178,9 +175,10 @@ fn dump_file_basename<'tcx>(
let pass_num = if tcx.sess.opts.unstable_opts.dump_mir_exclude_pass_number {
String::new()
} else {
- match pass_num {
- None => ".-------".to_string(),
- Some(pass_num) => format!(".{}", pass_num),
+ if pass_num {
+ format!(".{:03}-{:03}", body.phase.phase_index(), body.pass_count)
+ } else {
+ ".-------".to_string()
}
};
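
Concretely, the new fixed-width segment (from the format string above) keeps dump-file names lexicographically sorted by phase and pass:

    // Phase index 2, pass count 5:
    assert_eq!(format!(".{:03}-{:03}", 2, 5), ".002-005");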
@@ -250,14 +248,14 @@ fn create_dump_file_with_basename(
pub fn create_dump_file<'tcx>(
tcx: TyCtxt<'tcx>,
extension: &str,
- pass_num: Option<&dyn Display>,
+ pass_num: bool,
pass_name: &str,
disambiguator: &dyn Display,
- source: MirSource<'tcx>,
+ body: &Body<'tcx>,
) -> io::Result<io::BufWriter<fs::File>> {
create_dump_file_with_basename(
tcx,
- &dump_file_basename(tcx, pass_num, pass_name, disambiguator, source),
+ &dump_file_basename(tcx, pass_num, pass_name, disambiguator, body),
extension,
)
}
@@ -476,6 +474,7 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
// These variants shouldn't exist in the MIR.
ty::ConstKind::Placeholder(_)
| ty::ConstKind::Infer(_)
+ | ty::ConstKind::Expr(_)
| ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", literal),
},
ConstantKind::Unevaluated(uv, _) => {
@@ -685,7 +684,7 @@ pub fn write_allocations<'tcx>(
fn alloc_ids_from_alloc(
alloc: ConstAllocation<'_>,
) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
- alloc.inner().provenance().values().map(|id| *id)
+ alloc.inner().provenance().ptrs().values().map(|id| *id)
}
fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
@@ -788,7 +787,7 @@ pub fn write_allocations<'tcx>(
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
/// This also prints provenance adequately.
-pub fn display_allocation<'a, 'tcx, Prov, Extra>(
+pub fn display_allocation<'a, 'tcx, Prov: Provenance, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Prov, Extra>,
) -> RenderAllocation<'a, 'tcx, Prov, Extra> {
@@ -796,7 +795,7 @@ pub fn display_allocation<'a, 'tcx, Prov, Extra>(
}
#[doc(hidden)]
-pub struct RenderAllocation<'a, 'tcx, Prov, Extra> {
+pub struct RenderAllocation<'a, 'tcx, Prov: Provenance, Extra> {
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Prov, Extra>,
}
@@ -882,9 +881,9 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
if i != line_start {
write!(w, " ")?;
}
- if let Some(&prov) = alloc.provenance().get(&i) {
+ if let Some(prov) = alloc.provenance().get_ptr(i) {
// Memory with provenance must be defined
- assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
+ assert!(alloc.init_mask().is_range_initialized(alloc_range(i, ptr_size)).is_ok());
let j = i.bytes_usize();
let offset = alloc
.inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
@@ -904,9 +903,9 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
let overflow = ptr_size - remainder;
let remainder_width = provenance_width(remainder.bytes_usize()) - 2;
let overflow_width = provenance_width(overflow.bytes_usize() - 1) + 1;
- ascii.push('╾');
- for _ in 0..remainder.bytes() - 1 {
- ascii.push('─');
+ ascii.push('╾'); // HEAVY LEFT AND LIGHT RIGHT
+ for _ in 1..remainder.bytes() {
+ ascii.push('─'); // LIGHT HORIZONTAL
}
if overflow_width > remainder_width && overflow_width >= target.len() {
// The case where the provenance fits into the part in the next line
@@ -926,7 +925,7 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
for _ in 0..overflow.bytes() - 1 {
ascii.push('─');
}
- ascii.push('╼');
+ ascii.push('╼'); // LIGHT LEFT AND HEAVY RIGHT
i += ptr_size;
continue;
} else {
@@ -941,7 +940,23 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
ascii.push('╼');
i += ptr_size;
}
- } else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
+ } else if let Some(prov) = alloc.provenance().get(i, &tcx) {
+ // Memory with provenance must be defined
+ assert!(
+ alloc.init_mask().is_range_initialized(alloc_range(i, Size::from_bytes(1))).is_ok()
+ );
+ ascii.push('━'); // HEAVY HORIZONTAL
+ // We have two characters to display this, which is obviously not enough.
+ // Format is similar to "oversized" above.
+ let j = i.bytes_usize();
+ let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
+ write!(w, "╾{:02x}{:#?} (1 ptr byte)╼", c, prov)?;
+ i += Size::from_bytes(1);
+ } else if alloc
+ .init_mask()
+ .is_range_initialized(alloc_range(i, Size::from_bytes(1)))
+ .is_ok()
+ {
let j = i.bytes_usize();
// Checked definedness (and thus range) and provenance. This access also doesn't
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index 85ef51f12..5ba053820 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -6,6 +6,7 @@
use super::{BasicBlock, Constant, Field, Local, SwitchTargets, UserTypeProjection};
use crate::mir::coverage::{CodeRegion, CoverageKind};
+use crate::traits::Reveal;
use crate::ty::adjustment::PointerCast;
use crate::ty::subst::SubstsRef;
use crate::ty::{self, List, Ty};
@@ -85,10 +86,30 @@ pub enum MirPhase {
///
/// Also note that the lint pass which reports eg `200_u8 + 200_u8` as an error is run as a part
/// of analysis to runtime MIR lowering. To ensure lints are reported reliably, this means that
- /// transformations which may supress such errors should not run on analysis MIR.
+ /// transformations which may suppress such errors should not run on analysis MIR.
Runtime(RuntimePhase),
}
+impl MirPhase {
+ pub fn name(&self) -> &'static str {
+ match *self {
+ MirPhase::Built => "built",
+ MirPhase::Analysis(AnalysisPhase::Initial) => "analysis",
+ MirPhase::Analysis(AnalysisPhase::PostCleanup) => "analysis-post-cleanup",
+ MirPhase::Runtime(RuntimePhase::Initial) => "runtime",
+ MirPhase::Runtime(RuntimePhase::PostCleanup) => "runtime-post-cleanup",
+ MirPhase::Runtime(RuntimePhase::Optimized) => "runtime-optimized",
+ }
+ }
+
+ pub fn reveal(&self) -> Reveal {
+ match *self {
+ MirPhase::Built | MirPhase::Analysis(_) => Reveal::UserFacing,
+ MirPhase::Runtime(_) => Reveal::All,
+ }
+ }
+}
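
Usage sketch for the new helpers, with values read straight off the match arms above:

    assert_eq!(MirPhase::Runtime(RuntimePhase::Optimized).name(), "runtime-optimized");
    assert!(matches!(MirPhase::Built.reveal(), Reveal::UserFacing));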
+
/// See [`MirPhase::Analysis`].
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
@@ -387,7 +408,7 @@ impl std::fmt::Display for NonDivergingIntrinsic<'_> {
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
pub enum RetagKind {
- /// The initial retag when entering a function.
+ /// The initial retag of arguments when entering a function.
FnEntry,
/// Retag preparing for a two-phase borrow.
TwoPhase,
@@ -1185,7 +1206,8 @@ pub enum NullOp {
AlignOf,
}
-#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
pub enum UnOp {
/// The `!` operator for logical inversion
Not,
@@ -1193,7 +1215,8 @@ pub enum UnOp {
Neg,
}
-#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum BinOp {
/// The `+` operator (addition)
Add,
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
index 4c0974f86..0705b4cff 100644
--- a/compiler/rustc_middle/src/mir/type_foldable.rs
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -16,9 +16,7 @@ TrivialTypeTraversalAndLiftImpls! {
UserTypeAnnotationIndex,
BorrowKind,
CastKind,
- BinOp,
NullOp,
- UnOp,
hir::Movability,
BasicBlock,
SwitchTargets,
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index ddcf3711b..b21f50ae5 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -847,6 +847,17 @@ macro_rules! make_mir_visitor {
PlaceContext::NonUse(NonUseContext::VarDebugInfo),
location
),
+ VarDebugInfoContents::Composite { ty, fragments } => {
+ // FIXME(eddyb) use a better `TyContext` here.
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ for VarDebugInfoFragment { projection: _, contents } in fragments {
+ self.visit_place(
+ contents,
+ PlaceContext::NonUse(NonUseContext::VarDebugInfo),
+ location,
+ );
+ }
+ }
}
}
@@ -1320,6 +1331,15 @@ impl PlaceContext {
)
}
+ /// Returns `true` if this place context represents an address-of.
+ pub fn is_address_of(&self) -> bool {
+ matches!(
+ self,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
+ | PlaceContext::MutatingUse(MutatingUseContext::AddressOf)
+ )
+ }
+
/// Returns `true` if this place context represents a storage live or storage dead marker.
#[inline]
pub fn is_storage_marker(&self) -> bool {