Diffstat (limited to 'compiler/rustc_middle/src/mir')
-rw-r--r--  compiler/rustc_middle/src/mir/basic_blocks.rs              2
-rw-r--r--  compiler/rustc_middle/src/mir/generic_graph.rs             4
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation.rs    331
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/error.rs          13
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/mod.rs             6
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/pointer.rs        15
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/queries.rs         4
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/value.rs         163
-rw-r--r--  compiler/rustc_middle/src/mir/mod.rs                     194
-rw-r--r--  compiler/rustc_middle/src/mir/patch.rs                    36
-rw-r--r--  compiler/rustc_middle/src/mir/pretty.rs                   60
-rw-r--r--  compiler/rustc_middle/src/mir/query.rs                    26
-rw-r--r--  compiler/rustc_middle/src/mir/spanview.rs                  4
-rw-r--r--  compiler/rustc_middle/src/mir/syntax.rs                  207
-rw-r--r--  compiler/rustc_middle/src/mir/terminator.rs                4
-rw-r--r--  compiler/rustc_middle/src/mir/traversal.rs                 4
-rw-r--r--  compiler/rustc_middle/src/mir/type_foldable.rs           207
-rw-r--r--  compiler/rustc_middle/src/mir/type_visitable.rs          170
-rw-r--r--  compiler/rustc_middle/src/mir/visit.rs                   186
19 files changed, 607 insertions, 1029 deletions
diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs
index 78080fcd5..752cbdeae 100644
--- a/compiler/rustc_middle/src/mir/basic_blocks.rs
+++ b/compiler/rustc_middle/src/mir/basic_blocks.rs
@@ -86,7 +86,7 @@ impl<'tcx> BasicBlocks<'tcx> {
///
/// You will only ever need this if you have also called [`BasicBlocks::as_mut_preserves_cfg`].
/// All other methods that allow you to mutate the basic blocks also call this method
- /// themselves, thereby avoiding any risk of accidentaly cache invalidation.
+ /// themselves, thereby avoiding any risk of accidental cache invalidation.
pub fn invalidate_cfg_cache(&mut self) {
self.predecessor_cache.invalidate();
self.switch_source_cache.invalidate();
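
The comment fixed above documents a contract rather than code, so here is a minimal sketch of how a caller is expected to honor it. It assumes the compiler-internal `rustc_middle` API named in the hunk (`as_mut_preserves_cfg`, `invalidate_cfg_cache`); the pass body itself is hypothetical.

use rustc_middle::mir::Body;

// Sketch only: `as_mut_preserves_cfg` hands out the blocks without touching
// the caches, on the promise that the CFG stays intact. All other mutating
// accessors (e.g. `basic_blocks_mut`) invalidate the caches themselves.
fn edit_blocks<'tcx>(body: &mut Body<'tcx>) {
    for block in body.basic_blocks.as_mut_preserves_cfg() {
        for _stmt in &mut block.statements {
            // ... hypothetical in-place statement rewrites ...
        }
    }
    // A caller that cannot uphold the "CFG unchanged" promise (for example,
    // because it also edited terminators) must drop the stale caches itself:
    body.basic_blocks.invalidate_cfg_cache();
}
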
diff --git a/compiler/rustc_middle/src/mir/generic_graph.rs b/compiler/rustc_middle/src/mir/generic_graph.rs
index f3621cd99..d1f3561c0 100644
--- a/compiler/rustc_middle/src/mir/generic_graph.rs
+++ b/compiler/rustc_middle/src/mir/generic_graph.rs
@@ -12,14 +12,14 @@ pub fn mir_fn_to_generic_graph<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Grap
// Nodes
let nodes: Vec<Node> = body
- .basic_blocks()
+ .basic_blocks
.iter_enumerated()
.map(|(block, _)| bb_to_graph_node(block, body, dark_mode))
.collect();
// Edges
let mut edges = Vec::new();
- for (source, _) in body.basic_blocks().iter_enumerated() {
+ for (source, _) in body.basic_blocks.iter_enumerated() {
let def_id = body.source.def_id();
let terminator = body[source].terminator();
let labels = terminator.kind.fmt_successor_labels();
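
For context on the change above: the `basic_blocks()` accessor is removed later in this diff (see the `mir/mod.rs` hunk), so read-only traversals now go through the public `basic_blocks` field directly. A small sketch of that pattern, again assuming the compiler-internal types:

use rustc_middle::mir::{Body, TerminatorKind};

// Count how many blocks end in `Return`, using the field the hunk switches to.
fn count_return_blocks<'tcx>(body: &Body<'tcx>) -> usize {
    body.basic_blocks
        .iter_enumerated()
        .filter(|(_bb, data)| matches!(data.terminator().kind, TerminatorKind::Return))
        .count()
}
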
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index db7e0fb8a..37ec04b07 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -16,8 +16,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
- ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
- UninitBytesAccess, UnsupportedOpInfo,
+ ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
+ UnsupportedOpInfo,
};
use crate::ty;
@@ -34,11 +34,11 @@ pub struct Allocation<Prov = AllocId, Extra = ()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer.
bytes: Box<[u8]>,
- /// Maps from byte addresses to extra data for each pointer.
+ /// Maps from byte addresses to extra provenance data for each pointer.
/// Only the first byte of a pointer is inserted into the map; i.e.,
/// every entry in this map applies to `pointer_size` consecutive bytes starting
/// at the given offset.
- relocations: Relocations<Prov>,
+ provenance: ProvenanceMap<Prov>,
/// Denotes which part of this allocation is initialized.
init_mask: InitMask,
/// The alignment of the allocation to detect unaligned reads.
@@ -84,7 +84,7 @@ impl hash::Hash for Allocation {
}
// Hash the other fields as usual.
- self.relocations.hash(state);
+ self.provenance.hash(state);
self.init_mask.hash(state);
self.align.hash(state);
self.mutability.hash(state);
@@ -130,6 +130,8 @@ pub enum AllocError {
ReadPointerAsBytes,
/// Partially overwriting a pointer.
PartialPointerOverwrite(Size),
+ /// Partially copying a pointer.
+ PartialPointerCopy(Size),
/// Using uninitialized data where it is not allowed.
InvalidUninitBytes(Option<UninitBytesAccess>),
}
@@ -152,6 +154,9 @@ impl AllocError {
PartialPointerOverwrite(offset) => InterpError::Unsupported(
UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
),
+ PartialPointerCopy(offset) => InterpError::Unsupported(
+ UnsupportedOpInfo::PartialPointerCopy(Pointer::new(alloc_id, offset)),
+ ),
InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
),
@@ -211,7 +216,7 @@ impl<Prov> Allocation<Prov> {
let size = Size::from_bytes(bytes.len());
Self {
bytes,
- relocations: Relocations::new(),
+ provenance: ProvenanceMap::new(),
init_mask: InitMask::new(size, true),
align,
mutability,
@@ -246,7 +251,7 @@ impl<Prov> Allocation<Prov> {
let bytes = unsafe { bytes.assume_init() };
Ok(Allocation {
bytes,
- relocations: Relocations::new(),
+ provenance: ProvenanceMap::new(),
init_mask: InitMask::new(size, false),
align,
mutability: Mutability::Mut,
@@ -266,22 +271,22 @@ impl Allocation {
) -> Result<Allocation<Prov, Extra>, Err> {
// Compute new pointer provenance, which also adjusts the bytes.
let mut bytes = self.bytes;
- let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
+ let mut new_provenance = Vec::with_capacity(self.provenance.0.len());
let ptr_size = cx.data_layout().pointer_size.bytes_usize();
let endian = cx.data_layout().endian;
- for &(offset, alloc_id) in self.relocations.iter() {
+ for &(offset, alloc_id) in self.provenance.iter() {
let idx = offset.bytes_usize();
let ptr_bytes = &mut bytes[idx..idx + ptr_size];
let bits = read_target_uint(endian, ptr_bytes).unwrap();
let (ptr_prov, ptr_offset) =
adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
- new_relocations.push((offset, ptr_prov));
+ new_provenance.push((offset, ptr_prov));
}
// Create allocation.
Ok(Allocation {
bytes,
- relocations: Relocations::from_presorted(new_relocations),
+ provenance: ProvenanceMap::from_presorted(new_provenance),
init_mask: self.init_mask,
align: self.align,
mutability: self.mutability,
@@ -300,8 +305,8 @@ impl<Prov, Extra> Allocation<Prov, Extra> {
Size::from_bytes(self.len())
}
- /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
- /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
+ /// Looks at a slice which may contain uninitialized bytes or provenance. This differs
+ /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
/// edges) at all.
/// This must not be used for reads affecting the interpreter execution.
pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
@@ -313,74 +318,47 @@ impl<Prov, Extra> Allocation<Prov, Extra> {
&self.init_mask
}
- /// Returns the relocation list.
- pub fn relocations(&self) -> &Relocations<Prov> {
- &self.relocations
+ /// Returns the provenance map.
+ pub fn provenance(&self) -> &ProvenanceMap<Prov> {
+ &self.provenance
}
}
/// Byte accessors.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// This is the entirely abstraction-violating way to just grab the raw bytes without
- /// caring about relocations. It just deduplicates some code between `read_scalar`
- /// and `get_bytes_internal`.
- fn get_bytes_even_more_internal(&self, range: AllocRange) -> &[u8] {
- &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
- }
-
- /// The last argument controls whether we error out when there are uninitialized or pointer
- /// bytes. However, we *always* error when there are relocations overlapping the edges of the
- /// range.
- ///
- /// You should never call this, call `get_bytes` or `get_bytes_with_uninit_and_ptr` instead,
+ /// caring about provenance or initialization.
///
/// This function also guarantees that the resulting pointer will remain stable
/// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
/// on that.
- ///
- /// It is the caller's responsibility to check bounds and alignment beforehand.
- fn get_bytes_internal(
- &self,
- cx: &impl HasDataLayout,
- range: AllocRange,
- check_init_and_ptr: bool,
- ) -> AllocResult<&[u8]> {
- if check_init_and_ptr {
- self.check_init(range)?;
- self.check_relocations(cx, range)?;
- } else {
- // We still don't want relocations on the *edges*.
- self.check_relocation_edges(cx, range)?;
- }
-
- Ok(self.get_bytes_even_more_internal(range))
+ #[inline]
+ pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
+ &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
}
- /// Checks that these bytes are initialized and not pointer bytes, and then return them
- /// as a slice.
+ /// Checks that these bytes are initialized, and then strip provenance (if possible) and return
+ /// them.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
/// on `InterpCx` instead.
#[inline]
- pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
- self.get_bytes_internal(cx, range, true)
- }
-
- /// It is the caller's responsibility to handle uninitialized and pointer bytes.
- /// However, this still checks that there are no relocations on the *edges*.
- ///
- /// It is the caller's responsibility to check bounds and alignment beforehand.
- #[inline]
- pub fn get_bytes_with_uninit_and_ptr(
+ pub fn get_bytes_strip_provenance(
&self,
cx: &impl HasDataLayout,
range: AllocRange,
) -> AllocResult<&[u8]> {
- self.get_bytes_internal(cx, range, false)
+ self.check_init(range)?;
+ if !Prov::OFFSET_IS_ADDR {
+ if self.range_has_provenance(cx, range) {
+ return Err(AllocError::ReadPointerAsBytes);
+ }
+ }
+ Ok(self.get_bytes_unchecked(range))
}
- /// Just calling this already marks everything as defined and removes relocations,
+ /// Just calling this already marks everything as defined and removes provenance,
/// so be sure to actually put data there!
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
@@ -392,7 +370,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<&mut [u8]> {
self.mark_init(range, true);
- self.clear_relocations(cx, range)?;
+ self.clear_provenance(cx, range)?;
Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
}
@@ -404,7 +382,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<*mut [u8]> {
self.mark_init(range, true);
- self.clear_relocations(cx, range)?;
+ self.clear_provenance(cx, range)?;
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
@@ -415,28 +393,6 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Reading and writing.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
- /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
- /// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the
- /// given range contains no uninitialized bytes/relocations.
- pub fn check_bytes(
- &self,
- cx: &impl HasDataLayout,
- range: AllocRange,
- allow_uninit: bool,
- allow_ptr: bool,
- ) -> AllocResult {
- // Check bounds and relocations on the edges.
- self.get_bytes_with_uninit_and_ptr(cx, range)?;
- // Check uninit and ptr.
- if !allow_uninit {
- self.check_init(range)?;
- }
- if !allow_ptr {
- self.check_relocations(cx, range)?;
- }
- Ok(())
- }
-
/// Reads a *non-ZST* scalar.
///
/// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
@@ -452,47 +408,55 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
cx: &impl HasDataLayout,
range: AllocRange,
read_provenance: bool,
- ) -> AllocResult<ScalarMaybeUninit<Prov>> {
- if read_provenance {
- assert_eq!(range.size, cx.data_layout().pointer_size);
- }
-
+ ) -> AllocResult<Scalar<Prov>> {
// First and foremost, if anything is uninit, bail.
if self.is_init(range).is_err() {
- // This inflates uninitialized bytes to the entire scalar, even if only a few
- // bytes are uninitialized.
- return Ok(ScalarMaybeUninit::Uninit);
+ return Err(AllocError::InvalidUninitBytes(None));
}
- // If we are doing a pointer read, and there is a relocation exactly where we
- // are reading, then we can put data and relocation back together and return that.
- if read_provenance && let Some(&prov) = self.relocations.get(&range.start) {
- // We already checked init and relocations, so we can use this function.
- let bytes = self.get_bytes_even_more_internal(range);
- let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- let ptr = Pointer::new(prov, Size::from_bytes(bits));
- return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
- }
+ // Get the integer part of the result. We HAVE TO check provenance before returning this!
+ let bytes = self.get_bytes_unchecked(range);
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- // If we are *not* reading a pointer, and we can just ignore relocations,
- // then do exactly that.
- if !read_provenance && Prov::OFFSET_IS_ADDR {
- // We just strip provenance.
- let bytes = self.get_bytes_even_more_internal(range);
- let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- return Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)));
+ if read_provenance {
+ assert_eq!(range.size, cx.data_layout().pointer_size);
+
+ // When reading data with provenance, the easy case is finding provenance exactly where we
+ // are reading, then we can put data and provenance back together and return that.
+ if let Some(&prov) = self.provenance.get(&range.start) {
+ // Now we can return the bits, with their appropriate provenance.
+ let ptr = Pointer::new(prov, Size::from_bytes(bits));
+ return Ok(Scalar::from_pointer(ptr, cx));
+ }
+
+ // If we can work on pointers byte-wise, join the byte-wise provenances.
+ if Prov::OFFSET_IS_ADDR {
+ let mut prov = self.offset_get_provenance(cx, range.start);
+ for offset in 1..range.size.bytes() {
+ let this_prov =
+ self.offset_get_provenance(cx, range.start + Size::from_bytes(offset));
+ prov = Prov::join(prov, this_prov);
+ }
+ // Now use this provenance.
+ let ptr = Pointer::new(prov, Size::from_bytes(bits));
+ return Ok(Scalar::from_maybe_pointer(ptr, cx));
+ }
+ } else {
+ // We are *not* reading a pointer.
+ // If we can just ignore provenance, do exactly that.
+ if Prov::OFFSET_IS_ADDR {
+ // We just strip provenance.
+ return Ok(Scalar::from_uint(bits, range.size));
+ }
}
- // It's complicated. Better make sure there is no provenance anywhere.
- // FIXME: If !OFFSET_IS_ADDR, this is the best we can do. But if OFFSET_IS_ADDR, then
- // `read_pointer` is true and we ideally would distinguish the following two cases:
- // - The entire `range` is covered by 2 relocations for the same provenance.
- // Then we should return a pointer with that provenance.
- // - The range has inhomogeneous provenance. Then we should return just the
- // underlying bits.
- let bytes = self.get_bytes(cx, range)?;
- let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
+ // Fallback path for when we cannot treat provenance bytewise or ignore it.
+ assert!(!Prov::OFFSET_IS_ADDR);
+ if self.range_has_provenance(cx, range) {
+ return Err(AllocError::ReadPointerAsBytes);
+ }
+ // There is no provenance, we can just return the bits.
+ Ok(Scalar::from_uint(bits, range.size))
}
/// Writes a *non-ZST* scalar.
@@ -507,17 +471,10 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
&mut self,
cx: &impl HasDataLayout,
range: AllocRange,
- val: ScalarMaybeUninit<Prov>,
+ val: Scalar<Prov>,
) -> AllocResult {
assert!(self.mutability == Mutability::Mut);
- let val = match val {
- ScalarMaybeUninit::Scalar(scalar) => scalar,
- ScalarMaybeUninit::Uninit => {
- return self.write_uninit(cx, range);
- }
- };
-
// `to_bits_or_ptr_internal` is the right method because we just want to store this data
// as-is into memory.
let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
@@ -532,9 +489,9 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
let dst = self.get_bytes_mut(cx, range)?;
write_target_uint(endian, dst, bytes).unwrap();
- // See if we have to also write a relocation.
+ // See if we have to also store some provenance.
if let Some(provenance) = provenance {
- self.relocations.0.insert(range.start, provenance);
+ self.provenance.0.insert(range.start, provenance);
}
Ok(())
@@ -543,64 +500,65 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Write "uninit" to the given memory range.
pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
self.mark_init(range, false);
- self.clear_relocations(cx, range)?;
+ self.clear_provenance(cx, range)?;
return Ok(());
}
}
-/// Relocations.
+/// Provenance.
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
- /// Returns all relocations overlapping with the given pointer-offset pair.
- fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
+ /// Returns all provenance overlapping with the given pointer-offset pair.
+ fn range_get_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
- self.relocations.range(Size::from_bytes(start)..range.end())
+ self.provenance.range(Size::from_bytes(start)..range.end())
}
- /// Returns whether this allocation has relocations overlapping with the given range.
- ///
- /// Note: this function exists to allow `get_relocations` to be private, in order to somewhat
- /// limit access to relocations outside of the `Allocation` abstraction.
- ///
- pub fn has_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
- !self.get_relocations(cx, range).is_empty()
+ /// Get the provenance of a single byte.
+ fn offset_get_provenance(&self, cx: &impl HasDataLayout, offset: Size) -> Option<Prov> {
+ let prov = self.range_get_provenance(cx, alloc_range(offset, Size::from_bytes(1)));
+ assert!(prov.len() <= 1);
+ prov.first().map(|(_offset, prov)| *prov)
}
- /// Checks that there are no relocations overlapping with the given range.
- #[inline(always)]
- fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
- if self.has_relocations(cx, range) { Err(AllocError::ReadPointerAsBytes) } else { Ok(()) }
+ /// Returns whether this allocation has provenance overlapping with the given range.
+ ///
+ /// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat
+ /// limit access to provenance outside of the `Allocation` abstraction.
+ ///
+ pub fn range_has_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
+ !self.range_get_provenance(cx, range).is_empty()
}
- /// Removes all relocations inside the given range.
- /// If there are relocations overlapping with the edges, they
+ /// Removes all provenance inside the given range.
+ /// If there is provenance overlapping with the edges, the affected entries
/// are removed as well *and* the bytes they cover are marked as
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
- fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
+ fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
where
Prov: Provenance,
{
- // Find the start and end of the given range and its outermost relocations.
+ // Find the start and end of the given range and its outermost provenance.
let (first, last) = {
- // Find all relocations overlapping the given range.
- let relocations = self.get_relocations(cx, range);
- if relocations.is_empty() {
+ // Find all provenance overlapping the given range.
+ let provenance = self.range_get_provenance(cx, range);
+ if provenance.is_empty() {
return Ok(());
}
(
- relocations.first().unwrap().0,
- relocations.last().unwrap().0 + cx.data_layout().pointer_size,
+ provenance.first().unwrap().0,
+ provenance.last().unwrap().0 + cx.data_layout().pointer_size,
)
};
let start = range.start;
let end = range.end();
- // We need to handle clearing the relocations from parts of a pointer.
- // FIXME: Miri should preserve partial relocations; see
+ // We need to handle clearing the provenance from parts of a pointer.
+ // FIXME: Miri should preserve partial provenance; see
// https://github.com/rust-lang/miri/issues/2181.
if first < start {
if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
@@ -623,41 +581,32 @@ impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
self.init_mask.set_range(end, last, false);
}
- // Forget all the relocations.
- // Since relocations do not overlap, we know that removing until `last` (exclusive) is fine,
- // i.e., this will not remove any other relocations just after the ones we care about.
- self.relocations.0.remove_range(first..last);
+ // Forget all the provenance.
+ // Since provenance entries do not overlap, we know that removing until `last` (exclusive) is fine,
+ // i.e., this will not remove any other provenance just after the ones we care about.
+ self.provenance.0.remove_range(first..last);
Ok(())
}
-
- /// Errors if there are relocations overlapping with the edges of the
- /// given memory range.
- #[inline]
- fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
- self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
- self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
- Ok(())
- }
}
-/// "Relocations" stores the provenance information of pointers stored in memory.
+/// Stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-pub struct Relocations<Prov = AllocId>(SortedMap<Size, Prov>);
+pub struct ProvenanceMap<Prov = AllocId>(SortedMap<Size, Prov>);
-impl<Prov> Relocations<Prov> {
+impl<Prov> ProvenanceMap<Prov> {
pub fn new() -> Self {
- Relocations(SortedMap::new())
+ ProvenanceMap(SortedMap::new())
}
- // The caller must guarantee that the given relocations are already sorted
+ // The caller must guarantee that the given provenance entries are already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
- Relocations(SortedMap::from_presorted_elements(r))
+ ProvenanceMap(SortedMap::from_presorted_elements(r))
}
}
-impl<Prov> Deref for Relocations<Prov> {
+impl<Prov> Deref for ProvenanceMap<Prov> {
type Target = SortedMap<Size, Prov>;
fn deref(&self) -> &Self::Target {
@@ -665,36 +614,36 @@ impl<Prov> Deref for Relocations<Prov> {
}
}
-/// A partial, owned list of relocations to transfer into another allocation.
+/// A partial, owned list of provenance to transfer into another allocation.
///
/// Offsets are already adjusted to the destination allocation.
-pub struct AllocationRelocations<Prov> {
- dest_relocations: Vec<(Size, Prov)>,
+pub struct AllocationProvenance<Prov> {
+ dest_provenance: Vec<(Size, Prov)>,
}
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
- pub fn prepare_relocation_copy(
+ pub fn prepare_provenance_copy(
&self,
cx: &impl HasDataLayout,
src: AllocRange,
dest: Size,
count: u64,
- ) -> AllocationRelocations<Prov> {
- let relocations = self.get_relocations(cx, src);
- if relocations.is_empty() {
- return AllocationRelocations { dest_relocations: Vec::new() };
+ ) -> AllocationProvenance<Prov> {
+ let provenance = self.range_get_provenance(cx, src);
+ if provenance.is_empty() {
+ return AllocationProvenance { dest_provenance: Vec::new() };
}
let size = src.size;
- let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
+ let mut new_provenance = Vec::with_capacity(provenance.len() * (count as usize));
// If `count` is large, this is rather wasteful -- we are allocating a big array here, which
// is mostly filled with redundant information since it's just N copies of the same `Prov`s
- // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
+ // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
// we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
- // the right sequence of relocations for all N copies.
+ // the right sequence of provenance for all N copies.
for i in 0..count {
- new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
+ new_provenance.extend(provenance.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
let dest_offset = dest + size * i; // `Size` operations
(
@@ -705,17 +654,17 @@ impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
}));
}
- AllocationRelocations { dest_relocations: new_relocations }
+ AllocationProvenance { dest_provenance: new_provenance }
}
- /// Applies a relocation copy.
- /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
- /// to be clear of relocations.
+ /// Applies a provenance copy.
+ /// The affected range, as defined in the parameters to `prepare_provenance_copy` is expected
+ /// to be clear of provenance.
///
/// This is dangerous to use as it can violate internal `Allocation` invariants!
/// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
- pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Prov>) {
- self.relocations.0.insert_presorted(relocations.dest_relocations);
+ pub fn mark_provenance_range(&mut self, provenance: AllocationProvenance<Prov>) {
+ self.provenance.0.insert_presorted(provenance.dest_provenance);
}
}
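
One detail worth spelling out from the hunks above: `range_get_provenance` has to look back `pointer_size - 1` bytes because a map entry only records the first byte of a stored pointer. Below is a self-contained toy model of that lookup, with a `BTreeMap` standing in for `SortedMap`, a fixed 8-byte pointer size, and string labels in place of real provenance; it is an illustration, not the interpreter's code.

use std::collections::BTreeMap;

const PTR_SIZE: u64 = 8;

// Mirrors `range_get_provenance`: an entry at offset `o` covers bytes
// `o .. o + PTR_SIZE`, so a query must also look back `PTR_SIZE - 1` bytes
// before the start of the requested range.
fn provenance_in_range(
    map: &BTreeMap<u64, &'static str>,
    start: u64,
    len: u64,
) -> Vec<(u64, &'static str)> {
    let lookback_start = start.saturating_sub(PTR_SIZE - 1);
    map.range(lookback_start..start + len).map(|(&o, &p)| (o, p)).collect()
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(0u64, "A"); // pointer stored at bytes 0..8
    map.insert(16u64, "B"); // pointer stored at bytes 16..24

    // A read of bytes 4..12 overlaps the tail of pointer "A" even though the
    // map entry sits at offset 0, before the queried range.
    assert_eq!(provenance_in_range(&map, 4, 8), vec![(0, "A")]);
    // Bytes 8..16 touch neither stored pointer.
    assert!(provenance_in_range(&map, 8, 8).is_empty());
}
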
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index cecb55578..e4039cc7c 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -401,14 +401,18 @@ impl fmt::Display for UndefinedBehaviorInfo {
pub enum UnsupportedOpInfo {
/// Free-form case. Only for errors that are never caught!
Unsupported(String),
- /// Encountered a pointer where we needed raw bytes.
- ReadPointerAsBytes,
/// Overwriting parts of a pointer; the resulting state cannot be represented in our
/// `Allocation` data structure. See <https://github.com/rust-lang/miri/issues/2181>.
PartialPointerOverwrite(Pointer<AllocId>),
+ /// Attempting to `copy` parts of a pointer to somewhere else; the resulting state cannot be
+ /// represented in our `Allocation` data structure. See
+ /// <https://github.com/rust-lang/miri/issues/2181>.
+ PartialPointerCopy(Pointer<AllocId>),
//
// The variants below are only reachable from CTFE/const prop, miri will never emit them.
//
+ /// Encountered a pointer where we needed raw bytes.
+ ReadPointerAsBytes,
/// Accessing thread local statics
ThreadLocalStatic(DefId),
/// Accessing an unsupported extern static.
@@ -420,10 +424,13 @@ impl fmt::Display for UnsupportedOpInfo {
use UnsupportedOpInfo::*;
match self {
Unsupported(ref msg) => write!(f, "{msg}"),
- ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"),
PartialPointerOverwrite(ptr) => {
write!(f, "unable to overwrite parts of a pointer in memory at {ptr:?}")
}
+ PartialPointerCopy(ptr) => {
+ write!(f, "unable to copy parts of a pointer from memory at {ptr:?}")
+ }
+ ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"),
ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({did:?})"),
ReadExternStatic(did) => write!(f, "cannot read from extern static ({did:?})"),
}
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 967f8ece1..5e3dfcbcc 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -124,11 +124,11 @@ pub use self::error::{
UninitBytesAccess, UnsupportedOpInfo,
};
-pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
+pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
pub use self::allocation::{
alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,
- Relocations,
+ ProvenanceMap,
};
pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
@@ -137,7 +137,7 @@ pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
/// - A constant
/// - A static
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, Lift)]
+#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)]
pub struct GlobalId<'tcx> {
/// For a constant or static, the `Instance` of the item itself.
/// For a promoted global, the `Instance` of the function they belong to.
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index 384954cbb..95e52e391 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -107,8 +107,12 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
/// pointer), but `derive` adds some unnecessary bounds.
pub trait Provenance: Copy + fmt::Debug {
/// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
- /// If `true, ptr-to-int casts work by simply discarding the provenance.
- /// If `false`, ptr-to-int casts are not supported. The offset *must* be relative in that case.
+ /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
+ /// different from what the Abstract Machine prescribes, so the interpreter must prevent any
+ /// operation that would inspect the underlying bytes of a pointer, such as ptr-to-int
+ /// transmutation. A `ReadPointerAsBytes` error will be raised in such situations.
+ /// - If `true`, the interpreter will permit operations to inspect the underlying bytes of a
+ /// pointer, and implement ptr-to-int transmutation by stripping provenance.
const OFFSET_IS_ADDR: bool;
/// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
@@ -125,6 +129,9 @@ pub trait Provenance: Copy + fmt::Debug {
/// Otherwise this function is best-effort (but must agree with `Machine::ptr_get_alloc`).
/// (Identifying the offset in that allocation, however, is harder -- use `Memory::ptr_get_alloc` for that.)
fn get_alloc_id(self) -> Option<AllocId>;
+
+ /// Defines the 'join' of provenance: what happens when doing a pointer load and different bytes have different provenance.
+ fn join(left: Option<Self>, right: Option<Self>) -> Option<Self>;
}
impl Provenance for AllocId {
@@ -152,6 +159,10 @@ impl Provenance for AllocId {
fn get_alloc_id(self) -> Option<AllocId> {
Some(self)
}
+
+ fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> {
+ panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false")
+ }
}
/// Represents a pointer in the Miri engine.
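
The new `join` method only matters in the `OFFSET_IS_ADDR = true` regime described above. The following self-contained sketch contrasts the two regimes with a local stand-in trait; the real trait in `rustc_middle::mir::interpret` has more items, and the join rule shown for the address-based case is an illustrative assumption, not necessarily Miri's exact policy.

// Local mirror of the trait shape discussed above, just to contrast the regimes.
trait ProvenanceLike: Copy + std::fmt::Debug {
    const OFFSET_IS_ADDR: bool;
    fn join(left: Option<Self>, right: Option<Self>) -> Option<Self>;
}

// CTFE-style provenance: offsets are allocation-relative, so byte-level joins
// can never be asked for (mirrors the `impl Provenance for AllocId` above).
#[derive(Copy, Clone, Debug, PartialEq)]
struct RelativeTag(u64);
impl ProvenanceLike for RelativeTag {
    const OFFSET_IS_ADDR: bool = false;
    fn join(_: Option<Self>, _: Option<Self>) -> Option<Self> {
        panic!("join is only reachable when OFFSET_IS_ADDR is true")
    }
}

// Miri-style provenance: offsets are absolute addresses, so pointer bytes may
// be inspected and mixed provenance must collapse somehow (the rule below is
// an assumption made for this sketch).
#[derive(Copy, Clone, Debug, PartialEq)]
struct AbsoluteTag(u64);
impl ProvenanceLike for AbsoluteTag {
    const OFFSET_IS_ADDR: bool = true;
    fn join(left: Option<Self>, right: Option<Self>) -> Option<Self> {
        if left == right { left } else { None }
    }
}

fn main() {
    let _never_joined = RelativeTag(0); // its `join` would panic if ever called
    assert_eq!(AbsoluteTag::join(Some(AbsoluteTag(1)), Some(AbsoluteTag(1))), Some(AbsoluteTag(1)));
    assert_eq!(AbsoluteTag::join(Some(AbsoluteTag(1)), None), None);
}
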
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 786927e2d..4207988d7 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -63,7 +63,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn const_eval_resolve_for_typeck(
self,
param_env: ty::ParamEnv<'tcx>,
- ct: ty::Unevaluated<'tcx>,
+ ct: ty::Unevaluated<'tcx, ()>,
span: Option<Span>,
) -> EvalToValTreeResult<'tcx> {
// Cannot resolve `Unevaluated` constants that contain inference
@@ -78,7 +78,7 @@ impl<'tcx> TyCtxt<'tcx> {
match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
Ok(Some(instance)) => {
- let cid = GlobalId { instance, promoted: ct.promoted };
+ let cid = GlobalId { instance, promoted: None };
self.const_eval_global_id_for_typeck(param_env, cid, span)
}
Ok(None) => Err(ErrorHandled::TooGeneric),
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 834c114ee..ac5fddb7a 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -8,7 +8,7 @@ use rustc_apfloat::{
use rustc_macros::HashStable;
use rustc_target::abi::{HasDataLayout, Size};
-use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
+use crate::ty::{ParamEnv, ScalarInt, Ty, TyCtxt};
use super::{
AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
@@ -27,7 +27,7 @@ pub struct ConstAlloc<'tcx> {
/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
/// array length computations, enum discriminants and the pattern matching logic.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
-#[derive(HashStable)]
+#[derive(HashStable, Lift)]
pub enum ConstValue<'tcx> {
/// Used only for types with `layout::abi::Scalar` ABI.
///
@@ -53,22 +53,6 @@ pub enum ConstValue<'tcx> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstValue<'_>, 32);
-impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
- type Lifted = ConstValue<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> {
- Some(match self {
- ConstValue::Scalar(s) => ConstValue::Scalar(s),
- ConstValue::ZeroSized => ConstValue::ZeroSized,
- ConstValue::Slice { data, start, end } => {
- ConstValue::Slice { data: tcx.lift(data)?, start, end }
- }
- ConstValue::ByRef { alloc, offset } => {
- ConstValue::ByRef { alloc: tcx.lift(alloc)?, offset }
- }
- })
- }
-}
-
impl<'tcx> ConstValue<'tcx> {
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
@@ -79,7 +63,7 @@ impl<'tcx> ConstValue<'tcx> {
}
pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
- Some(self.try_to_scalar()?.assert_int())
+ self.try_to_scalar()?.try_to_int().ok()
}
pub fn try_to_bits(&self, size: Size) -> Option<u128> {
@@ -130,9 +114,7 @@ pub enum Scalar<Prov = AllocId> {
/// The raw bytes of a simple value.
Int(ScalarInt),
- /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
- /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
- /// relocation and its associated offset together as a `Pointer` here.
+ /// A pointer.
///
/// We also store the size of the pointer, such that a `Scalar` always knows how big it is.
/// The size is always the pointer size of the current target, but this is not information
@@ -368,6 +350,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
}
#[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_int(self) -> ScalarInt {
self.try_to_int().unwrap()
}
@@ -389,6 +372,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
}
#[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_bits(self, target_size: Size) -> u128 {
self.to_bits(target_size).unwrap()
}
@@ -502,145 +486,12 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
}
}
-#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
-pub enum ScalarMaybeUninit<Prov = AllocId> {
- Scalar(Scalar<Prov>),
- Uninit,
-}
-
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(ScalarMaybeUninit, 24);
-
-impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> {
- #[inline(always)]
- fn from(s: Scalar<Prov>) -> Self {
- ScalarMaybeUninit::Scalar(s)
- }
-}
-
-// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
-// all the Miri types.
-impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
- ScalarMaybeUninit::Scalar(s) => write!(f, "{:?}", s),
- }
- }
-}
-
-impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
- ScalarMaybeUninit::Scalar(s) => write!(f, "{:x}", s),
- }
- }
-}
-
-impl<Prov> ScalarMaybeUninit<Prov> {
- #[inline]
- pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
- ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
- }
-
- #[inline]
- pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
- ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
- }
-
- #[inline]
- pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> {
- match self {
- ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
- ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
- }
- }
-}
-
-impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> {
- #[inline(always)]
- pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> {
- self.check_init()?.to_pointer(cx)
- }
-
- #[inline(always)]
- pub fn to_bool(self) -> InterpResult<'tcx, bool> {
- self.check_init()?.to_bool()
- }
-
- #[inline(always)]
- pub fn to_char(self) -> InterpResult<'tcx, char> {
- self.check_init()?.to_char()
- }
-
- #[inline(always)]
- pub fn to_f32(self) -> InterpResult<'tcx, Single> {
- self.check_init()?.to_f32()
- }
-
- #[inline(always)]
- pub fn to_f64(self) -> InterpResult<'tcx, Double> {
- self.check_init()?.to_f64()
- }
-
- #[inline(always)]
- pub fn to_u8(self) -> InterpResult<'tcx, u8> {
- self.check_init()?.to_u8()
- }
-
- #[inline(always)]
- pub fn to_u16(self) -> InterpResult<'tcx, u16> {
- self.check_init()?.to_u16()
- }
-
- #[inline(always)]
- pub fn to_u32(self) -> InterpResult<'tcx, u32> {
- self.check_init()?.to_u32()
- }
-
- #[inline(always)]
- pub fn to_u64(self) -> InterpResult<'tcx, u64> {
- self.check_init()?.to_u64()
- }
-
- #[inline(always)]
- pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
- self.check_init()?.to_machine_usize(cx)
- }
-
- #[inline(always)]
- pub fn to_i8(self) -> InterpResult<'tcx, i8> {
- self.check_init()?.to_i8()
- }
-
- #[inline(always)]
- pub fn to_i16(self) -> InterpResult<'tcx, i16> {
- self.check_init()?.to_i16()
- }
-
- #[inline(always)]
- pub fn to_i32(self) -> InterpResult<'tcx, i32> {
- self.check_init()?.to_i32()
- }
-
- #[inline(always)]
- pub fn to_i64(self) -> InterpResult<'tcx, i64> {
- self.check_init()?.to_i64()
- }
-
- #[inline(always)]
- pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
- self.check_init()?.to_machine_isize(cx)
- }
-}
-
/// Gets the bytes of a constant slice value.
pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
if let ConstValue::Slice { data, start, end } = val {
let len = end - start;
data.inner()
- .get_bytes(
+ .get_bytes_strip_provenance(
cx,
AllocRange { start: Size::from_bytes(start), size: Size::from_bytes(len) },
)
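
The net effect of deleting `ScalarMaybeUninit` is that uninitialized data is no longer an in-band value but an error the caller must handle, matching the new `read_scalar` in `allocation.rs` above. A tiny standalone sketch of that shape, using toy stand-in types rather than the interpreter's:

#[derive(Debug, PartialEq)]
enum ToyScalar {
    Int(u128),
}

#[derive(Debug, PartialEq)]
enum ToyAllocError {
    InvalidUninitBytes,
}

// Old shape (conceptually): `Ok(ScalarMaybeUninit::Uninit)` for uninitialized
// bytes. New shape: the read itself fails and the caller handles the error.
fn toy_read_scalar(bytes: Option<u128>) -> Result<ToyScalar, ToyAllocError> {
    match bytes {
        Some(bits) => Ok(ToyScalar::Int(bits)),
        None => Err(ToyAllocError::InvalidUninitBytes),
    }
}

fn main() {
    assert_eq!(toy_read_scalar(Some(42)), Ok(ToyScalar::Int(42)));
    assert_eq!(toy_read_scalar(None), Err(ToyAllocError::InvalidUninitBytes));
}
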
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 7ab71f900..3d7a6230e 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -3,7 +3,7 @@
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
use crate::mir::interpret::{
- AllocRange, ConstAllocation, ConstValue, GlobalAlloc, LitToConstInput, Scalar,
+ AllocRange, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, LitToConstInput, Scalar,
};
use crate::mir::visit::MirVisitable;
use crate::ty::codec::{TyDecoder, TyEncoder};
@@ -18,7 +18,7 @@ use rustc_data_structures::captures::Captures;
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::{CtorKind, Namespace};
use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
-use rustc_hir::{self, GeneratorKind};
+use rustc_hir::{self, GeneratorKind, ImplicitSelfKind};
use rustc_hir::{self as hir, HirId};
use rustc_session::Session;
use rustc_target::abi::{Size, VariantIdx};
@@ -128,8 +128,20 @@ pub trait MirPass<'tcx> {
impl MirPhase {
/// Gets the index of the current MirPhase within the set of all `MirPhase`s.
+ ///
+ /// FIXME(JakobDegen): Return a `(usize, usize)` instead.
pub fn phase_index(&self) -> usize {
- *self as usize
+ const BUILT_PHASE_COUNT: usize = 1;
+ const ANALYSIS_PHASE_COUNT: usize = 2;
+ match self {
+ MirPhase::Built => 1,
+ MirPhase::Analysis(analysis_phase) => {
+ 1 + BUILT_PHASE_COUNT + (*analysis_phase as usize)
+ }
+ MirPhase::Runtime(runtime_phase) => {
+ 1 + BUILT_PHASE_COUNT + ANALYSIS_PHASE_COUNT + (*runtime_phase as usize)
+ }
+ }
}
}
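
A quick standalone check of the arithmetic above, with the per-group phases reduced to their 0-based discriminants (the `as usize` casts in the hunk):

const BUILT_PHASE_COUNT: usize = 1;
const ANALYSIS_PHASE_COUNT: usize = 2;

fn built_index() -> usize {
    1
}
fn analysis_index(analysis_phase: usize) -> usize {
    1 + BUILT_PHASE_COUNT + analysis_phase
}
fn runtime_index(runtime_phase: usize) -> usize {
    1 + BUILT_PHASE_COUNT + ANALYSIS_PHASE_COUNT + runtime_phase
}

fn main() {
    // Built is phase 1, the two analysis phases are 2 and 3, and the runtime
    // phases continue at 4, so the whole sequence stays contiguous and 1-based.
    assert_eq!(built_index(), 1);
    assert_eq!(analysis_index(0), 2);
    assert_eq!(analysis_index(1), 3);
    assert_eq!(runtime_index(0), 4);
}
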
@@ -332,11 +344,6 @@ impl<'tcx> Body<'tcx> {
}
#[inline]
- pub fn basic_blocks(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
- &self.basic_blocks
- }
-
- #[inline]
pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
self.basic_blocks.as_mut()
}
@@ -490,7 +497,7 @@ impl<'tcx> Index<BasicBlock> for Body<'tcx> {
#[inline]
fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
- &self.basic_blocks()[index]
+ &self.basic_blocks[index]
}
}
@@ -646,22 +653,6 @@ pub enum BindingForm<'tcx> {
RefForGuard,
}
-/// Represents what type of implicit self a function has, if any.
-#[derive(Clone, Copy, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
-pub enum ImplicitSelfKind {
- /// Represents a `fn x(self);`.
- Imm,
- /// Represents a `fn x(mut self);`.
- Mut,
- /// Represents a `fn x(&self);`.
- ImmRef,
- /// Represents a `fn x(&mut self);`.
- MutRef,
- /// Represents when a function does not have a self argument or
- /// when a function has a `self: X` argument.
- None,
-}
-
TrivialTypeTraversalAndLiftImpls! { BindingForm<'tcx>, }
mod binding_form_impl {
@@ -832,10 +823,6 @@ pub struct LocalDecl<'tcx> {
pub source_info: SourceInfo,
}
-// `LocalDecl` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(LocalDecl<'_>, 56);
-
/// Extra information about some locals that's used for diagnostics and for
/// classifying variables into local variables, statics, etc, which is needed e.g.
/// for unsafety checking.
@@ -1310,10 +1297,6 @@ pub struct Statement<'tcx> {
pub kind: StatementKind<'tcx>,
}
-// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(Statement<'_>, 32);
-
impl Statement<'_> {
/// Changes a statement to a nop. This is both faster than deleting instructions and avoids
/// invalidating statement indices in `Location`s.
@@ -1363,13 +1346,7 @@ impl Debug for Statement<'_> {
write!(fmt, "Coverage::{:?} for {:?}", kind, rgn)
}
Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind),
- CopyNonOverlapping(box crate::mir::CopyNonOverlapping {
- ref src,
- ref dst,
- ref count,
- }) => {
- write!(fmt, "copy_nonoverlapping(src={:?}, dst={:?}, count={:?})", src, dst, count)
- }
+ Intrinsic(box ref intrinsic) => write!(fmt, "{intrinsic}"),
Nop => write!(fmt, "nop"),
}
}
@@ -1450,7 +1427,7 @@ pub struct PlaceRef<'tcx> {
// Once we stop implementing `Ord` for `DefId`,
// this impl will be unnecessary. Until then, we'll
// leave this impl in place to prevent re-adding a
-// dependnecy on the `Ord` impl for `DefId`
+// dependency on the `Ord` impl for `DefId`
impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
impl<'tcx> Place<'tcx> {
@@ -1471,7 +1448,9 @@ impl<'tcx> Place<'tcx> {
/// It's guaranteed to be in the first place
pub fn has_deref(&self) -> bool {
// To make sure this is not accidentally used in the wrong MIR phase
- debug_assert!(!self.projection[1..].contains(&PlaceElem::Deref));
+ debug_assert!(
+ self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
+ );
self.projection.first() == Some(&PlaceElem::Deref)
}
@@ -1531,6 +1510,7 @@ impl<'tcx> Place<'tcx> {
}
impl From<Local> for Place<'_> {
+ #[inline]
fn from(local: Local) -> Self {
Place { local, projection: List::empty() }
}
@@ -1838,6 +1818,7 @@ impl<'tcx> Rvalue<'tcx> {
// While the model is undecided, we should be conservative. See
// <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
+ Rvalue::Cast(CastKind::DynStar, _, _) => false,
Rvalue::Use(_)
| Rvalue::CopyForDeref(_)
@@ -2047,6 +2028,7 @@ impl<'tcx> Debug for Rvalue<'tcx> {
/// particular, one must be wary of `NaN`!
#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
pub struct Constant<'tcx> {
pub span: Span,
@@ -2065,6 +2047,10 @@ pub struct Constant<'tcx> {
pub enum ConstantKind<'tcx> {
/// This constant came from the type system
Ty(ty::Const<'tcx>),
+
+ /// An unevaluated mir constant which is not part of the type system.
+ Unevaluated(ty::Unevaluated<'tcx, Option<Promoted>>, Ty<'tcx>),
+
/// This constant cannot go back into the type system, as it represents
/// something the type system cannot handle (e.g. pointers).
Val(interpret::ConstValue<'tcx>, Ty<'tcx>),
@@ -2090,20 +2076,11 @@ impl<'tcx> Constant<'tcx> {
}
impl<'tcx> ConstantKind<'tcx> {
- /// Returns `None` if the constant is not trivially safe for use in the type system.
- #[inline]
- pub fn const_for_ty(&self) -> Option<ty::Const<'tcx>> {
- match self {
- ConstantKind::Ty(c) => Some(*c),
- ConstantKind::Val(..) => None,
- }
- }
-
#[inline(always)]
pub fn ty(&self) -> Ty<'tcx> {
match self {
ConstantKind::Ty(c) => c.ty(),
- ConstantKind::Val(_, ty) => *ty,
+ ConstantKind::Val(_, ty) | ConstantKind::Unevaluated(_, ty) => *ty,
}
}
@@ -2115,6 +2092,7 @@ impl<'tcx> ConstantKind<'tcx> {
_ => None,
},
ConstantKind::Val(val, _) => Some(val),
+ ConstantKind::Unevaluated(..) => None,
}
}
@@ -2129,6 +2107,7 @@ impl<'tcx> ConstantKind<'tcx> {
_ => None,
},
ConstantKind::Val(val, _) => val.try_to_scalar(),
+ ConstantKind::Unevaluated(..) => None,
}
}
@@ -2161,6 +2140,14 @@ impl<'tcx> ConstantKind<'tcx> {
}
}
Self::Val(_, _) => self,
+ Self::Unevaluated(uneval, ty) => {
+ // FIXME: We might want to have a `try_eval`-like function on `Unevaluated`
+ match tcx.const_eval_resolve(param_env, uneval, None) {
+ Ok(val) => Self::Val(val, ty),
+ Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => self,
+ Err(_) => Self::Ty(tcx.const_error(ty)),
+ }
+ }
}
}
@@ -2186,6 +2173,18 @@ impl<'tcx> ConstantKind<'tcx> {
tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
val.try_to_bits(size)
}
+ Self::Unevaluated(uneval, ty) => {
+ match tcx.const_eval_resolve(param_env, *uneval, None) {
+ Ok(val) => {
+ let size = tcx
+ .layout_of(param_env.with_reveal_all_normalized(tcx).and(*ty))
+ .ok()?
+ .size;
+ val.try_to_bits(size)
+ }
+ Err(_) => None,
+ }
+ }
}
}
@@ -2194,6 +2193,12 @@ impl<'tcx> ConstantKind<'tcx> {
match self {
Self::Ty(ct) => ct.try_eval_bool(tcx, param_env),
Self::Val(val, _) => val.try_to_bool(),
+ Self::Unevaluated(uneval, _) => {
+ match tcx.const_eval_resolve(param_env, *uneval, None) {
+ Ok(val) => val.try_to_bool(),
+ Err(_) => None,
+ }
+ }
}
}
@@ -2202,6 +2207,12 @@ impl<'tcx> ConstantKind<'tcx> {
match self {
Self::Ty(ct) => ct.try_eval_usize(tcx, param_env),
Self::Val(val, _) => val.try_to_machine_usize(tcx),
+ Self::Unevaluated(uneval, _) => {
+ match tcx.const_eval_resolve(param_env, *uneval, None) {
+ Ok(val) => val.try_to_machine_usize(tcx),
+ Err(_) => None,
+ }
+ }
}
}
@@ -2259,7 +2270,7 @@ impl<'tcx> ConstantKind<'tcx> {
Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id), param_env)
}
- #[instrument(skip(tcx), level = "debug")]
+ #[instrument(skip(tcx), level = "debug", ret)]
pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
let body_id = match tcx.hir().get(hir_id) {
@@ -2297,21 +2308,19 @@ impl<'tcx> ConstantKind<'tcx> {
let substs =
ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty })
.substs;
- let uneval_const = tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
- def: ty::WithOptConstParam::unknown(def_id).to_global(),
- substs,
- promoted: None,
- }),
- ty,
- });
- debug!(?uneval_const);
- debug_assert!(!uneval_const.has_free_regions());
- Self::Ty(uneval_const)
+ let uneval = ty::Unevaluated {
+ def: ty::WithOptConstParam::unknown(def_id).to_global(),
+ substs,
+ promoted: None,
+ };
+
+ debug_assert!(!uneval.has_free_regions());
+
+ Self::Unevaluated(uneval, ty)
}
- #[instrument(skip(tcx), level = "debug")]
+ #[instrument(skip(tcx), level = "debug", ret)]
fn from_opt_const_arg_anon_const(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
@@ -2394,24 +2403,21 @@ impl<'tcx> ConstantKind<'tcx> {
match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
Ok(val) => {
- debug!("evaluated const value: {:?}", val);
+ debug!("evaluated const value");
Self::Val(val, ty)
}
Err(_) => {
debug!("error encountered during evaluation");
// Error was handled in `const_eval_resolve`. Here we just create a
// new unevaluated const and error hard later in codegen
- let ty_const = tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ Self::Unevaluated(
+ ty::Unevaluated {
def: def.to_global(),
substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
promoted: None,
- }),
+ },
ty,
- });
- debug!(?ty_const);
-
- Self::Ty(ty_const)
+ )
}
}
}
@@ -2422,6 +2428,7 @@ impl<'tcx> ConstantKind<'tcx> {
let const_val = tcx.valtree_to_const_val((c.ty(), valtree));
Self::Val(const_val, c.ty())
}
+ ty::ConstKind::Unevaluated(uv) => Self::Unevaluated(uv.expand(), c.ty()),
_ => Self::Ty(c),
}
}
@@ -2576,8 +2583,6 @@ impl UserTypeProjection {
}
}
-TrivialTypeTraversalAndLiftImpls! { ProjectionKind, }
-
impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
Ok(UserTypeProjection {
@@ -2622,6 +2627,11 @@ impl<'tcx> Display for ConstantKind<'tcx> {
match *self {
ConstantKind::Ty(c) => pretty_print_const(c, fmt, true),
ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt, true),
+ // FIXME(valtrees): Correctly print mir constants.
+ ConstantKind::Unevaluated(..) => {
+ fmt.write_str("_")?;
+ Ok(())
+ }
}
}
}
@@ -2643,15 +2653,7 @@ fn pretty_print_const<'tcx>(
}
fn pretty_print_byte_str(fmt: &mut Formatter<'_>, byte_str: &[u8]) -> fmt::Result {
- fmt.write_str("b\"")?;
- for &c in byte_str {
- for e in std::ascii::escape_default(c) {
- fmt.write_char(e as char)?;
- }
- }
- fmt.write_str("\"")?;
-
- Ok(())
+ write!(fmt, "b\"{}\"", byte_str.escape_ascii())
}
fn comma_sep<'tcx>(fmt: &mut Formatter<'_>, elems: Vec<ConstantKind<'tcx>>) -> fmt::Result {
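
The `pretty_print_byte_str` rewrite above leans on `<[u8]>::escape_ascii`, whose per-byte escaping matches the `std::ascii::escape_default` loop it replaces. A standalone check of that equivalence, using plain helper functions rather than the compiler's:

use std::fmt::Write;

// Old shape: push each byte's `escape_default` expansion by hand.
fn escape_manually(byte_str: &[u8]) -> String {
    let mut out = String::from("b\"");
    for &c in byte_str {
        for e in std::ascii::escape_default(c) {
            out.push(e as char);
        }
    }
    out.push('"');
    out
}

// New shape: `escape_ascii` yields a `Display`able escaper over the whole slice.
fn escape_with_escape_ascii(byte_str: &[u8]) -> String {
    let mut out = String::new();
    write!(out, "b\"{}\"", byte_str.escape_ascii()).unwrap();
    out
}

fn main() {
    let bytes = b"hi\n\0\xff";
    assert_eq!(escape_manually(bytes), escape_with_escape_ascii(bytes));
    println!("{}", escape_with_escape_ascii(bytes)); // prints: b"hi\n\x00\xff"
}
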
@@ -2691,8 +2693,8 @@ fn pretty_print_const_value<'tcx>(
match inner.kind() {
ty::Slice(t) => {
if *t == u8_type {
- // The `inspect` here is okay since we checked the bounds, and there are
- // no relocations (we have an active slice reference here). We don't use
+ // The `inspect` here is okay since we checked the bounds, and `u8` carries
+ // no provenance (we have an active slice reference here). We don't use
// this result to affect interpreter execution.
let byte_str = data
.inner()
@@ -2702,8 +2704,8 @@ fn pretty_print_const_value<'tcx>(
}
}
ty::Str => {
- // The `inspect` here is okay since we checked the bounds, and there are no
- // relocations (we have an active `str` reference here). We don't use this
+ // The `inspect` here is okay since we checked the bounds, and `str` carries
+ // no provenance (we have an active `str` reference here). We don't use this
// result to affect interpreter execution.
let slice = data
.inner()
@@ -2718,7 +2720,7 @@ fn pretty_print_const_value<'tcx>(
let n = n.kind().try_to_bits(tcx.data_layout.pointer_size).unwrap();
// cast is ok because we already checked for pointer size (32 or 64 bit) above
let range = AllocRange { start: offset, size: Size::from_bytes(n) };
- let byte_str = alloc.inner().get_bytes(&tcx, range).unwrap();
+ let byte_str = alloc.inner().get_bytes_strip_provenance(&tcx, range).unwrap();
fmt.write_str("*")?;
pretty_print_byte_str(fmt, byte_str)?;
return Ok(());
@@ -2898,3 +2900,17 @@ impl Location {
}
}
}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // These are in alphabetical order, which is easy to maintain.
+ static_assert_size!(BasicBlockData<'_>, 144);
+ static_assert_size!(LocalDecl<'_>, 56);
+ static_assert_size!(Statement<'_>, 32);
+ static_assert_size!(StatementKind<'_>, 16);
+ static_assert_size!(Terminator<'_>, 112);
+ static_assert_size!(TerminatorKind<'_>, 96);
+}
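
The new `Unevaluated` variant threads the same fallback through every `try_eval*` helper above: a successful evaluation becomes `Val`, a "too generic" failure keeps the constant unevaluated for a later attempt, and a reported error degrades to an error constant. A toy mirror of that control flow, with stand-in types rather than the compiler's:

#[derive(Debug, PartialEq)]
enum ToyEvalError {
    TooGeneric,
    Reported,
}

#[derive(Debug, PartialEq, Clone, Copy)]
enum ToyConstantKind {
    Unevaluated(&'static str),
    Val(u128),
    Error,
}

// Mirrors the `Self::Unevaluated(..)` arm added to `eval` above (the real code
// also keeps `Linted` failures unevaluated, which this toy lumps in with
// `TooGeneric`).
fn eval(c: ToyConstantKind, result: Result<u128, ToyEvalError>) -> ToyConstantKind {
    match c {
        ToyConstantKind::Unevaluated(_) => match result {
            Ok(val) => ToyConstantKind::Val(val),
            Err(ToyEvalError::TooGeneric) => c, // stay unevaluated, try again later
            Err(ToyEvalError::Reported) => ToyConstantKind::Error,
        },
        already_evaluated => already_evaluated,
    }
}

fn main() {
    let uneval = ToyConstantKind::Unevaluated("foo::BAR");
    assert_eq!(eval(uneval, Ok(7)), ToyConstantKind::Val(7));
    assert_eq!(eval(uneval, Err(ToyEvalError::TooGeneric)), uneval);
    assert_eq!(eval(uneval, Err(ToyEvalError::Reported)), ToyConstantKind::Error);
}
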
diff --git a/compiler/rustc_middle/src/mir/patch.rs b/compiler/rustc_middle/src/mir/patch.rs
index 15496842d..24fe3b472 100644
--- a/compiler/rustc_middle/src/mir/patch.rs
+++ b/compiler/rustc_middle/src/mir/patch.rs
@@ -19,7 +19,7 @@ pub struct MirPatch<'tcx> {
impl<'tcx> MirPatch<'tcx> {
pub fn new(body: &Body<'tcx>) -> Self {
let mut result = MirPatch {
- patch_map: IndexVec::from_elem(None, body.basic_blocks()),
+ patch_map: IndexVec::from_elem(None, &body.basic_blocks),
new_blocks: vec![],
new_statements: vec![],
new_locals: vec![],
@@ -29,7 +29,7 @@ impl<'tcx> MirPatch<'tcx> {
};
// Check if we already have a resume block
- for (bb, block) in body.basic_blocks().iter_enumerated() {
+ for (bb, block) in body.basic_blocks.iter_enumerated() {
if let TerminatorKind::Resume = block.terminator().kind && block.statements.is_empty() {
result.resume_block = Some(bb);
break;
@@ -61,14 +61,14 @@ impl<'tcx> MirPatch<'tcx> {
}
pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
- let offset = match bb.index().checked_sub(body.basic_blocks().len()) {
+ let offset = match bb.index().checked_sub(body.basic_blocks.len()) {
Some(index) => self.new_blocks[index].statements.len(),
None => body[bb].statements.len(),
};
Location { block: bb, statement_index: offset }
}
- pub fn new_local_with_info(
+ pub fn new_internal_with_info(
&mut self,
ty: Ty<'tcx>,
span: Span,
@@ -76,14 +76,17 @@ impl<'tcx> MirPatch<'tcx> {
) -> Local {
let index = self.next_local;
self.next_local += 1;
- let mut new_decl = LocalDecl::new(ty, span);
+ let mut new_decl = LocalDecl::new(ty, span).internal();
new_decl.local_info = local_info;
self.new_locals.push(new_decl);
Local::new(index as usize)
}
pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
- self.new_local_with_info(ty, span, None)
+ let index = self.next_local;
+ self.next_local += 1;
+ self.new_locals.push(LocalDecl::new(ty, span));
+ Local::new(index as usize)
}
pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
@@ -126,7 +129,7 @@ impl<'tcx> MirPatch<'tcx> {
debug!(
"MirPatch: {} new blocks, starting from index {}",
self.new_blocks.len(),
- body.basic_blocks().len()
+ body.basic_blocks.len()
);
let bbs = if self.patch_map.is_empty() && self.new_blocks.is_empty() {
body.basic_blocks.as_mut_preserves_cfg()
@@ -147,7 +150,6 @@ impl<'tcx> MirPatch<'tcx> {
let mut delta = 0;
let mut last_bb = START_BLOCK;
- let mut stmts_and_targets: Vec<(Statement<'_>, BasicBlock)> = Vec::new();
for (mut loc, stmt) in new_statements {
if loc.block != last_bb {
delta = 0;
@@ -156,27 +158,11 @@ impl<'tcx> MirPatch<'tcx> {
debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta);
loc.statement_index += delta;
let source_info = Self::source_info_for_index(&body[loc.block], loc);
-
- // For mir-opt `Derefer` to work in all cases we need to
- // get terminator's targets and apply the statement to all of them.
- if loc.statement_index > body[loc.block].statements.len() {
- let term = body[loc.block].terminator();
- for i in term.successors() {
- stmts_and_targets.push((Statement { source_info, kind: stmt.clone() }, i));
- }
- delta += 1;
- continue;
- }
-
body[loc.block]
.statements
.insert(loc.statement_index, Statement { source_info, kind: stmt });
delta += 1;
}
-
- for (stmt, target) in stmts_and_targets.into_iter().rev() {
- body[target].statements.insert(0, stmt);
- }
}
pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
@@ -187,7 +173,7 @@ impl<'tcx> MirPatch<'tcx> {
}
pub fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo {
- let data = match loc.block.index().checked_sub(body.basic_blocks().len()) {
+ let data = match loc.block.index().checked_sub(body.basic_blocks.len()) {
Some(new) => &self.new_blocks[new],
None => &body[loc.block],
};
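
The patch.rs changes above all revolve around MirPatch buffering new locals and statements and only splicing them into the body in apply(), with a running delta keeping later insertion indices valid. A stripped-down, assumed model of that defer-then-apply bookkeeping (not the real MirPatch, which also tracks new blocks, locals and a resume block):

// Edits are recorded against the unpatched body and only spliced in by `apply`.
struct Body {
    statements: Vec<String>,
}

struct Patch {
    // (insertion index, statement) pairs, indices relative to the unpatched body.
    new_statements: Vec<(usize, String)>,
}

impl Patch {
    fn new() -> Self {
        Patch { new_statements: Vec::new() }
    }

    fn add_statement(&mut self, at: usize, stmt: &str) {
        self.new_statements.push((at, stmt.to_string()));
    }

    fn apply(mut self, body: &mut Body) {
        // Insert in index order; `delta` counts statements already spliced in
        // so that later indices stay valid (cf. the delta bookkeeping above,
        // which additionally resets per basic block).
        self.new_statements.sort_by_key(|&(at, _)| at);
        let mut delta = 0;
        for (at, stmt) in self.new_statements {
            body.statements.insert(at + delta, stmt);
            delta += 1;
        }
    }
}

fn main() {
    let mut body = Body { statements: vec!["s0".to_string(), "s1".to_string()] };
    let mut patch = Patch::new();
    patch.add_statement(1, "new_a");
    patch.add_statement(1, "new_b");
    patch.apply(&mut body);
    assert_eq!(body.statements, vec!["s0", "new_a", "new_b", "s1"]);
}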
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 0ce41337b..0b42137d4 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -318,10 +318,10 @@ where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
write_mir_intro(tcx, body, w)?;
- for block in body.basic_blocks().indices() {
+ for block in body.basic_blocks.indices() {
extra_data(PassWhere::BeforeBlock(block), w)?;
write_basic_block(tcx, block, body, extra_data, w)?;
- if block.index() + 1 != body.basic_blocks().len() {
+ if block.index() + 1 != body.basic_blocks.len() {
writeln!(w)?;
}
}
@@ -464,12 +464,14 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
let val = match literal {
ConstantKind::Ty(ct) => match ct.kind() {
ty::ConstKind::Param(p) => format!("Param({})", p),
- ty::ConstKind::Unevaluated(uv) => format!(
- "Unevaluated({}, {:?}, {:?})",
- self.tcx.def_path_str(uv.def.did),
- uv.substs,
- uv.promoted,
- ),
+ ty::ConstKind::Unevaluated(uv) => {
+ format!(
+ "Unevaluated({}, {:?}, {:?})",
+ self.tcx.def_path_str(uv.def.did),
+ uv.substs,
+ uv.promoted,
+ )
+ }
ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)),
ty::ConstKind::Error(_) => "Error".to_string(),
// These variants shouldn't exist in the MIR.
@@ -477,6 +479,14 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
| ty::ConstKind::Infer(_)
| ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", literal),
},
+ ConstantKind::Unevaluated(uv, _) => {
+ format!(
+ "Unevaluated({}, {:?}, {:?})",
+ self.tcx.def_path_str(uv.def.did),
+ uv.substs,
+ uv.promoted,
+ )
+ }
// To keep the diffs small, we render this like we render `ty::Const::Value`.
//
// This changes once `ty::Const::Value` is represented using valtrees.
@@ -676,7 +686,7 @@ pub fn write_allocations<'tcx>(
fn alloc_ids_from_alloc(
alloc: ConstAllocation<'_>,
) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
- alloc.inner().relocations().values().map(|id| *id)
+ alloc.inner().provenance().values().map(|id| *id)
}
fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
@@ -696,9 +706,9 @@ pub fn write_allocations<'tcx>(
struct CollectAllocIds(BTreeSet<AllocId>);
impl<'tcx> Visitor<'tcx> for CollectAllocIds {
- fn visit_constant(&mut self, c: &Constant<'tcx>, loc: Location) {
+ fn visit_constant(&mut self, c: &Constant<'tcx>, _: Location) {
match c.literal {
- ConstantKind::Ty(c) => self.visit_const(c, loc),
+ ConstantKind::Ty(_) | ConstantKind::Unevaluated(..) => {}
ConstantKind::Val(val, _) => {
self.0.extend(alloc_ids_from_const_val(val));
}
@@ -778,7 +788,7 @@ pub fn write_allocations<'tcx>(
/// If the allocation is small enough to fit into a single line, no start address is given.
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
-/// This also prints relocations adequately.
+/// This also prints provenance adequately.
pub fn display_allocation<'a, 'tcx, Prov, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Prov, Extra>,
@@ -873,34 +883,34 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
if i != line_start {
write!(w, " ")?;
}
- if let Some(&prov) = alloc.relocations().get(&i) {
- // Memory with a relocation must be defined
+ if let Some(&prov) = alloc.provenance().get(&i) {
+ // Memory with provenance must be defined
assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
let j = i.bytes_usize();
let offset = alloc
.inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
let offset = Size::from_bytes(offset);
- let relocation_width = |bytes| bytes * 3;
+ let provenance_width = |bytes| bytes * 3;
let ptr = Pointer::new(prov, offset);
let mut target = format!("{:?}", ptr);
- if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
+ if target.len() > provenance_width(ptr_size.bytes_usize() - 1) {
// This is too long, try to save some space.
target = format!("{:#?}", ptr);
}
if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE {
- // This branch handles the situation where a relocation starts in the current line
+ // This branch handles the situation where a pointer with provenance starts in the current line
// but ends in the next one.
let remainder = Size::from_bytes(BYTES_PER_LINE) - (i - line_start);
let overflow = ptr_size - remainder;
- let remainder_width = relocation_width(remainder.bytes_usize()) - 2;
- let overflow_width = relocation_width(overflow.bytes_usize() - 1) + 1;
+ let remainder_width = provenance_width(remainder.bytes_usize()) - 2;
+ let overflow_width = provenance_width(overflow.bytes_usize() - 1) + 1;
ascii.push('╾');
for _ in 0..remainder.bytes() - 1 {
ascii.push('─');
}
if overflow_width > remainder_width && overflow_width >= target.len() {
- // The case where the relocation fits into the part in the next line
+ // The case where the provenance fits into the part in the next line
write!(w, "╾{0:─^1$}", "", remainder_width)?;
line_start =
write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
@@ -921,11 +931,11 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
i += ptr_size;
continue;
} else {
- // This branch handles a relocation that starts and ends in the current line.
- let relocation_width = relocation_width(ptr_size.bytes_usize() - 1);
- oversized_ptr(&mut target, relocation_width);
+ // This branch handles a pointer with provenance that starts and ends in the current line.
+ let provenance_width = provenance_width(ptr_size.bytes_usize() - 1);
+ oversized_ptr(&mut target, provenance_width);
ascii.push('╾');
- write!(w, "╾{0:─^1$}╼", target, relocation_width)?;
+ write!(w, "╾{0:─^1$}╼", target, provenance_width)?;
for _ in 0..ptr_size.bytes() - 2 {
ascii.push('─');
}
@@ -935,7 +945,7 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
} else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
let j = i.bytes_usize();
- // Checked definedness (and thus range) and relocations. This access also doesn't
+ // Checked definedness (and thus range) and provenance. This access also doesn't
// influence interpreter execution but is only for debugging.
let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
write!(w, "{:02x}", c)?;
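
The renaming in this file is mechanical (relocations to provenance), but the surrounding code is what renders a pointer-sized range of an allocation as a single ╾──╼ span in the hex dump. A rough, assumed illustration of that rendering idea, independent of the real display_allocation (which also handles line wrapping and over-long labels):

// Toy renderer: print bytes as hex, but draw a pointer-sized range that
// carries provenance as one ╾──╼ span labelled with its target.
fn render(bytes: &[u8], provenance_at: Option<(usize, &str)>, ptr_size: usize) -> String {
    let mut out = String::new();
    let mut i = 0;
    while i < bytes.len() {
        if let Some((start, target)) = provenance_at {
            if i == start {
                // A pointer spans ptr_size bytes = ptr_size * 3 columns (each
                // byte is "xx "); ╾, ╼ and the trailing space leave the rest
                // for the centred label. The real printer additionally
                // abbreviates labels that do not fit; omitted here.
                let width = ptr_size * 3 - 3;
                out.push_str(&format!("╾{:─^width$}╼ ", target, width = width));
                i += ptr_size;
                continue;
            }
        }
        out.push_str(&format!("{:02x} ", bytes[i]));
        i += 1;
    }
    out.trim_end().to_string()
}

fn main() {
    let bytes = [0u8; 12];
    // Pretend bytes 4..12 hold a pointer whose provenance points at "alloc1+0x0".
    println!("{}", render(&bytes, Some((4, "alloc1+0x0")), 8));
}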
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index dd9f8795f..d89efe2b3 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -2,7 +2,7 @@
use crate::mir::{Body, ConstantKind, Promoted};
use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::vec_map::VecMap;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
@@ -115,21 +115,6 @@ pub enum UnusedUnsafe {
/// `unsafe` block nested under another (used) `unsafe` block
/// > ``… because it's nested under this `unsafe` block``
InUnsafeBlock(hir::HirId),
- /// `unsafe` block nested under `unsafe fn`
- /// > ``… because it's nested under this `unsafe fn` ``
- ///
- /// the second HirId here indicates the first usage of the `unsafe` block,
- /// which allows retrieval of the LintLevelSource for why that operation would
- /// have been permitted without the block
- InUnsafeFn(hir::HirId, hir::HirId),
-}
-
-#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
-pub enum UsedUnsafeBlockData {
- SomeDisallowedInUnsafeFn,
- // the HirId here indicates the first usage of the `unsafe` block
- // (i.e. the one that's first encountered in the MIR traversal of the unsafety check)
- AllAllowedInUnsafeFn(hir::HirId),
}
#[derive(TyEncodable, TyDecodable, HashStable, Debug)]
@@ -138,10 +123,7 @@ pub struct UnsafetyCheckResult {
pub violations: Vec<UnsafetyViolation>,
/// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint.
- ///
- /// The keys are the used `unsafe` blocks, the UnusedUnsafeKind indicates whether
- /// or not any of the usages happen at a place that doesn't allow `unsafe_op_in_unsafe_fn`.
- pub used_unsafe_blocks: FxHashMap<hir::HirId, UsedUnsafeBlockData>,
+ pub used_unsafe_blocks: FxHashSet<hir::HirId>,
/// This is `Some` iff the item is not a closure.
pub unused_unsafes: Option<Vec<(hir::HirId, UnusedUnsafe)>>,
@@ -345,7 +327,7 @@ rustc_data_structures::static_assert_size!(ConstraintCategory<'_>, 16);
///
/// See also `rustc_const_eval::borrow_check::constraints`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
-#[derive(TyEncodable, TyDecodable, HashStable)]
+#[derive(TyEncodable, TyDecodable, HashStable, Lift, TypeVisitable, TypeFoldable)]
pub enum ConstraintCategory<'tcx> {
Return(ReturnConstraint),
Yield,
@@ -387,7 +369,7 @@ pub enum ConstraintCategory<'tcx> {
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
-#[derive(TyEncodable, TyDecodable, HashStable)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)]
pub enum ReturnConstraint {
Normal,
ClosureUpvar(Field),
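
With the simplification above, the unsafety-check result only records which `unsafe` blocks were used; the unused_unsafe lint then amounts to a set difference. A tiny illustrative sketch of that idea (plain numeric ids standing in for the real hir::HirId and query plumbing):

use std::collections::HashSet;

// The real analysis stores hir::HirId values in an FxHashSet, as in the
// `used_unsafe_blocks` field above; u32 ids are a stand-in.
fn unused_unsafe_blocks(all_blocks: &[u32], used: &HashSet<u32>) -> Vec<u32> {
    all_blocks.iter().copied().filter(|id| !used.contains(id)).collect()
}

fn main() {
    let all = [1, 2, 3];
    let used = HashSet::from([2u32]);
    assert_eq!(unused_unsafe_blocks(&all, &used), vec![1, 3]);
}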
diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs
index 4418b848e..4e06d9101 100644
--- a/compiler/rustc_middle/src/mir/spanview.rs
+++ b/compiler/rustc_middle/src/mir/spanview.rs
@@ -105,7 +105,7 @@ where
}
let body_span = hir_body.unwrap().value.span;
let mut span_viewables = Vec::new();
- for (bb, data) in body.basic_blocks().iter_enumerated() {
+ for (bb, data) in body.basic_blocks.iter_enumerated() {
match spanview {
MirSpanview::Statement => {
for (i, statement) in data.statements.iter().enumerate() {
@@ -249,7 +249,7 @@ pub fn statement_kind_name(statement: &Statement<'_>) -> &'static str {
Retag(..) => "Retag",
AscribeUserType(..) => "AscribeUserType",
Coverage(..) => "Coverage",
- CopyNonOverlapping(..) => "CopyNonOverlapping",
+ Intrinsic(..) => "Intrinsic",
Nop => "Nop",
}
}
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index eb90169d0..c7d0283aa 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -23,75 +23,110 @@ use rustc_span::symbol::Symbol;
use rustc_span::Span;
use rustc_target::asm::InlineAsmRegOrRegClass;
-/// The various "big phases" that MIR goes through.
+/// Represents the "flavors" of MIR.
///
-/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the
-/// dialects forbid certain variants or values in certain phases. The sections below summarize the
-/// changes, but do not document them thoroughly. The full documentation is found in the appropriate
-/// documentation for the thing the change is affecting.
+/// All flavors of MIR use the same data structure, but there are some important differences. These
+/// differences come in two forms: Dialects and phases.
///
-/// Warning: ordering of variants is significant.
+/// Dialects represent a stronger distinction than phases. This is because the transitions between
+/// dialects are semantic changes, and therefore technically *lowerings* between distinct IRs. In
+/// other words, the same [`Body`](crate::mir::Body) might be well-formed for multiple dialects, but
+/// have different semantic meaning and different behavior at runtime.
+///
+/// Each dialect additionally has a number of phases. However, phase changes never involve semantic
+/// changes. If some MIR is well-formed both before and after a phase change, it is also guaranteed
+/// that it has the same semantic meaning. In this sense, phase changes can only add additional
+/// restrictions on what MIR is well-formed.
+///
+/// When adding phases, remember to update [`MirPhase::phase_index`].
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
pub enum MirPhase {
- /// The dialect of MIR used during all phases before `DropsLowered` is the same. This is also
- /// the MIR that analysis such as borrowck uses.
- ///
- /// One important thing to remember about the behavior of this section of MIR is that drop terminators
- /// (including drop and replace) are *conditional*. The elaborate drops pass will then replace each
- /// instance of a drop terminator with a nop, an unconditional drop, or a drop conditioned on a drop
- /// flag. Of course, this means that it is important that the drop elaboration can accurately recognize
- /// when things are initialized and when things are de-initialized. That means any code running on this
- /// version of MIR must be sure to produce output that drop elaboration can reason about. See the
- /// section on the drop terminatorss for more details.
- Built = 0,
- // FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
- // We used to have this for pre-miri MIR based const eval.
- Const = 1,
- /// This phase checks the MIR for promotable elements and takes them out of the main MIR body
- /// by creating a new MIR body per promoted element. After this phase (and thus the termination
- /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
- /// query.
- ConstsPromoted = 2,
- /// After this projections may only contain deref projections as the first element.
- Derefered = 3,
- /// Beginning with this phase, the following variants are disallowed:
- /// * [`TerminatorKind::DropAndReplace`]
+ /// The MIR that is generated by MIR building.
+ ///
+ /// The only things that operate on this dialect are unsafeck, the various MIR lints, and const
+ /// qualifs.
+ ///
+ /// This has no distinct phases.
+ Built,
+ /// The MIR used for most analysis.
+ ///
+ /// The only semantic change between analysis and built MIR is constant promotion. In built MIR,
+ /// sequences of statements that would generally be subject to constant promotion are
+ /// semantically constants, while in analysis MIR all constants are explicit.
+ ///
+ /// The result of const promotion is available from the `mir_promoted` and `promoted_mir` queries.
+ ///
+ /// This is the version of MIR used by borrowck and friends.
+ Analysis(AnalysisPhase),
+ /// The MIR used for CTFE, optimizations, and codegen.
+ ///
+ /// The semantic changes that occur in the lowering from analysis to runtime MIR are as follows:
+ ///
+ /// - Drops: In analysis MIR, `Drop` terminators represent *conditional* drops; roughly speaking,
+ /// if dataflow analysis determines that the place being dropped is uninitialized, the drop will
+ /// not be executed. The exact semantics of this aren't written down anywhere, which means they
+ /// are essentially "what drop elaboration does." In runtime MIR, the drops are unconditional;
+ /// when a `Drop` terminator is reached, if the type has drop glue that drop glue is always
+ /// executed. This may be UB if the underlying place is not initialized.
+ /// - Packed drops: Places might in general be misaligned - in most cases this is UB, the exception
+ /// is fields of packed structs. In analysis MIR, `Drop(P)` for a `P` that might be misaligned
+ /// for this reason implicitly moves `P` to a temporary before dropping. Runtime MIR has no such
+ /// rules, and dropping a misaligned place is simply UB.
+ /// - Unwinding: in analysis MIR, unwinding from a function which may not unwind aborts. In runtime
+ /// MIR, this is UB.
+ /// - Retags: If `-Zmir-emit-retag` is enabled, analysis MIR has "implicit" retags in the same way
+ /// that Rust itself has them. Where exactly these are is generally subject to change, and so we
+ /// don't document this here. Runtime MIR has all retags explicit.
+ /// - Generator bodies: In analysis MIR, locals may actually be behind a pointer that user code has
+ /// access to. This occurs in generator bodies. Such locals do not behave like other locals,
+ /// because they eg may be aliased in surprising ways. Runtime MIR has no such special locals -
+ /// all generator bodies are lowered and so all places that look like locals really are locals.
+ /// - Const prop lints: The lint pass which reports eg `200_u8 + 200_u8` as an error is run as a
+ /// part of analysis to runtime MIR lowering. This means that transformations which may suppress
+ /// such errors may not run on analysis MIR.
+ Runtime(RuntimePhase),
+}
+
+/// See [`MirPhase::Analysis`].
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub enum AnalysisPhase {
+ Initial = 0,
+ /// Beginning in this phase, the following variants are disallowed:
/// * [`TerminatorKind::FalseUnwind`]
/// * [`TerminatorKind::FalseEdge`]
/// * [`StatementKind::FakeRead`]
/// * [`StatementKind::AscribeUserType`]
/// * [`Rvalue::Ref`] with `BorrowKind::Shallow`
///
- /// And the following variant is allowed:
- /// * [`StatementKind::Retag`]
- ///
- /// Furthermore, `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop`
- /// terminator means that the auto-generated drop glue will be invoked. Also, `Copy` operands
- /// are allowed for non-`Copy` types.
- DropsLowered = 4,
- /// Beginning with this phase, the following variant is disallowed:
+ /// Furthermore, `Deref` projections must be the first projection within any place (if they
+ /// appear at all).
+ PostCleanup = 1,
+}
+
+/// See [`MirPhase::Runtime`].
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub enum RuntimePhase {
+ /// In addition to the semantic changes, beginning with this phase, the following variants are
+ /// disallowed:
+ /// * [`TerminatorKind::DropAndReplace`]
+ /// * [`TerminatorKind::Yield`]
+ /// * [`TerminatorKind::GeneratorDrop`]
/// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
///
- /// And the following variant is allowed:
+ /// And the following variants are allowed:
+ /// * [`StatementKind::Retag`]
/// * [`StatementKind::SetDiscriminant`]
- Deaggregated = 5,
- /// Before this phase, generators are in the "source code" form, featuring `yield` statements
- /// and such. With this phase change, they are transformed into a proper state machine. Running
- /// optimizations before this change can be potentially dangerous because the source code is to
- /// some extent a "lie." In particular, `yield` terminators effectively make the value of all
- /// locals visible to the caller. This means that dead store elimination before them, or code
- /// motion across them, is not correct in general. This is also exasperated by type checking
- /// having pre-computed a list of the types that it thinks are ok to be live across a yield
- /// point - this is necessary to decide eg whether autotraits are implemented. Introducing new
- /// types across a yield point will lead to ICEs becaues of this.
- ///
- /// Beginning with this phase, the following variants are disallowed:
- /// * [`TerminatorKind::Yield`]
- /// * [`TerminatorKind::GeneratorDrop`]
+ /// * [`StatementKind::Deinit`]
+ ///
+ /// Furthermore, `Copy` operands are allowed for non-`Copy` types.
+ Initial = 0,
+ /// Beginning with this phase, the following variant is disallowed:
/// * [`ProjectionElem::Deref`] of `Box`
- GeneratorsLowered = 6,
- Optimized = 7,
+ PostCleanup = 1,
+ Optimized = 2,
}
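
The doc comment on MirPhase above asks for phase_index to be kept in sync when phases are added. As a hedged sketch of one plausible shape for that ordering (the real implementation is not shown in this diff), the dialect and its phase can be flattened into a lexicographically ordered pair:

// Assumed, simplified mirror of the MirPhase/AnalysisPhase/RuntimePhase split;
// phase_index() here just flattens (dialect, phase) into a totally ordered pair.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum AnalysisPhase { Initial = 0, PostCleanup = 1 }

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum RuntimePhase { Initial = 0, PostCleanup = 1, Optimized = 2 }

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum MirPhase {
    Built,
    Analysis(AnalysisPhase),
    Runtime(RuntimePhase),
}

impl MirPhase {
    // Hypothetical encoding: dialects are 1-based, phases 1-based within them.
    fn phase_index(&self) -> (usize, usize) {
        match self {
            MirPhase::Built => (1, 1),
            MirPhase::Analysis(p) => (2, *p as usize + 1),
            MirPhase::Runtime(p) => (3, *p as usize + 1),
        }
    }
}

fn main() {
    let a = MirPhase::Analysis(AnalysisPhase::PostCleanup);
    let b = MirPhase::Runtime(RuntimePhase::Initial);
    assert!(a.phase_index() < b.phase_index());
    println!("{:?} comes before {:?}", a, b);
}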
///////////////////////////////////////////////////////////////////////////
@@ -292,12 +327,40 @@ pub enum StatementKind<'tcx> {
/// executed.
Coverage(Box<Coverage>),
+ /// Denotes a call to an intrinsic that does not require an unwind path and always returns.
+ /// This avoids adding a new block and a terminator for simple intrinsics.
+ Intrinsic(Box<NonDivergingIntrinsic<'tcx>>),
+
+ /// No-op. Useful for deleting instructions without affecting statement indices.
+ Nop,
+}
+
+#[derive(
+ Clone,
+ TyEncodable,
+ TyDecodable,
+ Debug,
+ PartialEq,
+ Hash,
+ HashStable,
+ TypeFoldable,
+ TypeVisitable
+)]
+pub enum NonDivergingIntrinsic<'tcx> {
+ /// Denotes a call to the intrinsic function `assume`.
+ ///
+ /// The operand must be a boolean. Optimizers may use the value of the boolean to backtrack its
+ /// computation to infer information about other variables. So if the boolean came from an
+ /// `x < y` operation, subsequent operations on `x` and `y` could elide various bounds checks.
+ /// If the argument is `false`, this operation is equivalent to `TerminatorKind::Unreachable`.
+ Assume(Operand<'tcx>),
+
/// Denotes a call to the intrinsic function `copy_nonoverlapping`.
///
/// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer,
/// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and
/// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of
- /// the `src` place are copied to the continguous range of bytes beginning with the first byte
+ /// the `src` place are copied to the contiguous range of bytes beginning with the first byte
/// of `dest`.
///
/// **Needs clarification**: In what order are operands computed and dereferenced? It should
@@ -305,10 +368,18 @@ pub enum StatementKind<'tcx> {
///
/// **Needs clarification**: Is this typed or not, ie is there a typed load and store involved?
/// I vaguely remember Ralf saying somewhere that he thought it should not be.
- CopyNonOverlapping(Box<CopyNonOverlapping<'tcx>>),
+ CopyNonOverlapping(CopyNonOverlapping<'tcx>),
+}
- /// No-op. Useful for deleting instructions without affecting statement indices.
- Nop,
+impl std::fmt::Display for NonDivergingIntrinsic<'_> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Assume(op) => write!(f, "assume({op:?})"),
+ Self::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => {
+ write!(f, "copy_nonoverlapping(dst = {dst:?}, src = {src:?}, count = {count:?})")
+ }
+ }
+ }
}
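
Both non-diverging intrinsics described above have close stable-library analogues, which may help make the documented semantics concrete (this is an illustration in terms of std, not the MIR encoding itself):

// Std-level analogues of the documented semantics; illustrative only.
unsafe fn assume(cond: bool) {
    if !cond {
        // With a false condition this is equivalent to reaching
        // `TerminatorKind::Unreachable`, i.e. undefined behavior.
        std::hint::unreachable_unchecked();
    }
}

fn main() {
    let (x, y) = (3usize, 10usize);
    // Sound here because the condition really holds; the optimizer may now
    // assume `x < y` downstream.
    unsafe { assume(x < y) };

    // `copy_nonoverlapping(src, dst, count)` copies count * size_of::<T>()
    // bytes; the source and destination ranges must not overlap.
    let src = [1u32, 2, 3, 4];
    let mut dst = [0u32; 4];
    unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len()) };
    assert_eq!(dst, [1, 2, 3, 4]);
}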
/// Describes what kind of retag is to be performed.
@@ -343,7 +414,7 @@ pub enum FakeReadCause {
/// Some(closure_def_id).
/// Otherwise, the value of the optional LocalDefId will be None.
//
- // We can use LocaDefId here since fake read statements are removed
+ // We can use LocalDefId here since fake read statements are removed
// before codegen in the `CleanupNonCodegenStatements` pass.
ForMatchedPlace(Option<LocalDefId>),
@@ -417,7 +488,7 @@ pub struct CopyNonOverlapping<'tcx> {
/// must also be `cleanup`. This is a part of the type system and checked statically, so it is
/// still an error to have such an edge in the CFG even if it's known that it won't be taken at
/// runtime.
-#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum TerminatorKind<'tcx> {
/// Block has one successor; we continue execution there.
Goto { target: BasicBlock },
@@ -670,7 +741,7 @@ pub enum TerminatorKind<'tcx> {
}
/// Information about an assertion failure.
-#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum AssertKind<O> {
BoundsCheck { len: O, index: O },
Overflow(BinOp, O, O),
@@ -792,7 +863,7 @@ pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
///
/// Rust currently requires that every place obey those two rules. This is checked by MIRI and taken
/// advantage of by codegen (via `gep inbounds`). That is possibly subject to change.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Place<'tcx> {
pub local: Local,
@@ -801,7 +872,7 @@ pub struct Place<'tcx> {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[derive(TyEncodable, TyDecodable, HashStable)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum ProjectionElem<V, T> {
Deref,
Field(Field, T),
@@ -884,7 +955,7 @@ pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
/// currently implements it, but it seems like this may be something to check against in the
/// validator.
-#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)]
pub enum Operand<'tcx> {
/// Creates a value by loading the given place.
///
@@ -915,7 +986,7 @@ pub enum Operand<'tcx> {
/// Computing any rvalue begins by evaluating the places and operands in some order (**Needs
/// clarification**: Which order?). These are then used to produce a "value" - the same kind of
/// value that an [`Operand`] produces.
-#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum Rvalue<'tcx> {
/// Yields the operand unchanged
Use(Operand<'tcx>),
@@ -1068,11 +1139,14 @@ pub enum CastKind {
/// All sorts of pointer-to-pointer casts. Note that reference-to-raw-ptr casts are
/// translated into `&raw mut/const *r`, i.e., they are not actually casts.
Pointer(PointerCast),
+ /// Cast into a dyn* object.
+ DynStar,
/// Remaining unclassified casts.
Misc,
}
#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
pub enum AggregateKind<'tcx> {
/// The type is of the element
Array(Ty<'tcx>),
@@ -1160,7 +1234,8 @@ pub enum BinOp {
mod size_asserts {
use super::*;
// These are in alphabetical order, which is easy to maintain.
- static_assert_size!(AggregateKind<'_>, 48);
+ #[cfg(not(bootstrap))]
+ static_assert_size!(AggregateKind<'_>, 40);
static_assert_size!(Operand<'_>, 24);
static_assert_size!(Place<'_>, 16);
static_assert_size!(PlaceElem<'_>, 24);
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
index 9ccf5aea6..4ea333cff 100644
--- a/compiler/rustc_middle/src/mir/terminator.rs
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -14,7 +14,7 @@ use std::slice;
pub use super::query::*;
-#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub struct SwitchTargets {
/// Possible values. The locations to branch to in each case
/// are found in the corresponding indices from the `targets` vector.
@@ -102,7 +102,7 @@ impl<'a> Iterator for SwitchTargetsIter<'a> {
impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {}
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Terminator<'tcx> {
pub source_info: SourceInfo,
pub kind: TerminatorKind<'tcx>,
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
index 627dc32f3..55b2c5927 100644
--- a/compiler/rustc_middle/src/mir/traversal.rs
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -37,7 +37,7 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> {
Preorder {
body,
- visited: BitSet::new_empty(body.basic_blocks().len()),
+ visited: BitSet::new_empty(body.basic_blocks.len()),
worklist,
root_is_start_block: root == START_BLOCK,
}
@@ -71,7 +71,7 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
fn size_hint(&self) -> (usize, Option<usize>) {
// All the blocks, minus the number of blocks we've visited.
- let upper = self.body.basic_blocks().len() - self.visited.count();
+ let upper = self.body.basic_blocks.len() - self.visited.count();
let lower = if self.root_is_start_block {
// We will visit all remaining blocks exactly once.
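
For context on the size_hint change: Preorder is a worklist traversal over basic blocks with a visited set, so the upper bound is simply the number of blocks not yet visited. A minimal stand-alone version of that traversal pattern (plain indices and a HashSet instead of BasicBlock and BitSet):

use std::collections::HashSet;

// Pop a block, skip it if already visited, record it, then push its successors.
fn preorder(successors: &[Vec<usize>], root: usize) -> Vec<usize> {
    let mut visited = HashSet::new();
    let mut worklist = vec![root];
    let mut order = Vec::new();
    while let Some(block) = worklist.pop() {
        if !visited.insert(block) {
            continue; // already visited
        }
        order.push(block);
        worklist.extend(&successors[block]);
    }
    order
}

fn main() {
    // 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3
    let succ = vec![vec![1, 2], vec![3], vec![3], vec![]];
    println!("{:?}", preorder(&succ, 0)); // [0, 2, 3, 1]
}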
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
index 82a6b0c50..9d098c808 100644
--- a/compiler/rustc_middle/src/mir/type_foldable.rs
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -1,8 +1,9 @@
//! `TypeFoldable` implementations for MIR types
+use rustc_ast::InlineAsmTemplatePiece;
+
use super::*;
use crate::ty;
-use rustc_data_structures::functor::IdFunctor;
TrivialTypeTraversalAndLiftImpls! {
BlockTailInfo,
@@ -13,96 +14,27 @@ TrivialTypeTraversalAndLiftImpls! {
SourceScope,
SourceScopeLocalData,
UserTypeAnnotationIndex,
+ BorrowKind,
+ CastKind,
+ BinOp,
+ NullOp,
+ UnOp,
+ hir::Movability,
+ BasicBlock,
+ SwitchTargets,
+ GeneratorKind,
+ GeneratorSavedLocal,
}
-impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- use crate::mir::TerminatorKind::*;
-
- let kind = match self.kind {
- Goto { target } => Goto { target },
- SwitchInt { discr, switch_ty, targets } => SwitchInt {
- discr: discr.try_fold_with(folder)?,
- switch_ty: switch_ty.try_fold_with(folder)?,
- targets,
- },
- Drop { place, target, unwind } => {
- Drop { place: place.try_fold_with(folder)?, target, unwind }
- }
- DropAndReplace { place, value, target, unwind } => DropAndReplace {
- place: place.try_fold_with(folder)?,
- value: value.try_fold_with(folder)?,
- target,
- unwind,
- },
- Yield { value, resume, resume_arg, drop } => Yield {
- value: value.try_fold_with(folder)?,
- resume,
- resume_arg: resume_arg.try_fold_with(folder)?,
- drop,
- },
- Call { func, args, destination, target, cleanup, from_hir_call, fn_span } => Call {
- func: func.try_fold_with(folder)?,
- args: args.try_fold_with(folder)?,
- destination: destination.try_fold_with(folder)?,
- target,
- cleanup,
- from_hir_call,
- fn_span,
- },
- Assert { cond, expected, msg, target, cleanup } => {
- use AssertKind::*;
- let msg = match msg {
- BoundsCheck { len, index } => BoundsCheck {
- len: len.try_fold_with(folder)?,
- index: index.try_fold_with(folder)?,
- },
- Overflow(op, l, r) => {
- Overflow(op, l.try_fold_with(folder)?, r.try_fold_with(folder)?)
- }
- OverflowNeg(op) => OverflowNeg(op.try_fold_with(folder)?),
- DivisionByZero(op) => DivisionByZero(op.try_fold_with(folder)?),
- RemainderByZero(op) => RemainderByZero(op.try_fold_with(folder)?),
- ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg,
- };
- Assert { cond: cond.try_fold_with(folder)?, expected, msg, target, cleanup }
- }
- GeneratorDrop => GeneratorDrop,
- Resume => Resume,
- Abort => Abort,
- Return => Return,
- Unreachable => Unreachable,
- FalseEdge { real_target, imaginary_target } => {
- FalseEdge { real_target, imaginary_target }
- }
- FalseUnwind { real_target, unwind } => FalseUnwind { real_target, unwind },
- InlineAsm { template, operands, options, line_spans, destination, cleanup } => {
- InlineAsm {
- template,
- operands: operands.try_fold_with(folder)?,
- options,
- line_spans,
- destination,
- cleanup,
- }
- }
- };
- Ok(Terminator { source_info: self.source_info, kind })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for GeneratorKind {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx [InlineAsmTemplatePiece] {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
Ok(self)
}
}
-impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(Place {
- local: self.local.try_fold_with(folder)?,
- projection: self.projection.try_fold_with(folder)?,
- })
+impl<'tcx> TypeFoldable<'tcx> for &'tcx [Span] {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
}
}
@@ -112,114 +44,12 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
}
}
-impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- use crate::mir::Rvalue::*;
- Ok(match self {
- Use(op) => Use(op.try_fold_with(folder)?),
- Repeat(op, len) => Repeat(op.try_fold_with(folder)?, len.try_fold_with(folder)?),
- ThreadLocalRef(did) => ThreadLocalRef(did.try_fold_with(folder)?),
- Ref(region, bk, place) => {
- Ref(region.try_fold_with(folder)?, bk, place.try_fold_with(folder)?)
- }
- CopyForDeref(place) => CopyForDeref(place.try_fold_with(folder)?),
- AddressOf(mutability, place) => AddressOf(mutability, place.try_fold_with(folder)?),
- Len(place) => Len(place.try_fold_with(folder)?),
- Cast(kind, op, ty) => Cast(kind, op.try_fold_with(folder)?, ty.try_fold_with(folder)?),
- BinaryOp(op, box (rhs, lhs)) => {
- BinaryOp(op, Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?)))
- }
- CheckedBinaryOp(op, box (rhs, lhs)) => CheckedBinaryOp(
- op,
- Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?)),
- ),
- UnaryOp(op, val) => UnaryOp(op, val.try_fold_with(folder)?),
- Discriminant(place) => Discriminant(place.try_fold_with(folder)?),
- NullaryOp(op, ty) => NullaryOp(op, ty.try_fold_with(folder)?),
- Aggregate(kind, fields) => {
- let kind = kind.try_map_id(|kind| {
- Ok(match kind {
- AggregateKind::Array(ty) => AggregateKind::Array(ty.try_fold_with(folder)?),
- AggregateKind::Tuple => AggregateKind::Tuple,
- AggregateKind::Adt(def, v, substs, user_ty, n) => AggregateKind::Adt(
- def,
- v,
- substs.try_fold_with(folder)?,
- user_ty.try_fold_with(folder)?,
- n,
- ),
- AggregateKind::Closure(id, substs) => {
- AggregateKind::Closure(id, substs.try_fold_with(folder)?)
- }
- AggregateKind::Generator(id, substs, movablity) => {
- AggregateKind::Generator(id, substs.try_fold_with(folder)?, movablity)
- }
- })
- })?;
- Aggregate(kind, fields.try_fold_with(folder)?)
- }
- ShallowInitBox(op, ty) => {
- ShallowInitBox(op.try_fold_with(folder)?, ty.try_fold_with(folder)?)
- }
- })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(match self {
- Operand::Copy(place) => Operand::Copy(place.try_fold_with(folder)?),
- Operand::Move(place) => Operand::Move(place.try_fold_with(folder)?),
- Operand::Constant(c) => Operand::Constant(c.try_fold_with(folder)?),
- })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- use crate::mir::ProjectionElem::*;
-
- Ok(match self {
- Deref => Deref,
- Field(f, ty) => Field(f, ty.try_fold_with(folder)?),
- Index(v) => Index(v.try_fold_with(folder)?),
- Downcast(symbol, variantidx) => Downcast(symbol, variantidx),
- ConstantIndex { offset, min_length, from_end } => {
- ConstantIndex { offset, min_length, from_end }
- }
- Subslice { from, to, from_end } => Subslice { from, to, from_end },
- })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for Field {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
- Ok(self)
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
- Ok(self)
- }
-}
-
impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
Ok(self)
}
}
-impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(Constant {
- span: self.span,
- user_ty: self.user_ty.try_fold_with(folder)?,
- literal: self.literal.try_fold_with(folder)?,
- })
- }
-}
-
impl<'tcx> TypeFoldable<'tcx> for ConstantKind<'tcx> {
#[inline(always)]
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
@@ -235,6 +65,9 @@ impl<'tcx> TypeSuperFoldable<'tcx> for ConstantKind<'tcx> {
match self {
ConstantKind::Ty(c) => Ok(ConstantKind::Ty(c.try_fold_with(folder)?)),
ConstantKind::Val(v, t) => Ok(ConstantKind::Val(v, t.try_fold_with(folder)?)),
+ ConstantKind::Unevaluated(uv, t) => {
+ Ok(ConstantKind::Unevaluated(uv.try_fold_with(folder)?, t.try_fold_with(folder)?))
+ }
}
}
}
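
The handwritten impls removed above, and the new ConstantKind::Unevaluated arm, all follow one shape: fold every type-carrying component with `?` and rebuild the value, leaving everything else untouched. A self-contained sketch of that try-fold shape (simplified stand-ins, not the real TypeFoldable/FallibleTypeFolder traits):

#[derive(Debug, Clone, PartialEq)]
enum Const {
    Val(i64),
    Unevaluated(String),
}

trait Folder {
    type Error;
    fn fold_const(&mut self, c: Const) -> Result<Const, Self::Error>;
}

// Composite values fold each constant-carrying field and propagate errors,
// the same shape as the try_fold_with impls above.
fn fold_pair<F: Folder>(pair: (Const, Const), folder: &mut F) -> Result<(Const, Const), F::Error> {
    Ok((folder.fold_const(pair.0)?, folder.fold_const(pair.1)?))
}

struct Evaluate;

impl Folder for Evaluate {
    type Error = String;
    fn fold_const(&mut self, c: Const) -> Result<Const, String> {
        match c {
            Const::Val(v) => Ok(Const::Val(v)),
            Const::Unevaluated(expr) if expr == "1 + 1" => Ok(Const::Val(2)),
            Const::Unevaluated(expr) => Err(format!("cannot evaluate `{expr}`")),
        }
    }
}

fn main() {
    let folded =
        fold_pair((Const::Val(7), Const::Unevaluated("1 + 1".into())), &mut Evaluate).unwrap();
    assert_eq!(folded, (Const::Val(7), Const::Val(2)));
}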
diff --git a/compiler/rustc_middle/src/mir/type_visitable.rs b/compiler/rustc_middle/src/mir/type_visitable.rs
index 6a0801cb0..be19bb486 100644
--- a/compiler/rustc_middle/src/mir/type_visitable.rs
+++ b/compiler/rustc_middle/src/mir/type_visitable.rs
@@ -1,165 +1,6 @@
//! `TypeVisitable` implementations for MIR types
use super::*;
-use crate::ty;
-
-impl<'tcx> TypeVisitable<'tcx> for Terminator<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- use crate::mir::TerminatorKind::*;
-
- match self.kind {
- SwitchInt { ref discr, switch_ty, .. } => {
- discr.visit_with(visitor)?;
- switch_ty.visit_with(visitor)
- }
- Drop { ref place, .. } => place.visit_with(visitor),
- DropAndReplace { ref place, ref value, .. } => {
- place.visit_with(visitor)?;
- value.visit_with(visitor)
- }
- Yield { ref value, .. } => value.visit_with(visitor),
- Call { ref func, ref args, ref destination, .. } => {
- destination.visit_with(visitor)?;
- func.visit_with(visitor)?;
- args.visit_with(visitor)
- }
- Assert { ref cond, ref msg, .. } => {
- cond.visit_with(visitor)?;
- use AssertKind::*;
- match msg {
- BoundsCheck { ref len, ref index } => {
- len.visit_with(visitor)?;
- index.visit_with(visitor)
- }
- Overflow(_, l, r) => {
- l.visit_with(visitor)?;
- r.visit_with(visitor)
- }
- OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
- op.visit_with(visitor)
- }
- ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE,
- }
- }
- InlineAsm { ref operands, .. } => operands.visit_with(visitor),
- Goto { .. }
- | Resume
- | Abort
- | Return
- | GeneratorDrop
- | Unreachable
- | FalseEdge { .. }
- | FalseUnwind { .. } => ControlFlow::CONTINUE,
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for GeneratorKind {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
- ControlFlow::CONTINUE
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Place<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.local.visit_with(visitor)?;
- self.projection.visit_with(visitor)
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.iter().try_for_each(|t| t.visit_with(visitor))
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Rvalue<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- use crate::mir::Rvalue::*;
- match *self {
- Use(ref op) => op.visit_with(visitor),
- CopyForDeref(ref place) => {
- let op = &Operand::Copy(*place);
- op.visit_with(visitor)
- }
- Repeat(ref op, _) => op.visit_with(visitor),
- ThreadLocalRef(did) => did.visit_with(visitor),
- Ref(region, _, ref place) => {
- region.visit_with(visitor)?;
- place.visit_with(visitor)
- }
- AddressOf(_, ref place) => place.visit_with(visitor),
- Len(ref place) => place.visit_with(visitor),
- Cast(_, ref op, ty) => {
- op.visit_with(visitor)?;
- ty.visit_with(visitor)
- }
- BinaryOp(_, box (ref rhs, ref lhs)) | CheckedBinaryOp(_, box (ref rhs, ref lhs)) => {
- rhs.visit_with(visitor)?;
- lhs.visit_with(visitor)
- }
- UnaryOp(_, ref val) => val.visit_with(visitor),
- Discriminant(ref place) => place.visit_with(visitor),
- NullaryOp(_, ty) => ty.visit_with(visitor),
- Aggregate(ref kind, ref fields) => {
- match **kind {
- AggregateKind::Array(ty) => {
- ty.visit_with(visitor)?;
- }
- AggregateKind::Tuple => {}
- AggregateKind::Adt(_, _, substs, user_ty, _) => {
- substs.visit_with(visitor)?;
- user_ty.visit_with(visitor)?;
- }
- AggregateKind::Closure(_, substs) => {
- substs.visit_with(visitor)?;
- }
- AggregateKind::Generator(_, substs, _) => {
- substs.visit_with(visitor)?;
- }
- }
- fields.visit_with(visitor)
- }
- ShallowInitBox(ref op, ty) => {
- op.visit_with(visitor)?;
- ty.visit_with(visitor)
- }
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Operand<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- match *self {
- Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor),
- Operand::Constant(ref c) => c.visit_with(visitor),
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for PlaceElem<'tcx> {
- fn visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<Vs::BreakTy> {
- use crate::mir::ProjectionElem::*;
-
- match self {
- Field(_, ty) => ty.visit_with(visitor),
- Index(v) => v.visit_with(visitor),
- _ => ControlFlow::CONTINUE,
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Field {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
- ControlFlow::CONTINUE
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for GeneratorSavedLocal {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
- ControlFlow::CONTINUE
- }
-}
impl<'tcx, R: Idx, C: Idx> TypeVisitable<'tcx> for BitMatrix<R, C> {
fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
@@ -167,13 +8,6 @@ impl<'tcx, R: Idx, C: Idx> TypeVisitable<'tcx> for BitMatrix<R, C> {
}
}
-impl<'tcx> TypeVisitable<'tcx> for Constant<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.literal.visit_with(visitor)?;
- self.user_ty.visit_with(visitor)
- }
-}
-
impl<'tcx> TypeVisitable<'tcx> for ConstantKind<'tcx> {
fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
visitor.visit_mir_const(*self)
@@ -185,6 +19,10 @@ impl<'tcx> TypeSuperVisitable<'tcx> for ConstantKind<'tcx> {
match *self {
ConstantKind::Ty(c) => c.visit_with(visitor),
ConstantKind::Val(_, t) => t.visit_with(visitor),
+ ConstantKind::Unevaluated(uv, t) => {
+ uv.visit_with(visitor)?;
+ t.visit_with(visitor)
+ }
}
}
}
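
The deleted visit impls follow the complementary pattern: visit each component and short-circuit on the first Break, which is what the derive now generates. A stand-alone sketch of that pattern using std::ops::ControlFlow (a simplification, not the real TypeVisitable machinery):

use std::ops::ControlFlow;

// Walk a list of type names and break on the first match, mirroring the
// visit_with / Break short-circuiting used by the impls above.
fn visit_types(types: &[&str], needle: &str) -> ControlFlow<String> {
    for ty in types {
        if *ty == needle {
            return ControlFlow::Break(format!("found {ty}"));
        }
        // Otherwise keep going, like returning ControlFlow::CONTINUE above.
    }
    ControlFlow::Continue(())
}

fn main() {
    match visit_types(&["u32", "Vec<T>", "MyAdt"], "Vec<T>") {
        ControlFlow::Break(msg) => println!("stopped early: {msg}"),
        ControlFlow::Continue(()) => println!("visited everything"),
    }
}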
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index 891608764..d9b24566b 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -80,6 +80,8 @@ macro_rules! make_mir_visitor {
self.super_body(body);
}
+ extra_body_methods!($($mutability)?);
+
fn visit_basic_block_data(
&mut self,
block: BasicBlock,
@@ -235,14 +237,6 @@ macro_rules! make_mir_visitor {
self.super_region(region);
}
- fn visit_const(
- &mut self,
- constant: $(& $mutability)? ty::Const<'tcx>,
- _: Location,
- ) {
- self.super_const(constant);
- }
-
fn visit_substs(
&mut self,
substs: & $($mutability)? SubstsRef<'tcx>,
@@ -287,63 +281,7 @@ macro_rules! make_mir_visitor {
&mut self,
body: &$($mutability)? Body<'tcx>,
) {
- let span = body.span;
- if let Some(gen) = &$($mutability)? body.generator {
- if let Some(yield_ty) = $(& $mutability)? gen.yield_ty {
- self.visit_ty(
- yield_ty,
- TyContext::YieldTy(SourceInfo::outermost(span))
- );
- }
- }
-
- // for best performance, we want to use an iterator rather
- // than a for-loop, to avoid calling `body::Body::invalidate` for
- // each basic block.
- #[allow(unused_macro_rules)]
- macro_rules! basic_blocks {
- (mut) => (body.basic_blocks_mut().iter_enumerated_mut());
- () => (body.basic_blocks().iter_enumerated());
- }
- for (bb, data) in basic_blocks!($($mutability)?) {
- self.visit_basic_block_data(bb, data);
- }
-
- for scope in &$($mutability)? body.source_scopes {
- self.visit_source_scope_data(scope);
- }
-
- self.visit_ty(
- $(& $mutability)? body.return_ty(),
- TyContext::ReturnTy(SourceInfo::outermost(body.span))
- );
-
- for local in body.local_decls.indices() {
- self.visit_local_decl(local, & $($mutability)? body.local_decls[local]);
- }
-
- #[allow(unused_macro_rules)]
- macro_rules! type_annotations {
- (mut) => (body.user_type_annotations.iter_enumerated_mut());
- () => (body.user_type_annotations.iter_enumerated());
- }
-
- for (index, annotation) in type_annotations!($($mutability)?) {
- self.visit_user_type_annotation(
- index, annotation
- );
- }
-
- for var_debug_info in &$($mutability)? body.var_debug_info {
- self.visit_var_debug_info(var_debug_info);
- }
-
- self.visit_span($(& $mutability)? body.span);
-
- for const_ in &$($mutability)? body.required_consts {
- let location = START_BLOCK.start_location();
- self.visit_constant(const_, location);
- }
+ super_body!(self, body, $($mutability, true)?);
}
fn super_basic_block_data(&mut self,
@@ -479,14 +417,15 @@ macro_rules! make_mir_visitor {
location
)
}
- StatementKind::CopyNonOverlapping(box crate::mir::CopyNonOverlapping{
- src,
- dst,
- count,
- }) => {
- self.visit_operand(src, location);
- self.visit_operand(dst, location);
- self.visit_operand(count, location)
+ StatementKind::Intrinsic(box ref $($mutability)? intrinsic) => {
+ match intrinsic {
+ NonDivergingIntrinsic::Assume(op) => self.visit_operand(op, location),
+ NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => {
+ self.visit_operand(src, location);
+ self.visit_operand(dst, location);
+ self.visit_operand(count, location);
+ }
+ }
}
StatementKind::Nop => {}
}
@@ -930,8 +869,9 @@ macro_rules! make_mir_visitor {
self.visit_span($(& $mutability)? *span);
drop(user_ty); // no visit method for this
match literal {
- ConstantKind::Ty(ct) => self.visit_const($(& $mutability)? *ct, location),
+ ConstantKind::Ty(_) => {}
ConstantKind::Val(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
+ ConstantKind::Unevaluated(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
}
}
@@ -969,9 +909,6 @@ macro_rules! make_mir_visitor {
fn super_region(&mut self, _region: $(& $mutability)? ty::Region<'tcx>) {
}
- fn super_const(&mut self, _const: $(& $mutability)? ty::Const<'tcx>) {
- }
-
fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) {
}
@@ -982,12 +919,7 @@ macro_rules! make_mir_visitor {
body: &$($mutability)? Body<'tcx>,
location: Location
) {
- #[allow(unused_macro_rules)]
- macro_rules! basic_blocks {
- (mut) => (body.basic_blocks_mut());
- () => (body.basic_blocks());
- }
- let basic_block = & $($mutability)? basic_blocks!($($mutability)?)[location.block];
+ let basic_block = & $($mutability)? basic_blocks!(body, $($mutability, true)?)[location.block];
if basic_block.statements.len() == location.statement_index {
if let Some(ref $($mutability)? terminator) = basic_block.terminator {
self.visit_terminator(terminator, location)
@@ -1002,6 +934,94 @@ macro_rules! make_mir_visitor {
}
}
+macro_rules! basic_blocks {
+ ($body:ident, mut, true) => {
+ $body.basic_blocks.as_mut()
+ };
+ ($body:ident, mut, false) => {
+ $body.basic_blocks.as_mut_preserves_cfg()
+ };
+ ($body:ident,) => {
+ $body.basic_blocks
+ };
+}
+
+macro_rules! basic_blocks_iter {
+ ($body:ident, mut, $invalidate:tt) => {
+ basic_blocks!($body, mut, $invalidate).iter_enumerated_mut()
+ };
+ ($body:ident,) => {
+ basic_blocks!($body,).iter_enumerated()
+ };
+}
+
+macro_rules! extra_body_methods {
+ (mut) => {
+ fn visit_body_preserves_cfg(&mut self, body: &mut Body<'tcx>) {
+ self.super_body_preserves_cfg(body);
+ }
+
+ fn super_body_preserves_cfg(&mut self, body: &mut Body<'tcx>) {
+ super_body!(self, body, mut, false);
+ }
+ };
+ () => {};
+}
+
+macro_rules! super_body {
+ ($self:ident, $body:ident, $($mutability:ident, $invalidate:tt)?) => {
+ let span = $body.span;
+ if let Some(gen) = &$($mutability)? $body.generator {
+ if let Some(yield_ty) = $(& $mutability)? gen.yield_ty {
+ $self.visit_ty(
+ yield_ty,
+ TyContext::YieldTy(SourceInfo::outermost(span))
+ );
+ }
+ }
+
+ for (bb, data) in basic_blocks_iter!($body, $($mutability, $invalidate)?) {
+ $self.visit_basic_block_data(bb, data);
+ }
+
+ for scope in &$($mutability)? $body.source_scopes {
+ $self.visit_source_scope_data(scope);
+ }
+
+ $self.visit_ty(
+ $(& $mutability)? $body.return_ty(),
+ TyContext::ReturnTy(SourceInfo::outermost($body.span))
+ );
+
+ for local in $body.local_decls.indices() {
+ $self.visit_local_decl(local, & $($mutability)? $body.local_decls[local]);
+ }
+
+ #[allow(unused_macro_rules)]
+ macro_rules! type_annotations {
+ (mut) => ($body.user_type_annotations.iter_enumerated_mut());
+ () => ($body.user_type_annotations.iter_enumerated());
+ }
+
+ for (index, annotation) in type_annotations!($($mutability)?) {
+ $self.visit_user_type_annotation(
+ index, annotation
+ );
+ }
+
+ for var_debug_info in &$($mutability)? $body.var_debug_info {
+ $self.visit_var_debug_info(var_debug_info);
+ }
+
+ $self.visit_span($(& $mutability)? $body.span);
+
+ for const_ in &$($mutability)? $body.required_consts {
+ let location = START_BLOCK.start_location();
+ $self.visit_constant(const_, location);
+ }
+ }
+}
+
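
The basic_blocks!/super_body! macros above thread an `$invalidate` flag through so that mutable visits choose between `basic_blocks.as_mut()` and `as_mut_preserves_cfg()`. A simplified model of why that distinction matters (an assumed toy type with one derived cache, not the compiler's BasicBlocks):

// One mutable accessor drops a derived cache; the other promises the caller
// will not change the structure, so the cache stays valid.
struct Graph {
    edges: Vec<(usize, usize)>,
    cached_node_count: Option<usize>,
}

impl Graph {
    fn node_count(&mut self) -> usize {
        if self.cached_node_count.is_none() {
            // Pretend this is expensive to recompute.
            let n = self.edges.iter().flat_map(|&(a, b)| [a, b]).max().map_or(0, |m| m + 1);
            self.cached_node_count = Some(n);
        }
        self.cached_node_count.unwrap()
    }

    // Structure may change: drop the cached value (cf. `as_mut`).
    fn edges_mut(&mut self) -> &mut Vec<(usize, usize)> {
        self.cached_node_count = None;
        &mut self.edges
    }

    // Caller promises not to change the structure (cf. `as_mut_preserves_cfg`),
    // so the cached value is kept.
    fn edges_mut_preserves_structure(&mut self) -> &mut Vec<(usize, usize)> {
        &mut self.edges
    }
}

fn main() {
    let mut g = Graph { edges: vec![(0, 1), (1, 2)], cached_node_count: None };
    assert_eq!(g.node_count(), 3);

    // Reordering existing edges keeps the node count valid, so no invalidation.
    g.edges_mut_preserves_structure().swap(0, 1);
    assert_eq!(g.node_count(), 3);

    // Adding an edge to a new node must go through the invalidating accessor.
    g.edges_mut().push((2, 3));
    assert_eq!(g.node_count(), 4);
}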
macro_rules! visit_place_fns {
(mut) => {
fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;