author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:39 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:39 +0000
commit     1376c5a617be5c25655d0d7cb63e3beaa5a6e026 (patch)
tree       3bb8d61aee02bc7a15eab3f36e3b921afc2075d0 /compiler/rustc_middle/src/mir
parent     Releasing progress-linux version 1.69.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.70.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_middle/src/mir')
-rw-r--r--  compiler/rustc_middle/src/mir/basic_blocks.rs                          |   6
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation.rs                  |  28
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs        | 330
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/init_mask/tests.rs  | 195
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs   |   2
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation/tests.rs            |  19
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/mod.rs                         |   3
-rw-r--r--  compiler/rustc_middle/src/mir/mod.rs                                   | 214
-rw-r--r--  compiler/rustc_middle/src/mir/mono.rs                                  |   4
-rw-r--r--  compiler/rustc_middle/src/mir/patch.rs                                 |  68
-rw-r--r--  compiler/rustc_middle/src/mir/pretty.rs                                |   1
-rw-r--r--  compiler/rustc_middle/src/mir/query.rs                                 |  26
-rw-r--r--  compiler/rustc_middle/src/mir/spanview.rs                              |   4
-rw-r--r--  compiler/rustc_middle/src/mir/syntax.rs                                | 151
-rw-r--r--  compiler/rustc_middle/src/mir/tcx.rs                                   |  34
-rw-r--r--  compiler/rustc_middle/src/mir/terminator.rs                            | 186
-rw-r--r--  compiler/rustc_middle/src/mir/traversal.rs                             |  16
-rw-r--r--  compiler/rustc_middle/src/mir/visit.rs                                 |  51
18 files changed, 884 insertions, 454 deletions
diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs
index b93871769..3fb468379 100644
--- a/compiler/rustc_middle/src/mir/basic_blocks.rs
+++ b/compiler/rustc_middle/src/mir/basic_blocks.rs
@@ -6,7 +6,7 @@ use rustc_data_structures::graph;
use rustc_data_structures::graph::dominators::{dominators, Dominators};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::OnceCell;
-use rustc_index::vec::IndexVec;
+use rustc_index::vec::{IndexSlice, IndexVec};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use smallvec::SmallVec;
@@ -124,10 +124,10 @@ impl<'tcx> BasicBlocks<'tcx> {
}
impl<'tcx> std::ops::Deref for BasicBlocks<'tcx> {
- type Target = IndexVec<BasicBlock, BasicBlockData<'tcx>>;
+ type Target = IndexSlice<BasicBlock, BasicBlockData<'tcx>>;
#[inline]
- fn deref(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ fn deref(&self) -> &IndexSlice<BasicBlock, BasicBlockData<'tcx>> {
&self.basic_blocks
}
}
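
The `Deref` retargeting above follows the same owning-container/borrowed-view split as `Vec<T>`/`[T]`: read-only callers go through the slice type and cannot grow or shrink the container behind the cache's back. A minimal sketch of the pattern with a plain newtype (illustrative only, not rustc code):

```rust
use std::ops::Deref;

// A made-up newtype standing in for `BasicBlocks`.
struct Blocks(Vec<u32>);

impl Deref for Blocks {
    // Deref to the slice, not the Vec: callers get read access without
    // being able to push/pop and invalidate cached CFG properties.
    type Target = [u32];
    #[inline]
    fn deref(&self) -> &[u32] {
        &self.0
    }
}

fn main() {
    let b = Blocks(vec![1, 2, 3]);
    assert_eq!(b.len(), 3); // slice methods work through Deref
    assert_eq!(b[1], 2); // indexing auto-derefs to the slice
}
```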
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 48375ed30..1a8e48264 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -2,8 +2,6 @@
mod init_mask;
mod provenance_map;
-#[cfg(test)]
-mod tests;
use std::borrow::Cow;
use std::fmt;
@@ -111,26 +109,34 @@ const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;
// large.
impl hash::Hash for Allocation {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ let Self {
+ bytes,
+ provenance,
+ init_mask,
+ align,
+ mutability,
+ extra: (), // don't bother hashing ()
+ } = self;
+
// Partially hash the `bytes` buffer when it is large. To limit collisions with common
// prefixes and suffixes, we hash the length and some slices of the buffer.
- let byte_count = self.bytes.len();
+ let byte_count = bytes.len();
if byte_count > MAX_HASHED_BUFFER_LEN {
// Hash the buffer's length.
byte_count.hash(state);
// And its head and tail.
- self.bytes[..MAX_BYTES_TO_HASH].hash(state);
- self.bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
+ bytes[..MAX_BYTES_TO_HASH].hash(state);
+ bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
} else {
- self.bytes.hash(state);
+ bytes.hash(state);
}
// Hash the other fields as usual.
- self.provenance.hash(state);
- self.init_mask.hash(state);
- self.align.hash(state);
- self.mutability.hash(state);
- self.extra.hash(state);
+ provenance.hash(state);
+ init_mask.hash(state);
+ align.hash(state);
+ mutability.hash(state);
}
}
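
The destructuring at the top of `hash` also makes the compiler flag any future field that is added to `Allocation` but forgotten in the body. The head/tail trick itself generalizes: hash the length plus bounded slices from both ends, so two large buffers sharing a common prefix or suffix still usually hash differently. A standalone sketch of that technique (the constant values here are illustrative, not the crate's):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative bounds; the real constants live in allocation.rs.
const MAX_BYTES_TO_HASH: usize = 64;
const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;

fn hash_bytes<H: Hasher>(bytes: &[u8], state: &mut H) {
    let byte_count = bytes.len();
    if byte_count > MAX_HASHED_BUFFER_LEN {
        // Length distinguishes same-prefix/same-suffix buffers...
        byte_count.hash(state);
        // ...and bounded head/tail slices cap the hashing cost.
        bytes[..MAX_BYTES_TO_HASH].hash(state);
        bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
    } else {
        bytes.hash(state);
    }
}

fn main() {
    let mut h = DefaultHasher::new();
    hash_bytes(&vec![0u8; 4096], &mut h);
    println!("{:x}", h.finish());
}
```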
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
index 82e9a961a..dcb56a175 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs
@@ -1,3 +1,6 @@
+#[cfg(test)]
+mod tests;
+
use std::hash;
use std::iter;
use std::ops::Range;
@@ -10,20 +13,185 @@ type Block = u64;
/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false`, the byte is uninitialized.
-// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
-// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
-#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable)]
-#[derive(HashStable)]
+/// The actual bits are only materialized when needed, and we try to keep this data lazy as long as
+/// possible. Currently, if all the blocks have the same value, then the mask represents either a
+/// fully initialized or fully uninitialized const allocation, so we only need to store that
+/// single value.
+#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
pub struct InitMask {
- blocks: Vec<Block>,
+ blocks: InitMaskBlocks,
len: Size,
}
+#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+enum InitMaskBlocks {
+ Lazy {
+ /// Whether the lazy init mask is fully initialized or uninitialized.
+ state: bool,
+ },
+ Materialized(InitMaskMaterialized),
+}
+
+impl InitMask {
+ pub fn new(size: Size, state: bool) -> Self {
+ // Blocks start lazily allocated, until we have to materialize them.
+ let blocks = InitMaskBlocks::Lazy { state };
+ InitMask { len: size, blocks }
+ }
+
+ /// Checks whether the `range` is entirely initialized.
+ ///
+ /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+ /// indexes for the first contiguous span of the uninitialized access.
+ #[inline]
+ pub fn is_range_initialized(&self, range: AllocRange) -> Result<(), AllocRange> {
+ let end = range.end();
+ if end > self.len {
+ return Err(AllocRange::from(self.len..end));
+ }
+
+ match self.blocks {
+ InitMaskBlocks::Lazy { state } => {
+ // Lazily allocated blocks represent the full mask, and cover the requested range by
+ // definition.
+ if state { Ok(()) } else { Err(range) }
+ }
+ InitMaskBlocks::Materialized(ref blocks) => {
+ blocks.is_range_initialized(range.start, end)
+ }
+ }
+ }
+
+ /// Sets a specified range to a value. If the range is out-of-bounds, the mask will grow to
+ /// accommodate it entirely.
+ pub fn set_range(&mut self, range: AllocRange, new_state: bool) {
+ let start = range.start;
+ let end = range.end();
+
+ let is_full_overwrite = start == Size::ZERO && end >= self.len;
+
+ // Optimize the cases of a full init/uninit state, while handling growth if needed.
+ match self.blocks {
+ InitMaskBlocks::Lazy { ref mut state } if is_full_overwrite => {
+ // This is fully overwriting the mask, and we'll still have a single initialization
+ // state: the blocks can stay lazy.
+ *state = new_state;
+ self.len = end;
+ }
+ InitMaskBlocks::Materialized(_) if is_full_overwrite => {
+ // This is also fully overwriting materialized blocks with a single initialization
+ // state: we'll have no need for these blocks anymore and can make them lazy.
+ self.blocks = InitMaskBlocks::Lazy { state: new_state };
+ self.len = end;
+ }
+ InitMaskBlocks::Lazy { state } if state == new_state => {
+ // Here we're partially overwriting the mask but the initialization state doesn't
+ // change: the blocks can stay lazy.
+ if end > self.len {
+ self.len = end;
+ }
+ }
+ _ => {
+ // Otherwise, we have a partial overwrite that can result in a mix of initialization
+ // states, so we'll need materialized blocks.
+ let len = self.len;
+ let blocks = self.materialize_blocks();
+
+ // There are 3 cases of interest here, if we have:
+ //
+ // [--------]
+ // ^ ^
+ // 0 len
+ //
+ // 1) the range to set can be in-bounds:
+ //
+ // xxxx = [start, end]
+ // [--------]
+ // ^ ^
+ // 0 len
+ //
+ // Here, we'll simply set the single `start` to `end` range.
+ //
+ // 2) the range to set can be partially out-of-bounds:
+ //
+ // xxxx = [start, end]
+ // [--------]
+ // ^ ^
+ // 0 len
+ //
+ // We have 2 subranges to handle:
+ // - we'll set the existing `start` to `len` range.
+ // - we'll grow and set the `len` to `end` range.
+ //
+ // 3) the range to set can be fully out-of-bounds:
+ //
+ // ---xxxx = [start, end]
+ // [--------]
+ // ^ ^
+ // 0 len
+ //
+ // Since we're growing the mask to a single `new_state` value, we consider the gap
+ // from `len` to `start` to be part of the range, and have a single subrange to
+ // handle: we'll grow and set the `len` to `end` range.
+ //
+ // Note that we have to materialize, set blocks, and grow the mask. We could
+ // therefore slightly optimize things in situations where these writes overlap.
+ // However, as of writing this, growing the mask doesn't happen in practice yet, so
+ // we don't do this micro-optimization.
+
+ if end <= len {
+ // Handle case 1.
+ blocks.set_range_inbounds(start, end, new_state);
+ } else {
+ if start < len {
+ // Handle the first subrange of case 2.
+ blocks.set_range_inbounds(start, len, new_state);
+ }
+
+ // Handle the second subrange of case 2, and case 3.
+ blocks.grow(len, end - len, new_state); // `Size` operation
+ self.len = end;
+ }
+ }
+ }
+ }
+
+ /// Materializes this mask's blocks when the mask is lazy.
+ #[inline]
+ fn materialize_blocks(&mut self) -> &mut InitMaskMaterialized {
+ if let InitMaskBlocks::Lazy { state } = self.blocks {
+ self.blocks = InitMaskBlocks::Materialized(InitMaskMaterialized::new(self.len, state));
+ }
+
+ let InitMaskBlocks::Materialized(ref mut blocks) = self.blocks else {
+ bug!("initmask blocks must be materialized here")
+ };
+ blocks
+ }
+
+ /// Returns the initialization state at the specified in-bounds index.
+ #[inline]
+ pub fn get(&self, idx: Size) -> bool {
+ match self.blocks {
+ InitMaskBlocks::Lazy { state } => state,
+ InitMaskBlocks::Materialized(ref blocks) => blocks.get(idx),
+ }
+ }
+}
+
+/// The actual materialized blocks of the bitmask, when we can't keep the `InitMask` lazy.
+// Note: for performance reasons when interning, some of the fields can be partially
+// hashed (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, HashStable)]
+struct InitMaskMaterialized {
+ blocks: Vec<Block>,
+}
+
// Const allocations are only hashed for interning. However, they can be large, making the hashing
// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
// big buffers like the allocation's init mask. We can partially hash some fields when they're
// large.
-impl hash::Hash for InitMask {
+impl hash::Hash for InitMaskMaterialized {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
@@ -41,18 +209,15 @@ impl hash::Hash for InitMask {
} else {
self.blocks.hash(state);
}
-
- // Hash the other fields as usual.
- self.len.hash(state);
}
}
-impl InitMask {
+impl InitMaskMaterialized {
pub const BLOCK_SIZE: u64 = 64;
- pub fn new(size: Size, state: bool) -> Self {
- let mut m = InitMask { blocks: vec![], len: Size::ZERO };
- m.grow(size, state);
+ fn new(size: Size, state: bool) -> Self {
+ let mut m = InitMaskMaterialized { blocks: vec![] };
+ m.grow(Size::ZERO, size, state);
m
}
@@ -62,8 +227,8 @@ impl InitMask {
// Each bit in a `Block` represents the initialization state of one byte of an allocation,
// so we use `.bytes()` here.
let bits = bits.bytes();
- let a = bits / InitMask::BLOCK_SIZE;
- let b = bits % InitMask::BLOCK_SIZE;
+ let a = bits / Self::BLOCK_SIZE;
+ let b = bits % Self::BLOCK_SIZE;
(usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
@@ -71,7 +236,7 @@ impl InitMask {
fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
let block = block.try_into().ok().unwrap();
let bit = bit.try_into().ok().unwrap();
- Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+ Size::from_bytes(block * Self::BLOCK_SIZE + bit)
}
/// Checks whether the `range` is entirely initialized.
@@ -79,13 +244,8 @@ impl InitMask {
/// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
/// indexes for the first contiguous span of the uninitialized access.
#[inline]
- pub fn is_range_initialized(&self, range: AllocRange) -> Result<(), AllocRange> {
- let end = range.end();
- if end > self.len {
- return Err(AllocRange::from(self.len..end));
- }
-
- let uninit_start = self.find_bit(range.start, end, false);
+ fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
+ let uninit_start = self.find_bit(start, end, false);
match uninit_start {
Some(uninit_start) => {
@@ -96,81 +256,80 @@ impl InitMask {
}
}
- pub fn set_range(&mut self, range: AllocRange, new_state: bool) {
- let end = range.end();
- let len = self.len;
- if end > len {
- self.grow(end - len, new_state);
- }
- self.set_range_inbounds(range.start, end, new_state);
- }
-
fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
- let (blocka, bita) = Self::bit_index(start);
- let (blockb, bitb) = Self::bit_index(end);
- if blocka == blockb {
- // First set all bits except the first `bita`,
- // then unset the last `64 - bitb` bits.
- let range = if bitb == 0 {
- u64::MAX << bita
+ let (block_a, bit_a) = Self::bit_index(start);
+ let (block_b, bit_b) = Self::bit_index(end);
+ if block_a == block_b {
+ // First set all bits except the first `bit_a`,
+ // then unset the last `64 - bit_b` bits.
+ let range = if bit_b == 0 {
+ u64::MAX << bit_a
} else {
- (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
+ (u64::MAX << bit_a) & (u64::MAX >> (64 - bit_b))
};
if new_state {
- self.blocks[blocka] |= range;
+ self.blocks[block_a] |= range;
} else {
- self.blocks[blocka] &= !range;
+ self.blocks[block_a] &= !range;
}
return;
}
// across block boundaries
if new_state {
- // Set `bita..64` to `1`.
- self.blocks[blocka] |= u64::MAX << bita;
- // Set `0..bitb` to `1`.
- if bitb != 0 {
- self.blocks[blockb] |= u64::MAX >> (64 - bitb);
+ // Set `bit_a..64` to `1`.
+ self.blocks[block_a] |= u64::MAX << bit_a;
+ // Set `0..bit_b` to `1`.
+ if bit_b != 0 {
+ self.blocks[block_b] |= u64::MAX >> (64 - bit_b);
}
// Fill in all the other blocks (much faster than one bit at a time).
- for block in (blocka + 1)..blockb {
+ for block in (block_a + 1)..block_b {
self.blocks[block] = u64::MAX;
}
} else {
- // Set `bita..64` to `0`.
- self.blocks[blocka] &= !(u64::MAX << bita);
- // Set `0..bitb` to `0`.
- if bitb != 0 {
- self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
+ // Set `bit_a..64` to `0`.
+ self.blocks[block_a] &= !(u64::MAX << bit_a);
+ // Set `0..bit_b` to `0`.
+ if bit_b != 0 {
+ self.blocks[block_b] &= !(u64::MAX >> (64 - bit_b));
}
// Fill in all the other blocks (much faster than one bit at a time).
- for block in (blocka + 1)..blockb {
+ for block in (block_a + 1)..block_b {
self.blocks[block] = 0;
}
}
}
#[inline]
- pub fn get(&self, i: Size) -> bool {
+ fn get(&self, i: Size) -> bool {
let (block, bit) = Self::bit_index(i);
(self.blocks[block] & (1 << bit)) != 0
}
- fn grow(&mut self, amount: Size, new_state: bool) {
+ fn grow(&mut self, len: Size, amount: Size, new_state: bool) {
if amount.bytes() == 0 {
return;
}
let unused_trailing_bits =
- u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
+ u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - len.bytes();
+
+ // If there's not enough capacity in the currently allocated blocks, allocate some more.
if amount.bytes() > unused_trailing_bits {
let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
- self.blocks.extend(
- // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
- iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
- );
+
+ // We allocate the blocks to the correct value for the requested init state, so we won't
+ // have to manually set them with another write.
+ let block = if new_state { u64::MAX } else { 0 };
+ self.blocks
+ .extend(iter::repeat(block).take(usize::try_from(additional_blocks).unwrap()));
+ }
+
+ // New blocks have already been set here, so we only need to set the unused trailing bits,
+ // if any.
+ if unused_trailing_bits > 0 {
+ let in_bounds_tail = Size::from_bytes(unused_trailing_bits);
+ self.set_range_inbounds(len, len + in_bounds_tail, new_state); // `Size` operation
}
- let start = self.len;
- self.len += amount;
- self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
}
/// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init.
@@ -188,7 +347,7 @@ impl InitMask {
/// ```
/// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
fn find_bit_fast(
- init_mask: &InitMask,
+ init_mask: &InitMaskMaterialized,
start: Size,
end: Size,
is_init: bool,
@@ -223,7 +382,7 @@ impl InitMask {
None
} else {
let bit = bits.trailing_zeros();
- Some(InitMask::size_from_bit_index(block, bit))
+ Some(InitMaskMaterialized::size_from_bit_index(block, bit))
}
}
@@ -253,9 +412,9 @@ impl InitMask {
// This provides the desired behavior of searching blocks 0 and 1 for (a),
// and searching only block 0 for (b).
// There is no concern of overflows since we checked for `start >= end` above.
- let (start_block, start_bit) = InitMask::bit_index(start);
+ let (start_block, start_bit) = InitMaskMaterialized::bit_index(start);
let end_inclusive = Size::from_bytes(end.bytes() - 1);
- let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+ let (end_block_inclusive, _) = InitMaskMaterialized::bit_index(end_inclusive);
// Handle first block: need to skip `start_bit` bits.
//
@@ -340,7 +499,7 @@ impl InitMask {
#[cfg_attr(not(debug_assertions), allow(dead_code))]
fn find_bit_slow(
- init_mask: &InitMask,
+ init_mask: &InitMaskMaterialized,
start: Size,
end: Size,
is_init: bool,
@@ -436,10 +595,19 @@ impl<'a> Iterator for InitChunkIter<'a> {
return None;
}
- let end_of_chunk =
- self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+ let end_of_chunk = match self.init_mask.blocks {
+ InitMaskBlocks::Lazy { .. } => {
+ // If we're iterating over the chunks of lazy blocks, we just emit a single
+ // full-size chunk.
+ self.end
+ }
+ InitMaskBlocks::Materialized(ref blocks) => {
+ let end_of_chunk =
+ blocks.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+ end_of_chunk
+ }
+ };
let range = self.start..end_of_chunk;
-
let ret =
Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
@@ -504,17 +672,19 @@ impl InitMask {
/// Applies multiple instances of the run-length encoding to the initialization mask.
pub fn apply_copy(&mut self, defined: InitCopy, range: AllocRange, repeat: u64) {
- // An optimization where we can just overwrite an entire range of initialization
- // bits if they are going to be uniformly `1` or `0`.
+ // An optimization where we can just overwrite an entire range of initialization bits if
+ // they are going to be uniformly `1` or `0`. If this happens to be a full-range overwrite,
+ // we won't need materialized blocks either.
if defined.ranges.len() <= 1 {
- self.set_range_inbounds(
- range.start,
- range.start + range.size * repeat, // `Size` operations
- defined.initial,
- );
+ let start = range.start;
+ let end = range.start + range.size * repeat; // `Size` operations
+ self.set_range(AllocRange::from(start..end), defined.initial);
return;
}
+ // We're about to do one or more partial writes, so we ensure the blocks are materialized.
+ let blocks = self.materialize_blocks();
+
for mut j in 0..repeat {
j *= range.size.bytes();
j += range.start.bytes();
@@ -522,7 +692,7 @@ impl InitMask {
for range in &defined.ranges {
let old_j = j;
j += range;
- self.set_range_inbounds(Size::from_bytes(old_j), Size::from_bytes(j), cur);
+ blocks.set_range_inbounds(Size::from_bytes(old_j), Size::from_bytes(j), cur);
cur = !cur;
}
}
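
Most of the heavy lifting in `set_range_inbounds` is the single-block case: build a `u64` mask covering bits `bit_a..bit_b`, then OR it in to set or AND its complement to clear. `bit_b == 0` encodes "through the end of the block", since the naive `u64::MAX >> (64 - 0)` would be an overflowing shift. A self-contained sketch of that mask construction (illustrative, not rustc code):

```rust
// Build a mask with bits `bit_a..bit_b` of one 64-bit block set.
fn range_mask(bit_a: u32, bit_b: u32) -> u64 {
    // `bit_b == 0` means "up to the end of the block": shifting by
    // `64 - 0 = 64` would overflow, so it is special-cased.
    if bit_b == 0 {
        u64::MAX << bit_a
    } else {
        (u64::MAX << bit_a) & (u64::MAX >> (64 - bit_b))
    }
}

fn main() {
    let mut block: u64 = 0;
    block |= range_mask(4, 12); // set bits 4..12
    assert_eq!(block, 0b1111_1111_0000);
    block &= !range_mask(4, 8); // clear bits 4..8 again
    assert_eq!(block, 0b1111_0000_0000);
}
```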
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask/tests.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask/tests.rs
new file mode 100644
index 000000000..1a7934bc2
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask/tests.rs
@@ -0,0 +1,195 @@
+use super::*;
+use crate::mir::interpret::alloc_range;
+
+#[test]
+fn uninit_mask() {
+ let mut mask = InitMask::new(Size::from_bytes(500), false);
+ assert!(!mask.get(Size::from_bytes(499)));
+ mask.set_range(alloc_range(Size::from_bytes(499), Size::from_bytes(1)), true);
+ assert!(mask.get(Size::from_bytes(499)));
+ mask.set_range((100..256).into(), true);
+ for i in 0..100 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+ for i in 100..256 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+ for i in 256..499 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+}
+
+/// Returns the number of materialized blocks for this mask.
+fn materialized_block_count(mask: &InitMask) -> usize {
+ match mask.blocks {
+ InitMaskBlocks::Lazy { .. } => 0,
+ InitMaskBlocks::Materialized(ref blocks) => blocks.blocks.len(),
+ }
+}
+
+#[test]
+fn materialize_mask_within_range() {
+ // To have spare bits, we use a mask size smaller than its block size of 64.
+ let mut mask = InitMask::new(Size::from_bytes(16), false);
+ assert_eq!(materialized_block_count(&mask), 0);
+
+ // Forces materialization, but doesn't require growth. This is case #1 documented in the
+ // `set_range` method.
+ mask.set_range((8..16).into(), true);
+ assert_eq!(materialized_block_count(&mask), 1);
+
+ for i in 0..8 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+ for i in 8..16 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+}
+
+#[test]
+fn grow_within_unused_bits_with_full_overwrite() {
+ // To have spare bits, we use a mask size smaller than its block size of 64.
+ let mut mask = InitMask::new(Size::from_bytes(16), true);
+ for i in 0..16 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+
+ // Grow without requiring an additional block. Full overwrite.
+ // This can be fully handled without materialization.
+ let range = (0..32).into();
+ mask.set_range(range, true);
+
+ for i in 0..32 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+
+ assert_eq!(materialized_block_count(&mask), 0);
+}
+
+// This test checks that an initmask's spare capacity is correctly used when growing within block
+// capacity. This can be fully handled without materialization.
+#[test]
+fn grow_same_state_within_unused_bits() {
+ // To have spare bits, we use a mask size smaller than its block size of 64.
+ let mut mask = InitMask::new(Size::from_bytes(16), true);
+ for i in 0..16 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+
+ // Grow without requiring an additional block. The gap between the current length and the
+ // range's beginning should be set to the same value as the range.
+ let range = (24..32).into();
+ mask.set_range(range, true);
+
+ // We want to make sure the unused bits in the first block are correct
+ for i in 16..24 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+
+ for i in 24..32 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+
+ assert_eq!(1, mask.range_as_init_chunks((0..32).into()).count());
+ assert_eq!(materialized_block_count(&mask), 0);
+}
+
+// This is the same test as `grow_same_state_within_unused_bits` but with both init and uninit
+// states: this forces materialization; otherwise the mask could stay lazy even when needing to
+// grow.
+#[test]
+fn grow_mixed_state_within_unused_bits() {
+ // To have spare bits, we use a mask size smaller than its block size of 64.
+ let mut mask = InitMask::new(Size::from_bytes(16), true);
+ for i in 0..16 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+
+ // Grow without requiring an additional block. The gap between the current length and the
+ // range's beginning should be set to the same value as the range. Note: since this is fully
+ // out-of-bounds of the current mask, this is case #3 described in the `set_range` method.
+ let range = (24..32).into();
+ mask.set_range(range, false);
+
+ // We want to make sure the unused bits in the first block are correct
+ for i in 16..24 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+
+ for i in 24..32 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+
+ assert_eq!(1, mask.range_as_init_chunks((0..16).into()).count());
+ assert_eq!(2, mask.range_as_init_chunks((0..32).into()).count());
+ assert_eq!(materialized_block_count(&mask), 1);
+}
+
+// This is similar to `grow_mixed_state_within_unused_bits` to force materialization, but the range
+// to set partially overlaps the mask, so this requires a different growth + write pattern in the
+// mask.
+#[test]
+fn grow_within_unused_bits_with_overlap() {
+ // To have spare bits, we use a mask size smaller than its block size of 64.
+ let mut mask = InitMask::new(Size::from_bytes(16), true);
+ for i in 0..16 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+
+ // Grow without requiring an additional block, but leave no gap after the current len. Note:
+ // since this is partially out-of-bounds of the current mask, this is case #2 described in the
+ // `set_range` method.
+ let range = (8..24).into();
+ mask.set_range(range, false);
+
+ // We want to make sure the unused bits in the first block are correct
+ for i in 8..24 {
+ assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
+ }
+
+ assert_eq!(1, mask.range_as_init_chunks((0..8).into()).count());
+ assert_eq!(2, mask.range_as_init_chunks((0..24).into()).count());
+ assert_eq!(materialized_block_count(&mask), 1);
+}
+
+// Force materialization before a full overwrite: the mask can now become lazy.
+#[test]
+fn grow_mixed_state_within_unused_bits_and_full_overwrite() {
+ // To have spare bits, we use a mask size smaller than its block size of 64.
+ let mut mask = InitMask::new(Size::from_bytes(16), true);
+ let range = (0..16).into();
+ assert!(mask.is_range_initialized(range).is_ok());
+
+ // Force materialization.
+ let range = (8..24).into();
+ mask.set_range(range, false);
+ assert!(mask.is_range_initialized(range).is_err());
+ assert_eq!(materialized_block_count(&mask), 1);
+
+ // Full overwrite, lazy blocks would be enough from now on.
+ let range = (0..32).into();
+ mask.set_range(range, true);
+ assert!(mask.is_range_initialized(range).is_ok());
+
+ assert_eq!(1, mask.range_as_init_chunks((0..32).into()).count());
+ assert_eq!(materialized_block_count(&mask), 0);
+}
+
+// Check that growth outside the current capacity can still be lazy: if the init state doesn't
+// change, we don't need materialized blocks.
+#[test]
+fn grow_same_state_outside_capacity() {
+ // To have spare bits, we use a mask size smaller than its block size of 64.
+ let mut mask = InitMask::new(Size::from_bytes(16), true);
+ for i in 0..16 {
+ assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
+ }
+ assert_eq!(materialized_block_count(&mask), 0);
+
+ // Grow to 10 blocks with the same init state.
+ let range = (24..640).into();
+ mask.set_range(range, true);
+
+ assert_eq!(1, mask.range_as_init_chunks((0..640).into()).count());
+ assert_eq!(materialized_block_count(&mask), 0);
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
index ddd3f3943..318f93e12 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs
@@ -14,7 +14,7 @@ use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
#[derive(HashStable)]
pub struct ProvenanceMap<Prov = AllocId> {
/// Provenance in this map applies from the given offset for an entire pointer-size worth of
- /// bytes. Two entires in this map are always at least a pointer size apart.
+ /// bytes. Two entries in this map are always at least a pointer size apart.
ptrs: SortedMap<Size, Prov>,
/// Provenance in this map only applies to the given single byte.
/// This map is disjoint from the previous. It will always be empty when
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs b/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs
deleted file mode 100644
index c9c3c50c5..000000000
--- a/compiler/rustc_middle/src/mir/interpret/allocation/tests.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-use super::*;
-
-#[test]
-fn uninit_mask() {
- let mut mask = InitMask::new(Size::from_bytes(500), false);
- assert!(!mask.get(Size::from_bytes(499)));
- mask.set_range(alloc_range(Size::from_bytes(499), Size::from_bytes(1)), true);
- assert!(mask.get(Size::from_bytes(499)));
- mask.set_range((100..256).into(), true);
- for i in 0..100 {
- assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
- }
- for i in 100..256 {
- assert!(mask.get(Size::from_bytes(i)), "{i} should be set");
- }
- for i in 256..499 {
- assert!(!mask.get(Size::from_bytes(i)), "{i} should not be set");
- }
-}
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 1766d7a66..1f8b650e3 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -263,7 +263,8 @@ impl AllocDecodingState {
}
pub fn new(data_offsets: Vec<u32>) -> Self {
- let decoding_state = vec![Lock::new(State::Empty); data_offsets.len()];
+ let decoding_state =
+ std::iter::repeat_with(|| Lock::new(State::Empty)).take(data_offsets.len()).collect();
Self { decoding_state, data_offsets }
}
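
The hunk above swaps the `vec![elem; n]` form for `iter::repeat_with`. The repeat form of the `vec!` macro requires `Clone` on the element, while `repeat_with` runs a closure once per element, so it also works for types like locks that are deliberately not cloneable. A minimal sketch, using `std::sync::Mutex` as a stand-in for `Lock`:

```rust
use std::sync::Mutex;

fn main() {
    let n = 4;
    // `vec![Mutex::new(0); n]` would not compile: `Mutex<i32>` is not `Clone`.
    // `repeat_with` constructs a fresh value per element instead.
    let locks: Vec<Mutex<i32>> =
        std::iter::repeat_with(|| Mutex::new(0)).take(n).collect();
    assert_eq!(locks.len(), n);
}
```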
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 99cdb769d..2ea8602af 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -10,7 +10,7 @@ use crate::ty::codec::{TyDecoder, TyEncoder};
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable};
use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::visit::{TypeVisitable, TypeVisitableExt, TypeVisitor};
-use crate::ty::{self, DefIdTree, List, Ty, TyCtxt};
+use crate::ty::{self, List, Ty, TyCtxt};
use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
use crate::ty::{GenericArg, InternalSubsts, SubstsRef};
@@ -21,13 +21,13 @@ use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
use rustc_hir::{self, GeneratorKind, ImplicitSelfKind};
use rustc_hir::{self as hir, HirId};
use rustc_session::Session;
-use rustc_target::abi::{Size, VariantIdx};
+use rustc_target::abi::{FieldIdx, Size, VariantIdx};
use polonius_engine::Atom;
pub use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::graph::dominators::Dominators;
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::vec::{Idx, IndexSlice, IndexVec};
use rustc_serialize::{Decodable, Encodable};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
@@ -70,12 +70,19 @@ pub use self::pretty::{
};
/// Types for locals
-pub type LocalDecls<'tcx> = IndexVec<Local, LocalDecl<'tcx>>;
+pub type LocalDecls<'tcx> = IndexSlice<Local, LocalDecl<'tcx>>;
pub trait HasLocalDecls<'tcx> {
fn local_decls(&self) -> &LocalDecls<'tcx>;
}
+impl<'tcx> HasLocalDecls<'tcx> for IndexVec<Local, LocalDecl<'tcx>> {
+ #[inline]
+ fn local_decls(&self) -> &LocalDecls<'tcx> {
+ self
+ }
+}
+
impl<'tcx> HasLocalDecls<'tcx> for LocalDecls<'tcx> {
#[inline]
fn local_decls(&self) -> &LocalDecls<'tcx> {
@@ -250,7 +257,7 @@ pub struct Body<'tcx> {
/// The first local is the return value pointer, followed by `arg_count`
/// locals for the function arguments, followed by any user-declared
/// variables and temporaries.
- pub local_decls: LocalDecls<'tcx>,
+ pub local_decls: IndexVec<Local, LocalDecl<'tcx>>,
/// User type annotations.
pub user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
@@ -311,7 +318,7 @@ impl<'tcx> Body<'tcx> {
source: MirSource<'tcx>,
basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
- local_decls: LocalDecls<'tcx>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
arg_count: usize,
var_debug_info: Vec<VarDebugInfo<'tcx>>,
@@ -401,8 +408,6 @@ impl<'tcx> Body<'tcx> {
LocalKind::ReturnPointer
} else if index < self.arg_count + 1 {
LocalKind::Arg
- } else if self.local_decls[local].is_user_variable() {
- LocalKind::Var
} else {
LocalKind::Temp
}
@@ -572,6 +577,13 @@ impl<T> ClearCrossCrate<T> {
}
}
+ pub fn as_mut(&mut self) -> ClearCrossCrate<&mut T> {
+ match self {
+ ClearCrossCrate::Clear => ClearCrossCrate::Clear,
+ ClearCrossCrate::Set(v) => ClearCrossCrate::Set(v),
+ }
+ }
+
pub fn assert_crate_local(self) -> T {
match self {
ClearCrossCrate::Clear => bug!("unwrapping cross-crate data"),
@@ -661,9 +673,7 @@ impl Atom for Local {
/// Classifies locals into categories. See `Body::local_kind`.
#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable)]
pub enum LocalKind {
- /// User-declared variable binding.
- Var,
- /// Compiler-introduced temporary.
+ /// User-declared variable binding or compiler-introduced temporary.
Temp,
/// Function argument.
Arg,
@@ -760,7 +770,7 @@ pub struct LocalDecl<'tcx> {
pub mutability: Mutability,
// FIXME(matthewjasper) Don't store this in `Body`
- pub local_info: Option<Box<LocalInfo<'tcx>>>,
+ pub local_info: ClearCrossCrate<Box<LocalInfo<'tcx>>>,
/// `true` if this is an internal local.
///
@@ -778,13 +788,6 @@ pub struct LocalDecl<'tcx> {
/// generator.
pub internal: bool,
- /// If this local is a temporary and `is_block_tail` is `Some`,
- /// then it is a temporary created for evaluation of some
- /// subexpression of some block's tail expression (with no
- /// intervening statement context).
- // FIXME(matthewjasper) Don't store in this in `Body`
- pub is_block_tail: Option<BlockTailInfo>,
-
/// The type of this local.
pub ty: Ty<'tcx>,
@@ -890,7 +893,7 @@ pub enum LocalInfo<'tcx> {
/// The `BindingForm` is solely used for local diagnostics when generating
/// warnings/errors when compiling the current crate, and therefore it need
/// not be visible across crates.
- User(ClearCrossCrate<BindingForm<'tcx>>),
+ User(BindingForm<'tcx>),
/// A temporary created that references the static with the given `DefId`.
StaticRef { def_id: DefId, is_thread_local: bool },
/// A temporary created that references the const with the given `DefId`
@@ -898,13 +901,23 @@ pub enum LocalInfo<'tcx> {
/// A temporary created during the creation of an aggregate
/// (e.g. a temporary for `foo` in `MyStruct { my_field: foo }`)
AggregateTemp,
+ /// A temporary created for evaluation of some subexpression of some block's tail expression
+ /// (with no intervening statement context).
+ // FIXME(matthewjasper) Don't store this in `Body`
+ BlockTailTemp(BlockTailInfo),
/// A temporary created during the pass `Derefer` to avoid its retagging
DerefTemp,
/// A temporary created for borrow checking.
FakeBorrow,
+ /// A local without anything interesting about it.
+ Boring,
}
impl<'tcx> LocalDecl<'tcx> {
+ pub fn local_info(&self) -> &LocalInfo<'tcx> {
+ &self.local_info.as_ref().assert_crate_local()
+ }
+
/// Returns `true` only if local is a binding that can itself be
/// made mutable via the addition of the `mut` keyword, namely
/// something like the occurrences of `x` in:
@@ -913,15 +926,15 @@ impl<'tcx> LocalDecl<'tcx> {
/// - or `match ... { C(x) => ... }`
pub fn can_be_made_mutable(&self) -> bool {
matches!(
- self.local_info,
- Some(box LocalInfo::User(ClearCrossCrate::Set(
+ self.local_info(),
+ LocalInfo::User(
BindingForm::Var(VarBindingForm {
binding_mode: ty::BindingMode::BindByValue(_),
opt_ty_info: _,
opt_match_place: _,
pat_span: _,
}) | BindingForm::ImplicitSelf(ImplicitSelfKind::Imm),
- )))
+ )
)
}
@@ -930,15 +943,15 @@ impl<'tcx> LocalDecl<'tcx> {
/// mutable bindings, but the inverse does not necessarily hold).
pub fn is_nonref_binding(&self) -> bool {
matches!(
- self.local_info,
- Some(box LocalInfo::User(ClearCrossCrate::Set(
+ self.local_info(),
+ LocalInfo::User(
BindingForm::Var(VarBindingForm {
binding_mode: ty::BindingMode::BindByValue(_),
opt_ty_info: _,
opt_match_place: _,
pat_span: _,
}) | BindingForm::ImplicitSelf(_),
- )))
+ )
)
}
@@ -946,38 +959,35 @@ impl<'tcx> LocalDecl<'tcx> {
/// parameter declared by the user.
#[inline]
pub fn is_user_variable(&self) -> bool {
- matches!(self.local_info, Some(box LocalInfo::User(_)))
+ matches!(self.local_info(), LocalInfo::User(_))
}
/// Returns `true` if this is a reference to a variable bound in a `match`
/// expression that is used to access said variable for the guard of the
/// match arm.
pub fn is_ref_for_guard(&self) -> bool {
- matches!(
- self.local_info,
- Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::RefForGuard)))
- )
+ matches!(self.local_info(), LocalInfo::User(BindingForm::RefForGuard))
}
/// Returns `true` if this is a reference to a static item that is used to
/// access that static.
pub fn is_ref_to_static(&self) -> bool {
- matches!(self.local_info, Some(box LocalInfo::StaticRef { .. }))
+ matches!(self.local_info(), LocalInfo::StaticRef { .. })
}
/// Returns `true` if this is a reference to a thread-local static item that is used to
/// access that static.
pub fn is_ref_to_thread_local(&self) -> bool {
- match self.local_info {
- Some(box LocalInfo::StaticRef { is_thread_local, .. }) => is_thread_local,
+ match self.local_info() {
+ LocalInfo::StaticRef { is_thread_local, .. } => *is_thread_local,
_ => false,
}
}
/// Returns `true` if this is a DerefTemp
pub fn is_deref_temp(&self) -> bool {
- match self.local_info {
- Some(box LocalInfo::DerefTemp) => return true,
+ match self.local_info() {
+ LocalInfo::DerefTemp => return true,
_ => (),
}
return false;
@@ -1001,9 +1011,8 @@ impl<'tcx> LocalDecl<'tcx> {
pub fn with_source_info(ty: Ty<'tcx>, source_info: SourceInfo) -> Self {
LocalDecl {
mutability: Mutability::Mut,
- local_info: None,
+ local_info: ClearCrossCrate::Set(Box::new(LocalInfo::Boring)),
internal: false,
- is_block_tail: None,
ty,
user_ty: None,
source_info,
@@ -1023,20 +1032,11 @@ impl<'tcx> LocalDecl<'tcx> {
self.mutability = Mutability::Not;
self
}
-
- /// Converts `self` into same `LocalDecl` except tagged as internal temporary.
- #[inline]
- pub fn block_tail(mut self, info: BlockTailInfo) -> Self {
- assert!(self.is_block_tail.is_none());
- self.is_block_tail = Some(info);
- self
- }
}
#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum VarDebugInfoContents<'tcx> {
- /// NOTE(eddyb) There's an unenforced invariant that this `Place` is
- /// based on a `Local`, not a `Static`, and contains no indexing.
+ /// This `Place` only contains projections which satisfy `can_use_in_debuginfo`.
Place(Place<'tcx>),
Const(Constant<'tcx>),
/// The user variable's data is split across several fragments,
@@ -1046,6 +1046,7 @@ pub enum VarDebugInfoContents<'tcx> {
/// the underlying debuginfo feature this relies on.
Composite {
/// Type of the original user variable.
+ /// This cannot contain a union or an enum.
ty: Ty<'tcx>,
/// All the parts of the original user variable, which ended
/// up in disjoint places, due to optimizations.
@@ -1074,17 +1075,16 @@ pub struct VarDebugInfoFragment<'tcx> {
/// Where in the composite user variable this fragment is,
/// represented as a "projection" into the composite variable.
/// At lower levels, this corresponds to a byte/bit range.
- // NOTE(eddyb) there's an unenforced invariant that this contains
- // only `Field`s, and not into `enum` variants or `union`s.
- // FIXME(eddyb) support this for `enum`s by either using DWARF's
+ ///
+ /// This can only contain `PlaceElem::Field`.
+ // FIXME support this for `enum`s by either using DWARF's
// more advanced control-flow features (unsupported by LLVM?)
// to match on the discriminant, or by using custom type debuginfo
// with non-overlapping variants for the composite variable.
pub projection: Vec<PlaceElem<'tcx>>,
/// Where the data for this fragment can be found.
- // NOTE(eddyb) There's an unenforced invariant that this `Place` is
- // contains no indexing (with a non-constant index).
+ /// This `Place` only contains projections which satisfy `can_use_in_debuginfo`.
pub contents: Place<'tcx>,
}
@@ -1115,6 +1115,11 @@ pub struct VarDebugInfo<'tcx> {
/// Where the data for this user variable is to be found.
pub value: VarDebugInfoContents<'tcx>,
+
+ /// When present, indicates what argument number this variable is in the function that it
+ /// originated from (starting from 1). Note, if MIR inlining is enabled, then this is the
+ /// argument number in the original function before it was inlined.
+ pub argument_index: Option<u16>,
}
///////////////////////////////////////////////////////////////////////////
@@ -1274,9 +1279,16 @@ impl<'tcx> BasicBlockData<'tcx> {
}
impl<O> AssertKind<O> {
+ /// Returns true if this is an overflow checking assertion controlled by -C overflow-checks.
+ pub fn is_optional_overflow_check(&self) -> bool {
+ use AssertKind::*;
+ use BinOp::*;
+ matches!(self, OverflowNeg(..) | Overflow(Add | Sub | Mul | Shl | Shr, ..))
+ }
+
/// Getting a description does not require `O` to be printable, and does not
/// require allocation.
- /// The caller is expected to handle `BoundsCheck` separately.
+ /// The caller is expected to handle `BoundsCheck` and `MisalignedPointerDereference` separately.
pub fn description(&self) -> &'static str {
use AssertKind::*;
match self {
@@ -1295,7 +1307,9 @@ impl<O> AssertKind<O> {
ResumedAfterReturn(GeneratorKind::Async(_)) => "`async fn` resumed after completion",
ResumedAfterPanic(GeneratorKind::Gen) => "generator resumed after panicking",
ResumedAfterPanic(GeneratorKind::Async(_)) => "`async fn` resumed after panicking",
- BoundsCheck { .. } => bug!("Unexpected AssertKind"),
+ BoundsCheck { .. } | MisalignedPointerDereference { .. } => {
+ bug!("Unexpected AssertKind")
+ }
}
}
@@ -1352,6 +1366,13 @@ impl<O> AssertKind<O> {
Overflow(BinOp::Shl, _, r) => {
write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {:?}", r)
}
+ MisalignedPointerDereference { required, found } => {
+ write!(
+ f,
+ "\"misaligned pointer dereference: address must be a multiple of {{}} but is {{}}\", {:?}, {:?}",
+ required, found
+ )
+ }
_ => write!(f, "\"{}\"", self.description()),
}
}
@@ -1396,6 +1417,13 @@ impl<O: fmt::Debug> fmt::Debug for AssertKind<O> {
Overflow(BinOp::Shl, _, r) => {
write!(f, "attempt to shift left by `{:#?}`, which would overflow", r)
}
+ MisalignedPointerDereference { required, found } => {
+ write!(
+ f,
+ "misaligned pointer dereference: address must be a multiple of {:?} but is {:?}",
+ required, found
+ )
+ }
_ => write!(f, "{}", self.description()),
}
}
@@ -1453,6 +1481,9 @@ impl Debug for Statement<'_> {
write!(fmt, "discriminant({:?}) = {:?}", place, variant_index)
}
Deinit(ref place) => write!(fmt, "Deinit({:?})", place),
+ PlaceMention(ref place) => {
+ write!(fmt, "PlaceMention({:?})", place)
+ }
AscribeUserType(box (ref place, ref c_ty), ref variance) => {
write!(fmt, "AscribeUserType({:?}, {:?}, {:?})", place, variance, c_ty)
}
@@ -1508,31 +1539,26 @@ impl<V, T> ProjectionElem<V, T> {
}
/// Returns `true` if this is a `Field` projection with the given index.
- pub fn is_field_to(&self, f: Field) -> bool {
+ pub fn is_field_to(&self, f: FieldIdx) -> bool {
matches!(*self, Self::Field(x, _) if x == f)
}
+
+ /// Returns `true` if this is accepted inside `VarDebugInfoContents::Place`.
+ pub fn can_use_in_debuginfo(&self) -> bool {
+ match self {
+ Self::Deref | Self::Downcast(_, _) | Self::Field(_, _) => true,
+ Self::ConstantIndex { .. }
+ | Self::Index(_)
+ | Self::OpaqueCast(_)
+ | Self::Subslice { .. } => false,
+ }
+ }
}
/// Alias for projections as they appear in `UserTypeProjection`, where we
/// need neither the `V` parameter for `Index` nor the `T` for `Field`.
pub type ProjectionKind = ProjectionElem<(), ()>;
-rustc_index::newtype_index! {
- /// A [newtype'd][wrapper] index type in the MIR [control-flow graph][CFG]
- ///
- /// A field (e.g., `f` in `_1.f`) is one variant of [`ProjectionElem`]. Conceptually,
- /// rustc can identify that a field projection refers to either two different regions of memory
- /// or the same one between the base and the 'projection element'.
- /// Read more about projections in the [rustc-dev-guide][mir-datatypes]
- ///
- /// [wrapper]: https://rustc-dev-guide.rust-lang.org/appendix/glossary.html#newtype
- /// [CFG]: https://rustc-dev-guide.rust-lang.org/appendix/background.html#cfg
- /// [mir-datatypes]: https://rustc-dev-guide.rust-lang.org/mir/index.html#mir-data-types
- #[derive(HashStable)]
- #[debug_format = "field[{}]"]
- pub struct Field {}
-}
-
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct PlaceRef<'tcx> {
pub local: Local,
@@ -1775,7 +1801,7 @@ impl SourceScope {
/// from the function that was inlined instead of the function call site.
pub fn lint_root(
self,
- source_scopes: &IndexVec<SourceScope, SourceScopeData<'_>>,
+ source_scopes: &IndexSlice<SourceScope, SourceScopeData<'_>>,
) -> Option<HirId> {
let mut data = &source_scopes[self];
// FIXME(oli-obk): we should be able to just walk the `inlined_parent_scope`, but it
@@ -1795,7 +1821,7 @@ impl SourceScope {
#[inline]
pub fn inlined_instance<'tcx>(
self,
- source_scopes: &IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ source_scopes: &IndexSlice<SourceScope, SourceScopeData<'tcx>>,
) -> Option<ty::Instance<'tcx>> {
let scope_data = &source_scopes[self];
if let Some((inlined_instance, _)) = scope_data.inlined {
@@ -1963,7 +1989,8 @@ impl<'tcx> Rvalue<'tcx> {
| CastKind::PtrToPtr
| CastKind::Pointer(_)
| CastKind::PointerFromExposedAddress
- | CastKind::DynStar,
+ | CastKind::DynStar
+ | CastKind::Transmute,
_,
_,
)
@@ -1979,6 +2006,13 @@ impl<'tcx> Rvalue<'tcx> {
}
impl BorrowKind {
+ pub fn mutability(&self) -> Mutability {
+ match *self {
+ BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => Mutability::Not,
+ BorrowKind::Mut { .. } => Mutability::Mut,
+ }
+ }
+
pub fn allows_two_phase_borrow(&self) -> bool {
match *self {
BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => false,
@@ -1995,13 +2029,6 @@ impl BorrowKind {
}
}
-impl BinOp {
- pub fn is_checkable(self) -> bool {
- use self::BinOp::*;
- matches!(self, Add | Sub | Mul | Shl | Shr)
- }
-}
-
impl<'tcx> Debug for Rvalue<'tcx> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
use self::Rvalue::*;
@@ -2528,7 +2555,7 @@ impl<'tcx> ConstantKind<'tcx> {
let parent_substs = if let Some(parent_hir_id) = tcx.hir().opt_parent_id(hir_id)
&& let Some(parent_did) = parent_hir_id.as_owner()
{
- InternalSubsts::identity_for_item(tcx, parent_did.to_def_id())
+ InternalSubsts::identity_for_item(tcx, parent_did)
} else {
List::empty()
};
@@ -2557,7 +2584,7 @@ impl<'tcx> ConstantKind<'tcx> {
Self::Unevaluated(
UnevaluatedConst {
def: def.to_global(),
- substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
+ substs: InternalSubsts::identity_for_item(tcx, def.did),
promoted: None,
},
ty,
@@ -2687,12 +2714,17 @@ impl<'tcx> UserTypeProjections {
self.map_projections(|pat_ty_proj| pat_ty_proj.deref())
}
- pub fn leaf(self, field: Field) -> Self {
+ pub fn leaf(self, field: FieldIdx) -> Self {
self.map_projections(|pat_ty_proj| pat_ty_proj.leaf(field))
}
- pub fn variant(self, adt_def: AdtDef<'tcx>, variant_index: VariantIdx, field: Field) -> Self {
- self.map_projections(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field))
+ pub fn variant(
+ self,
+ adt_def: AdtDef<'tcx>,
+ variant_index: VariantIdx,
+ field_index: FieldIdx,
+ ) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field_index))
}
}
@@ -2735,7 +2767,7 @@ impl UserTypeProjection {
self
}
- pub(crate) fn leaf(mut self, field: Field) -> Self {
+ pub(crate) fn leaf(mut self, field: FieldIdx) -> Self {
self.projs.push(ProjectionElem::Field(field, ()));
self
}
@@ -2744,13 +2776,13 @@ impl UserTypeProjection {
mut self,
adt_def: AdtDef<'_>,
variant_index: VariantIdx,
- field: Field,
+ field_index: FieldIdx,
) -> Self {
self.projs.push(ProjectionElem::Downcast(
Some(adt_def.variant(variant_index).name),
variant_index,
));
- self.projs.push(ProjectionElem::Field(field, ()));
+ self.projs.push(ProjectionElem::Field(field_index, ()));
self
}
}
@@ -3085,7 +3117,7 @@ mod size_asserts {
use rustc_data_structures::static_assert_size;
// tidy-alphabetical-start
static_assert_size!(BasicBlockData<'_>, 144);
- static_assert_size!(LocalDecl<'_>, 56);
+ static_assert_size!(LocalDecl<'_>, 40);
static_assert_size!(Statement<'_>, 32);
static_assert_size!(StatementKind<'_>, 16);
static_assert_size!(Terminator<'_>, 112);
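
Several hunks above move `local_info` behind `ClearCrossCrate` and add the `as_mut` projection that `MirPatch::new_local_with_info` (further down) relies on. A reduced sketch of how that wrapper behaves, with a made-up payload (illustrative only; the rustc definition also derives serialization and more):

```rust
enum ClearCrossCrate<T> {
    // Cross-crate consumers only ever see `Clear`.
    Clear,
    Set(T),
}

impl<T> ClearCrossCrate<T> {
    // Project to a mutable reference without consuming the wrapper.
    fn as_mut(&mut self) -> ClearCrossCrate<&mut T> {
        match self {
            ClearCrossCrate::Clear => ClearCrossCrate::Clear,
            ClearCrossCrate::Set(v) => ClearCrossCrate::Set(v),
        }
    }
    // Local-crate consumers assert the data is present.
    fn assert_crate_local(self) -> T {
        match self {
            ClearCrossCrate::Clear => panic!("unwrapping cross-crate data"),
            ClearCrossCrate::Set(v) => v,
        }
    }
}

fn main() {
    let mut info = ClearCrossCrate::Set(Box::new(41));
    // The shape used by `MirPatch::new_local_with_info` below:
    // project mutably, assert crate-local, write through the box.
    **info.as_mut().assert_crate_local() = 42;
    assert_eq!(*info.assert_crate_local(), 42);
}
```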
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
index 7a05ee2ff..f592f1515 100644
--- a/compiler/rustc_middle/src/mir/mono.rs
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -381,7 +381,9 @@ impl<'tcx> CodegenUnit<'tcx> {
| InstanceDef::Virtual(..)
| InstanceDef::ClosureOnceShim { .. }
| InstanceDef::DropGlue(..)
- | InstanceDef::CloneShim(..) => None,
+ | InstanceDef::CloneShim(..)
+ | InstanceDef::ThreadLocalShim(..)
+ | InstanceDef::FnPtrAddrShim(..) => None,
}
}
MonoItem::Static(def_id) => def_id.as_local().map(Idx::index),
diff --git a/compiler/rustc_middle/src/mir/patch.rs b/compiler/rustc_middle/src/mir/patch.rs
index 24fe3b472..f62853c3e 100644
--- a/compiler/rustc_middle/src/mir/patch.rs
+++ b/compiler/rustc_middle/src/mir/patch.rs
@@ -12,6 +12,9 @@ pub struct MirPatch<'tcx> {
new_statements: Vec<(Location, StatementKind<'tcx>)>,
new_locals: Vec<LocalDecl<'tcx>>,
resume_block: Option<BasicBlock>,
+ // Only for unreachable in cleanup path.
+ unreachable_cleanup_block: Option<BasicBlock>,
+ terminate_block: Option<BasicBlock>,
body_span: Span,
next_local: usize,
}
@@ -25,14 +28,31 @@ impl<'tcx> MirPatch<'tcx> {
new_locals: vec![],
next_local: body.local_decls.len(),
resume_block: None,
+ unreachable_cleanup_block: None,
+ terminate_block: None,
body_span: body.span,
};
- // Check if we already have a resume block
for (bb, block) in body.basic_blocks.iter_enumerated() {
+ // Check if we already have a resume block
if let TerminatorKind::Resume = block.terminator().kind && block.statements.is_empty() {
result.resume_block = Some(bb);
- break;
+ continue;
+ }
+
+ // Check if we already have an unreachable block
+ if let TerminatorKind::Unreachable = block.terminator().kind
+ && block.statements.is_empty()
+ && block.is_cleanup
+ {
+ result.unreachable_cleanup_block = Some(bb);
+ continue;
+ }
+
+ // Check if we already have a terminate block
+ if let TerminatorKind::Terminate = block.terminator().kind && block.statements.is_empty() {
+ result.terminate_block = Some(bb);
+ continue;
}
}
@@ -56,6 +76,40 @@ impl<'tcx> MirPatch<'tcx> {
bb
}
+ pub fn unreachable_cleanup_block(&mut self) -> BasicBlock {
+ if let Some(bb) = self.unreachable_cleanup_block {
+ return bb;
+ }
+
+ let bb = self.new_block(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(self.body_span),
+ kind: TerminatorKind::Unreachable,
+ }),
+ is_cleanup: true,
+ });
+ self.unreachable_cleanup_block = Some(bb);
+ bb
+ }
+
+ pub fn terminate_block(&mut self) -> BasicBlock {
+ if let Some(bb) = self.terminate_block {
+ return bb;
+ }
+
+ let bb = self.new_block(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(self.body_span),
+ kind: TerminatorKind::Terminate,
+ }),
+ is_cleanup: true,
+ });
+ self.terminate_block = Some(bb);
+ bb
+ }
+
pub fn is_patched(&self, bb: BasicBlock) -> bool {
self.patch_map[bb].is_some()
}
@@ -72,28 +126,28 @@ impl<'tcx> MirPatch<'tcx> {
&mut self,
ty: Ty<'tcx>,
span: Span,
- local_info: Option<Box<LocalInfo<'tcx>>>,
+ local_info: LocalInfo<'tcx>,
) -> Local {
let index = self.next_local;
self.next_local += 1;
let mut new_decl = LocalDecl::new(ty, span).internal();
- new_decl.local_info = local_info;
+ **new_decl.local_info.as_mut().assert_crate_local() = local_info;
self.new_locals.push(new_decl);
- Local::new(index as usize)
+ Local::new(index)
}
pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
let index = self.next_local;
self.next_local += 1;
self.new_locals.push(LocalDecl::new(ty, span));
- Local::new(index as usize)
+ Local::new(index)
}
pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
let index = self.next_local;
self.next_local += 1;
self.new_locals.push(LocalDecl::new(ty, span).internal());
- Local::new(index as usize)
+ Local::new(index)
}
pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
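
`unreachable_cleanup_block` and `terminate_block` above both follow the same memoization shape: scan for a reusable block in `new`, otherwise create the block once on first request and cache its index so every later caller shares it. A stripped-down sketch of that shape (illustrative, not rustc code):

```rust
// A made-up miniature of `MirPatch`: blocks are just labels here.
struct Patch {
    blocks: Vec<&'static str>,
    unreachable_block: Option<usize>,
}

impl Patch {
    fn unreachable_block(&mut self) -> usize {
        if let Some(bb) = self.unreachable_block {
            return bb; // reuse the cached block
        }
        // First request: create the block and remember its index.
        let bb = self.blocks.len();
        self.blocks.push("unreachable");
        self.unreachable_block = Some(bb);
        bb
    }
}

fn main() {
    let mut p = Patch { blocks: vec![], unreachable_block: None };
    let a = p.unreachable_block();
    let b = p.unreachable_block();
    assert_eq!(a, b); // created once, shared afterwards
    assert_eq!(p.blocks.len(), 1);
}
```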
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index d8829e3e7..7e5195359 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -123,6 +123,7 @@ fn dump_matched_mir_node<'tcx, F>(
// see notes on #41697 above
let def_path =
ty::print::with_forced_impl_filename_line!(tcx.def_path_str(body.source.def_id()));
+ // ignore-tidy-odd-backticks the literal below is fine
write!(file, "// MIR for `{}", def_path)?;
match body.source.promoted {
None => write!(file, "`")?,
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index b964c1852..cfdf1dcf5 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -2,20 +2,20 @@
use crate::mir::{Body, ConstantKind, Promoted};
use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt};
-use rustc_data_structures::fx::FxHashSet;
-use rustc_data_structures::vec_map::VecMap;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::unord::UnordSet;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_index::bit_set::BitMatrix;
use rustc_index::vec::{Idx, IndexVec};
use rustc_span::Span;
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{FieldIdx, VariantIdx};
use smallvec::SmallVec;
use std::cell::Cell;
use std::fmt::{self, Debug};
-use super::{Field, SourceInfo};
+use super::SourceInfo;
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationKind {
@@ -123,7 +123,7 @@ pub struct UnsafetyCheckResult {
pub violations: Vec<UnsafetyViolation>,
/// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint.
- pub used_unsafe_blocks: FxHashSet<hir::HirId>,
+ pub used_unsafe_blocks: UnordSet<hir::HirId>,
/// This is `Some` iff the item is not a closure.
pub unused_unsafes: Option<Vec<(hir::HirId, UnusedUnsafe)>>,
@@ -152,7 +152,7 @@ pub struct GeneratorLayout<'tcx> {
/// Which of the above fields are in each variant. Note that one field may
/// be stored in multiple variants.
- pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
+ pub variant_fields: IndexVec<VariantIdx, IndexVec<FieldIdx, GeneratorSavedLocal>>,
/// The source that led to each variant being created (usually, a yield or
/// await).
@@ -227,9 +227,9 @@ pub struct BorrowCheckResult<'tcx> {
/// All the opaque types that are restricted to concrete types
/// by this function. Unlike the value in `TypeckResults`, this has
/// unerased regions.
- pub concrete_opaque_types: VecMap<LocalDefId, OpaqueHiddenType<'tcx>>,
+ pub concrete_opaque_types: FxIndexMap<LocalDefId, OpaqueHiddenType<'tcx>>,
pub closure_requirements: Option<ClosureRegionRequirements<'tcx>>,
- pub used_mut_upvars: SmallVec<[Field; 8]>,
+ pub used_mut_upvars: SmallVec<[FieldIdx; 8]>,
pub tainted_by_errors: Option<ErrorGuaranteed>,
}
@@ -353,7 +353,7 @@ pub enum ConstraintCategory<'tcx> {
/// like `Foo { field: my_val }`)
Usage,
OpaqueType,
- ClosureUpvar(Field),
+ ClosureUpvar(FieldIdx),
/// A constraint from a user-written predicate
/// with the provided span, written on the item
@@ -375,7 +375,7 @@ pub enum ConstraintCategory<'tcx> {
#[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)]
pub enum ReturnConstraint {
Normal,
- ClosureUpvar(Field),
+ ClosureUpvar(FieldIdx),
}
/// The subject of a `ClosureOutlivesRequirement` -- that is, the thing
@@ -411,10 +411,8 @@ impl<'tcx> ClosureOutlivesSubjectTy<'tcx> {
pub fn bind(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Self {
let inner = tcx.fold_regions(ty, |r, depth| match r.kind() {
ty::ReVar(vid) => {
- let br = ty::BoundRegion {
- var: ty::BoundVar::new(vid.index()),
- kind: ty::BrAnon(vid.as_u32(), None),
- };
+ let br =
+ ty::BoundRegion { var: ty::BoundVar::new(vid.index()), kind: ty::BrAnon(None) };
tcx.mk_re_late_bound(depth, br)
}
_ => bug!("unexpected region in ClosureOutlivesSubjectTy: {r:?}"),
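Several hunks above replace the old `Field` newtype with `FieldIdx` from `rustc_target::abi`. Typed indices like this exist so that a field index and a variant index cannot be confused even though both are small integers. A hedged toy model of the idea (rustc generates the real types with its `newtype_index!` macro, which this sketch does not use):

```rust
use std::marker::PhantomData;

struct Idx<Tag>(u32, PhantomData<Tag>);

impl<Tag> Idx<Tag> {
    fn new(i: usize) -> Self {
        Idx(i as u32, PhantomData)
    }
    fn index(&self) -> usize {
        self.0 as usize
    }
}

enum FieldTag {}
enum VariantTag {}
type FieldIdx = Idx<FieldTag>;
type VariantIdx = Idx<VariantTag>;

// Accepting `FieldIdx` rather than a bare `usize` means a `VariantIdx`
// (or any other kind of index) cannot be passed here by mistake.
fn field_name<'a>(fields: &[&'a str], f: FieldIdx) -> &'a str {
    fields[f.index()]
}

fn main() {
    let fields = ["x", "y"];
    let f: FieldIdx = Idx::new(1);
    let _v: VariantIdx = Idx::new(0);
    assert_eq!(field_name(&fields, f), "y");
    // field_name(&fields, _v); // type error: expected `FieldIdx`, found `VariantIdx`
}
```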
diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs
index 28a3b51b7..2165403da 100644
--- a/compiler/rustc_middle/src/mir/spanview.rs
+++ b/compiler/rustc_middle/src/mir/spanview.rs
@@ -247,6 +247,7 @@ pub fn statement_kind_name(statement: &Statement<'_>) -> &'static str {
StorageLive(..) => "StorageLive",
StorageDead(..) => "StorageDead",
Retag(..) => "Retag",
+ PlaceMention(..) => "PlaceMention",
AscribeUserType(..) => "AscribeUserType",
Coverage(..) => "Coverage",
Intrinsic(..) => "Intrinsic",
@@ -261,11 +262,10 @@ pub fn terminator_kind_name(term: &Terminator<'_>) -> &'static str {
Goto { .. } => "Goto",
SwitchInt { .. } => "SwitchInt",
Resume => "Resume",
- Abort => "Abort",
+ Terminate => "Terminate",
Return => "Return",
Unreachable => "Unreachable",
Drop { .. } => "Drop",
- DropAndReplace { .. } => "DropAndReplace",
Call { .. } => "Call",
Assert { .. } => "Assert",
Yield { .. } => "Yield",
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index ae09562a8..93800d484 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -3,7 +3,7 @@
//! This is in a dedicated file so that changes to this file can be reviewed more carefully.
//! The intention is that this file only contains datatype declarations, no code.
-use super::{BasicBlock, Constant, Field, Local, SwitchTargets, UserTypeProjection};
+use super::{BasicBlock, Constant, Local, SwitchTargets, UserTypeProjection};
use crate::mir::coverage::{CodeRegion, CoverageKind};
use crate::traits::Reveal;
@@ -16,7 +16,8 @@ use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::def_id::DefId;
use rustc_hir::{self as hir};
use rustc_hir::{self, GeneratorKind};
-use rustc_target::abi::VariantIdx;
+use rustc_index::vec::IndexVec;
+use rustc_target::abi::{FieldIdx, VariantIdx};
use rustc_ast::Mutability;
use rustc_span::def_id::LocalDefId;
@@ -78,7 +79,8 @@ pub enum MirPhase {
/// MIR, this is UB.
/// - Retags: If `-Zmir-emit-retag` is enabled, analysis MIR has "implicit" retags in the same way
/// that Rust itself has them. Where exactly these are is generally subject to change, and so we
- /// don't document this here. Runtime MIR has all retags explicit.
+ /// don't document this here. Runtime MIR has most retags explicit (though implicit retags
+ /// can still occur at `Rvalue::{Ref,AddrOf}`).
/// - Generator bodies: In analysis MIR, locals may actually be behind a pointer that user code has
/// access to. This occurs in generator bodies. Such locals do not behave like other locals,
/// because they eg may be aliased in surprising ways. Runtime MIR has no such special locals -
@@ -133,7 +135,6 @@ pub enum AnalysisPhase {
pub enum RuntimePhase {
/// In addition to the semantic changes, beginning with this phase, the following variants are
/// disallowed:
- /// * [`TerminatorKind::DropAndReplace`]
/// * [`TerminatorKind::Yield`]
/// * [`TerminatorKind::GeneratorDrop`]
/// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
@@ -219,6 +220,11 @@ pub enum BorrowKind {
/// immutable, but not aliasable. This solves the problem. For
/// simplicity, we don't give users the way to express this
/// borrow, it's just used when translating closures.
+ ///
+ // FIXME(#112072): This is wrong. Unique borrows are mutable borrows except
+ // that they do not require their pointee to be marked as mutable.
+ // They should still be treated as mutable borrows in every other way,
+ // e.g. for variance or overlap checking.
Unique,
/// Data is mutable and not aliasable.
@@ -326,6 +332,15 @@ pub enum StatementKind<'tcx> {
/// Only `RetagKind::Default` and `RetagKind::FnEntry` are permitted.
Retag(RetagKind, Box<Place<'tcx>>),
+ /// This statement exists to preserve a trace of a scrutinee matched against a wildcard binding.
+ /// This is especially useful for `let _ = PLACE;` bindings that desugar to a single
+ /// `PlaceMention(PLACE)`.
+ ///
+ /// When executed at runtime this is a nop.
+ ///
+ /// Disallowed after drop elaboration.
+ PlaceMention(Box<Place<'tcx>>),
+
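A hedged, simplified model of how the new statement behaves: `PlaceMention` records that a place was named (as in `let _ = x;`) without reading or writing it, so an interpreter treats it as a no-op. The types here are toys, not rustc's:

```rust
use std::collections::HashMap;

enum StatementKind {
    Assign { place: &'static str, value: i32 },
    PlaceMention(&'static str),
}

fn execute(stmt: &StatementKind, memory: &mut HashMap<&'static str, i32>) {
    match stmt {
        StatementKind::Assign { place, value } => {
            memory.insert(*place, *value);
        }
        // A nop at runtime: the place is neither loaded nor stored, but
        // borrowck and lints still see that it was mentioned.
        StatementKind::PlaceMention(_) => {}
    }
}

fn main() {
    let mut memory = HashMap::new();
    execute(&StatementKind::Assign { place: "x", value: 7 }, &mut memory);
    execute(&StatementKind::PlaceMention("x"), &mut memory); // `let _ = x;`
    assert_eq!(memory["x"], 7); // unchanged by the mention
}
```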
/// Encodes a user's type ascription. These need to be preserved
/// intact so that NLL can respect them. For example:
/// ```ignore (illustrative)
@@ -505,15 +520,15 @@ pub struct CopyNonOverlapping<'tcx> {
///
/// A note on unwinding: Panics may occur during the execution of some terminators. Depending on the
/// `-C panic` flag, this may either cause the program to abort or the call stack to unwind. Such
-/// terminators have a `cleanup: Option<BasicBlock>` field on them. If stack unwinding occurs, then
-/// once the current function is reached, execution continues at the given basic block, if any. If
-/// `cleanup` is `None` then no cleanup is performed, and the stack continues unwinding. This is
-/// equivalent to the execution of a `Resume` terminator.
+/// terminators have an `unwind: UnwindAction` field on them. If stack unwinding occurs, then
+/// once unwinding reaches the current function's frame, the action given by the `unwind` field
+/// is taken. If the action is `Cleanup`, execution continues at the given basic block. If the
+/// action is `Continue`, no cleanup is performed and the stack continues unwinding.
///
-/// The basic block pointed to by a `cleanup` field must have its `cleanup` flag set. `cleanup`
-/// basic blocks have a couple restrictions:
-/// 1. All `cleanup` fields in them must be `None`.
-/// 2. `Return` terminators are not allowed in them. `Abort` and `Unwind` terminators are.
+/// The basic block pointed to by a `Cleanup` unwind action must have its `cleanup` flag set.
+/// `cleanup` basic blocks have a couple restrictions:
+/// 1. All `unwind` fields in them must be `UnwindAction::Terminate` or `UnwindAction::Unreachable`.
+/// 2. `Return` terminators are not allowed in them. `Terminate` and `Resume` terminators are.
/// 3. All other basic blocks (in the current body) that are reachable from `cleanup` basic blocks
/// must also be `cleanup`. This is a part of the type system and checked statically, so it is
/// still an error to have such an edge in the CFG even if it's known that it won't be taken at
@@ -555,11 +570,11 @@ pub enum TerminatorKind<'tcx> {
/// deaggregation runs.
Resume,
- /// Indicates that the landing pad is finished and that the process should abort.
+ /// Indicates that the landing pad is finished and that the process should terminate.
///
/// Used to prevent unwinding for foreign items or with `-C unwind=abort`. Only permitted in
/// cleanup blocks.
- Abort,
+ Terminate,
/// Returns from the function.
///
@@ -594,44 +609,7 @@ pub enum TerminatorKind<'tcx> {
/// > The drop glue is executed if, among all statements executed within this `Body`, an assignment to
/// > the place or one of its "parents" occurred more recently than a move out of it. This does not
/// > consider indirect assignments.
- Drop { place: Place<'tcx>, target: BasicBlock, unwind: Option<BasicBlock> },
-
- /// Drops the place and assigns a new value to it.
- ///
- /// This first performs the exact same operation as the pre drop-elaboration `Drop` terminator;
- /// it then additionally assigns the `value` to the `place` as if by an assignment statement.
- /// This assignment occurs both in the unwind and the regular code paths. The semantics are best
- /// explained by the elaboration:
- ///
- /// ```ignore (MIR)
- /// BB0 {
- /// DropAndReplace(P <- V, goto BB1, unwind BB2)
- /// }
- /// ```
- ///
- /// becomes
- ///
- /// ```ignore (MIR)
- /// BB0 {
- /// Drop(P, goto BB1, unwind BB2)
- /// }
- /// BB1 {
- /// // P is now uninitialized
- /// P <- V
- /// }
- /// BB2 {
- /// // P is now uninitialized -- its dtor panicked
- /// P <- V
- /// }
- /// ```
- ///
- /// Disallowed after drop elaboration.
- DropAndReplace {
- place: Place<'tcx>,
- value: Operand<'tcx>,
- target: BasicBlock,
- unwind: Option<BasicBlock>,
- },
+ Drop { place: Place<'tcx>, target: BasicBlock, unwind: UnwindAction },
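The removed doc comment described `DropAndReplace` as a drop followed by an assignment on both the normal and unwind paths. In surface Rust the same behavior is simply overwriting an initialized place, which drop elaboration now lowers to the `Drop`-then-assign sequence directly; a small demonstration of that observable order:

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let mut p = Noisy("old");
    p = Noisy("new"); // prints "dropping old": drop first, then assign
    println!("now holding {}", p.0);
}
```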
/// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of
/// the referred to function. The operand types must match the argument types of the function.
@@ -655,8 +633,8 @@ pub enum TerminatorKind<'tcx> {
destination: Place<'tcx>,
/// Where to go after this call returns. If none, the call necessarily diverges.
target: Option<BasicBlock>,
- /// Cleanups to be done if the call unwinds.
- cleanup: Option<BasicBlock>,
+ /// Action to be taken if the call unwinds.
+ unwind: UnwindAction,
/// `true` if this is from a call in HIR rather than from an overloaded
/// operator. True for overloaded function call.
from_hir_call: bool,
@@ -675,14 +653,13 @@ pub enum TerminatorKind<'tcx> {
/// When overflow checking is disabled and this is run-time MIR (as opposed to compile-time MIR
/// that is used for CTFE), the following variants of this terminator behave as `goto target`:
/// - `OverflowNeg(..)`,
- /// - `Overflow(op, ..)` if op is a "checkable" operation (add, sub, mul, shl, shr, but NOT
- /// div or rem).
+ /// - `Overflow(op, ..)` if op is add, sub, mul, shl, shr, but NOT div or rem.
Assert {
cond: Operand<'tcx>,
expected: bool,
msg: AssertMessage<'tcx>,
target: BasicBlock,
- cleanup: Option<BasicBlock>,
+ unwind: UnwindAction,
},
/// Marks a suspend point.
@@ -748,9 +725,8 @@ pub enum TerminatorKind<'tcx> {
/// in practice, but in order to avoid fragility we want to always
/// consider it in borrowck. We don't want to accept programs which
/// pass borrowck only when `panic=abort` or some assertions are disabled
- /// due to release vs. debug mode builds. This needs to be an `Option` because
- /// of the `remove_noop_landing_pads` and `abort_unwinding_calls` passes.
- unwind: Option<BasicBlock>,
+ /// due to release vs. debug mode builds.
+ unwind: UnwindAction,
},
/// Block ends with an inline assembly block. This is a terminator since
@@ -773,12 +749,31 @@ pub enum TerminatorKind<'tcx> {
/// diverging (InlineAsmOptions::NORETURN).
destination: Option<BasicBlock>,
- /// Cleanup to be done if the inline assembly unwinds. This is present
+ /// Action to be taken if the inline assembly unwinds. This is present
/// if and only if InlineAsmOptions::MAY_UNWIND is set.
- cleanup: Option<BasicBlock>,
+ unwind: UnwindAction,
},
}
+/// Action to be taken when a stack unwind happens.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum UnwindAction {
+ /// No action is to be taken. Continue unwinding.
+ ///
+ /// This is similar to `Cleanup(bb)` where `bb` does nothing but `Resume`, but they are not
+ /// equivalent, as the presence of `Cleanup(_)` makes a frame non-POF ("plain old frame").
+ Continue,
+ /// Triggers undefined behavior if an unwind happens.
+ Unreachable,
+ /// Terminates execution if an unwind happens.
+ ///
+ /// Depending on the platform and situation this may cause a non-unwindable panic or abort.
+ Terminate,
+ /// Cleanup code to be run before continuing to unwind.
+ Cleanup(BasicBlock),
+}
+
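`UnwindAction` folds what used to be an `Option<BasicBlock>` plus pass-specific conventions into one explicit enum. A hedged sketch of the correspondence, with a toy `BasicBlock`:

```rust
#[derive(Copy, Clone, Debug, PartialEq)]
struct BasicBlock(u32);

#[derive(Copy, Clone, Debug, PartialEq)]
enum UnwindAction {
    Continue,
    Unreachable,
    Terminate,
    Cleanup(BasicBlock),
}

// The old encoding: `Some(bb)` meant "run the cleanup block", `None` meant
// "keep unwinding". `Unreachable` and `Terminate` had no direct spelling
// and were previously expressed by dedicated blocks or separate passes.
fn from_legacy(cleanup: Option<BasicBlock>) -> UnwindAction {
    match cleanup {
        Some(bb) => UnwindAction::Cleanup(bb),
        None => UnwindAction::Continue,
    }
}

fn main() {
    assert_eq!(from_legacy(Some(BasicBlock(2))), UnwindAction::Cleanup(BasicBlock(2)));
    assert_eq!(from_legacy(None), UnwindAction::Continue);
}
```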
/// Information about an assertion failure.
#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum AssertKind<O> {
@@ -789,6 +784,7 @@ pub enum AssertKind<O> {
RemainderByZero(O),
ResumedAfterReturn(GeneratorKind),
ResumedAfterPanic(GeneratorKind),
+ MisalignedPointerDereference { required: O, found: O },
}
#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
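The new `MisalignedPointerDereference { required, found }` variant reports a failed alignment check. The check itself is ordinary modular arithmetic: an address is valid for a type only if it is a multiple of the type's alignment. A small self-contained illustration:

```rust
fn is_aligned_for<T>(addr: usize) -> bool {
    addr % std::mem::align_of::<T>() == 0
}

fn main() {
    let x: u32 = 0;
    let addr = &x as *const u32 as usize;
    assert!(is_aligned_for::<u32>(addr)); // a real `&u32` is always 4-aligned
    assert!(!is_aligned_for::<u32>(addr + 1)); // `required` = 4, `found` = addr + 1
}
```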
@@ -917,7 +913,15 @@ pub struct Place<'tcx> {
#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum ProjectionElem<V, T> {
Deref,
- Field(Field, T),
+
+ /// A field (e.g., `f` in `_1.f`) is one variant of [`ProjectionElem`]. Conceptually,
+ /// rustc can identify whether a field projection refers to the same region of memory
+ /// as its base or to a different one.
+ /// Read more about projections in the [rustc-dev-guide][mir-datatypes].
+ ///
+ /// [mir-datatypes]: https://rustc-dev-guide.rust-lang.org/mir/index.html#mir-data-types
+ Field(FieldIdx, T),
+
/// Index into a slice/array.
///
/// Note that this does not also dereference, and so it does not exactly correspond to slice
@@ -1110,11 +1114,7 @@ pub enum Rvalue<'tcx> {
/// Same as `BinaryOp`, but yields `(T, bool)` with a `bool` indicating an error condition.
///
/// For addition, subtraction, and multiplication on integers the error condition is set when
- /// the infinite precision result would be unequal to the actual result.
- ///
- /// For shift operations on integers the error condition is set when the value of right-hand
- /// side is greater than or equal to the number of bits in the type of the left-hand side, or
- /// when the value of right-hand side is negative.
+ /// the infinite precision result would not be equal to the actual result.
///
/// Other combinations of types and operators are unsupported.
CheckedBinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
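The reworded error condition ("the infinite precision result would not be equal to the actual result") is exactly what the standard library's `overflowing_*` methods compute, and they return the same `(T, bool)` shape as `CheckedBinaryOp`:

```rust
fn main() {
    // 100 + 200 = 300 exceeds u8::MAX, so the wrapped result differs from
    // the infinite-precision one and the flag is set.
    assert_eq!(100u8.overflowing_add(200), (44, true)); // 300 mod 256 = 44
    assert_eq!(100u8.overflowing_add(100), (200, false)); // exact
    assert_eq!(2i32.overflowing_mul(1_500_000_000), (-1_294_967_296, true));
}
```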
@@ -1149,7 +1149,7 @@ pub enum Rvalue<'tcx> {
///
/// Disallowed after deaggregation for all aggregate kinds except `Array` and `Generator`. After
/// generator lowering, `Generator` aggregate kinds are disallowed too.
- Aggregate(Box<AggregateKind<'tcx>>, Vec<Operand<'tcx>>),
+ Aggregate(Box<AggregateKind<'tcx>>, IndexVec<FieldIdx, Operand<'tcx>>),
/// Transmutes a `*mut u8` into shallow-initialized `Box<T>`.
///
@@ -1189,6 +1189,13 @@ pub enum CastKind {
IntToFloat,
PtrToPtr,
FnPtrToPtr,
+ /// Reinterpret the bits of the input as a different type.
+ ///
+ /// MIR is well-formed if the input and output types have different sizes,
+ /// but running a transmute between differently-sized types is UB.
+ ///
+ /// Allowed only in [`MirPhase::Runtime`]; earlier it's a [`TerminatorKind::Call`].
+ Transmute,
}
#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
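`CastKind::Transmute` reinterprets bits between same-sized types. A safe, stable illustration of that reinterpretation using `f32::from_bits`/`to_bits` (equivalent to a `u32 <-> f32` transmute); per the doc above, a transmute between differently-sized types would be well-formed MIR but UB to execute:

```rust
fn main() {
    let bits: u32 = 0x4048_F5C3; // IEEE-754 encoding of 3.14f32
    let x = f32::from_bits(bits); // same bits, different type
    assert!((x - 3.14).abs() < 1e-6);
    assert_eq!(x.to_bits(), bits); // round-trips exactly
}
```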
@@ -1199,11 +1206,11 @@ pub enum AggregateKind<'tcx> {
Tuple,
/// The second field is the variant index. It's equal to 0 for struct
- /// and union expressions. The fourth field is
+ /// and union expressions. The last field is the
/// active field number and is present only for union expressions
/// -- e.g., for a union expression `SomeUnion { c: .. }`, the
/// active field index would identify the field `c`.
- Adt(DefId, VariantIdx, SubstsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<usize>),
+ Adt(DefId, VariantIdx, SubstsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<FieldIdx>),
Closure(DefId, SubstsRef<'tcx>),
Generator(DefId, SubstsRef<'tcx>, hir::Movability),
@@ -1280,7 +1287,7 @@ pub enum BinOp {
mod size_asserts {
use super::*;
// tidy-alphabetical-start
- static_assert_size!(AggregateKind<'_>, 40);
+ static_assert_size!(AggregateKind<'_>, 32);
static_assert_size!(Operand<'_>, 24);
static_assert_size!(Place<'_>, 16);
static_assert_size!(PlaceElem<'_>, 24);
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
index 0aa2c500f..4f00abf7f 100644
--- a/compiler/rustc_middle/src/mir/tcx.rs
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -6,7 +6,7 @@
use crate::mir::*;
use crate::ty::{self, Ty, TyCtxt};
use rustc_hir as hir;
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{FieldIdx, VariantIdx};
#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
pub struct PlaceTy<'tcx> {
@@ -33,7 +33,7 @@ impl<'tcx> PlaceTy<'tcx> {
///
/// Note that the resulting type has not been normalized.
#[instrument(level = "debug", skip(tcx), ret)]
- pub fn field_ty(self, tcx: TyCtxt<'tcx>, f: Field) -> Ty<'tcx> {
+ pub fn field_ty(self, tcx: TyCtxt<'tcx>, f: FieldIdx) -> Ty<'tcx> {
match self.ty.kind() {
ty::Adt(adt_def, substs) => {
let variant_def = match self.variant_index {
@@ -43,7 +43,7 @@ impl<'tcx> PlaceTy<'tcx> {
&adt_def.variant(variant_index)
}
};
- let field_def = &variant_def.fields[f.index()];
+ let field_def = &variant_def.fields[f];
field_def.ty(tcx, substs)
}
ty::Tuple(tys) => tys[f.index()],
@@ -61,14 +61,14 @@ impl<'tcx> PlaceTy<'tcx> {
/// `place_ty.projection_ty_core(tcx, elem, |...| { ... })`
/// projects `place_ty` onto `elem`, returning the appropriate
/// `Ty` or downcast variant corresponding to that projection.
- /// The `handle_field` callback must map a `Field` to its `Ty`,
+ /// The `handle_field` callback must map a `FieldIdx` to its `Ty`,
/// (which should be trivial when `T` = `Ty`).
pub fn projection_ty_core<V, T>(
self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
elem: &ProjectionElem<V, T>,
- mut handle_field: impl FnMut(&Self, Field, T) -> Ty<'tcx>,
+ mut handle_field: impl FnMut(&Self, FieldIdx, T) -> Ty<'tcx>,
mut handle_opaque_cast: impl FnMut(&Self, T) -> Ty<'tcx>,
) -> PlaceTy<'tcx>
where
@@ -98,7 +98,7 @@ impl<'tcx> PlaceTy<'tcx> {
ty::Array(inner, _) if !from_end => tcx.mk_array(*inner, (to - from) as u64),
ty::Array(inner, size) if from_end => {
let size = size.eval_target_usize(tcx, param_env);
- let len = size - (from as u64) - (to as u64);
+ let len = size - from - to;
tcx.mk_array(*inner, len)
}
_ => bug!("cannot subslice non-array type: `{:?}`", self),
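The simplified `len = size - from - to` is the length of a `rest @ ..` binding in an array pattern: `from` counts elements matched at the front and `to` counts elements matched at the back. A worked example:

```rust
fn main() {
    let arr = [10u8, 20, 30, 40, 50, 60]; // size = 6
    let [_, _, rest @ .., _] = arr; // from = 2, to = 1
    assert_eq!(rest.len(), 6 - 2 - 1);
    assert_eq!(rest, [30, 40, 50]);
}
```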
@@ -116,7 +116,7 @@ impl<'tcx> PlaceTy<'tcx> {
}
impl<'tcx> Place<'tcx> {
- pub fn ty_from<D>(
+ pub fn ty_from<D: ?Sized>(
local: Local,
projection: &[PlaceElem<'tcx>],
local_decls: &D,
@@ -132,7 +132,7 @@ impl<'tcx> Place<'tcx> {
})
}
- pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+ pub fn ty<D: ?Sized>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
where
D: HasLocalDecls<'tcx>,
{
@@ -141,7 +141,7 @@ impl<'tcx> Place<'tcx> {
}
impl<'tcx> PlaceRef<'tcx> {
- pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+ pub fn ty<D: ?Sized>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
where
D: HasLocalDecls<'tcx>,
{
@@ -155,7 +155,7 @@ pub enum RvalueInitializationState {
}
impl<'tcx> Rvalue<'tcx> {
- pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+ pub fn ty<D: ?Sized>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
where
D: HasLocalDecls<'tcx>,
{
@@ -164,17 +164,7 @@ impl<'tcx> Rvalue<'tcx> {
Rvalue::Repeat(ref operand, count) => {
tcx.mk_array_with_const_len(operand.ty(local_decls, tcx), count)
}
- Rvalue::ThreadLocalRef(did) => {
- let static_ty = tcx.type_of(did).subst_identity();
- if tcx.is_mutable_static(did) {
- tcx.mk_mut_ptr(static_ty)
- } else if tcx.is_foreign_item(did) {
- tcx.mk_imm_ptr(static_ty)
- } else {
- // FIXME: These things don't *really* have 'static lifetime.
- tcx.mk_imm_ref(tcx.lifetimes.re_static, static_ty)
- }
- }
+ Rvalue::ThreadLocalRef(did) => tcx.thread_local_ptr_ty(did),
Rvalue::Ref(reg, bk, ref place) => {
let place_ty = place.ty(local_decls, tcx).ty;
tcx.mk_ref(reg, ty::TypeAndMut { ty: place_ty, mutbl: bk.to_mutbl_lossy() })
@@ -227,7 +217,7 @@ impl<'tcx> Rvalue<'tcx> {
}
impl<'tcx> Operand<'tcx> {
- pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+ pub fn ty<D: ?Sized>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
where
D: HasLocalDecls<'tcx>,
{
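Generic type parameters default to `Sized`, so the added `D: ?Sized` bounds are what let an unsized type such as a slice (here standing in for `IndexSlice`) be used as `D` behind a reference. A hedged toy version of the pattern:

```rust
trait HasDecls {
    fn decl_count(&self) -> usize;
}

impl HasDecls for [u32] {
    fn decl_count(&self) -> usize {
        self.len()
    }
}

fn count<D: ?Sized + HasDecls>(decls: &D) -> usize {
    decls.decl_count()
}

fn main() {
    let decls = [1u32, 2, 3];
    // `[u32]` is unsized; without `?Sized` on `D` this would not compile.
    assert_eq!(count(&decls[..]), 3);
}
```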
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
index 6e905224c..2c6126cdd 100644
--- a/compiler/rustc_middle/src/mir/terminator.rs
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -1,6 +1,6 @@
use smallvec::SmallVec;
-use super::{BasicBlock, InlineAsmOperand, Operand, SourceInfo, TerminatorKind};
+use super::{BasicBlock, InlineAsmOperand, Operand, SourceInfo, TerminatorKind, UnwindAction};
use rustc_ast::InlineAsmTemplatePiece;
pub use rustc_ast::Mutability;
use rustc_macros::HashStable;
@@ -118,11 +118,11 @@ impl<'tcx> Terminator<'tcx> {
self.kind.successors_mut()
}
- pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+ pub fn unwind(&self) -> Option<&UnwindAction> {
self.kind.unwind()
}
- pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ pub fn unwind_mut(&mut self) -> Option<&mut UnwindAction> {
self.kind.unwind_mut()
}
}
@@ -135,35 +135,33 @@ impl<'tcx> TerminatorKind<'tcx> {
pub fn successors(&self) -> Successors<'_> {
use self::TerminatorKind::*;
match *self {
- Resume
- | Abort
- | GeneratorDrop
- | Return
- | Unreachable
- | Call { target: None, cleanup: None, .. }
- | InlineAsm { destination: None, cleanup: None, .. } => {
- None.into_iter().chain((&[]).into_iter().copied())
+ Call { target: Some(t), unwind: UnwindAction::Cleanup(ref u), .. }
+ | Yield { resume: t, drop: Some(ref u), .. }
+ | Drop { target: t, unwind: UnwindAction::Cleanup(ref u), .. }
+ | Assert { target: t, unwind: UnwindAction::Cleanup(ref u), .. }
+ | FalseUnwind { real_target: t, unwind: UnwindAction::Cleanup(ref u) }
+ | InlineAsm { destination: Some(t), unwind: UnwindAction::Cleanup(ref u), .. } => {
+ Some(t).into_iter().chain(slice::from_ref(u).into_iter().copied())
}
Goto { target: t }
- | Call { target: None, cleanup: Some(t), .. }
- | Call { target: Some(t), cleanup: None, .. }
+ | Call { target: None, unwind: UnwindAction::Cleanup(t), .. }
+ | Call { target: Some(t), unwind: _, .. }
| Yield { resume: t, drop: None, .. }
- | DropAndReplace { target: t, unwind: None, .. }
- | Drop { target: t, unwind: None, .. }
- | Assert { target: t, cleanup: None, .. }
- | FalseUnwind { real_target: t, unwind: None }
- | InlineAsm { destination: Some(t), cleanup: None, .. }
- | InlineAsm { destination: None, cleanup: Some(t), .. } => {
+ | Drop { target: t, unwind: _, .. }
+ | Assert { target: t, unwind: _, .. }
+ | FalseUnwind { real_target: t, unwind: _ }
+ | InlineAsm { destination: None, unwind: UnwindAction::Cleanup(t), .. }
+ | InlineAsm { destination: Some(t), unwind: _, .. } => {
Some(t).into_iter().chain((&[]).into_iter().copied())
}
- Call { target: Some(t), cleanup: Some(ref u), .. }
- | Yield { resume: t, drop: Some(ref u), .. }
- | DropAndReplace { target: t, unwind: Some(ref u), .. }
- | Drop { target: t, unwind: Some(ref u), .. }
- | Assert { target: t, cleanup: Some(ref u), .. }
- | FalseUnwind { real_target: t, unwind: Some(ref u) }
- | InlineAsm { destination: Some(t), cleanup: Some(ref u), .. } => {
- Some(t).into_iter().chain(slice::from_ref(u).into_iter().copied())
+ Resume
+ | Terminate
+ | GeneratorDrop
+ | Return
+ | Unreachable
+ | Call { target: None, unwind: _, .. }
+ | InlineAsm { destination: None, unwind: _, .. } => {
+ None.into_iter().chain((&[]).into_iter().copied())
}
SwitchInt { ref targets, .. } => {
None.into_iter().chain(targets.targets.iter().copied())
@@ -177,34 +175,34 @@ impl<'tcx> TerminatorKind<'tcx> {
pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
use self::TerminatorKind::*;
match *self {
- Resume
- | Abort
- | GeneratorDrop
- | Return
- | Unreachable
- | Call { target: None, cleanup: None, .. }
- | InlineAsm { destination: None, cleanup: None, .. } => None.into_iter().chain(&mut []),
+ Call { target: Some(ref mut t), unwind: UnwindAction::Cleanup(ref mut u), .. }
+ | Yield { resume: ref mut t, drop: Some(ref mut u), .. }
+ | Drop { target: ref mut t, unwind: UnwindAction::Cleanup(ref mut u), .. }
+ | Assert { target: ref mut t, unwind: UnwindAction::Cleanup(ref mut u), .. }
+ | FalseUnwind { real_target: ref mut t, unwind: UnwindAction::Cleanup(ref mut u) }
+ | InlineAsm {
+ destination: Some(ref mut t),
+ unwind: UnwindAction::Cleanup(ref mut u),
+ ..
+ } => Some(t).into_iter().chain(slice::from_mut(u)),
Goto { target: ref mut t }
- | Call { target: None, cleanup: Some(ref mut t), .. }
- | Call { target: Some(ref mut t), cleanup: None, .. }
+ | Call { target: None, unwind: UnwindAction::Cleanup(ref mut t), .. }
+ | Call { target: Some(ref mut t), unwind: _, .. }
| Yield { resume: ref mut t, drop: None, .. }
- | DropAndReplace { target: ref mut t, unwind: None, .. }
- | Drop { target: ref mut t, unwind: None, .. }
- | Assert { target: ref mut t, cleanup: None, .. }
- | FalseUnwind { real_target: ref mut t, unwind: None }
- | InlineAsm { destination: Some(ref mut t), cleanup: None, .. }
- | InlineAsm { destination: None, cleanup: Some(ref mut t), .. } => {
+ | Drop { target: ref mut t, unwind: _, .. }
+ | Assert { target: ref mut t, unwind: _, .. }
+ | FalseUnwind { real_target: ref mut t, unwind: _ }
+ | InlineAsm { destination: None, unwind: UnwindAction::Cleanup(ref mut t), .. }
+ | InlineAsm { destination: Some(ref mut t), unwind: _, .. } => {
Some(t).into_iter().chain(&mut [])
}
- Call { target: Some(ref mut t), cleanup: Some(ref mut u), .. }
- | Yield { resume: ref mut t, drop: Some(ref mut u), .. }
- | DropAndReplace { target: ref mut t, unwind: Some(ref mut u), .. }
- | Drop { target: ref mut t, unwind: Some(ref mut u), .. }
- | Assert { target: ref mut t, cleanup: Some(ref mut u), .. }
- | FalseUnwind { real_target: ref mut t, unwind: Some(ref mut u) }
- | InlineAsm { destination: Some(ref mut t), cleanup: Some(ref mut u), .. } => {
- Some(t).into_iter().chain(slice::from_mut(u))
- }
+ Resume
+ | Terminate
+ | GeneratorDrop
+ | Return
+ | Unreachable
+ | Call { target: None, unwind: _, .. }
+ | InlineAsm { destination: None, unwind: _, .. } => None.into_iter().chain(&mut []),
SwitchInt { ref mut targets, .. } => None.into_iter().chain(&mut targets.targets),
FalseEdge { ref mut real_target, ref mut imaginary_target } => {
Some(real_target).into_iter().chain(slice::from_mut(imaginary_target))
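Every arm of `successors` produces the same iterator type: an `Option` (the normal target) chained with a zero- or one-element slice (the cleanup block). A standalone demo of that shape, with `u32` standing in for `BasicBlock`:

```rust
use std::slice;

fn successors(target: Option<u32>, cleanup: Option<&u32>) -> impl Iterator<Item = u32> + '_ {
    // `slice::from_ref` turns `&u32` into `&[u32]` of length 1, so both
    // cases produce a slice and the chained type is uniform.
    let tail = cleanup.map(slice::from_ref).unwrap_or(&[]);
    target.into_iter().chain(tail.iter().copied())
}

fn main() {
    let unwind = 7u32;
    assert_eq!(successors(Some(3), Some(&unwind)).collect::<Vec<_>>(), vec![3, 7]);
    assert_eq!(successors(Some(3), None).collect::<Vec<_>>(), vec![3]);
    assert_eq!(successors(None, None).count(), 0);
}
```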
@@ -212,43 +210,41 @@ impl<'tcx> TerminatorKind<'tcx> {
}
}
- pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+ pub fn unwind(&self) -> Option<&UnwindAction> {
match *self {
TerminatorKind::Goto { .. }
| TerminatorKind::Resume
- | TerminatorKind::Abort
+ | TerminatorKind::Terminate
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::GeneratorDrop
| TerminatorKind::Yield { .. }
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::FalseEdge { .. } => None,
- TerminatorKind::Call { cleanup: ref unwind, .. }
- | TerminatorKind::Assert { cleanup: ref unwind, .. }
- | TerminatorKind::DropAndReplace { ref unwind, .. }
+ TerminatorKind::Call { ref unwind, .. }
+ | TerminatorKind::Assert { ref unwind, .. }
| TerminatorKind::Drop { ref unwind, .. }
| TerminatorKind::FalseUnwind { ref unwind, .. }
- | TerminatorKind::InlineAsm { cleanup: ref unwind, .. } => Some(unwind),
+ | TerminatorKind::InlineAsm { ref unwind, .. } => Some(unwind),
}
}
- pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ pub fn unwind_mut(&mut self) -> Option<&mut UnwindAction> {
match *self {
TerminatorKind::Goto { .. }
| TerminatorKind::Resume
- | TerminatorKind::Abort
+ | TerminatorKind::Terminate
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::GeneratorDrop
| TerminatorKind::Yield { .. }
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::FalseEdge { .. } => None,
- TerminatorKind::Call { cleanup: ref mut unwind, .. }
- | TerminatorKind::Assert { cleanup: ref mut unwind, .. }
- | TerminatorKind::DropAndReplace { ref mut unwind, .. }
+ TerminatorKind::Call { ref mut unwind, .. }
+ | TerminatorKind::Assert { ref mut unwind, .. }
| TerminatorKind::Drop { ref mut unwind, .. }
| TerminatorKind::FalseUnwind { ref mut unwind, .. }
- | TerminatorKind::InlineAsm { cleanup: ref mut unwind, .. } => Some(unwind),
+ | TerminatorKind::InlineAsm { ref mut unwind, .. } => Some(unwind),
}
}
@@ -274,11 +270,17 @@ impl<'tcx> Debug for TerminatorKind<'tcx> {
let labels = self.fmt_successor_labels();
assert_eq!(successor_count, labels.len());
- match successor_count {
- 0 => Ok(()),
-
- 1 => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
-
+ let unwind = match self.unwind() {
+ // Not needed or included in successors
+ None | Some(UnwindAction::Continue) | Some(UnwindAction::Cleanup(_)) => None,
+ Some(UnwindAction::Unreachable) => Some("unwind unreachable"),
+ Some(UnwindAction::Terminate) => Some("unwind terminate"),
+ };
+
+ match (successor_count, unwind) {
+ (0, None) => Ok(()),
+ (0, Some(unwind)) => write!(fmt, " -> {}", unwind),
+ (1, None) => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
_ => {
write!(fmt, " -> [")?;
for (i, target) in self.successors().enumerate() {
@@ -287,6 +289,9 @@ impl<'tcx> Debug for TerminatorKind<'tcx> {
}
write!(fmt, "{}: {:?}", labels[i], target)?;
}
+ if let Some(unwind) = unwind {
+ write!(fmt, ", {unwind}")?;
+ }
write!(fmt, "]")
}
}
@@ -305,13 +310,10 @@ impl<'tcx> TerminatorKind<'tcx> {
Return => write!(fmt, "return"),
GeneratorDrop => write!(fmt, "generator_drop"),
Resume => write!(fmt, "resume"),
- Abort => write!(fmt, "abort"),
+ Terminate => write!(fmt, "abort"),
Yield { value, resume_arg, .. } => write!(fmt, "{:?} = yield({:?})", resume_arg, value),
Unreachable => write!(fmt, "unreachable"),
Drop { place, .. } => write!(fmt, "drop({:?})", place),
- DropAndReplace { place, value, .. } => {
- write!(fmt, "replace({:?} <- {:?})", place, value)
- }
Call { func, args, destination, .. } => {
write!(fmt, "{:?} = ", destination)?;
write!(fmt, "{:?}(", func)?;
@@ -387,7 +389,7 @@ impl<'tcx> TerminatorKind<'tcx> {
pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
use self::TerminatorKind::*;
match *self {
- Return | Resume | Abort | Unreachable | GeneratorDrop => vec![],
+ Return | Resume | Terminate | Unreachable | GeneratorDrop => vec![],
Goto { .. } => vec!["".into()],
SwitchInt { ref targets, .. } => targets
.values
@@ -395,31 +397,35 @@ impl<'tcx> TerminatorKind<'tcx> {
.map(|&u| Cow::Owned(u.to_string()))
.chain(iter::once("otherwise".into()))
.collect(),
- Call { target: Some(_), cleanup: Some(_), .. } => {
+ Call { target: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
vec!["return".into(), "unwind".into()]
}
- Call { target: Some(_), cleanup: None, .. } => vec!["return".into()],
- Call { target: None, cleanup: Some(_), .. } => vec!["unwind".into()],
- Call { target: None, cleanup: None, .. } => vec![],
+ Call { target: Some(_), unwind: _, .. } => vec!["return".into()],
+ Call { target: None, unwind: UnwindAction::Cleanup(_), .. } => vec!["unwind".into()],
+ Call { target: None, unwind: _, .. } => vec![],
Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
Yield { drop: None, .. } => vec!["resume".into()],
- DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => {
- vec!["return".into()]
- }
- DropAndReplace { unwind: Some(_), .. } | Drop { unwind: Some(_), .. } => {
- vec!["return".into(), "unwind".into()]
+ Drop { unwind: UnwindAction::Cleanup(_), .. } => vec!["return".into(), "unwind".into()],
+ Drop { unwind: _, .. } => vec!["return".into()],
+ Assert { unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["success".into(), "unwind".into()]
}
- Assert { cleanup: None, .. } => vec!["".into()],
- Assert { .. } => vec!["success".into(), "unwind".into()],
+ Assert { unwind: _, .. } => vec!["success".into()],
FalseEdge { .. } => vec!["real".into(), "imaginary".into()],
- FalseUnwind { unwind: Some(_), .. } => vec!["real".into(), "cleanup".into()],
- FalseUnwind { unwind: None, .. } => vec!["real".into()],
- InlineAsm { destination: Some(_), cleanup: Some(_), .. } => {
+ FalseUnwind { unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["real".into(), "unwind".into()]
+ }
+ FalseUnwind { unwind: _, .. } => vec!["real".into()],
+ InlineAsm { destination: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
vec!["return".into(), "unwind".into()]
}
- InlineAsm { destination: Some(_), cleanup: None, .. } => vec!["return".into()],
- InlineAsm { destination: None, cleanup: Some(_), .. } => vec!["unwind".into()],
- InlineAsm { destination: None, cleanup: None, .. } => vec![],
+ InlineAsm { destination: Some(_), unwind: _, .. } => {
+ vec!["return".into()]
+ }
+ InlineAsm { destination: None, unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["unwind".into()]
+ }
+ InlineAsm { destination: None, unwind: _, .. } => vec![],
}
}
}
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
index f37222cb2..7d247eeb6 100644
--- a/compiler/rustc_middle/src/mir/traversal.rs
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -101,7 +101,7 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
///
/// A Postorder traversal of this graph is `D B C A` or `D C B A`
pub struct Postorder<'a, 'tcx> {
- basic_blocks: &'a IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ basic_blocks: &'a IndexSlice<BasicBlock, BasicBlockData<'tcx>>,
visited: BitSet<BasicBlock>,
visit_stack: Vec<(BasicBlock, Successors<'a>)>,
root_is_start_block: bool,
@@ -109,7 +109,7 @@ pub struct Postorder<'a, 'tcx> {
impl<'a, 'tcx> Postorder<'a, 'tcx> {
pub fn new(
- basic_blocks: &'a IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ basic_blocks: &'a IndexSlice<BasicBlock, BasicBlockData<'tcx>>,
root: BasicBlock,
) -> Postorder<'a, 'tcx> {
let mut po = Postorder {
@@ -178,17 +178,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> {
// When we yield `B` and call `traverse_successor`, we push `C` to the stack, but
// since we've already visited `E`, that child isn't added to the stack. The last
// two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A]
- loop {
- let bb = if let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() {
- if let Some(bb) = iter.next() {
- bb
- } else {
- break;
- }
- } else {
- break;
- };
-
+ while let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() && let Some(bb) = iter.next() {
if self.visited.insert(bb) {
if let Some(term) = &self.basic_blocks[bb].terminator {
self.visit_stack.push((bb, term.successors()));
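The rewritten loop uses a `while let ... && let ...` chain, which relies on the unstable `let_chains` feature that the compiler enables internally. A hedged, nightly-only sketch of the same control flow (both "stack empty" and "iterator exhausted" end the loop, replacing the two explicit `break`s):

```rust
#![feature(let_chains)] // nightly only

fn main() {
    // (block, iterator over its remaining successors); the top of the
    // stack is the end of the Vec.
    let mut stack: Vec<(u32, std::vec::IntoIter<u32>)> =
        vec![(9, vec![].into_iter()), (0, vec![1, 2].into_iter())];
    let mut visited = Vec::new();
    while let Some((_, iter)) = stack.last_mut() && let Some(bb) = iter.next() {
        visited.push(bb);
    }
    assert_eq!(visited, vec![1, 2]); // stops when the top iterator runs dry
}
```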
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index 5c056b299..caa5edc32 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -335,12 +335,14 @@ macro_rules! make_mir_visitor {
ty::InstanceDef::VTableShim(_def_id) |
ty::InstanceDef::ReifyShim(_def_id) |
ty::InstanceDef::Virtual(_def_id, _) |
+ ty::InstanceDef::ThreadLocalShim(_def_id) |
ty::InstanceDef::ClosureOnceShim { call_once: _def_id, track_caller: _ } |
ty::InstanceDef::DropGlue(_def_id, None) => {}
ty::InstanceDef::FnPtrShim(_def_id, ty) |
ty::InstanceDef::DropGlue(_def_id, Some(ty)) |
- ty::InstanceDef::CloneShim(_def_id, ty) => {
+ ty::InstanceDef::CloneShim(_def_id, ty) |
+ ty::InstanceDef::FnPtrAddrShim(_def_id, ty) => {
// FIXME(eddyb) use a better `TyContext` here.
self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
}
@@ -405,6 +407,13 @@ macro_rules! make_mir_visitor {
StatementKind::Retag(kind, place) => {
self.visit_retag($(& $mutability)? *kind, place, location);
}
+ StatementKind::PlaceMention(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonUse(NonUseContext::PlaceMention),
+ location
+ );
+ }
StatementKind::AscribeUserType(
box (place, user_ty),
variance
@@ -453,7 +462,7 @@ macro_rules! make_mir_visitor {
match kind {
TerminatorKind::Goto { .. } |
TerminatorKind::Resume |
- TerminatorKind::Abort |
+ TerminatorKind::Terminate |
TerminatorKind::GeneratorDrop |
TerminatorKind::Unreachable |
TerminatorKind::FalseEdge { .. } |
@@ -495,26 +504,12 @@ macro_rules! make_mir_visitor {
);
}
- TerminatorKind::DropAndReplace {
- place,
- value,
- target: _,
- unwind: _,
- } => {
- self.visit_place(
- place,
- PlaceContext::MutatingUse(MutatingUseContext::Drop),
- location
- );
- self.visit_operand(value, location);
- }
-
TerminatorKind::Call {
func,
args,
destination,
target: _,
- cleanup: _,
+ unwind: _,
from_hir_call: _,
fn_span: _
} => {
@@ -534,7 +529,7 @@ macro_rules! make_mir_visitor {
expected: _,
msg,
target: _,
- cleanup: _,
+ unwind: _,
} => {
self.visit_operand(cond, location);
self.visit_assert_message(msg, location);
@@ -560,7 +555,7 @@ macro_rules! make_mir_visitor {
options: _,
line_spans: _,
destination: _,
- cleanup: _,
+ unwind: _,
} => {
for op in operands {
match op {
@@ -615,6 +610,10 @@ macro_rules! make_mir_visitor {
ResumedAfterReturn(_) | ResumedAfterPanic(_) => {
// Nothing to visit
}
+ MisalignedPointerDereference { required, found } => {
+ self.visit_operand(required, location);
+ self.visit_operand(found, location);
+ }
}
}
@@ -641,8 +640,8 @@ macro_rules! make_mir_visitor {
BorrowKind::Shallow => PlaceContext::NonMutatingUse(
NonMutatingUseContext::ShallowBorrow
),
- BorrowKind::Unique => PlaceContext::NonMutatingUse(
- NonMutatingUseContext::UniqueBorrow
+ BorrowKind::Unique => PlaceContext::MutatingUse(
+ MutatingUseContext::Borrow
),
BorrowKind::Mut { .. } =>
PlaceContext::MutatingUse(MutatingUseContext::Borrow),
@@ -811,7 +810,6 @@ macro_rules! make_mir_visitor {
source_info,
internal: _,
local_info: _,
- is_block_tail: _,
} = local_decl;
self.visit_ty($(& $mutability)? *ty, TyContext::LocalDecl {
@@ -834,6 +832,7 @@ macro_rules! make_mir_visitor {
name: _,
source_info,
value,
+ argument_index: _,
} = var_debug_info;
self.visit_source_info(source_info);
@@ -1248,8 +1247,6 @@ pub enum NonMutatingUseContext {
SharedBorrow,
/// Shallow borrow.
ShallowBorrow,
- /// Unique borrow.
- UniqueBorrow,
/// AddressOf for *const pointer.
AddressOf,
/// Used as base for another place, e.g., `x` in `x.y`. Will not mutate the place.
@@ -1302,6 +1299,8 @@ pub enum NonUseContext {
AscribeUserTy,
/// The data of a user variable, for debug info.
VarDebugInfo,
+ /// PlaceMention statement.
+ PlaceMention,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
@@ -1323,9 +1322,7 @@ impl PlaceContext {
matches!(
self,
PlaceContext::NonMutatingUse(
- NonMutatingUseContext::SharedBorrow
- | NonMutatingUseContext::ShallowBorrow
- | NonMutatingUseContext::UniqueBorrow
+ NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::ShallowBorrow
) | PlaceContext::MutatingUse(MutatingUseContext::Borrow)
)
}