summaryrefslogtreecommitdiffstats
path: root/compiler/rustc_mir_transform/src/coverage
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-07 05:48:48 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-07 05:48:48 +0000
commitef24de24a82fe681581cc130f342363c47c0969a (patch)
tree0d494f7e1a38b95c92426f58fe6eaa877303a86c /compiler/rustc_mir_transform/src/coverage
parentReleasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1. (diff)
downloadrustc-ef24de24a82fe681581cc130f342363c47c0969a.tar.xz
rustc-ef24de24a82fe681581cc130f342363c47c0969a.zip
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_mir_transform/src/coverage')
-rw-r--r--compiler/rustc_mir_transform/src/coverage/counters.rs437
-rw-r--r--compiler/rustc_mir_transform/src/coverage/graph.rs283
-rw-r--r--compiler/rustc_mir_transform/src/coverage/mod.rs292
-rw-r--r--compiler/rustc_mir_transform/src/coverage/query.rs107
-rw-r--r--compiler/rustc_mir_transform/src/coverage/spans.rs597
-rw-r--r--compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs193
-rw-r--r--compiler/rustc_mir_transform/src/coverage/tests.rs44
7 files changed, 786 insertions, 1167 deletions
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index d56d4ad4f..b34ec95b4 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -1,10 +1,6 @@
-use super::Error;
-
use super::graph;
-use super::spans;
use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
-use spans::CoverageSpan;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::WithNumNodes;
@@ -14,14 +10,12 @@ use rustc_middle::mir::coverage::*;
use std::fmt::{self, Debug};
-const NESTED_INDENT: &str = " ";
-
/// The coverage counter or counter expression associated with a particular
/// BCB node or BCB edge.
#[derive(Clone)]
pub(super) enum BcbCounter {
Counter { id: CounterId },
- Expression { id: ExpressionId, lhs: Operand, op: Op, rhs: Operand },
+ Expression { id: ExpressionId },
}
impl BcbCounter {
@@ -29,10 +23,10 @@ impl BcbCounter {
matches!(self, Self::Expression { .. })
}
- pub(super) fn as_operand(&self) -> Operand {
+ pub(super) fn as_term(&self) -> CovTerm {
match *self {
- BcbCounter::Counter { id, .. } => Operand::Counter(id),
- BcbCounter::Expression { id, .. } => Operand::Expression(id),
+ BcbCounter::Counter { id, .. } => CovTerm::Counter(id),
+ BcbCounter::Expression { id, .. } => CovTerm::Expression(id),
}
}
}
@@ -41,17 +35,7 @@ impl Debug for BcbCounter {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
- Self::Expression { id, lhs, op, rhs } => write!(
- fmt,
- "Expression({:?}) = {:?} {} {:?}",
- id.index(),
- lhs,
- match op {
- Op::Add => "+",
- Op::Subtract => "-",
- },
- rhs,
- ),
+ Self::Expression { id } => write!(fmt, "Expression({:?})", id.index()),
}
}
}
@@ -60,7 +44,6 @@ impl Debug for BcbCounter {
/// associated with nodes/edges in the BCB graph.
pub(super) struct CoverageCounters {
next_counter_id: CounterId,
- next_expression_id: ExpressionId,
/// Coverage counters/expressions that are associated with individual BCBs.
bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>,
@@ -68,13 +51,12 @@ pub(super) struct CoverageCounters {
/// edge between two BCBs.
bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
/// Tracks which BCBs have a counter associated with some incoming edge.
- /// Only used by debug assertions, to verify that BCBs with incoming edge
+ /// Only used by assertions, to verify that BCBs with incoming edge
/// counters do not have their own physical counters (expressions are allowed).
bcb_has_incoming_edge_counters: BitSet<BasicCoverageBlock>,
- /// Expression nodes that are not directly associated with any particular
- /// BCB/edge, but are needed as operands to more complex expressions.
- /// These are always [`BcbCounter::Expression`].
- pub(super) intermediate_expressions: Vec<BcbCounter>,
+ /// Table of expression data, associating each expression ID with its
+ /// corresponding operator (+ or -) and its LHS/RHS operands.
+ expressions: IndexVec<ExpressionId, Expression>,
}
impl CoverageCounters {
@@ -83,24 +65,22 @@ impl CoverageCounters {
Self {
next_counter_id: CounterId::START,
- next_expression_id: ExpressionId::START,
-
bcb_counters: IndexVec::from_elem_n(None, num_bcbs),
bcb_edge_counters: FxHashMap::default(),
bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
- intermediate_expressions: Vec::new(),
+ expressions: IndexVec::new(),
}
}
/// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
- /// indirectly associated with `CoverageSpans`, and accumulates additional `Expression`s
+ /// indirectly associated with coverage spans, and accumulates additional `Expression`s
/// representing intermediate values.
pub fn make_bcb_counters(
&mut self,
basic_coverage_blocks: &CoverageGraph,
- coverage_spans: &[CoverageSpan],
- ) -> Result<(), Error> {
- MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(coverage_spans)
+ bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
+ ) {
+ MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(bcb_has_coverage_spans)
}
fn make_counter(&mut self) -> BcbCounter {
@@ -108,50 +88,44 @@ impl CoverageCounters {
BcbCounter::Counter { id }
}
- fn make_expression(&mut self, lhs: Operand, op: Op, rhs: Operand) -> BcbCounter {
- let id = self.next_expression();
- BcbCounter::Expression { id, lhs, op, rhs }
- }
-
- pub fn make_identity_counter(&mut self, counter_operand: Operand) -> BcbCounter {
- self.make_expression(counter_operand, Op::Add, Operand::Zero)
+ fn make_expression(&mut self, lhs: CovTerm, op: Op, rhs: CovTerm) -> BcbCounter {
+ let id = self.expressions.push(Expression { lhs, op, rhs });
+ BcbCounter::Expression { id }
}
/// Counter IDs start from one and go up.
fn next_counter(&mut self) -> CounterId {
let next = self.next_counter_id;
- self.next_counter_id = next.next_id();
+ self.next_counter_id = self.next_counter_id + 1;
next
}
- /// Expression IDs start from 0 and go up.
- /// (Counter IDs and Expression IDs are distinguished by the `Operand` enum.)
- fn next_expression(&mut self) -> ExpressionId {
- let next = self.next_expression_id;
- self.next_expression_id = next.next_id();
- next
+ pub(super) fn num_counters(&self) -> usize {
+ self.next_counter_id.as_usize()
}
- fn set_bcb_counter(
- &mut self,
- bcb: BasicCoverageBlock,
- counter_kind: BcbCounter,
- ) -> Result<Operand, Error> {
- debug_assert!(
+ #[cfg(test)]
+ pub(super) fn num_expressions(&self) -> usize {
+ self.expressions.len()
+ }
+
+ fn set_bcb_counter(&mut self, bcb: BasicCoverageBlock, counter_kind: BcbCounter) -> CovTerm {
+ assert!(
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
counter_kind.is_expression() || !self.bcb_has_incoming_edge_counters.contains(bcb),
"attempt to add a `Counter` to a BCB target with existing incoming edge counters"
);
- let operand = counter_kind.as_operand();
+
+ let term = counter_kind.as_term();
if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) {
- Error::from_string(format!(
+ bug!(
"attempt to set a BasicCoverageBlock coverage counter more than once; \
{bcb:?} already had counter {replaced:?}",
- ))
+ );
} else {
- Ok(operand)
+ term
}
}
@@ -160,27 +134,26 @@ impl CoverageCounters {
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
counter_kind: BcbCounter,
- ) -> Result<Operand, Error> {
- if level_enabled!(tracing::Level::DEBUG) {
- // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
- // have an expression (to be injected into an existing `BasicBlock` represented by this
- // `BasicCoverageBlock`).
- if self.bcb_counter(to_bcb).is_some_and(|c| !c.is_expression()) {
- return Error::from_string(format!(
- "attempt to add an incoming edge counter from {from_bcb:?} when the target BCB already \
- has a `Counter`"
- ));
- }
+ ) -> CovTerm {
+ // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+ // have an expression (to be injected into an existing `BasicBlock` represented by this
+ // `BasicCoverageBlock`).
+ if let Some(node_counter) = self.bcb_counter(to_bcb) && !node_counter.is_expression() {
+ bug!(
+ "attempt to add an incoming edge counter from {from_bcb:?} \
+ when the target BCB already has {node_counter:?}"
+ );
}
+
self.bcb_has_incoming_edge_counters.insert(to_bcb);
- let operand = counter_kind.as_operand();
+ let term = counter_kind.as_term();
if let Some(replaced) = self.bcb_edge_counters.insert((from_bcb, to_bcb), counter_kind) {
- Error::from_string(format!(
+ bug!(
"attempt to set an edge counter more than once; from_bcb: \
{from_bcb:?} already had counter {replaced:?}",
- ))
+ );
} else {
- Ok(operand)
+ term
}
}
@@ -188,27 +161,31 @@ impl CoverageCounters {
self.bcb_counters[bcb].as_ref()
}
- pub(super) fn take_bcb_counter(&mut self, bcb: BasicCoverageBlock) -> Option<BcbCounter> {
- self.bcb_counters[bcb].take()
+ pub(super) fn bcb_node_counters(
+ &self,
+ ) -> impl Iterator<Item = (BasicCoverageBlock, &BcbCounter)> {
+ self.bcb_counters
+ .iter_enumerated()
+ .filter_map(|(bcb, counter_kind)| Some((bcb, counter_kind.as_ref()?)))
}
- pub(super) fn drain_bcb_counters(
- &mut self,
- ) -> impl Iterator<Item = (BasicCoverageBlock, BcbCounter)> + '_ {
- self.bcb_counters
- .iter_enumerated_mut()
- .filter_map(|(bcb, counter)| Some((bcb, counter.take()?)))
+ /// For each edge in the BCB graph that has an associated counter, yields
+ /// that edge's *from* and *to* nodes, and its counter.
+ pub(super) fn bcb_edge_counters(
+ &self,
+ ) -> impl Iterator<Item = (BasicCoverageBlock, BasicCoverageBlock, &BcbCounter)> {
+ self.bcb_edge_counters
+ .iter()
+ .map(|(&(from_bcb, to_bcb), counter_kind)| (from_bcb, to_bcb, counter_kind))
}
- pub(super) fn drain_bcb_edge_counters(
- &mut self,
- ) -> impl Iterator<Item = ((BasicCoverageBlock, BasicCoverageBlock), BcbCounter)> + '_ {
- self.bcb_edge_counters.drain()
+ pub(super) fn take_expressions(&mut self) -> IndexVec<ExpressionId, Expression> {
+ std::mem::take(&mut self.expressions)
}
}
/// Traverse the `CoverageGraph` and add either a `Counter` or `Expression` to every BCB, to be
-/// injected with `CoverageSpan`s. `Expressions` have no runtime overhead, so if a viable expression
+/// injected with coverage spans. `Expressions` have no runtime overhead, so if a viable expression
/// (adding or subtracting two other counters or expressions) can compute the same result as an
/// embedded counter, an `Expression` should be used.
struct MakeBcbCounters<'a> {
@@ -230,21 +207,11 @@ impl<'a> MakeBcbCounters<'a> {
/// One way to predict which branch executes the least is by considering loops. A loop is exited
/// at a branch, so the branch that jumps to a `BasicCoverageBlock` outside the loop is almost
/// always executed less than the branch that does not exit the loop.
- ///
- /// Returns any non-code-span expressions created to represent intermediate values (such as to
- /// add two counters so the result can be subtracted from another counter), or an Error with
- /// message for subsequent debugging.
- fn make_bcb_counters(&mut self, coverage_spans: &[CoverageSpan]) -> Result<(), Error> {
+ fn make_bcb_counters(&mut self, bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool) {
debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock");
- let num_bcbs = self.basic_coverage_blocks.num_nodes();
-
- let mut bcbs_with_coverage = BitSet::new_empty(num_bcbs);
- for covspan in coverage_spans {
- bcbs_with_coverage.insert(covspan.bcb);
- }
// Walk the `CoverageGraph`. For each `BasicCoverageBlock` node with an associated
- // `CoverageSpan`, add a counter. If the `BasicCoverageBlock` branches, add a counter or
+ // coverage span, add a counter. If the `BasicCoverageBlock` branches, add a counter or
// expression to each branch `BasicCoverageBlock` (if the branch BCB has only one incoming
// edge) or edge from the branching BCB to the branch BCB (if the branch BCB has multiple
// incoming edges).
@@ -254,39 +221,36 @@ impl<'a> MakeBcbCounters<'a> {
// the loop. The `traversal` state includes a `context_stack`, providing a way to know if
// the current BCB is in one or more nested loops or not.
let mut traversal = TraverseCoverageGraphWithLoops::new(&self.basic_coverage_blocks);
- while let Some(bcb) = traversal.next(self.basic_coverage_blocks) {
- if bcbs_with_coverage.contains(bcb) {
- debug!("{:?} has at least one `CoverageSpan`. Get or make its counter", bcb);
- let branching_counter_operand = self.get_or_make_counter_operand(bcb)?;
+ while let Some(bcb) = traversal.next() {
+ if bcb_has_coverage_spans(bcb) {
+ debug!("{:?} has at least one coverage span. Get or make its counter", bcb);
+ let branching_counter_operand = self.get_or_make_counter_operand(bcb);
if self.bcb_needs_branch_counters(bcb) {
- self.make_branch_counters(&mut traversal, bcb, branching_counter_operand)?;
+ self.make_branch_counters(&traversal, bcb, branching_counter_operand);
}
} else {
debug!(
- "{:?} does not have any `CoverageSpan`s. A counter will only be added if \
+ "{:?} does not have any coverage spans. A counter will only be added if \
and when a covered BCB has an expression dependency.",
bcb,
);
}
}
- if traversal.is_complete() {
- Ok(())
- } else {
- Error::from_string(format!(
- "`TraverseCoverageGraphWithLoops` missed some `BasicCoverageBlock`s: {:?}",
- traversal.unvisited(),
- ))
- }
+ assert!(
+ traversal.is_complete(),
+ "`TraverseCoverageGraphWithLoops` missed some `BasicCoverageBlock`s: {:?}",
+ traversal.unvisited(),
+ );
}
fn make_branch_counters(
&mut self,
- traversal: &mut TraverseCoverageGraphWithLoops,
+ traversal: &TraverseCoverageGraphWithLoops<'_>,
branching_bcb: BasicCoverageBlock,
- branching_counter_operand: Operand,
- ) -> Result<(), Error> {
+ branching_counter_operand: CovTerm,
+ ) {
let branches = self.bcb_branches(branching_bcb);
debug!(
"{:?} has some branch(es) without counters:\n {}",
@@ -319,10 +283,10 @@ impl<'a> MakeBcbCounters<'a> {
counter",
branch, branching_bcb
);
- self.get_or_make_counter_operand(branch.target_bcb)?
+ self.get_or_make_counter_operand(branch.target_bcb)
} else {
debug!(" {:?} has multiple incoming edges, so adding an edge counter", branch);
- self.get_or_make_edge_counter_operand(branching_bcb, branch.target_bcb)?
+ self.get_or_make_edge_counter_operand(branching_bcb, branch.target_bcb)
};
if let Some(sumup_counter_operand) =
some_sumup_counter_operand.replace(branch_counter_operand)
@@ -333,8 +297,7 @@ impl<'a> MakeBcbCounters<'a> {
sumup_counter_operand,
);
debug!(" [new intermediate expression: {:?}]", intermediate_expression);
- let intermediate_expression_operand = intermediate_expression.as_operand();
- self.coverage_counters.intermediate_expressions.push(intermediate_expression);
+ let intermediate_expression_operand = intermediate_expression.as_term();
some_sumup_counter_operand.replace(intermediate_expression_operand);
}
}
@@ -358,31 +321,18 @@ impl<'a> MakeBcbCounters<'a> {
debug!("{:?} gets an expression: {:?}", expression_branch, expression);
let bcb = expression_branch.target_bcb;
if expression_branch.is_only_path_to_target() {
- self.coverage_counters.set_bcb_counter(bcb, expression)?;
+ self.coverage_counters.set_bcb_counter(bcb, expression);
} else {
- self.coverage_counters.set_bcb_edge_counter(branching_bcb, bcb, expression)?;
+ self.coverage_counters.set_bcb_edge_counter(branching_bcb, bcb, expression);
}
- Ok(())
- }
-
- fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> Result<Operand, Error> {
- self.recursive_get_or_make_counter_operand(bcb, 1)
}
- fn recursive_get_or_make_counter_operand(
- &mut self,
- bcb: BasicCoverageBlock,
- debug_indent_level: usize,
- ) -> Result<Operand, Error> {
+ #[instrument(level = "debug", skip(self))]
+ fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> CovTerm {
// If the BCB already has a counter, return it.
if let Some(counter_kind) = &self.coverage_counters.bcb_counters[bcb] {
- debug!(
- "{}{:?} already has a counter: {:?}",
- NESTED_INDENT.repeat(debug_indent_level),
- bcb,
- counter_kind,
- );
- return Ok(counter_kind.as_operand());
+ debug!("{bcb:?} already has a counter: {counter_kind:?}");
+ return counter_kind.as_term();
}
// A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
@@ -392,20 +342,12 @@ impl<'a> MakeBcbCounters<'a> {
if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
let counter_kind = self.coverage_counters.make_counter();
if one_path_to_target {
- debug!(
- "{}{:?} gets a new counter: {:?}",
- NESTED_INDENT.repeat(debug_indent_level),
- bcb,
- counter_kind,
- );
+ debug!("{bcb:?} gets a new counter: {counter_kind:?}");
} else {
debug!(
- "{}{:?} has itself as its own predecessor. It can't be part of its own \
- Expression sum, so it will get its own new counter: {:?}. (Note, the compiled \
- code will generate an infinite loop.)",
- NESTED_INDENT.repeat(debug_indent_level),
- bcb,
- counter_kind,
+ "{bcb:?} has itself as its own predecessor. It can't be part of its own \
+ Expression sum, so it will get its own new counter: {counter_kind:?}. \
+ (Note, the compiled code will generate an infinite loop.)",
);
}
return self.coverage_counters.set_bcb_counter(bcb, counter_kind);
@@ -415,24 +357,14 @@ impl<'a> MakeBcbCounters<'a> {
// counters and/or expressions of its incoming edges. This will recursively get or create
// counters for those incoming edges first, then call `make_expression()` to sum them up,
// with additional intermediate expressions as needed.
+ let _sumup_debug_span = debug_span!("(preparing sum-up expression)").entered();
+
let mut predecessors = self.bcb_predecessors(bcb).to_owned().into_iter();
- debug!(
- "{}{:?} has multiple incoming edges and will get an expression that sums them up...",
- NESTED_INDENT.repeat(debug_indent_level),
- bcb,
- );
- let first_edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
- predecessors.next().unwrap(),
- bcb,
- debug_indent_level + 1,
- )?;
+ let first_edge_counter_operand =
+ self.get_or_make_edge_counter_operand(predecessors.next().unwrap(), bcb);
let mut some_sumup_edge_counter_operand = None;
for predecessor in predecessors {
- let edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
- predecessor,
- bcb,
- debug_indent_level + 1,
- )?;
+ let edge_counter_operand = self.get_or_make_edge_counter_operand(predecessor, bcb);
if let Some(sumup_edge_counter_operand) =
some_sumup_edge_counter_operand.replace(edge_counter_operand)
{
@@ -441,13 +373,8 @@ impl<'a> MakeBcbCounters<'a> {
Op::Add,
edge_counter_operand,
);
- debug!(
- "{}new intermediate expression: {:?}",
- NESTED_INDENT.repeat(debug_indent_level),
- intermediate_expression
- );
- let intermediate_expression_operand = intermediate_expression.as_operand();
- self.coverage_counters.intermediate_expressions.push(intermediate_expression);
+ debug!("new intermediate expression: {intermediate_expression:?}");
+ let intermediate_expression_operand = intermediate_expression.as_term();
some_sumup_edge_counter_operand.replace(intermediate_expression_operand);
}
}
@@ -456,59 +383,36 @@ impl<'a> MakeBcbCounters<'a> {
Op::Add,
some_sumup_edge_counter_operand.unwrap(),
);
- debug!(
- "{}{:?} gets a new counter (sum of predecessor counters): {:?}",
- NESTED_INDENT.repeat(debug_indent_level),
- bcb,
- counter_kind
- );
+ drop(_sumup_debug_span);
+
+ debug!("{bcb:?} gets a new counter (sum of predecessor counters): {counter_kind:?}");
self.coverage_counters.set_bcb_counter(bcb, counter_kind)
}
+ #[instrument(level = "debug", skip(self))]
fn get_or_make_edge_counter_operand(
&mut self,
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
- ) -> Result<Operand, Error> {
- self.recursive_get_or_make_edge_counter_operand(from_bcb, to_bcb, 1)
- }
-
- fn recursive_get_or_make_edge_counter_operand(
- &mut self,
- from_bcb: BasicCoverageBlock,
- to_bcb: BasicCoverageBlock,
- debug_indent_level: usize,
- ) -> Result<Operand, Error> {
+ ) -> CovTerm {
// If the source BCB has only one successor (assumed to be the given target), an edge
// counter is unnecessary. Just get or make a counter for the source BCB.
let successors = self.bcb_successors(from_bcb).iter();
if successors.len() == 1 {
- return self.recursive_get_or_make_counter_operand(from_bcb, debug_indent_level + 1);
+ return self.get_or_make_counter_operand(from_bcb);
}
// If the edge already has a counter, return it.
if let Some(counter_kind) =
self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
{
- debug!(
- "{}Edge {:?}->{:?} already has a counter: {:?}",
- NESTED_INDENT.repeat(debug_indent_level),
- from_bcb,
- to_bcb,
- counter_kind
- );
- return Ok(counter_kind.as_operand());
+ debug!("Edge {from_bcb:?}->{to_bcb:?} already has a counter: {counter_kind:?}");
+ return counter_kind.as_term();
}
// Make a new counter to count this edge.
let counter_kind = self.coverage_counters.make_counter();
- debug!(
- "{}Edge {:?}->{:?} gets a new counter: {:?}",
- NESTED_INDENT.repeat(debug_indent_level),
- from_bcb,
- to_bcb,
- counter_kind
- );
+ debug!("Edge {from_bcb:?}->{to_bcb:?} gets a new counter: {counter_kind:?}");
self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
}
@@ -516,21 +420,14 @@ impl<'a> MakeBcbCounters<'a> {
/// found, select any branch.
fn choose_preferred_expression_branch(
&self,
- traversal: &TraverseCoverageGraphWithLoops,
+ traversal: &TraverseCoverageGraphWithLoops<'_>,
branches: &[BcbBranch],
) -> BcbBranch {
- let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
-
- let some_reloop_branch = self.find_some_reloop_branch(traversal, &branches);
- if let Some(reloop_branch_without_counter) =
- some_reloop_branch.filter(branch_needs_a_counter)
- {
- debug!(
- "Selecting reloop_branch={:?} that still needs a counter, to get the \
- `Expression`",
- reloop_branch_without_counter
- );
- reloop_branch_without_counter
+ let good_reloop_branch = self.find_good_reloop_branch(traversal, &branches);
+ if let Some(reloop_branch) = good_reloop_branch {
+ assert!(self.branch_has_no_counter(&reloop_branch));
+ debug!("Selecting reloop branch {reloop_branch:?} to get an expression");
+ reloop_branch
} else {
let &branch_without_counter =
branches.iter().find(|&branch| self.branch_has_no_counter(branch)).expect(
@@ -547,75 +444,52 @@ impl<'a> MakeBcbCounters<'a> {
}
}
- /// At most, one of the branches (or its edge, from the branching_bcb, if the branch has
- /// multiple incoming edges) can have a counter computed by expression.
- ///
- /// If at least one of the branches leads outside of a loop (`found_loop_exit` is
- /// true), and at least one other branch does not exit the loop (the first of which
- /// is captured in `some_reloop_branch`), it's likely any reloop branch will be
- /// executed far more often than loop exit branch, making the reloop branch a better
- /// candidate for an expression.
- fn find_some_reloop_branch(
+ /// Tries to find a branch that leads back to the top of a loop, and that
+ /// doesn't already have a counter. Such branches are good candidates to
+ /// be given an expression (instead of a physical counter), because they
+ /// will tend to be executed more times than a loop-exit branch.
+ fn find_good_reloop_branch(
&self,
- traversal: &TraverseCoverageGraphWithLoops,
+ traversal: &TraverseCoverageGraphWithLoops<'_>,
branches: &[BcbBranch],
) -> Option<BcbBranch> {
- let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
-
- let mut some_reloop_branch: Option<BcbBranch> = None;
- for context in traversal.context_stack.iter().rev() {
- if let Some((backedge_from_bcbs, _)) = &context.loop_backedges {
- let mut found_loop_exit = false;
- for &branch in branches.iter() {
- if backedge_from_bcbs.iter().any(|&backedge_from_bcb| {
- self.bcb_dominates(branch.target_bcb, backedge_from_bcb)
- }) {
- if let Some(reloop_branch) = some_reloop_branch {
- if self.branch_has_no_counter(&reloop_branch) {
- // we already found a candidate reloop_branch that still
- // needs a counter
- continue;
- }
- }
- // The path from branch leads back to the top of the loop. Set this
- // branch as the `reloop_branch`. If this branch already has a
- // counter, and we find another reloop branch that doesn't have a
- // counter yet, that branch will be selected as the `reloop_branch`
- // instead.
- some_reloop_branch = Some(branch);
- } else {
- // The path from branch leads outside this loop
- found_loop_exit = true;
- }
- if found_loop_exit
- && some_reloop_branch.filter(branch_needs_a_counter).is_some()
- {
- // Found both a branch that exits the loop and a branch that returns
- // to the top of the loop (`reloop_branch`), and the `reloop_branch`
- // doesn't already have a counter.
- break;
+ // Consider each loop on the current traversal context stack, top-down.
+ for reloop_bcbs in traversal.reloop_bcbs_per_loop() {
+ let mut all_branches_exit_this_loop = true;
+
+ // Try to find a branch that doesn't exit this loop and doesn't
+ // already have a counter.
+ for &branch in branches {
+ // A branch is a reloop branch if it dominates any BCB that has
+ // an edge back to the loop header. (Other branches are exits.)
+ let is_reloop_branch = reloop_bcbs.iter().any(|&reloop_bcb| {
+ self.basic_coverage_blocks.dominates(branch.target_bcb, reloop_bcb)
+ });
+
+ if is_reloop_branch {
+ all_branches_exit_this_loop = false;
+ if self.branch_has_no_counter(&branch) {
+ // We found a good branch to be given an expression.
+ return Some(branch);
}
+ // Keep looking for another reloop branch without a counter.
+ } else {
+ // This branch exits the loop.
}
- if !found_loop_exit {
- debug!(
- "No branches exit the loop, so any branch without an existing \
- counter can have the `Expression`."
- );
- break;
- }
- if some_reloop_branch.is_some() {
- debug!(
- "Found a branch that exits the loop and a branch the loops back to \
- the top of the loop (`reloop_branch`). The `reloop_branch` will \
- get the `Expression`, as long as it still needs a counter."
- );
- break;
- }
- // else all branches exited this loop context, so run the same checks with
- // the outer loop(s)
}
+
+ if !all_branches_exit_this_loop {
+ // We found one or more reloop branches, but all of them already
+ // have counters. Let the caller choose one of the exit branches.
+ debug!("All reloop branches had counters; skip checking the other loops");
+ return None;
+ }
+
+ // All of the branches exit this loop, so keep looking for a good
+ // reloop branch for one of the outer loops.
}
- some_reloop_branch
+
+ None
}
#[inline]
@@ -661,9 +535,4 @@ impl<'a> MakeBcbCounters<'a> {
fn bcb_has_one_path_to_target(&self, bcb: BasicCoverageBlock) -> bool {
self.bcb_predecessors(bcb).len() <= 1
}
-
- #[inline]
- fn bcb_dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
- self.basic_coverage_blocks.dominates(dom, node)
- }
}
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index ff2254d69..6bab62aa8 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -1,10 +1,12 @@
+use rustc_data_structures::captures::Captures;
use rustc_data_structures::graph::dominators::{self, Dominators};
use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
use rustc_index::bit_set::BitSet;
use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};
+use rustc_middle::mir::{self, BasicBlock, TerminatorKind};
use std::cmp::Ordering;
+use std::collections::VecDeque;
use std::ops::{Index, IndexMut};
/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
@@ -36,9 +38,8 @@ impl CoverageGraph {
}
let bcb_data = &bcbs[bcb];
let mut bcb_successors = Vec::new();
- for successor in
- bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind)
- .filter_map(|successor_bb| bb_to_bcb[successor_bb])
+ for successor in bcb_filtered_successors(&mir_body, bcb_data.last_bb())
+ .filter_map(|successor_bb| bb_to_bcb[successor_bb])
{
if !seen[successor] {
seen[successor] = true;
@@ -80,10 +81,9 @@ impl CoverageGraph {
// intentionally omits unwind paths.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
- let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
let mut basic_blocks = Vec::new();
- for (bb, data) in mir_cfg_without_unwind {
+ for bb in short_circuit_preorder(mir_body, bcb_filtered_successors) {
if let Some(last) = basic_blocks.last() {
let predecessors = &mir_body.basic_blocks.predecessors()[bb];
if predecessors.len() > 1 || !predecessors.contains(last) {
@@ -109,7 +109,7 @@ impl CoverageGraph {
}
basic_blocks.push(bb);
- let term = data.terminator();
+ let term = mir_body[bb].terminator();
match term.kind {
TerminatorKind::Return { .. }
@@ -147,7 +147,7 @@ impl CoverageGraph {
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
| TerminatorKind::Call { .. }
- | TerminatorKind::GeneratorDrop
+ | TerminatorKind::CoroutineDrop
| TerminatorKind::Assert { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
@@ -288,9 +288,9 @@ rustc_index::newtype_index! {
/// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
/// a `Goto`, and merged with its successor into the same BCB.
///
-/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
+/// Each BCB with at least one computed coverage span will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
-/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
+/// disjoint coverage spans in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
@@ -316,11 +316,6 @@ impl BasicCoverageBlockData {
pub fn last_bb(&self) -> BasicBlock {
*self.basic_blocks.last().unwrap()
}
-
- #[inline(always)]
- pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
- &mir_body[self.last_bb()].terminator()
- }
}
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
@@ -362,26 +357,28 @@ impl std::fmt::Debug for BcbBranch {
}
}
-// Returns the `Terminator`s non-unwind successors.
+// Returns the subset of a block's successors that are relevant to the coverage
+// graph, i.e. those that do not represent unwinds or unreachable branches.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(
body: &'a mir::Body<'tcx>,
- term_kind: &'a TerminatorKind<'tcx>,
-) -> Box<dyn Iterator<Item = BasicBlock> + 'a> {
- Box::new(
- match &term_kind {
- // SwitchInt successors are never unwind, and all of them should be traversed.
- TerminatorKind::SwitchInt { ref targets, .. } => {
- None.into_iter().chain(targets.all_targets().into_iter().copied())
- }
- // For all other kinds, return only the first successor, if any, and ignore unwinds.
- // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
- // `next().into_iter()`) into the `mir::Successors` aliased type.
- _ => term_kind.successors().next().into_iter().chain((&[]).into_iter().copied()),
- }
- .filter(move |&successor| body[successor].terminator().kind != TerminatorKind::Unreachable),
- )
+ bb: BasicBlock,
+) -> impl Iterator<Item = BasicBlock> + Captures<'a> + Captures<'tcx> {
+ let terminator = body[bb].terminator();
+
+ let take_n_successors = match terminator.kind {
+ // SwitchInt successors are never unwinds, so all of them should be traversed.
+ TerminatorKind::SwitchInt { .. } => usize::MAX,
+ // For all other kinds, return only the first successor (if any), ignoring any
+ // unwind successors.
+ _ => 1,
+ };
+
+ terminator
+ .successors()
+ .take(take_n_successors)
+ .filter(move |&successor| body[successor].terminator().kind != TerminatorKind::Unreachable)
}
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
@@ -389,57 +386,72 @@ fn bcb_filtered_successors<'a, 'tcx>(
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
- /// From one or more backedges returning to a loop header.
- pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
-
- /// worklist, to be traversed, of CoverageGraph in the loop with the given loop
- /// backedges, such that the loop is the inner inner-most loop containing these
- /// CoverageGraph
- pub worklist: Vec<BasicCoverageBlock>,
+ /// BCB with one or more incoming loop backedges, indicating which loop
+ /// this context is for.
+ ///
+ /// If `None`, this is the non-loop context for the function as a whole.
+ loop_header: Option<BasicCoverageBlock>,
+
+ /// Worklist of BCBs to be processed in this context.
+ worklist: VecDeque<BasicCoverageBlock>,
}
-pub(super) struct TraverseCoverageGraphWithLoops {
- pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
- pub context_stack: Vec<TraversalContext>,
+pub(super) struct TraverseCoverageGraphWithLoops<'a> {
+ basic_coverage_blocks: &'a CoverageGraph,
+
+ backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+ context_stack: Vec<TraversalContext>,
visited: BitSet<BasicCoverageBlock>,
}
-impl TraverseCoverageGraphWithLoops {
- pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
- let start_bcb = basic_coverage_blocks.start_node();
+impl<'a> TraverseCoverageGraphWithLoops<'a> {
+ pub(super) fn new(basic_coverage_blocks: &'a CoverageGraph) -> Self {
let backedges = find_loop_backedges(basic_coverage_blocks);
- let context_stack =
- vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
+
+ let worklist = VecDeque::from([basic_coverage_blocks.start_node()]);
+ let context_stack = vec![TraversalContext { loop_header: None, worklist }];
+
// `context_stack` starts with a `TraversalContext` for the main function context (beginning
// with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
// of the stack as loops are entered, and popped off of the stack when a loop's worklist is
// exhausted.
let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
- Self { backedges, context_stack, visited }
+ Self { basic_coverage_blocks, backedges, context_stack, visited }
}
- pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
+ /// For each loop on the loop context stack (top-down), yields a list of BCBs
+ /// within that loop that have an outgoing edge back to the loop header.
+ pub(super) fn reloop_bcbs_per_loop(&self) -> impl Iterator<Item = &[BasicCoverageBlock]> {
+ self.context_stack
+ .iter()
+ .rev()
+ .filter_map(|context| context.loop_header)
+ .map(|header_bcb| self.backedges[header_bcb].as_slice())
+ }
+
+ pub(super) fn next(&mut self) -> Option<BasicCoverageBlock> {
debug!(
"TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
self.context_stack.iter().rev().collect::<Vec<_>>()
);
while let Some(context) = self.context_stack.last_mut() {
- if let Some(next_bcb) = context.worklist.pop() {
- if !self.visited.insert(next_bcb) {
- debug!("Already visited: {:?}", next_bcb);
+ if let Some(bcb) = context.worklist.pop_front() {
+ if !self.visited.insert(bcb) {
+ debug!("Already visited: {bcb:?}");
continue;
}
- debug!("Visiting {:?}", next_bcb);
- if self.backedges[next_bcb].len() > 0 {
- debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
+ debug!("Visiting {bcb:?}");
+
+ if self.backedges[bcb].len() > 0 {
+ debug!("{bcb:?} is a loop header! Start a new TraversalContext...");
self.context_stack.push(TraversalContext {
- loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
- worklist: Vec::new(),
+ loop_header: Some(bcb),
+ worklist: VecDeque::new(),
});
}
- self.extend_worklist(basic_coverage_blocks, next_bcb);
- return Some(next_bcb);
+ self.add_successors_to_worklists(bcb);
+ return Some(bcb);
} else {
// Strip contexts with empty worklists from the top of the stack
self.context_stack.pop();
@@ -449,13 +461,10 @@ impl TraverseCoverageGraphWithLoops {
None
}
- pub fn extend_worklist(
- &mut self,
- basic_coverage_blocks: &CoverageGraph,
- bcb: BasicCoverageBlock,
- ) {
- let successors = &basic_coverage_blocks.successors[bcb];
+ pub fn add_successors_to_worklists(&mut self, bcb: BasicCoverageBlock) {
+ let successors = &self.basic_coverage_blocks.successors[bcb];
debug!("{:?} has {} successors:", bcb, successors.len());
+
for &successor in successors {
if successor == bcb {
debug!(
@@ -464,56 +473,44 @@ impl TraverseCoverageGraphWithLoops {
bcb
);
// Don't re-add this successor to the worklist. We are already processing it.
+ // FIXME: This claims to skip just the self-successor, but it actually skips
+ // all other successors as well. Does that matter?
break;
}
- for context in self.context_stack.iter_mut().rev() {
- // Add successors of the current BCB to the appropriate context. Successors that
- // stay within a loop are added to the BCBs context worklist. Successors that
- // exit the loop (they are not dominated by the loop header) must be reachable
- // from other BCBs outside the loop, and they will be added to a different
- // worklist.
- //
- // Branching blocks (with more than one successor) must be processed before
- // blocks with only one successor, to prevent unnecessarily complicating
- // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
- // branching block would have given an `Expression` (or vice versa).
- let (some_successor_to_add, some_loop_header) =
- if let Some((_, loop_header)) = context.loop_backedges {
- if basic_coverage_blocks.dominates(loop_header, successor) {
- (Some(successor), Some(loop_header))
- } else {
- (None, None)
- }
- } else {
- (Some(successor), None)
- };
- if let Some(successor_to_add) = some_successor_to_add {
- if basic_coverage_blocks.successors[successor_to_add].len() > 1 {
- debug!(
- "{:?} successor is branching. Prioritize it at the beginning of \
- the {}",
- successor_to_add,
- if let Some(loop_header) = some_loop_header {
- format!("worklist for the loop headed by {loop_header:?}")
- } else {
- String::from("non-loop worklist")
- },
- );
- context.worklist.insert(0, successor_to_add);
- } else {
- debug!(
- "{:?} successor is non-branching. Defer it to the end of the {}",
- successor_to_add,
- if let Some(loop_header) = some_loop_header {
- format!("worklist for the loop headed by {loop_header:?}")
- } else {
- String::from("non-loop worklist")
- },
- );
- context.worklist.push(successor_to_add);
+
+ // Add successors of the current BCB to the appropriate context. Successors that
+ // stay within a loop are added to the BCBs context worklist. Successors that
+ // exit the loop (they are not dominated by the loop header) must be reachable
+ // from other BCBs outside the loop, and they will be added to a different
+ // worklist.
+ //
+ // Branching blocks (with more than one successor) must be processed before
+ // blocks with only one successor, to prevent unnecessarily complicating
+ // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
+ // branching block would have given an `Expression` (or vice versa).
+
+ let context = self
+ .context_stack
+ .iter_mut()
+ .rev()
+ .find(|context| match context.loop_header {
+ Some(loop_header) => {
+ self.basic_coverage_blocks.dominates(loop_header, successor)
}
- break;
- }
+ None => true,
+ })
+ .unwrap_or_else(|| bug!("should always fall back to the root non-loop context"));
+ debug!("adding to worklist for {:?}", context.loop_header);
+
+ // FIXME: The code below had debug messages claiming to add items to a
+ // particular end of the worklist, but was confused about which end was
+ // which. The existing behaviour has been preserved for now, but it's
+ // unclear what the intended behaviour was.
+
+ if self.basic_coverage_blocks.successors[successor].len() > 1 {
+ context.worklist.push_back(successor);
+ } else {
+ context.worklist.push_front(successor);
}
}
}
@@ -553,66 +550,28 @@ pub(super) fn find_loop_backedges(
backedges
}
-pub struct ShortCircuitPreorder<
- 'a,
- 'tcx,
- F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
-> {
+fn short_circuit_preorder<'a, 'tcx, F, Iter>(
body: &'a mir::Body<'tcx>,
- visited: BitSet<BasicBlock>,
- worklist: Vec<BasicBlock>,
filtered_successors: F,
-}
-
-impl<
- 'a,
- 'tcx,
- F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
-> ShortCircuitPreorder<'a, 'tcx, F>
-{
- pub fn new(
- body: &'a mir::Body<'tcx>,
- filtered_successors: F,
- ) -> ShortCircuitPreorder<'a, 'tcx, F> {
- let worklist = vec![mir::START_BLOCK];
-
- ShortCircuitPreorder {
- body,
- visited: BitSet::new_empty(body.basic_blocks.len()),
- worklist,
- filtered_successors,
- }
- }
-}
-
-impl<
- 'a,
- 'tcx,
- F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
-> Iterator for ShortCircuitPreorder<'a, 'tcx, F>
+) -> impl Iterator<Item = BasicBlock> + Captures<'a> + Captures<'tcx>
+where
+ F: Fn(&'a mir::Body<'tcx>, BasicBlock) -> Iter,
+ Iter: Iterator<Item = BasicBlock>,
{
- type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+ let mut visited = BitSet::new_empty(body.basic_blocks.len());
+ let mut worklist = vec![mir::START_BLOCK];
- fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
- while let Some(idx) = self.worklist.pop() {
- if !self.visited.insert(idx) {
+ std::iter::from_fn(move || {
+ while let Some(bb) = worklist.pop() {
+ if !visited.insert(bb) {
continue;
}
- let data = &self.body[idx];
-
- if let Some(ref term) = data.terminator {
- self.worklist.extend((self.filtered_successors)(&self.body, &term.kind));
- }
+ worklist.extend(filtered_successors(body, bb));
- return Some((idx, data));
+ return Some(bb);
}
None
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- let size = self.body.basic_blocks.len() - self.visited.count();
- (size, Some(size))
- }
+ })
}
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index c75d33eeb..97e4468a0 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -8,14 +8,12 @@ mod spans;
mod tests;
use self::counters::{BcbCounter, CoverageCounters};
-use self::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
-use self::spans::{CoverageSpan, CoverageSpans};
+use self::graph::CoverageGraph;
+use self::spans::CoverageSpans;
use crate::MirPass;
-use rustc_data_structures::graph::WithNumNodes;
use rustc_data_structures::sync::Lrc;
-use rustc_index::IndexVec;
use rustc_middle::hir;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::*;
@@ -28,18 +26,6 @@ use rustc_span::def_id::DefId;
use rustc_span::source_map::SourceMap;
use rustc_span::{ExpnKind, SourceFile, Span, Symbol};
-/// A simple error message wrapper for `coverage::Error`s.
-#[derive(Debug)]
-struct Error {
- message: String,
-}
-
-impl Error {
- pub fn from_string<T>(message: String) -> Result<T, Error> {
- Err(Self { message })
- }
-}
-
/// Inserts `StatementKind::Coverage` statements that either instrument the binary with injected
/// counters, via intrinsic `llvm.instrprof.increment`, and/or inject metadata used during codegen
/// to construct the coverage map.
@@ -154,7 +140,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let body_span = self.body_span;
////////////////////////////////////////////////////
- // Compute `CoverageSpan`s from the `CoverageGraph`.
+ // Compute coverage spans from the `CoverageGraph`.
let coverage_spans = CoverageSpans::generate_coverage_spans(
&self.mir_body,
fn_sig_span,
@@ -164,179 +150,106 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
////////////////////////////////////////////////////
// Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
- // every `CoverageSpan` has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
+ // every coverage span has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
// and all `Expression` dependencies (operands) are also generated, for any other
- // `BasicCoverageBlock`s not already associated with a `CoverageSpan`.
- //
- // Intermediate expressions (used to compute other `Expression` values), which have no
- // direct association with any `BasicCoverageBlock`, are accumulated inside `coverage_counters`.
- let result = self
- .coverage_counters
- .make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans);
-
- if let Ok(()) = result {
- ////////////////////////////////////////////////////
- // Remove the counter or edge counter from of each `CoverageSpan`s associated
- // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
- //
- // `Coverage` statements injected from `CoverageSpan`s will include the code regions
- // (source code start and end positions) to be counted by the associated counter.
- //
- // These `CoverageSpan`-associated counters are removed from their associated
- // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
- // are indirect counters (to be injected next, without associated code regions).
- self.inject_coverage_span_counters(coverage_spans);
-
- ////////////////////////////////////////////////////
- // For any remaining `BasicCoverageBlock` counters (that were not associated with
- // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s)
- // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
- // are in fact counted, even though they don't directly contribute to counting
- // their own independent code region's coverage.
- self.inject_indirect_counters();
-
- // Intermediate expressions will be injected as the final step, after generating
- // debug output, if any.
- ////////////////////////////////////////////////////
- };
-
- if let Err(e) = result {
- bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
- };
-
- ////////////////////////////////////////////////////
- // Finally, inject the intermediate expressions collected along the way.
- for intermediate_expression in &self.coverage_counters.intermediate_expressions {
- inject_intermediate_expression(
- self.mir_body,
- self.make_mir_coverage_kind(intermediate_expression),
- );
- }
+ // `BasicCoverageBlock`s not already associated with a coverage span.
+ let bcb_has_coverage_spans = |bcb| coverage_spans.bcb_has_coverage_spans(bcb);
+ self.coverage_counters
+ .make_bcb_counters(&self.basic_coverage_blocks, bcb_has_coverage_spans);
+
+ let mappings = self.create_mappings_and_inject_coverage_statements(&coverage_spans);
+
+ self.mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
+ function_source_hash: self.function_source_hash,
+ num_counters: self.coverage_counters.num_counters(),
+ expressions: self.coverage_counters.take_expressions(),
+ mappings,
+ }));
}
- /// Inject a counter for each `CoverageSpan`. There can be multiple `CoverageSpan`s for a given
- /// BCB, but only one actual counter needs to be incremented per BCB. `bb_counters` maps each
- /// `bcb` to its `Counter`, when injected. Subsequent `CoverageSpan`s for a BCB that already has
- /// a `Counter` will inject an `Expression` instead, and compute its value by adding `ZERO` to
- /// the BCB `Counter` value.
- fn inject_coverage_span_counters(&mut self, coverage_spans: Vec<CoverageSpan>) {
- let tcx = self.tcx;
- let source_map = tcx.sess.source_map();
+ /// For each [`BcbCounter`] associated with a BCB node or BCB edge, create
+ /// any corresponding mappings (for BCB nodes only), and inject any necessary
+ /// coverage statements into MIR.
+ fn create_mappings_and_inject_coverage_statements(
+ &mut self,
+ coverage_spans: &CoverageSpans,
+ ) -> Vec<Mapping> {
+ let source_map = self.tcx.sess.source_map();
let body_span = self.body_span;
- let file_name = Symbol::intern(&self.source_file.name.prefer_remapped().to_string_lossy());
-
- let mut bcb_counters = IndexVec::from_elem_n(None, self.basic_coverage_blocks.num_nodes());
- for covspan in coverage_spans {
- let bcb = covspan.bcb;
- let span = covspan.span;
- let counter_kind = if let Some(&counter_operand) = bcb_counters[bcb].as_ref() {
- self.coverage_counters.make_identity_counter(counter_operand)
- } else if let Some(counter_kind) = self.coverage_counters.take_bcb_counter(bcb) {
- bcb_counters[bcb] = Some(counter_kind.as_operand());
- counter_kind
- } else {
- bug!("Every BasicCoverageBlock should have a Counter or Expression");
- };
-
- let code_region = make_code_region(source_map, file_name, span, body_span);
- inject_statement(
- self.mir_body,
- self.make_mir_coverage_kind(&counter_kind),
- self.bcb_leader_bb(bcb),
- Some(code_region),
- );
- }
- }
-
- /// `inject_coverage_span_counters()` looped through the `CoverageSpan`s and injected the
- /// counter from the `CoverageSpan`s `BasicCoverageBlock`, removing it from the BCB in the
- /// process (via `take_counter()`).
- ///
- /// Any other counter associated with a `BasicCoverageBlock`, or its incoming edge, but not
- /// associated with a `CoverageSpan`, should only exist if the counter is an `Expression`
- /// dependency (one of the expression operands). Collect them, and inject the additional
- /// counters into the MIR, without a reportable coverage span.
- fn inject_indirect_counters(&mut self) {
- let mut bcb_counters_without_direct_coverage_spans = Vec::new();
- for (target_bcb, counter_kind) in self.coverage_counters.drain_bcb_counters() {
- bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
- }
- for ((from_bcb, target_bcb), counter_kind) in
- self.coverage_counters.drain_bcb_edge_counters()
- {
- bcb_counters_without_direct_coverage_spans.push((
- Some(from_bcb),
- target_bcb,
- counter_kind,
- ));
- }
+ use rustc_session::RemapFileNameExt;
+ let file_name =
+ Symbol::intern(&self.source_file.name.for_codegen(self.tcx.sess).to_string_lossy());
+
+ let mut mappings = Vec::new();
+
+ // Process the counters and spans associated with BCB nodes.
+ for (bcb, counter_kind) in self.coverage_counters.bcb_node_counters() {
+ let spans = coverage_spans.spans_for_bcb(bcb);
+ let has_mappings = !spans.is_empty();
+
+ // If this BCB has any coverage spans, add corresponding mappings to
+ // the mappings table.
+ if has_mappings {
+ let term = counter_kind.as_term();
+ mappings.extend(spans.iter().map(|&span| {
+ let code_region = make_code_region(source_map, file_name, span, body_span);
+ Mapping { code_region, term }
+ }));
+ }
- for (edge_from_bcb, target_bcb, counter_kind) in bcb_counters_without_direct_coverage_spans
- {
- match counter_kind {
- BcbCounter::Counter { .. } => {
- let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
- // The MIR edge starts `from_bb` (the outgoing / last BasicBlock in
- // `from_bcb`) and ends at `to_bb` (the incoming / first BasicBlock in the
- // `target_bcb`; also called the `leader_bb`).
- let from_bb = self.bcb_last_bb(from_bcb);
- let to_bb = self.bcb_leader_bb(target_bcb);
-
- let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
- debug!(
- "Edge {:?} (last {:?}) -> {:?} (leader {:?}) requires a new MIR \
- BasicBlock {:?}, for unclaimed edge counter {:?}",
- edge_from_bcb, from_bb, target_bcb, to_bb, new_bb, counter_kind,
- );
- new_bb
- } else {
- let target_bb = self.bcb_last_bb(target_bcb);
- debug!(
- "{:?} ({:?}) gets a new Coverage statement for unclaimed counter {:?}",
- target_bcb, target_bb, counter_kind,
- );
- target_bb
- };
-
- inject_statement(
- self.mir_body,
- self.make_mir_coverage_kind(&counter_kind),
- inject_to_bb,
- None,
- );
- }
- BcbCounter::Expression { .. } => inject_intermediate_expression(
+ let do_inject = match counter_kind {
+ // Counter-increment statements always need to be injected.
+ BcbCounter::Counter { .. } => true,
+ // The only purpose of expression-used statements is to detect
+ // when a mapping is unreachable, so we only inject them for
+ // expressions with one or more mappings.
+ BcbCounter::Expression { .. } => has_mappings,
+ };
+ if do_inject {
+ inject_statement(
self.mir_body,
- self.make_mir_coverage_kind(&counter_kind),
- ),
+ self.make_mir_coverage_kind(counter_kind),
+ self.basic_coverage_blocks[bcb].leader_bb(),
+ );
}
}
- }
- #[inline]
- fn bcb_leader_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
- self.bcb_data(bcb).leader_bb()
- }
+ // Process the counters associated with BCB edges.
+ for (from_bcb, to_bcb, counter_kind) in self.coverage_counters.bcb_edge_counters() {
+ let do_inject = match counter_kind {
+ // Counter-increment statements always need to be injected.
+ BcbCounter::Counter { .. } => true,
+ // BCB-edge expressions never have mappings, so they never need
+ // a corresponding statement.
+ BcbCounter::Expression { .. } => false,
+ };
+ if !do_inject {
+ continue;
+ }
- #[inline]
- fn bcb_last_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
- self.bcb_data(bcb).last_bb()
- }
+ // We need to inject a coverage statement into a new BB between the
+ // last BB of `from_bcb` and the first BB of `to_bcb`.
+ let from_bb = self.basic_coverage_blocks[from_bcb].last_bb();
+ let to_bb = self.basic_coverage_blocks[to_bcb].leader_bb();
+
+ let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
+ debug!(
+ "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
+ requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}",
+ );
+
+ // Inject a counter into the newly-created BB.
+ inject_statement(self.mir_body, self.make_mir_coverage_kind(&counter_kind), new_bb);
+ }
- #[inline]
- fn bcb_data(&self, bcb: BasicCoverageBlock) -> &BasicCoverageBlockData {
- &self.basic_coverage_blocks[bcb]
+ mappings
}
fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind {
match *counter_kind {
- BcbCounter::Counter { id } => {
- CoverageKind::Counter { function_source_hash: self.function_source_hash, id }
- }
- BcbCounter::Expression { id, lhs, op, rhs } => {
- CoverageKind::Expression { id, lhs, op, rhs }
- }
+ BcbCounter::Counter { id } => CoverageKind::CounterIncrement { id },
+ BcbCounter::Expression { id } => CoverageKind::ExpressionUsed { id },
}
}
}
@@ -364,42 +277,17 @@ fn inject_edge_counter_basic_block(
new_bb
}
-fn inject_statement(
- mir_body: &mut mir::Body<'_>,
- counter_kind: CoverageKind,
- bb: BasicBlock,
- some_code_region: Option<CodeRegion>,
-) {
- debug!(
- " injecting statement {:?} for {:?} at code region: {:?}",
- counter_kind, bb, some_code_region
- );
+fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb: BasicBlock) {
+ debug!(" injecting statement {counter_kind:?} for {bb:?}");
let data = &mut mir_body[bb];
let source_info = data.terminator().source_info;
let statement = Statement {
source_info,
- kind: StatementKind::Coverage(Box::new(Coverage {
- kind: counter_kind,
- code_region: some_code_region,
- })),
+ kind: StatementKind::Coverage(Box::new(Coverage { kind: counter_kind })),
};
data.statements.insert(0, statement);
}
-// Non-code expressions are injected into the coverage map, without generating executable code.
-fn inject_intermediate_expression(mir_body: &mut mir::Body<'_>, expression: CoverageKind) {
- debug_assert!(matches!(expression, CoverageKind::Expression { .. }));
- debug!(" injecting non-code expression {:?}", expression);
- let inject_in_bb = mir::START_BLOCK;
- let data = &mut mir_body[inject_in_bb];
- let source_info = data.terminator().source_info;
- let statement = Statement {
- source_info,
- kind: StatementKind::Coverage(Box::new(Coverage { kind: expression, code_region: None })),
- };
- data.statements.push(statement);
-}
-
/// Convert the Span into its file name, start line and column, and end line and column
fn make_code_region(
source_map: &SourceMap,
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 56365c5d4..809407f89 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -2,100 +2,31 @@ use super::*;
use rustc_data_structures::captures::Captures;
use rustc_middle::mir::coverage::*;
-use rustc_middle::mir::{self, Body, Coverage, CoverageInfo};
+use rustc_middle::mir::{Body, Coverage, CoverageIdsInfo};
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, TyCtxt};
-use rustc_span::def_id::DefId;
/// A `query` provider for retrieving coverage information injected into MIR.
pub(crate) fn provide(providers: &mut Providers) {
- providers.coverageinfo = |tcx, def_id| coverageinfo(tcx, def_id);
- providers.covered_code_regions = |tcx, def_id| covered_code_regions(tcx, def_id);
+ providers.coverage_ids_info = |tcx, def_id| coverage_ids_info(tcx, def_id);
}
-/// Coverage codegen needs to know the total number of counter IDs and expression IDs that have
-/// been used by a function's coverage mappings. These totals are used to create vectors to hold
-/// the relevant counter and expression data, and the maximum counter ID (+ 1) is also needed by
-/// the `llvm.instrprof.increment` intrinsic.
-///
-/// MIR optimization may split and duplicate some BasicBlock sequences, or optimize out some code
-/// including injected counters. (It is OK if some counters are optimized out, but those counters
-/// are still included in the total `num_counters` or `num_expressions`.) Simply counting the
-/// calls may not work; but computing the number of counters or expressions by adding `1` to the
-/// highest ID (for a given instrumented function) is valid.
-///
-/// It's possible for a coverage expression to remain in MIR while one or both of its operands
-/// have been optimized away. To avoid problems in codegen, we include those operands' IDs when
-/// determining the maximum counter/expression ID, even if the underlying counter/expression is
-/// no longer present.
-struct CoverageVisitor {
- max_counter_id: CounterId,
- max_expression_id: ExpressionId,
-}
-
-impl CoverageVisitor {
- /// Updates `max_counter_id` to the maximum encountered counter ID.
- #[inline(always)]
- fn update_max_counter_id(&mut self, counter_id: CounterId) {
- self.max_counter_id = self.max_counter_id.max(counter_id);
- }
-
- /// Updates `max_expression_id` to the maximum encountered expression ID.
- #[inline(always)]
- fn update_max_expression_id(&mut self, expression_id: ExpressionId) {
- self.max_expression_id = self.max_expression_id.max(expression_id);
- }
-
- fn update_from_expression_operand(&mut self, operand: Operand) {
- match operand {
- Operand::Counter(id) => self.update_max_counter_id(id),
- Operand::Expression(id) => self.update_max_expression_id(id),
- Operand::Zero => {}
- }
- }
-
- fn visit_body(&mut self, body: &Body<'_>) {
- for coverage in all_coverage_in_mir_body(body) {
- self.visit_coverage(coverage);
- }
- }
-
- fn visit_coverage(&mut self, coverage: &Coverage) {
- match coverage.kind {
- CoverageKind::Counter { id, .. } => self.update_max_counter_id(id),
- CoverageKind::Expression { id, lhs, rhs, .. } => {
- self.update_max_expression_id(id);
- self.update_from_expression_operand(lhs);
- self.update_from_expression_operand(rhs);
- }
- CoverageKind::Unreachable => {}
- }
- }
-}
-
-fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) -> CoverageInfo {
+/// Query implementation for `coverage_ids_info`.
+fn coverage_ids_info<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance_def: ty::InstanceDef<'tcx>,
+) -> CoverageIdsInfo {
let mir_body = tcx.instance_mir(instance_def);
- let mut coverage_visitor = CoverageVisitor {
- max_counter_id: CounterId::START,
- max_expression_id: ExpressionId::START,
- };
-
- coverage_visitor.visit_body(mir_body);
-
- // Add 1 to the highest IDs to get the total number of IDs.
- CoverageInfo {
- num_counters: (coverage_visitor.max_counter_id + 1).as_u32(),
- num_expressions: (coverage_visitor.max_expression_id + 1).as_u32(),
- }
-}
+ let max_counter_id = all_coverage_in_mir_body(mir_body)
+ .filter_map(|coverage| match coverage.kind {
+ CoverageKind::CounterIncrement { id } => Some(id),
+ _ => None,
+ })
+ .max()
+ .unwrap_or(CounterId::START);
-fn covered_code_regions(tcx: TyCtxt<'_>, def_id: DefId) -> Vec<&CodeRegion> {
- let body = mir_body(tcx, def_id);
- all_coverage_in_mir_body(body)
- // Not all coverage statements have an attached code region.
- .filter_map(|coverage| coverage.code_region.as_ref())
- .collect()
+ CoverageIdsInfo { max_counter_id }
}
fn all_coverage_in_mir_body<'a, 'tcx>(
@@ -115,11 +46,3 @@ fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool {
let scope_data = &body.source_scopes[statement.source_info.scope];
scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some()
}
-
-/// This function ensures we obtain the correct MIR for the given item irrespective of
-/// whether that means const mir or runtime mir. For `const fn` this opts for runtime
-/// mir.
-fn mir_body(tcx: TyCtxt<'_>, def_id: DefId) -> &mir::Body<'_> {
- let def = ty::InstanceDef::Item(def_id);
- tcx.instance_mir(def)
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index ed0e104d6..b318134ae 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,26 +1,48 @@
-use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB};
+use std::cell::OnceCell;
use rustc_data_structures::graph::WithNumNodes;
-use rustc_middle::mir::{
- self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
- TerminatorKind,
-};
-use rustc_span::source_map::original_sp;
-use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
+use rustc_index::IndexVec;
+use rustc_middle::mir;
+use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol, DUMMY_SP};
-use std::cell::OnceCell;
+use super::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+
+mod from_mir;
-#[derive(Debug, Copy, Clone)]
-pub(super) enum CoverageStatement {
- Statement(BasicBlock, Span, usize),
- Terminator(BasicBlock, Span),
+pub(super) struct CoverageSpans {
+ /// Map from BCBs to their list of coverage spans.
+ bcb_to_spans: IndexVec<BasicCoverageBlock, Vec<Span>>,
}
-impl CoverageStatement {
- pub fn span(&self) -> Span {
- match self {
- Self::Statement(_, span, _) | Self::Terminator(_, span) => *span,
+impl CoverageSpans {
+ pub(super) fn generate_coverage_spans(
+ mir_body: &mir::Body<'_>,
+ fn_sig_span: Span,
+ body_span: Span,
+ basic_coverage_blocks: &CoverageGraph,
+ ) -> Self {
+ let coverage_spans = CoverageSpansGenerator::generate_coverage_spans(
+ mir_body,
+ fn_sig_span,
+ body_span,
+ basic_coverage_blocks,
+ );
+
+ // Group the coverage spans by BCB, with the BCBs in sorted order.
+ let mut bcb_to_spans = IndexVec::from_elem_n(Vec::new(), basic_coverage_blocks.num_nodes());
+ for CoverageSpan { bcb, span, .. } in coverage_spans {
+ bcb_to_spans[bcb].push(span);
}
+
+ Self { bcb_to_spans }
+ }
+
+ pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool {
+ !self.bcb_to_spans[bcb].is_empty()
+ }
+
+ pub(super) fn spans_for_bcb(&self, bcb: BasicCoverageBlock) -> &[Span] {
+ &self.bcb_to_spans[bcb]
}
}
@@ -28,87 +50,55 @@ impl CoverageStatement {
/// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
/// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
/// transforms can combine adjacent `Span`s and `CoverageSpan` from the same BCB, merging the
-/// `CoverageStatement` vectors, and the `Span`s to cover the extent of the combined `Span`s.
+/// `merged_spans` vectors, and the `Span`s to cover the extent of the combined `Span`s.
///
-/// Note: A `CoverageStatement` merged into another CoverageSpan may come from a `BasicBlock` that
+/// Note: A span merged into another CoverageSpan may come from a `BasicBlock` that
/// is not part of the `CoverageSpan` bcb if the statement was included because it's `Span` matches
/// or is subsumed by the `Span` associated with this `CoverageSpan`, and it's `BasicBlock`
/// `dominates()` the `BasicBlock`s in this `CoverageSpan`.
#[derive(Debug, Clone)]
-pub(super) struct CoverageSpan {
+struct CoverageSpan {
pub span: Span,
pub expn_span: Span,
pub current_macro_or_none: OnceCell<Option<Symbol>>,
pub bcb: BasicCoverageBlock,
- pub coverage_statements: Vec<CoverageStatement>,
+ /// List of all the original spans from MIR that have been merged into this
+ /// span. Mainly used to precisely skip over gaps when truncating a span.
+ pub merged_spans: Vec<Span>,
pub is_closure: bool,
}
impl CoverageSpan {
pub fn for_fn_sig(fn_sig_span: Span) -> Self {
- Self {
- span: fn_sig_span,
- expn_span: fn_sig_span,
- current_macro_or_none: Default::default(),
- bcb: START_BCB,
- coverage_statements: vec![],
- is_closure: false,
- }
+ Self::new(fn_sig_span, fn_sig_span, START_BCB, false)
}
- pub fn for_statement(
- statement: &Statement<'_>,
+ pub(super) fn new(
span: Span,
expn_span: Span,
bcb: BasicCoverageBlock,
- bb: BasicBlock,
- stmt_index: usize,
+ is_closure: bool,
) -> Self {
- let is_closure = match statement.kind {
- StatementKind::Assign(box (_, Rvalue::Aggregate(box ref kind, _))) => {
- matches!(kind, AggregateKind::Closure(_, _) | AggregateKind::Generator(_, _, _))
- }
- _ => false,
- };
-
Self {
span,
expn_span,
current_macro_or_none: Default::default(),
bcb,
- coverage_statements: vec![CoverageStatement::Statement(bb, span, stmt_index)],
+ merged_spans: vec![span],
is_closure,
}
}
- pub fn for_terminator(
- span: Span,
- expn_span: Span,
- bcb: BasicCoverageBlock,
- bb: BasicBlock,
- ) -> Self {
- Self {
- span,
- expn_span,
- current_macro_or_none: Default::default(),
- bcb,
- coverage_statements: vec![CoverageStatement::Terminator(bb, span)],
- is_closure: false,
- }
- }
-
pub fn merge_from(&mut self, mut other: CoverageSpan) {
debug_assert!(self.is_mergeable(&other));
self.span = self.span.to(other.span);
- self.coverage_statements.append(&mut other.coverage_statements);
+ self.merged_spans.append(&mut other.merged_spans);
}
pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
- self.coverage_statements.retain(|covstmt| covstmt.span().hi() <= cutoff_pos);
- if let Some(highest_covstmt) =
- self.coverage_statements.iter().max_by_key(|covstmt| covstmt.span().hi())
- {
- self.span = self.span.with_hi(highest_covstmt.span().hi());
+ self.merged_spans.retain(|span| span.hi() <= cutoff_pos);
+ if let Some(max_hi) = self.merged_spans.iter().map(|span| span.hi()).max() {
+ self.span = self.span.with_hi(max_hi);
}
}
@@ -139,11 +129,12 @@ impl CoverageSpan {
/// If the span is part of a macro, and the macro is visible (expands directly to the given
/// body_span), returns the macro name symbol.
pub fn visible_macro(&self, body_span: Span) -> Option<Symbol> {
- if let Some(current_macro) = self.current_macro() && self
- .expn_span
- .parent_callsite()
- .unwrap_or_else(|| bug!("macro must have a parent"))
- .eq_ctxt(body_span)
+ if let Some(current_macro) = self.current_macro()
+ && self
+ .expn_span
+ .parent_callsite()
+ .unwrap_or_else(|| bug!("macro must have a parent"))
+ .eq_ctxt(body_span)
{
return Some(current_macro);
}
@@ -162,13 +153,7 @@ impl CoverageSpan {
/// * Merge spans that represent continuous (both in source code and control flow), non-branching
/// execution
/// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
-pub struct CoverageSpans<'a, 'tcx> {
- /// The MIR, used to look up `BasicBlockData`.
- mir_body: &'a mir::Body<'tcx>,
-
- /// A `Span` covering the signature of function for the MIR.
- fn_sig_span: Span,
-
+struct CoverageSpansGenerator<'a> {
/// A `Span` covering the function body of the MIR (typically from left curly brace to right
/// curly brace).
body_span: Span,
@@ -178,7 +163,7 @@ pub struct CoverageSpans<'a, 'tcx> {
/// The initial set of `CoverageSpan`s, sorted by `Span` (`lo` and `hi`) and by relative
/// dominance between the `BasicCoverageBlock`s of equal `Span`s.
- sorted_spans_iter: Option<std::vec::IntoIter<CoverageSpan>>,
+ sorted_spans_iter: std::vec::IntoIter<CoverageSpan>,
/// The current `CoverageSpan` to compare to its `prev`, to possibly merge, discard, force the
/// discard of the `prev` (and or `pending_dups`), or keep both (with `prev` moved to
@@ -200,9 +185,6 @@ pub struct CoverageSpans<'a, 'tcx> {
/// is mutated.
prev_original_span: Span,
- /// A copy of the expn_span from the prior iteration.
- prev_expn_span: Option<Span>,
-
/// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
/// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
/// If a new `curr` span also fits this criteria (compared to an existing list of
@@ -218,7 +200,7 @@ pub struct CoverageSpans<'a, 'tcx> {
refined_spans: Vec<CoverageSpan>,
}
-impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
+impl<'a> CoverageSpansGenerator<'a> {
/// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
/// counted.
///
@@ -241,109 +223,79 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
/// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
/// to be).
pub(super) fn generate_coverage_spans(
- mir_body: &'a mir::Body<'tcx>,
+ mir_body: &mir::Body<'_>,
fn_sig_span: Span, // Ensured to be same SourceFile and SyntaxContext as `body_span`
body_span: Span,
basic_coverage_blocks: &'a CoverageGraph,
) -> Vec<CoverageSpan> {
- let mut coverage_spans = CoverageSpans {
+ let sorted_spans = from_mir::mir_to_initial_sorted_coverage_spans(
mir_body,
fn_sig_span,
body_span,
basic_coverage_blocks,
- sorted_spans_iter: None,
- refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
+ );
+
+ let coverage_spans = Self {
+ body_span,
+ basic_coverage_blocks,
+ sorted_spans_iter: sorted_spans.into_iter(),
some_curr: None,
- curr_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
+ curr_original_span: DUMMY_SP,
some_prev: None,
- prev_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
- prev_expn_span: None,
+ prev_original_span: DUMMY_SP,
pending_dups: Vec::new(),
+ refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
};
- let sorted_spans = coverage_spans.mir_to_initial_sorted_coverage_spans();
-
- coverage_spans.sorted_spans_iter = Some(sorted_spans.into_iter());
-
coverage_spans.to_refined_spans()
}
- fn mir_to_initial_sorted_coverage_spans(&self) -> Vec<CoverageSpan> {
- let mut initial_spans =
- Vec::<CoverageSpan>::with_capacity(self.mir_body.basic_blocks.len() * 2);
- for (bcb, bcb_data) in self.basic_coverage_blocks.iter_enumerated() {
- initial_spans.extend(self.bcb_to_initial_coverage_spans(bcb, bcb_data));
- }
-
- if initial_spans.is_empty() {
- // This can happen if, for example, the function is unreachable (contains only a
- // `BasicBlock`(s) with an `Unreachable` terminator).
- return initial_spans;
- }
-
- initial_spans.push(CoverageSpan::for_fn_sig(self.fn_sig_span));
-
- initial_spans.sort_by(|a, b| {
- // First sort by span start.
- Ord::cmp(&a.span.lo(), &b.span.lo())
- // If span starts are the same, sort by span end in reverse order.
- // This ensures that if spans A and B are adjacent in the list,
- // and they overlap but are not equal, then either:
- // - Span A extends further left, or
- // - Both have the same start and span A extends further right
- .then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
- // If both spans are equal, sort the BCBs in dominator order,
- // so that dominating BCBs come before other BCBs they dominate.
- .then_with(|| self.basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb))
- // If two spans are otherwise identical, put closure spans first,
- // as this seems to be what the refinement step expects.
- .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
- });
-
- initial_spans
- }
-
/// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
/// de-duplicated `CoverageSpan`s.
fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
while self.next_coverage_span() {
+ // For the first span we don't have `prev` set, so most of the
+ // span-processing steps don't make sense yet.
if self.some_prev.is_none() {
debug!(" initial span");
- self.check_invoked_macro_name_span();
- } else if self.curr().is_mergeable(self.prev()) {
- debug!(" same bcb (and neither is a closure), merge with prev={:?}", self.prev());
+ self.maybe_push_macro_name_span();
+ continue;
+ }
+
+ // The remaining cases assume that `prev` and `curr` are set.
+ let prev = self.prev();
+ let curr = self.curr();
+
+ if curr.is_mergeable(prev) {
+ debug!(" same bcb (and neither is a closure), merge with prev={prev:?}");
let prev = self.take_prev();
self.curr_mut().merge_from(prev);
- self.check_invoked_macro_name_span();
+ self.maybe_push_macro_name_span();
// Note that curr.span may now differ from curr_original_span
- } else if self.prev_ends_before_curr() {
+ } else if prev.span.hi() <= curr.span.lo() {
debug!(
- " different bcbs and disjoint spans, so keep curr for next iter, and add \
- prev={:?}",
- self.prev()
+ " different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
);
let prev = self.take_prev();
self.push_refined_span(prev);
- self.check_invoked_macro_name_span();
- } else if self.prev().is_closure {
+ self.maybe_push_macro_name_span();
+ } else if prev.is_closure {
// drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
// next iter
debug!(
- " curr overlaps a closure (prev). Drop curr and keep prev for next iter. \
- prev={:?}",
- self.prev()
+ " curr overlaps a closure (prev). Drop curr and keep prev for next iter. prev={prev:?}",
);
- self.take_curr();
- } else if self.curr().is_closure {
+ self.take_curr(); // Discards curr.
+ } else if curr.is_closure {
self.carve_out_span_for_closure();
- } else if self.prev_original_span == self.curr().span {
+ } else if self.prev_original_span == curr.span {
// Note that this compares the new (`curr`) span to `prev_original_span`.
// In this branch, the actual span byte range of `prev_original_span` is not
// important. What is important is knowing whether the new `curr` span was
// **originally** the same as the original span of `prev()`. The original spans
// reflect their original sort order, and for equal spans, conveys a partial
// ordering based on CFG dominator priority.
- if self.prev().is_macro_expansion() && self.curr().is_macro_expansion() {
+ if prev.is_macro_expansion() && curr.is_macro_expansion() {
// Macros that expand to include branching (such as
// `assert_eq!()`, `assert_ne!()`, `info!()`, `debug!()`, or
// `trace!()`) typically generate callee spans with identical
@@ -357,23 +309,24 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
debug!(
" curr and prev are part of a macro expansion, and curr has the same span \
as prev, but is in a different bcb. Drop curr and keep prev for next iter. \
- prev={:?}",
- self.prev()
+ prev={prev:?}",
);
- self.take_curr();
+ self.take_curr(); // Discards curr.
} else {
- self.hold_pending_dups_unless_dominated();
+ self.update_pending_dups();
}
} else {
self.cutoff_prev_at_overlapping_curr();
- self.check_invoked_macro_name_span();
+ self.maybe_push_macro_name_span();
}
}
- debug!(" AT END, adding last prev={:?}", self.prev());
let prev = self.take_prev();
- let pending_dups = self.pending_dups.split_off(0);
- for dup in pending_dups {
+ debug!(" AT END, adding last prev={prev:?}");
+
+ // Take `pending_dups` so that we can drain it while calling self methods.
+ // It is never used as a field after this point.
+ for dup in std::mem::take(&mut self.pending_dups) {
debug!(" ...adding at least one pending dup={:?}", dup);
self.push_refined_span(dup);
}
@@ -403,91 +356,46 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
}
fn push_refined_span(&mut self, covspan: CoverageSpan) {
- let len = self.refined_spans.len();
- if len > 0 {
- let last = &mut self.refined_spans[len - 1];
- if last.is_mergeable(&covspan) {
- debug!(
- "merging new refined span with last refined span, last={:?}, covspan={:?}",
- last, covspan
- );
- last.merge_from(covspan);
- return;
- }
+ if let Some(last) = self.refined_spans.last_mut()
+ && last.is_mergeable(&covspan)
+ {
+ // Instead of pushing the new span, merge it with the last refined span.
+ debug!(?last, ?covspan, "merging new refined span with last refined span");
+ last.merge_from(covspan);
+ } else {
+ self.refined_spans.push(covspan);
}
- self.refined_spans.push(covspan)
}
- fn check_invoked_macro_name_span(&mut self) {
- if let Some(visible_macro) = self.curr().visible_macro(self.body_span) {
- if !self
- .prev_expn_span
- .is_some_and(|prev_expn_span| self.curr().expn_span.ctxt() == prev_expn_span.ctxt())
- {
- let merged_prefix_len = self.curr_original_span.lo() - self.curr().span.lo();
- let after_macro_bang =
- merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
- if self.curr().span.lo() + after_macro_bang > self.curr().span.hi() {
- // Something is wrong with the macro name span;
- // return now to avoid emitting malformed mappings.
- // FIXME(#117788): Track down why this happens.
- return;
- }
- let mut macro_name_cov = self.curr().clone();
- self.curr_mut().span =
- self.curr().span.with_lo(self.curr().span.lo() + after_macro_bang);
- macro_name_cov.span =
- macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
- debug!(
- " and curr starts a new macro expansion, so add a new span just for \
- the macro `{}!`, new span={:?}",
- visible_macro, macro_name_cov
- );
- self.push_refined_span(macro_name_cov);
- }
+ /// If `curr` is part of a new macro expansion, carve out and push a separate
+ /// span that ends just after the macro name and its subsequent `!`.
+ fn maybe_push_macro_name_span(&mut self) {
+ let curr = self.curr();
+
+ let Some(visible_macro) = curr.visible_macro(self.body_span) else { return };
+ if let Some(prev) = &self.some_prev
+ && prev.expn_span.eq_ctxt(curr.expn_span)
+ {
+ return;
}
- }
- // Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
- // the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
- // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
- // merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
- // `Statement`s and/or `Terminator`s.)
- fn bcb_to_initial_coverage_spans(
- &self,
- bcb: BasicCoverageBlock,
- bcb_data: &'a BasicCoverageBlockData,
- ) -> Vec<CoverageSpan> {
- bcb_data
- .basic_blocks
- .iter()
- .flat_map(|&bb| {
- let data = &self.mir_body[bb];
- data.statements
- .iter()
- .enumerate()
- .filter_map(move |(index, statement)| {
- filtered_statement_span(statement).map(|span| {
- CoverageSpan::for_statement(
- statement,
- function_source_span(span, self.body_span),
- span,
- bcb,
- bb,
- index,
- )
- })
- })
- .chain(filtered_terminator_span(data.terminator()).map(|span| {
- CoverageSpan::for_terminator(
- function_source_span(span, self.body_span),
- span,
- bcb,
- bb,
- )
- }))
- })
- .collect()
+ let merged_prefix_len = self.curr_original_span.lo() - curr.span.lo();
+ let after_macro_bang = merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
+ if self.curr().span.lo() + after_macro_bang > self.curr().span.hi() {
+ // Something is wrong with the macro name span;
+ // return now to avoid emitting malformed mappings.
+ // FIXME(#117788): Track down why this happens.
+ return;
+ }
+ let mut macro_name_cov = curr.clone();
+ self.curr_mut().span = curr.span.with_lo(curr.span.lo() + after_macro_bang);
+ macro_name_cov.span =
+ macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
+ debug!(
+ " and curr starts a new macro expansion, so add a new span just for \
+ the macro `{visible_macro}!`, new span={macro_name_cov:?}",
+ );
+ self.push_refined_span(macro_name_cov);
}
fn curr(&self) -> &CoverageSpan {
@@ -502,6 +410,12 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
.unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
}
+ /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
+ /// `curr` coverage span.
+ fn take_curr(&mut self) -> CoverageSpan {
+ self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+ }
+
fn prev(&self) -> &CoverageSpan {
self.some_prev
.as_ref()
@@ -527,82 +441,78 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
/// `pending_dups` could have as few as one span)
/// In either case, no more spans will match the span of `pending_dups`, so
/// add the `pending_dups` if they don't overlap `curr`, and clear the list.
- fn check_pending_dups(&mut self) {
- if let Some(dup) = self.pending_dups.last() && dup.span != self.prev().span {
- debug!(
- " SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
- previous iteration, or prev started a new disjoint span"
- );
- if dup.span.hi() <= self.curr().span.lo() {
- let pending_dups = self.pending_dups.split_off(0);
- for dup in pending_dups.into_iter() {
- debug!(" ...adding at least one pending={:?}", dup);
- self.push_refined_span(dup);
- }
- } else {
- self.pending_dups.clear();
+ fn maybe_flush_pending_dups(&mut self) {
+ let Some(last_dup) = self.pending_dups.last() else { return };
+ if last_dup.span == self.prev().span {
+ return;
+ }
+
+ debug!(
+ " SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
+ previous iteration, or prev started a new disjoint span"
+ );
+ if last_dup.span.hi() <= self.curr().span.lo() {
+ // Temporarily steal `pending_dups` into a local, so that we can
+ // drain it while calling other self methods.
+ let mut pending_dups = std::mem::take(&mut self.pending_dups);
+ for dup in pending_dups.drain(..) {
+ debug!(" ...adding at least one pending={:?}", dup);
+ self.push_refined_span(dup);
}
+ // The list of dups is now empty, but we can recycle its capacity.
+ assert!(pending_dups.is_empty() && self.pending_dups.is_empty());
+ self.pending_dups = pending_dups;
+ } else {
+ self.pending_dups.clear();
}
}
/// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
fn next_coverage_span(&mut self) -> bool {
if let Some(curr) = self.some_curr.take() {
- self.prev_expn_span = Some(curr.expn_span);
self.some_prev = Some(curr);
self.prev_original_span = self.curr_original_span;
}
- while let Some(curr) = self.sorted_spans_iter.as_mut().unwrap().next() {
+ while let Some(curr) = self.sorted_spans_iter.next() {
debug!("FOR curr={:?}", curr);
- if self.some_prev.is_some() && self.prev_starts_after_next(&curr) {
+ if let Some(prev) = &self.some_prev && prev.span.lo() > curr.span.lo() {
+ // Skip curr because prev has already advanced beyond the end of curr.
+ // This can only happen if a prior iteration updated `prev` to skip past
+ // a region of code, such as skipping past a closure.
debug!(
" prev.span starts after curr.span, so curr will be dropped (skipping past \
- closure?); prev={:?}",
- self.prev()
+ closure?); prev={prev:?}",
);
} else {
// Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
// by `self.curr_mut().merge_from(prev)`.
self.curr_original_span = curr.span;
self.some_curr.replace(curr);
- self.check_pending_dups();
+ self.maybe_flush_pending_dups();
return true;
}
}
false
}
- /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
- /// `curr` coverage span.
- fn take_curr(&mut self) -> CoverageSpan {
- self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
- }
-
- /// Returns true if the curr span should be skipped because prev has already advanced beyond the
- /// end of curr. This can only happen if a prior iteration updated `prev` to skip past a region
- /// of code, such as skipping past a closure.
- fn prev_starts_after_next(&self, next_curr: &CoverageSpan) -> bool {
- self.prev().span.lo() > next_curr.span.lo()
- }
-
- /// Returns true if the curr span starts past the end of the prev span, which means they don't
- /// overlap, so we now know the prev can be added to the refined coverage spans.
- fn prev_ends_before_curr(&self) -> bool {
- self.prev().span.hi() <= self.curr().span.lo()
- }
-
/// If `prev`s span extends left of the closure (`curr`), carve out the closure's span from
/// `prev`'s span. (The closure's coverage counters will be injected when processing the
/// closure's own MIR.) Add the portion of the span to the left of the closure; and if the span
/// extends to the right of the closure, update `prev` to that portion of the span. For any
/// `pending_dups`, repeat the same process.
fn carve_out_span_for_closure(&mut self) {
- let curr_span = self.curr().span;
- let left_cutoff = curr_span.lo();
- let right_cutoff = curr_span.hi();
- let has_pre_closure_span = self.prev().span.lo() < right_cutoff;
- let has_post_closure_span = self.prev().span.hi() > right_cutoff;
- let mut pending_dups = self.pending_dups.split_off(0);
+ let prev = self.prev();
+ let curr = self.curr();
+
+ let left_cutoff = curr.span.lo();
+ let right_cutoff = curr.span.hi();
+ let has_pre_closure_span = prev.span.lo() < right_cutoff;
+ let has_post_closure_span = prev.span.hi() > right_cutoff;
+
+ // Temporarily steal `pending_dups` into a local, so that we can
+ // mutate and/or drain it while calling other self methods.
+ let mut pending_dups = std::mem::take(&mut self.pending_dups);
+
if has_pre_closure_span {
let mut pre_closure = self.prev().clone();
pre_closure.span = pre_closure.span.with_hi(left_cutoff);
@@ -616,6 +526,7 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
}
self.push_refined_span(pre_closure);
}
+
if has_post_closure_span {
// Mutate `prev.span()` to start after the closure (and discard curr).
// (**NEVER** update `prev_original_span` because it affects the assumptions
@@ -626,12 +537,15 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
debug!(" ...and at least one overlapping dup={:?}", dup);
dup.span = dup.span.with_lo(right_cutoff);
}
- self.pending_dups.append(&mut pending_dups);
- let closure_covspan = self.take_curr();
+ let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
self.push_refined_span(closure_covspan); // since self.prev() was already updated
} else {
pending_dups.clear();
}
+
+ // Restore the modified post-closure spans, or the empty vector's capacity.
+ assert!(self.pending_dups.is_empty());
+ self.pending_dups = pending_dups;
}
/// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
@@ -648,26 +562,28 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
/// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
/// until their disposition is determined. In this latter case, the `prev` dup is moved into
/// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
- fn hold_pending_dups_unless_dominated(&mut self) {
+ fn update_pending_dups(&mut self) {
+ let prev_bcb = self.prev().bcb;
+ let curr_bcb = self.curr().bcb;
+
// Equal coverage spans are ordered by dominators before dominated (if any), so it should be
// impossible for `curr` to dominate any previous `CoverageSpan`.
- debug_assert!(!self.span_bcb_dominates(self.curr(), self.prev()));
+ debug_assert!(!self.basic_coverage_blocks.dominates(curr_bcb, prev_bcb));
let initial_pending_count = self.pending_dups.len();
if initial_pending_count > 0 {
- let mut pending_dups = self.pending_dups.split_off(0);
- pending_dups.retain(|dup| !self.span_bcb_dominates(dup, self.curr()));
- self.pending_dups.append(&mut pending_dups);
- if self.pending_dups.len() < initial_pending_count {
+ self.pending_dups
+ .retain(|dup| !self.basic_coverage_blocks.dominates(dup.bcb, curr_bcb));
+
+ let n_discarded = initial_pending_count - self.pending_dups.len();
+ if n_discarded > 0 {
debug!(
- " discarded {} of {} pending_dups that dominated curr",
- initial_pending_count - self.pending_dups.len(),
- initial_pending_count
+ " discarded {n_discarded} of {initial_pending_count} pending_dups that dominated curr",
);
}
}
- if self.span_bcb_dominates(self.prev(), self.curr()) {
+ if self.basic_coverage_blocks.dominates(prev_bcb, curr_bcb) {
debug!(
" different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
self.prev()
@@ -720,7 +636,7 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
if self.pending_dups.is_empty() {
let curr_span = self.curr().span;
self.prev_mut().cutoff_statements_at(curr_span.lo());
- if self.prev().coverage_statements.is_empty() {
+ if self.prev().merged_spans.is_empty() {
debug!(" ... no non-overlapping statements to add");
} else {
debug!(" ... adding modified prev={:?}", self.prev());
@@ -732,109 +648,4 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
self.pending_dups.clear();
}
}
-
- fn span_bcb_dominates(&self, dom_covspan: &CoverageSpan, covspan: &CoverageSpan) -> bool {
- self.basic_coverage_blocks.dominates(dom_covspan.bcb, covspan.bcb)
- }
-}
-
-/// If the MIR `Statement` has a span contributive to computing coverage spans,
-/// return it; otherwise return `None`.
-pub(super) fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
- match statement.kind {
- // These statements have spans that are often outside the scope of the executed source code
- // for their parent `BasicBlock`.
- StatementKind::StorageLive(_)
- | StatementKind::StorageDead(_)
- // Coverage should not be encountered, but don't inject coverage coverage
- | StatementKind::Coverage(_)
- // Ignore `ConstEvalCounter`s
- | StatementKind::ConstEvalCounter
- // Ignore `Nop`s
- | StatementKind::Nop => None,
-
- // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead`
- // statements be more consistent?
- //
- // FakeReadCause::ForGuardBinding, in this example:
- // match somenum {
- // x if x < 1 => { ... }
- // }...
- // The BasicBlock within the match arm code included one of these statements, but the span
- // for it covered the `1` in this source. The actual statements have nothing to do with that
- // source span:
- // FakeRead(ForGuardBinding, _4);
- // where `_4` is:
- // _4 = &_1; (at the span for the first `x`)
- // and `_1` is the `Place` for `somenum`.
- //
- // If and when the Issue is resolved, remove this special case match pattern:
- StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None,
-
- // Retain spans from all other statements
- StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
- | StatementKind::Intrinsic(..)
- | StatementKind::Assign(_)
- | StatementKind::SetDiscriminant { .. }
- | StatementKind::Deinit(..)
- | StatementKind::Retag(_, _)
- | StatementKind::PlaceMention(..)
- | StatementKind::AscribeUserType(_, _) => {
- Some(statement.source_info.span)
- }
- }
-}
-
-/// If the MIR `Terminator` has a span contributive to computing coverage spans,
-/// return it; otherwise return `None`.
-pub(super) fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
- match terminator.kind {
- // These terminators have spans that don't positively contribute to computing a reasonable
- // span of actually executed source code. (For example, SwitchInt terminators extracted from
- // an `if condition { block }` has a span that includes the executed block, if true,
- // but for coverage, the code region executed, up to *and* through the SwitchInt,
- // actually stops before the if's block.)
- TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
- | TerminatorKind::Assert { .. }
- | TerminatorKind::Drop { .. }
- | TerminatorKind::SwitchInt { .. }
- // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
- | TerminatorKind::FalseEdge { .. }
- | TerminatorKind::Goto { .. } => None,
-
- // Call `func` operand can have a more specific span when part of a chain of calls
- | TerminatorKind::Call { ref func, .. } => {
- let mut span = terminator.source_info.span;
- if let mir::Operand::Constant(box constant) = func {
- if constant.span.lo() > span.lo() {
- span = span.with_lo(constant.span.lo());
- }
- }
- Some(span)
- }
-
- // Retain spans from all other terminators
- TerminatorKind::UnwindResume
- | TerminatorKind::UnwindTerminate(_)
- | TerminatorKind::Return
- | TerminatorKind::Yield { .. }
- | TerminatorKind::GeneratorDrop
- | TerminatorKind::FalseUnwind { .. }
- | TerminatorKind::InlineAsm { .. } => {
- Some(terminator.source_info.span)
- }
- }
-}
-
-/// Returns an extrapolated span (pre-expansion[^1]) corresponding to a range
-/// within the function's body source. This span is guaranteed to be contained
-/// within, or equal to, the `body_span`. If the extrapolated span is not
-/// contained within the `body_span`, the `body_span` is returned.
-///
-/// [^1]Expansions result from Rust syntax including macros, syntactic sugar,
-/// etc.).
-#[inline]
-pub(super) fn function_source_span(span: Span, body_span: Span) -> Span {
- let original_span = original_sp(span, body_span).with_ctxt(body_span.ctxt());
- if body_span.contains(original_span) { original_span } else { body_span }
}
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
new file mode 100644
index 000000000..6189e5379
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -0,0 +1,193 @@
+use rustc_data_structures::captures::Captures;
+use rustc_middle::mir::{
+ self, AggregateKind, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
+ TerminatorKind,
+};
+use rustc_span::Span;
+
+use crate::coverage::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use crate::coverage::spans::CoverageSpan;
+
+pub(super) fn mir_to_initial_sorted_coverage_spans(
+ mir_body: &mir::Body<'_>,
+ fn_sig_span: Span,
+ body_span: Span,
+ basic_coverage_blocks: &CoverageGraph,
+) -> Vec<CoverageSpan> {
+ let mut initial_spans = Vec::with_capacity(mir_body.basic_blocks.len() * 2);
+ for (bcb, bcb_data) in basic_coverage_blocks.iter_enumerated() {
+ initial_spans.extend(bcb_to_initial_coverage_spans(mir_body, body_span, bcb, bcb_data));
+ }
+
+ if initial_spans.is_empty() {
+ // This can happen if, for example, the function is unreachable (contains only a
+ // `BasicBlock`(s) with an `Unreachable` terminator).
+ return initial_spans;
+ }
+
+ initial_spans.push(CoverageSpan::for_fn_sig(fn_sig_span));
+
+ initial_spans.sort_by(|a, b| {
+ // First sort by span start.
+ Ord::cmp(&a.span.lo(), &b.span.lo())
+ // If span starts are the same, sort by span end in reverse order.
+ // This ensures that if spans A and B are adjacent in the list,
+ // and they overlap but are not equal, then either:
+ // - Span A extends further left, or
+ // - Both have the same start and span A extends further right
+ .then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
+ // If both spans are equal, sort the BCBs in dominator order,
+ // so that dominating BCBs come before other BCBs they dominate.
+ .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb))
+ // If two spans are otherwise identical, put closure spans first,
+ // as this seems to be what the refinement step expects.
+ .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
+ });
+
+ initial_spans
+}
+
+// Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
+// the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
+// for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
+// merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
+// `Statement`s and/or `Terminator`s.)
+fn bcb_to_initial_coverage_spans<'a, 'tcx>(
+ mir_body: &'a mir::Body<'tcx>,
+ body_span: Span,
+ bcb: BasicCoverageBlock,
+ bcb_data: &'a BasicCoverageBlockData,
+) -> impl Iterator<Item = CoverageSpan> + Captures<'a> + Captures<'tcx> {
+ bcb_data.basic_blocks.iter().flat_map(move |&bb| {
+ let data = &mir_body[bb];
+
+ let statement_spans = data.statements.iter().filter_map(move |statement| {
+ let expn_span = filtered_statement_span(statement)?;
+ let span = function_source_span(expn_span, body_span);
+
+ Some(CoverageSpan::new(span, expn_span, bcb, is_closure(statement)))
+ });
+
+ let terminator_span = Some(data.terminator()).into_iter().filter_map(move |terminator| {
+ let expn_span = filtered_terminator_span(terminator)?;
+ let span = function_source_span(expn_span, body_span);
+
+ Some(CoverageSpan::new(span, expn_span, bcb, false))
+ });
+
+ statement_spans.chain(terminator_span)
+ })
+}
+
+fn is_closure(statement: &Statement<'_>) -> bool {
+ match statement.kind {
+ StatementKind::Assign(box (_, Rvalue::Aggregate(box ref agg_kind, _))) => match agg_kind {
+ AggregateKind::Closure(_, _) | AggregateKind::Coroutine(_, _, _) => true,
+ _ => false,
+ },
+ _ => false,
+ }
+}
+
+/// If the MIR `Statement` has a span contributive to computing coverage spans,
+/// return it; otherwise return `None`.
+fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
+ match statement.kind {
+ // These statements have spans that are often outside the scope of the executed source code
+ // for their parent `BasicBlock`.
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ // Coverage should not be encountered, but don't inject coverage
+ | StatementKind::Coverage(_)
+ // Ignore `ConstEvalCounter`s
+ | StatementKind::ConstEvalCounter
+ // Ignore `Nop`s
+ | StatementKind::Nop => None,
+
+ // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead`
+ // statements be more consistent?
+ //
+ // FakeReadCause::ForGuardBinding, in this example:
+ // match somenum {
+ // x if x < 1 => { ... }
+ // }...
+ // The BasicBlock within the match arm code included one of these statements, but the span
+ // for it covered the `1` in this source. The actual statements have nothing to do with that
+ // source span:
+ // FakeRead(ForGuardBinding, _4);
+ // where `_4` is:
+ // _4 = &_1; (at the span for the first `x`)
+ // and `_1` is the `Place` for `somenum`.
+ //
+ // If and when the Issue is resolved, remove this special case match pattern:
+ StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None,
+
+ // Retain spans from all other statements
+ StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
+ | StatementKind::Intrinsic(..)
+ | StatementKind::Assign(_)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::Retag(_, _)
+ | StatementKind::PlaceMention(..)
+ | StatementKind::AscribeUserType(_, _) => {
+ Some(statement.source_info.span)
+ }
+ }
+}
+
+/// If the MIR `Terminator` has a span contributive to computing coverage spans,
+/// return it; otherwise return `None`.
+fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
+ match terminator.kind {
+ // These terminators have spans that don't positively contribute to computing a reasonable
+ // span of actually executed source code. (For example, a SwitchInt terminator extracted from
+ // an `if condition { block }` has a span that includes the executed block, if true,
+ // but for coverage, the code region executed, up to *and* through the SwitchInt,
+ // actually stops before the if's block.)
+ TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::SwitchInt { .. }
+ // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::Goto { .. } => None,
+
+ // Call `func` operand can have a more specific span when part of a chain of calls
+ | TerminatorKind::Call { ref func, .. } => {
+ let mut span = terminator.source_info.span;
+ if let mir::Operand::Constant(box constant) = func {
+ if constant.span.lo() > span.lo() {
+ span = span.with_lo(constant.span.lo());
+ }
+ }
+ Some(span)
+ }
+
+ // Retain spans from all other terminators
+ TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
+ | TerminatorKind::Return
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::CoroutineDrop
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. } => {
+ Some(terminator.source_info.span)
+ }
+ }
+}
+
+/// Returns an extrapolated span (pre-expansion[^1]) corresponding to a range
+/// within the function's body source. This span is guaranteed to be contained
+/// within, or equal to, the `body_span`. If the extrapolated span is not
+/// contained within the `body_span`, the `body_span` is returned.
+///
+/// [^1]: Expansions result from Rust syntax (including macros, syntactic sugar,
+/// etc.).
+#[inline]
+fn function_source_span(span: Span, body_span: Span) -> Span {
+ use rustc_span::source_map::original_sp;
+
+ let original_span = original_sp(span, body_span).with_ctxt(body_span.ctxt());
+ if body_span.contains(original_span) { original_span } else { body_span }
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 4a066ed3a..702fe5f56 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -25,8 +25,7 @@
//! to: `rustc_span::create_default_session_globals_then(|| { test_here(); })`.
use super::counters;
-use super::graph;
-use super::spans;
+use super::graph::{self, BasicCoverageBlock};
use coverage_test_macros::let_bcb;
@@ -242,7 +241,7 @@ fn print_coverage_graphviz(
" {:?} [label=\"{:?}: {}\"];\n{}",
bcb,
bcb,
- bcb_data.terminator(mir_body).kind.name(),
+ mir_body[bcb_data.last_bb()].terminator().kind.name(),
basic_coverage_blocks
.successors(bcb)
.map(|successor| { format!(" {:?} -> {:?};", bcb, successor) })
@@ -629,7 +628,7 @@ fn test_traverse_coverage_with_loops() {
let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
let mut traversed_in_order = Vec::new();
let mut traversal = graph::TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks);
- while let Some(bcb) = traversal.next(&basic_coverage_blocks) {
+ while let Some(bcb) = traversal.next() {
traversed_in_order.push(bcb);
}
@@ -644,41 +643,18 @@ fn test_traverse_coverage_with_loops() {
);
}
-fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span {
- let mut some_span: Option<Span> = None;
- for (_, data) in mir_body.basic_blocks.iter_enumerated() {
- let term_span = data.terminator().source_info.span;
- if let Some(span) = some_span.as_mut() {
- *span = span.to(term_span);
- } else {
- some_span = Some(term_span)
- }
- }
- some_span.expect("body must have at least one BasicBlock")
-}
-
#[test]
fn test_make_bcb_counters() {
rustc_span::create_default_session_globals_then(|| {
let mir_body = goto_switchint();
- let body_span = synthesize_body_span_from_terminators(&mir_body);
- let mut basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
- let mut coverage_spans = Vec::new();
- for (bcb, data) in basic_coverage_blocks.iter_enumerated() {
- if let Some(span) = spans::filtered_terminator_span(data.terminator(&mir_body)) {
- coverage_spans.push(spans::CoverageSpan::for_terminator(
- spans::function_source_span(span, body_span),
- span,
- bcb,
- data.last_bb(),
- ));
- }
- }
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ // Historically this test would use `spans` internals to set up fake
+ // coverage spans for BCBs 1 and 2. Now we skip that step and just tell
+ // BCB counter construction that those BCBs have spans.
+ let bcb_has_coverage_spans = |bcb: BasicCoverageBlock| (1..=2).contains(&bcb.as_usize());
let mut coverage_counters = counters::CoverageCounters::new(&basic_coverage_blocks);
- coverage_counters
- .make_bcb_counters(&mut basic_coverage_blocks, &coverage_spans)
- .expect("should be Ok");
- assert_eq!(coverage_counters.intermediate_expressions.len(), 0);
+ coverage_counters.make_bcb_counters(&basic_coverage_blocks, bcb_has_coverage_spans);
+ assert_eq!(coverage_counters.num_expressions(), 0);
let_bcb!(1);
assert_eq!(